input: string, lengths 2.65k – 237k
output: string, 1 class
# Copyright 2014 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import config from tempest.common.utils.windows.remote_client import WinRemoteClient from tempest.lis import manager from oslo_log import log as logging from tempest.scenario import utils as test_utils from tempest import test from tempest.lib import exceptions import os import random import time CONF = config.CONF LOG = logging.getLogger(__name__) load_tests = test_utils.load_tests_input_scenario_utils class Network(manager.LisBase): """ This smoke test case follows this basic set of operations: * Create a keypair for use in launching an instance * Create a security group to control network access in instance * Add simple permissive rules to the security group * Launch an instance * Pause/unpause the instance * Suspend/resume the instance * Terminate the instance """ def setUp(self): super(Network, self).setUp() # Setup image and flavor the test instance # Support both configured and injected values if not hasattr(self, 'image_ref'): self.image_ref = CONF.compute.image_ref if not hasattr(self, 'flavor_ref'): self.flavor_ref = CONF.compute.flavor_ref self.image_utils = test_utils.ImageUtils(self.manager) if not self.image_utils.is_flavor_enough(self.flavor_ref, self.image_ref): raise self.skipException( '{image} does not fit in {flavor}'.format( image=self.image_ref, flavor=self.flavor_ref ) ) self.host_name = "" self.instance_name = "" self.run_ssh = CONF.validation.run_validation and \ self.image_utils.is_sshable_image(self.image_ref) self.ssh_user = self.image_utils.ssh_user(self.image_ref) self.image_ssh_user = CONF.validation.image_ssh_user self.host_username = CONF.host_credentials.host_user_name self.host_password = CONF.host_credentials.host_password if CONF.host_credentials.host_net_interface is not None: self.host_net_interface = '\'' +\ CONF.host_credentials.host_net_interface\ + '\'' if CONF.host_credentials.host_external_sw is not None: self.host_external_sw = '\'' +\ CONF.host_credentials.host_external_sw\ + '\'' self.scriptfolder = CONF.host_credentials.host_setupscripts_folder self.lis_private_network = CONF.lis.private_network LOG.debug('Starting test for i:{image}, f:{flavor}. 
' 'Run ssh: {ssh}, user: {ssh_user}'.format( image=self.image_ref, flavor=self.flavor_ref, ssh=self.run_ssh, ssh_user=self.ssh_user)) def _initiate_wsman(self, host_name): try: self.wsmancmd = WinRemoteClient( host_name, self.host_username, self.host_password) except Exception as exc: LOG.exception(exc) raise exc def verify_ssh(self): if self.run_ssh: # Obtain a floating IP floating_network_id = CONF.network.public_network_id self.floating_ip = self.floating_ips_client.create_floatingip( floating_network_id=floating_network_id) self.addCleanup(self.delete_wrapper, self.floating_ips_client.delete_floatingip, self.floating_ip['floatingip'][ 'floating_ip_address']) # Attach a floating IP self.compute_floating_ips_client.associate_floating_ip_to_server( self.floating_ip['floatingip']['floating_ip_address'], self.instance['id']) # Check ssh try: self.get_remote_client( ip_address=self.floating_ip[ 'floatingip']['floating_ip_address'], username=self.image_utils.ssh_user(self.image_ref), private_key=self.keypair['private_key']) except Exception: LOG.exception('ssh to server failed') self._log_console_output() raise def verify_external_ping(self, destination_ip): if self.run_ssh: # Obtain a floating IP floating_network_id = CONF.network.public_network_id self.floating_ip = self.floating_ips_client.create_floatingip( floating_network_id=floating_network_id) self.addCleanup(self.delete_wrapper, self.floating_ips_client.delete_floatingip, self.floating_ip['floatingip'][ 'floating_ip_address']) # Attach a floating IP self.compute_floating_ips_client.associate_floating_ip_to_server( self.floating_ip['floatingip']['floating_ip_address'], self.instance['id']) # Check lis presence try: linux_client = self.get_remote_client( ip_address=self.floating_ip[ 'floatingip']['floating_ip_address'], username=self.image_utils.ssh_user(self.image_ref), private_key=self.keypair['private_key']) output = linux_client.verify_ping(destination_ip) LOG.info('Ping results ${0}'.format(output)) self.assertNotEqual(0, output) except Exception: LOG.exception('ssh to server failed') self._log_console_output() raise @staticmethod def _remove_vswitch(host_client, sw_name=None): """Cleanup for vSwitch disks""" if sw_name is None: raise Exception('Please specify the switch to be removed') host_client.run_powershell_cmd( 'Remove-VMSwitch -Name {sw_name} -Force ' '-ErrorAction SilentlyContinue'.format(sw_name=sw_name)) @staticmethod def _gen_random_mac(): """ Generate a MAC address in HyperV reserved pool. :return: MAC address, e.g. 00:15:5d:11:11:11 :rtype: String """ new_mac = [0x00, 0x15, 0x5d, random.randint(0x00, 0xff), random.randint(0x00, 0xff), random.randint(0x00, 0xff)] return ':'.join(format(x, '02x') for x in new_mac) def _get_floating_ip(self): """ Request create a floating IP. :return: floating IP """ floating_network_id = CONF.network.public_network_id floating_ip = self.floating_ips_client.create_floatingip( floating_network_id=floating_network_id) self.addCleanup(self.delete_wrapper, self.floating_ips_client.delete_floatingip, floating_ip['floatingip']['floating_ip_address']) return floating_ip['floatingip']['floating_ip_address'] def _create_vm(self, key_pair=None, security_groups=None, av_zone=None): """ Create VM/Instance and return dict info. 
:param key_pair: :param security_groups: :param av_zone: availability_zone to force instance spawning on a host :return: created server dict :rtype: Dict """ if not key_pair: key_pair = self.create_keypair() if not security_groups: security_group = self._create_security_group() security_groups = [{'name': security_group['name']}] kw_args = dict() if av_zone is not None: kw_args['availability_zone'] = av_zone instance = self.create_server(flavor=self.flavor_ref, image_id=self.image_ref, key_name=key_pair['name'], security_groups=security_groups, wait_until='ACTIVE', **kw_args) # Obtain a floating IP floating_ip = self._get_floating_ip() # Attach a floating IP self.compute_floating_ips_client.associate_floating_ip_to_server( floating_ip, instance['id']) instance['floating_ip'] = floating_ip return instance def _add_nic_to_vm(self, instance, switch_name, host_client, static_mac=True, is_legacy=False, vlan=None): """ Add a new network adapter to the VM with specific parameters. :param instance: :param switch_name: :param host_client: :param static_mac: Bool - generate static random mac; True by default :param is_legacy: Bool - create a legacy nic when True :param vlan: specify the vlan tag :return: MAC address or None if it is dynamic setup :rtype: Dict e.g. ps_args['VMName'], ps_args['VSwitchName'], ps_args['NICName'], ps_args['MAC'], ps_args['IsLegacy'], ps_args['VLAN'] """ naming_suffix = str(time.time()) self.stop_vm(instance['id']) ps_args = dict() ps_args['VMName'] = instance["OS-EXT-SRV-ATTR:instance_name"] ps_args['VSwitchName'] = switch_name ps_args['NICName'] = 'nic' + naming_suffix if static_mac is True: ps_args['MAC'] = self._gen_random_mac() if is_legacy is True: ps_args['IsLegacy'] = is_legacy if vlan is not None: ps_args['VLAN'] = vlan add_nic = '{}{}'.format(self.script_folder, 'setupscripts\\add_nic_to_VM.ps1') host_client.run_powershell_cmd(add_nic, **ps_args) self.start_vm(instance['id']) return ps_args def _set_vm_ip(self, instance, key_pair, mac, ip=None, net_mask=None): """ Set VM/Instance IP using remote script SetStaticIp.sh when the 'ip' and 'net_mask' are provided, otherwise grab using dhcp. :param instance: :param key_pair: :param mac: :param ip: :param net_mask: :return: linux_client, new_nic_name :rtype: Tuple """ instance_ip = instance['floating_ip'] linux_client = self.get_remote_client( ip_address=instance_ip, username=self.image_ssh_user, private_key=key_pair['private_key'] ) nic_name = linux_client.get_nic_name_by_mac(mac) if ip and net_mask: script_name = 'SetStaticIp.sh' script_path = '/scripts/' + script_name destination = '/tmp/' my_path = os.path.abspath( os.path.normpath(os.path.dirname(__file__))) full_script_path = my_path + script_path cmd_params = [ip, net_mask, nic_name] linux_client.execute_script(script_name, cmd_params, full_script_path, destination) else: # assuming IP can be assigned by DHCP linux_client.exec_command('sudo dhclient {}'.format(nic_name)) return linux_client, nic_name def _create_vswitch(self, host_name, internal_sw=False, private_sw=False, external_sw=False, vlan=None): """ Create a new specific vSwitch on the HyperV. 
:param host_name: :param internal_sw: :param private_sw: :param external_sw: :param vlan: used to set external and internal networks vlan :return: host_client, switch_names dict or None if no switch type is specified :rtype: tuple """ host_client = WinRemoteClient(host_name, self.host_username, self.host_password) naming_sf = str(time.time()) ps_args = dict() if vlan is not None: ps_args['VLAN'] = vlan if internal_sw is True: ps_args['internalSwitch'] = 'tempest_internal' + naming_sf if private_sw is True: ps_args['privateSwitch'] = 'tempest_private' + naming_sf if external_sw is True: ps_args['externalSwitch'] = 'tempest_external' + naming_sf ps_args['netInterface'] = self.host_net_interface if ps_args: add_vswitch = '{}{}'.format(self.script_folder, 'setupscripts\\create_vswitch.ps1') host_client.run_powershell_cmd(add_vswitch, **ps_args) else: raise Exception('No valid arguments found. Please specify the ' 'switch type to be created') for key in ps_args: if 'Switch' in key and ps_args[key]: # adding cleanup last to avoid interference with other methods self._cleanups.insert(0, (self._remove_vswitch, (host_client,), {'sw_name': ps_args[key]})) return host_client, ps_args def _config_hyperv_nic(self, host_client, sw_name, ip, net_prefix): """ Config static IP on the Hyper-V virtual network interface created as a result of the virtual switch. :param host_client: :param sw_name: :param ip: :param net_prefix: :return: None """ config_hyperv_sw_int_ip = '{}{}'.format( self.script_folder, 'setupscripts\\config_host_nic.ps1') host_client.run_powershell_cmd( config_hyperv_sw_int_ip, Name='\'vEthernet (' + sw_name + ')\'', IP=ip, Prefix=net_prefix) @staticmethod def _config_hyperv_vm_vlan_tagging(host_client, instance, nic_name, vlan_list, base_vlan): """ Configure Hyper-V VM Network adapter Vlan. :param host_client: :param instance: :param nic_name: :param vlan_list: :param base_vlan: :return: None """ host_client.run_powershell_cmd( 'Set-VMNetworkAdapterVlan -VMName {vm_name} ' '-VMNetworkAdapterName {nic_name} ' '-Trunk -AllowedVlanIdList {vlan_list} -NativeVlanId {base_vlan}'. format(vm_name=instance["OS-EXT-SRV-ATTR:instance_name"], nic_name=nic_name, vlan_list=vlan_list, base_vlan=base_vlan) ) def external_network_setup(self, vlan=None, create_sw=False): """ Internal network setup with 2 dhcp IP instances and vSwitch creation attaching the tempest.conf Hyper-V interface (assuming this has an IP) @CONF.host_credentials.host_net_interface. 
:param: vlan: specify vlan tag for instances :return: Dict with: external_setup['instances'] = [inst1, inst2] external_setup['linux_clients'] = [linux_client1, linux_client2] external_setup['new_nics'] = [inst1_new_nic_name, inst2_new_nic_name] external_setup['hyperv_nics'] = [inst1_nic_args['NICName'], inst2_nic_args['NICName']] external_setup['new_macs'] = [inst1_nic_args['MAC'], inst2_nic_args['MAC']] external_setup['float_ips'] = [ip1, ip2] external_setup['key_pair'] = key_pair external_setup['host_client'] = host_client external_setup['host_name'] = host_name """ # use existing external network assigning nova floating ips key_pair = self.create_keypair() security_group = self._create_security_group() security_groups = [{'name': security_group['name']}] inst1 = self._create_vm(key_pair=key_pair, security_groups=security_groups) host_name = inst1["OS-EXT-SRV-ATTR:hypervisor_hostname"] host_zone = inst1['OS-EXT-AZ:availability_zone'] av_zone = host_zone + ':' + host_name inst2 = self._create_vm(key_pair=key_pair, security_groups=security_groups, av_zone=av_zone) if create_sw is True: host_client, sw_names = self._create_vswitch(host_name, external_sw=True, vlan=vlan) else: host_client = WinRemoteClient(host_name, self.host_username, self.host_password) sw_names = dict() sw_names['externalSwitch'] = self.host_external_sw # Obtain a floating IPs and assign manually to new NIC ip1 = self._get_floating_ip() ip2 = self._get_floating_ip() net_mask = '24' inst1_nic_args = self._add_nic_to_vm(inst1, sw_names['externalSwitch'], host_client, vlan=vlan) linux_client1, inst1_new_nic_name = self._set_vm_ip( inst1, key_pair, inst1_nic_args['MAC'], ip1, net_mask) inst2_nic_args = self._add_nic_to_vm(inst2, sw_names['externalSwitch'], host_client, vlan=vlan) linux_client2, inst2_new_nic_name = self._set_vm_ip( inst2, key_pair, inst2_nic_args['MAC'], ip2, net_mask) external_setup = dict() external_setup['instances'] = [inst1, inst2] external_setup['linux_clients'] = [linux_client1, linux_client2] external_setup['new_nics'] = [inst1_new_nic_name, inst2_new_nic_name] external_setup['hyperv_nics'] = [inst1_nic_args['NICName'], inst2_nic_args['NICName']] external_setup['new_macs'] = [inst1_nic_args['MAC'], inst2_nic_args['MAC']] external_setup['float_ips'] = [ip1, ip2] external_setup['key_pair'] = key_pair external_setup['host_client']
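The tempest sample above generates NIC MAC addresses from the Hyper-V reserved pool via its `_gen_random_mac` helper: the 00:15:5d prefix is fixed and the last three octets are drawn at random. A minimal standalone sketch of the same idea (the function name here is only illustrative):

```python
import random

def gen_hyperv_mac():
    # Keep the Hyper-V reserved OUI 00:15:5d and randomize the remaining octets.
    octets = [0x00, 0x15, 0x5d,
              random.randint(0x00, 0xff),
              random.randint(0x00, 0xff),
              random.randint(0x00, 0xff)]
    return ':'.join(format(octet, '02x') for octet in octets)

print(gen_hyperv_mac())  # e.g. 00:15:5d:3a:7f:c1
```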
pkt, addr = netc.recvfrom(2048) idx = addr[-1] i = len(pkt) if i < 64: continue rqv = memoryview(pkt) rq = bytearray(rqv[:2]) if rq[0] in (1, 3): # dhcpv6 solicit process_dhcp6req(handler, rqv, addr, netc, cfg, nodeguess) except Exception as e: tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event, event=log.Events.stacktrace) def process_dhcp6req(handler, rqv, addr, net, cfg, nodeguess): ip = addr[0] req, disco = v6opts_to_dict(bytearray(rqv[4:])) req['txid'] = rqv[1:4] req['rqtype'] = bytearray(rqv[:1])[0] if not disco.get('uuid', None) or not disco.get('arch', None): return if disco['uuid'] == '03000200-0400-0500-0006-000700080009': # Ignore common malformed dhcpv6 request from firmware return mac = neighutil.get_hwaddr(ip.split('%', 1)[0]) if not mac: net.sendto(b'\x00', addr) tries = 5 while tries and not mac: eventlet.sleep(0.01) tries -= 1 mac = neighutil.get_hwaddr(ip.split('%', 1)[0]) info = {'hwaddr': mac, 'uuid': disco['uuid'], 'architecture': disco['arch'], 'services': ('pxe-client',)} if ignoredisco.get(mac, 0) + 90 < time.time(): ignoredisco[mac] = time.time() handler(info) consider_discover(info, req, net, cfg, None, nodeguess, addr) def process_dhcp4req(handler, nodeguess, cfg, net4, idx, recv, rqv): rq = bytearray(rqv) addrlen = rq[2] if addrlen > 16 or addrlen == 0: return rawnetaddr = rq[28:28+addrlen] netaddr = ':'.join(['{0:02x}'.format(x) for x in rawnetaddr]) optidx = 0 try: optidx = rq.index(b'\x63\x82\x53\x63') + 4 except ValueError: return txid = rq[4:8] # struct.unpack('!I', rq[4:8])[0] rqinfo, disco = opts_to_dict(rq, optidx) vivso = disco.get('vivso', None) if vivso: # info['modelnumber'] = info['attributes']['enclosure-machinetype-model'][0] info = {'hwaddr': netaddr, 'uuid': disco['uuid'], 'architecture': vivso.get('arch', ''), 'services': (vivso['service-type'],), 'netinfo': {'ifidx': idx, 'recvip': recv, 'txid': txid}, 'attributes': {'enclosure-machinetype-model': [vivso.get('machine', '')]}} if time.time() > ignoredisco.get(netaddr, 0) + 90: ignoredisco[netaddr] = time.time() handler(info) #consider_discover(info, rqinfo, net4, cfg, rqv) return # We will fill out service to have something to byte into, # but the nature of the beast is that we do not have peers, # so that will not be present for a pxe snoop info = {'hwaddr': netaddr, 'uuid': disco['uuid'], 'architecture': disco['arch'], 'netinfo': {'ifidx': idx, 'recvip': recv, 'txid': txid}, 'services': ('pxe-client',)} if (disco['uuid'] and time.time() > ignoredisco.get(netaddr, 0) + 90): ignoredisco[netaddr] = time.time() handler(info) consider_discover(info, rqinfo, net4, cfg, rqv, nodeguess) def clear_nodes(nodes): for nodename in nodes: for ent in list(macmap): if macmap[ent] == nodename: del macmap[ent] for ent in list(uuidmap): if uuidmap[ent] == nodename: del uuidmap[ent] def new_nodes(added, deleting, renamed, configmanager): global attribwatcher configmanager.remove_watcher(attribwatcher) alldeleting = set(deleting) | set(renamed) clear_nodes(alldeleting) attribwatcher = configmanager.watch_attributes(configmanager.list_nodes(), ('id.uuid', 'net.*hwaddr'), remap_nodes) def remap_nodes(nodeattribs, configmanager): global macmap global uuidmap updates = configmanager.get_node_attributes(nodeattribs, ('id.uuid', 'net.*hwaddr')) clear_nodes(nodeattribs) for node in updates: for attrib in updates[node]: if attrib == 'id.uuid': uuidmap[updates[node][attrib]['value'].lower()] = node elif 'hwaddr' in attrib: macmap[updates[node][attrib]['value'].lower()] = node def get_deployment_profile(node, 
cfg, cfd=None): if not cfd: cfd = cfg.get_node_attributes(node, ('deployment.*', 'collective.managercandidates')) profile = cfd.get(node, {}).get('deployment.pendingprofile', {}).get('value', None) if not profile: return None candmgrs = cfd.get(node, {}).get('collective.managercandidates', {}).get('value', None) if candmgrs: candmgrs = noderange.NodeRange(candmgrs, cfg).nodes if collective.get_myname() not in candmgrs: return None return profile staticassigns = {} myipbypeer = {} def check_reply(node, info, packet, sock, cfg, reqview, addr): httpboot = info['architecture'] == 'uefi-httpboot' cfd = cfg.get_node_attributes(node, ('deployment.*', 'collective.managercandidates')) profile = get_deployment_profile(node, cfg, cfd) if not profile: if time.time() > ignoremacs.get(info['hwaddr'], 0) + 90: ignoremacs[info['hwaddr']] = time.time() log.log({'info': 'Ignoring boot attempt by {0} no deployment profile specified (uuid {1}, hwaddr {2})'.format( node, info['uuid'], info['hwaddr'] )}) return if addr: if not httpboot: log.log({'info': 'IPv6 PXE boot attempt by {0}, but IPv6 PXE is not supported, try IPv6 HTTP boot or IPv4 boot'.format(node)}) return return reply_dhcp6(node, addr, cfg, packet, cfd, profile, sock) else: return reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile) def reply_dhcp6(node, addr, cfg, packet, cfd, profile, sock): myaddrs = netutil.get_my_addresses(addr[-1], socket.AF_INET6) if not myaddrs: log.log({'info': 'Unable to provide IPv6 boot services to {0}, no viable IPv6 configuration on interface index "{1}" to respond through.'.format(node, addr[-1])}) return niccfg = netutil.get_nic_config(cfg, node, ifidx=addr[-1]) ipv6addr = niccfg.get('ipv6_address', None) ipv6prefix = niccfg.get('ipv6_prefix', None) ipv6method = niccfg.get('ipv6_method', 'static') ipv6srvaddr = niccfg.get('deploy_server_v6', None) if not ipv6srvaddr: log.log({'info': 'Unable to determine an appropriate ipv6 server ip for {}'.format(node)}) return insecuremode = cfd.get(node, {}).get('deployment.useinsecureprotocols', {}).get('value', 'never') if not insecuremode: insecuremode = 'never' proto = 'https' if insecuremode == 'never' else 'http' bootfile = '{0}://[{1}]/confluent-public/os/{2}/boot.img'.format( proto, ipv6srvaddr, profile ) if not isinstance(bootfile, bytes): bootfile = bootfile.encode('utf8') ipass = [] if ipv6method not in ('dhcp', 'firmwaredhcp') and ipv6addr: if not ipv6prefix: log.log({'info': 'Unable to determine prefix to serve to address {} for node {}'.format(ipv6addr, node)}) return ipass = bytearray(40) ipass[:4] = packet[3][:4] # pass iaid back ipass[4:16] = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x18' ipass[16:32] = socket.inet_pton(socket.AF_INET6, ipv6addr) ipass[32:40] = b'\<PASSWORD>\<PASSWORD>' #1 msgtype #3 txid #22 - server ident #len(packet[1]) + 4 - client ident #len(ipass) + 4 or 0 #len(url) + 4 replylen = 50 + len(bootfile) + len(packet[1]) + 4 if len(ipass): replylen += len(ipass) reply = bytearray(replylen) reply[0] = 2 if packet['rqtype'] == 1 else 7 reply[1:4] = packet['txid'] offset = 4 struct.pack_into('!HH', reply, offset, 1, len(packet[1])) offset += 4 reply[offset:offset+len(packet[1])] = packet[1] offset += len(packet[1]) struct.pack_into('!HHH', reply, offset, 2, 18, 4) offset += 6 reply[offset:offset+16] = get_my_duid() offset += 16 if ipass: struct.pack_into('!HH', reply, offset, 3, len(ipass)) offset += 4 reply[offset:offset + len(ipass)] = ipass offset += len(ipass) struct.pack_into('!HH', reply, offset, 59, len(bootfile)) 
offset += 4 reply[offset:offset + len(bootfile)] = bootfile offset += len(bootfile) # Need the HTTPClient in the vendor class for reply struct.pack_into('!HHIH', reply, offset, 16, 16, 0, 10) offset += 10 reply[offset:offset + 10] = b'HTTPClient' sock.sendto(reply, addr) _myuuid = None def get_my_duid(): global _myuuid if not _myuuid: _myuuid = uuid.uuid4().bytes return _myuuid def reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile): replen = 275 # default is going to be 286 # while myipn is describing presumed destination, it's really # vague in the face of aliases, need to convert to ifidx and evaluate # aliases for best match to guess rqtype = packet[53][0] insecuremode = cfd.get(node, {}).get('deployment.useinsecureprotocols', {}).get('value', 'never') if not insecuremode: insecuremode = 'never' if insecuremode == 'never' and not httpboot: if rqtype == 1 and info['architecture']: log.log( {'info': 'Boot attempt by {0} detected in insecure mode, but ' 'insecure mode is disabled. Set the attribute ' '`deployment.useinsecureprotocols` to `firmware` or ' '`always` to enable support, or use UEFI HTTP boot ' 'with HTTPS.'.format(node)}) return reply = bytearray(512) repview = memoryview(reply) repview[:20] = iphdr repview[20:28] = udphdr orepview = repview repview = repview[28:] repview[0:1] = b'\x02' repview[1:10] = reqview[1:10] # duplicate txid, hwlen, and others repview[10:11] = b'\x80' # always set broadcast repview[28:44] = reqview[28:44] # copy chaddr field gateway = None netmask = None niccfg = netutil.get_nic_config(cfg, node, ifidx=info['netinfo']['ifidx']) nicerr = niccfg.get('error_msg', False) if nicerr: log.log({'error': nicerr}) if niccfg.get('ipv4_broken', False): # Received a request over a nic with no ipv4 configured, ignore it log.log({'error': 'Skipping boot reply to {0} due to no viable IPv4 configuration on deployment system'.format(node)}) return clipn = None if niccfg['ipv4_address'] and niccfg['ipv4_method'] != 'firmwaredhcp': clipn = socket.inet_aton(niccfg['ipv4_address']) repview[16:20] = clipn gateway = niccfg['ipv4_gateway'] netmask = niccfg['prefix'] if gateway: gateway = socket.inet_aton(gateway) if not netutil.ipn_on_same_subnet(socket.AF_INET, clipn, gateway, netmask): log.log( {'warning': 'Ignoring gateway {0} due to mismatch with address {1}/{2}'.format(niccfg['ipv4_gateway'], niccfg['ipv4_address'], netmask)}) gateway = None netmask = (2**32 - 1) ^ (2**(32 - netmask) - 1) netmask = struct.pack('!I', netmask) myipn = niccfg['deploy_server'] if not myipn: myipn = info['netinfo']['recvip'] if httpboot: proto = 'https' if insecuremode == 'never' else 'http' bootfile = '{0}://{1}/confluent-public/os/{2}/boot.img'.format( proto, myipn, profile ) if not isinstance(bootfile, bytes): bootfile = bootfile.encode('utf8') if len(bootfile) > 127: log.log( {'info': 'Boot offer cannot be made to {0} as the ' 'profile name "{1}" is {2} characters longer than is supported ' 'for this boot method.'.format( node, profile, len(bootfile) - 127)}) return repview[108:108 + len(bootfile)] = bootfile myipn = socket.inet_aton(myipn) orepview[12:16] = myipn repview[20:24] = myipn repview[236:240] = b'\x63\x82\x53\x63' repview[240:242] = b'\x35\x01' if rqtype == 1: # if discover, then offer repview[242:243] = b'\x02' elif rqtype == 3: # if request, then ack repview[242:243] = b'\x05' repview[243:245] = b'\x36\x04' # DHCP server identifier repview[245:249] = myipn repview[249:255] = b'\x33\x04\x00\x00\x00\xf0' # fixed short lease time repview[255:257] = b'\x61\x11' 
repview[257:274] = packet[97]
# Note that sending PXEClient kicks off the proxyDHCP procedure, ignoring
# boot filename and such in the DHCP packet
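The DHCPv4 snooping code above locates the option area by searching for the magic cookie 63 82 53 63 and then branches on the message type (option 53), answering discovers with offers and requests with acks. A simplified, self-contained sketch of that option walk, assuming nothing about the real opts_to_dict() helper:

```python
DHCP_MAGIC = b'\x63\x82\x53\x63'

def dhcp_message_type(packet: bytes):
    # Return the DHCP message type (option 53), or None if it cannot be found.
    try:
        idx = packet.index(DHCP_MAGIC) + 4   # options start right after the cookie
    except ValueError:
        return None
    while idx < len(packet):
        opt = packet[idx]
        if opt == 0:                                   # pad option
            idx += 1
            continue
        if opt == 255 or idx + 1 >= len(packet):       # end option / truncated packet
            break
        length = packet[idx + 1]
        if opt == 53 and length >= 1:
            return packet[idx + 2]                     # 1 = discover, 3 = request, ...
        idx += 2 + length
    return None
```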
<reponame>lento234/pyJHTDB<gh_stars>0 ######################################################################## # # Copyright 2014 Johns Hopkins University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Contact: <EMAIL> # Website: http://turbulence.pha.jhu.edu/ # ######################################################################## import os import sys import numpy as np import ctypes import inspect import h5py import pyJHTDB from pyJHTDB.dbinfo import interpolation_code class ThresholdInfo(ctypes.Structure): _fields_ = [('x', ctypes.c_int), ('y', ctypes.c_int), ('z', ctypes.c_int), ('value', ctypes.c_float)] class libJHTDB(object): def __init__(self, auth_token = pyJHTDB.auth_token): self.libname = 'libJHTDB' lib_location = os.path.dirname(inspect.getfile(pyJHTDB)) self.lib = np.ctypeslib.load_library( self.libname, os.path.abspath(os.path.join(lib_location, os.path.pardir))) self.authToken = ctypes.c_char_p(auth_token.encode('ascii')) self.connection_on = False self.hdf5_file_list = [] self.hdf5_file_desc = {} return None def initialize(self, exit_on_error = True): #initialize gSOAP self.lib.soapinit() if exit_on_error: #enable exit on error self.lib.turblibSetExitOnError(ctypes.c_int(1)) self.connection_on = True return None def finalize(self): #free gSOAP resources self.lib.soapdestroy() self.connection_on = False return None def add_token(self,token): self.authToken = ctypes.c_char_p(token.encode('ascii')) # def add_hdf5_file(self, filename): # if pyJHTDB.found_h5py and (not filename in self.hdf5_file_list): # self.hdf5_file_list.append(filename) # data = pyJHTDB.h5py.File(filename + '.h5', mode = 'r') # self.hdf5_file_desc[filename] = {} # for key in ['_contents', '_dataset', '_size', '_start']: # self.hdf5_file_desc[filename][key] = data[key][:] # data.close() # return self.lib.turblibAddLocalSource(ctypes.c_char_p((filename + '.h5').encode('ascii'))) # else: # return 0 def getData(self, time, point_coords, sinterp=0, tinterp=0, data_set='isotropic1024coarse', getFunction='getVelocity', make_modulo=False): if not self.connection_on: print('you didn\'t connect to the database') sys.exit() if not (point_coords.shape[-1] == 3): print ('wrong number of values for coordinates in getData') sys.exit() return None if not (point_coords.dtype == np.float32): print('point coordinates in getData must be floats. 
stopping.') sys.exit() return None if (type(sinterp) == str): sinterp = interpolation_code[sinterp] if (type(tinterp) == str): tinterp = interpolation_code[tinterp] npoints = point_coords.shape[0] for i in range(1, len(point_coords.shape) - 1): npoints *= point_coords.shape[i] if make_modulo: pcoords = np.zeros(point_coords.shape, np.float64) pcoords[:] = point_coords np.mod(pcoords, 2 * np.pi, point_coords) if not getFunction[0:3] == 'get': getFunction = 'get' + getFunction get_data = getattr(self.lib, getFunction) if getFunction in ['getVelocity', 'getForce', 'getMagneticField', 'getMagneticFieldDebug', 'getBunit', 'getVectorPotential', 'getPressureGradient', 'getVelocityLaplacian', 'getMagneticFieldLaplacian', 'getVectorPotentialLaplacian', 'getPressureGradient', 'getTemperatureGradient']: result_dim = 3 elif getFunction in ['getVelocityAndPressure', 'getVelocityAndTemperature']: result_dim = 4 elif getFunction in ['getPressureHessian', 'getTemperatureHessian']: result_dim = 6 elif getFunction in ['getVelocityGradient', 'getMagneticFieldGradient', 'getVectorPotentialGradient']: result_dim = 9 elif getFunction in ['getVelocityHessian', 'getMagneticFieldHessian', 'getVectorPotentialHessian']: result_dim = 18 elif getFunction in ['getPressure', 'getTemperature']: result_dim = 1 elif getFunction in ['getInvariant']: result_dim = 2 else: print(('wrong result type requested in getData\n' + 'maybe it\'s just missing from the list?')) sys.exit() return None newshape = list(point_coords.shape[0:len(point_coords.shape) - 1]) newshape.append(result_dim) result_array = np.empty(newshape, dtype=np.float32) get_data(self.authToken, ctypes.c_char_p(data_set.encode('ascii')), ctypes.c_float(time), ctypes.c_int(sinterp), ctypes.c_int(tinterp), ctypes.c_int(npoints), point_coords.ctypes.data_as(ctypes.POINTER(ctypes.POINTER(ctypes.c_float))), result_array.ctypes.data_as(ctypes.POINTER(ctypes.POINTER(ctypes.c_float)))) return result_array def getRawData( self, time=0, start=np.array([0, 0, 0], dtype=np.int), size=np.array([8, 8, 8], dtype=np.int), data_set='channel', getFunction='Velocity'): print(('This function is no longer supported. 
Please use getbigCutout instead.')) sys.exit() return None # if not self.connection_on: # print('you didn\'t connect to the database') # sys.exit() # if getFunction in ['Velocity', # 'MagneticField', # 'VectorPotential']: # result_dim = 3 # elif getFunction in ['Pressure', 'Temperature']: # result_dim = 1 # else: # print(('wrong result type requested in getRawData\n' # + 'maybe it\'s just missing from the list?')) # sys.exit() # return None # getFunction = 'getRaw' + getFunction # get_data = getattr(self.lib, getFunction) # result_array = np.empty(tuple(list(size[::-1]) + [result_dim]), dtype=np.float32) # get_data(self.authToken, # ctypes.c_char_p(data_set.encode('ascii')), # ctypes.c_int(time), # ctypes.c_int(start[0]), # ctypes.c_int(start[1]), # ctypes.c_int(start[2]), # ctypes.c_int(size[0]), # ctypes.c_int(size[1]), # ctypes.c_int(size[2]), # result_array.ctypes.data_as(ctypes.POINTER(ctypes.c_char))) # return result_array def getCutout( self, data_set='isotropic1024coarse', field='u', time_step=int(0), start=np.array([1, 1, 1], dtype=np.int), end=np.array([8, 8, 8], dtype=np.int), step=np.array([1, 1, 1], dtype=np.int), filter_width=1): if not self.connection_on: print('you didn\'t connect to the database') sys.exit() time_step=int(time_step) if field in ['u', 'a', 'b']: result_dim = 3 elif field in ['p', 'd', 't']: result_dim = 1 else: print(('wrong result type requested in getCutout\n' + 'maybe it\'s just missing from the list?')) sys.exit() return None tempa=np.arange(start[0], end[0]+1, step[0]) tempb=np.arange(start[1], end[1]+1, step[1]) tempc=np.arange(start[2], end[2]+1, step[2]) real_size=np.array([np.size(tempa), np.size(tempb), np.size(tempc)], dtype=np.int) getFunction = 'getCutout' get_data = getattr(self.lib, getFunction) result_array = np.empty(tuple(list(real_size[::-1]) + [result_dim]), dtype=np.float32) try: get_data(self.authToken, ctypes.c_char_p(data_set.encode('ascii')), ctypes.c_char_p(field.encode('ascii')), ctypes.c_int(time_step), ctypes.c_int(start[0]), ctypes.c_int(start[1]), ctypes.c_int(start[2]), ctypes.c_int(end[0]), ctypes.c_int(end[1]), ctypes.c_int(end[2]), ctypes.c_int(step[0]), ctypes.c_int(step[1]), ctypes.c_int(step[2]), ctypes.c_int(filter_width), result_array.ctypes.data_as(ctypes.POINTER(ctypes.c_char))) except Exception as es: print(es) raise return result_array def getbigCutout( self, data_set='isotropic1024coarse', fields='u', t_start=int(1), t_end=int(1), t_step=int(1), start=np.array([1, 1, 1], dtype=np.int), end=np.array([8, 8, 8], dtype=np.int), step=np.array([1, 1, 1], dtype=np.int), filter_width=1, filename='N/A'): #hdf5_output=True): if not self.connection_on: print('you didn\'t connect to the database') sys.exit() if (filename.lower()=='n/a' or filename.lower()=='na'): hdf5_output=False else: hdf5_output=True idx_t=np.arange(t_start, t_end+1, t_step) idx_x=np.arange(start[0], end[0]+1, step[0]) idx_y=np.arange(start[1], end[1]+1, step[1]) idx_z=np.arange(start[2], end[2]+1, step[2]) nnt=np.size(idx_t) nnx=np.size(idx_x) nny=np.size(idx_y) nnz=np.size(idx_z) npoints=nnx*nny*nnz tem=0 for field in fields: if field == 'u': tem = tem + 3 elif field == 'a': tem = tem + 3 elif field == 'b': tem = tem + 3 elif field == 'p': tem = tem + 1 elif field == 'd': tem = tem + 1 elif field == 't': tem = tem + 1 else: print(('wrong field type requested in getCutout\n' + 'maybe it\'s just missing from the list?')) sys.exit() return None if (npoints*nnt*tem>(1024**3)*4): #a full snapshot of 1024^3 with u and p print(('The file size would exceed our 
limit 16GB. Please reduce the file size.')) sys.exit() return None if (hdf5_output): nl = '\r\n' hdf5_file, xdmf_file, shape=self.hdf5_init(filename, data_set,t_start,t_end,t_step,start,end,step,filter_width,idx_x,idx_y,idx_z) for field in fields: if field == 'u': VarName="Velocity" dim = 3 elif field == 'a': VarName="VectorPotential" dim = 3 elif field == 'b': VarName="MagneticField" dim = 3 elif field == 'p': VarName="Pressure" dim = 1 elif field == 'd': VarName="Density" dim = 1 elif field == 't': VarName="Temperature" dim = 1 else: print(('wrong field type requested in getCutout\n' + 'maybe it\'s just missing from the list?')) sys.exit() return None split_no=int(np.ceil(npoints/(192000000/dim))) tmp=np.array_split(np.arange(npoints).reshape(nnx,nny,nnz), split_no) if (hdf5_output): print(f" <Grid Name=\"{VarName}\" GridType=\"Collection\" CollectionType=\"Temporal\">{nl}", file=xdmf_file) for time_step in np.arange(t_start, t_end+1, t_step): result=np.zeros((nnz,nny,nnx,dim),dtype='float32') for t in range(split_no): xyzs0 = np.unravel_index(tmp[t][0,0,0], (nnx,nny,nnz)) xyze0 = np.unravel_index(tmp[t][-1,-1,-1], (nnx,nny,nnz)) xyzs1 = (idx_x[xyzs0[0]], idx_y[xyzs0[1]], idx_z[xyzs0[2]]) xyze1 = (idx_x[xyze0[0]], idx_y[xyze0[1]], idx_z[xyze0[2]]) temp = self.getCutout( data_set=data_set, field=field, time_step=time_step, start=np.array(xyzs1, dtype = np.int), end=np.array(xyze1, dtype = np.int), step=np.array(step, dtype = np.int), filter_width=filter_width) result[xyzs0[2]:xyze0[2]+1, xyzs0[1]:xyze0[1]+1, xyzs0[0]:xyze0[0]+1,:] = temp if (hdf5_output): self.hdf5_writing(filename,result,data_set,VarName,dim,time_step,hdf5_file,xdmf_file,shape) if (hdf5_output): print(f" </Grid>{nl}", file=xdmf_file) if (hdf5_output): self.hdf5_end(hdf5_file,xdmf_file) return result def hdf5_init( self, filename, data_set, t_start, t_end, t_step, start, end, step, filter_width, idx_x,idx_y,idx_z): idx_x=idx_x-1 idx_y=idx_y-1 idx_z=idx_z-1 if data_set in ["channel","channel5200", "transition_bl"]: if data_set == "channel": ygrid = pyJHTDB.dbinfo.channel['ynodes'] dx=8.0*np.pi/2048 dz=3.0*np.pi/1536 x_offset=0 elif data_set == "channel5200": ygrid = pyJHTDB.dbinfo.channel5200['ynodes'] dx=8.0*np.pi/10240.0 dz=3.0*np.pi/7680.0 x_offset=0 elif data_set == "transition_bl": ygrid = pyJHTDB.dbinfo.transition_bl['ynodes'] dx=0.292210466240511 dz=0.117244748412311 x_offset=30.218496172581567 xcoor=idx_x*dx+x_offset ycoor=ygrid[idx_y] zcoor=idx_z*dz else: if data_set in ["isotropic1024coarse", "isotropic1024fine", "mhd1024", "mixing"]: dx=2.0*np.pi/1024.0 elif data_set in ["isotropic4096", "rotstrat4096"]: dx=2.0*np.pi/4096.0 elif data_set in ["isotropic8192"]: dx=2.0*np.pi/8192.0 xcoor=idx_x*dx ycoor=idx_y*dx zcoor=idx_z*dx #filename=data_set fh = h5py.File(filename+'.h5', 'x', driver='core', block_size=16, backing_store=True) fh.attrs["dataset"] = np.string_(data_set) #fh.attrs["timeStep"] = time_step fh.attrs["t_start"] = t_start fh.attrs["t_end"] = t_end fh.attrs["t_step"] = t_step fh.attrs["x_start"] = start[0] fh.attrs["y_start"] = start[1] fh.attrs["z_start"] = start[2] fh.attrs["x_end"] = end[0] fh.attrs["y_end"] = end[1] fh.attrs["z_end"] = end[2] fh.attrs["x_step"] = step[0] fh.attrs["y_step"] = step[1] fh.attrs["z_step"] = step[2] fh.attrs["filterWidth"] = filter_width shape = [0]*3 shape[0] = np.size(idx_z) shape[1] = np.size(idx_y) shape[2] = np.size(idx_x) dset = fh.create_dataset("xcoor", (shape[2],), maxshape=(shape[2],)) dset[...]=xcoor dset = fh.create_dataset("ycoor", (shape[1],), 
maxshape=(shape[1],)) dset[...]=ycoor dset = fh.create_dataset("zcoor", (shape[0],), maxshape=(shape[0],)) dset[...]=zcoor nl = '\r\n' tf=open(filename+".xmf", "w") print(f"<?xml version=\"1.0\" ?>{nl}", file=tf) print(f"<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\" []>{nl}", file=tf) print(f"<Xdmf Version=\"2.0\">{nl}", file=tf) print(f" <Domain>{nl}", file=tf) return fh, tf, shape def hdf5_writing( self, filename, result, data_set, VarName, dim, time_step, fh, tf, shape): H5_ds_name='{0}_{1:04d}'.format(VarName,time_step) dset = fh.create_dataset(H5_ds_name, (shape[0], shape[1], shape[2], dim), maxshape=(shape[0], shape[1], shape[2], dim)) dset[...]=result if (dim==3): Attribute_Type="Vector" elif (dim==1): Attribute_Type="Scalar" #filename=data_set nl = '\r\n' print(f" <Grid Name=\"Structured Grid\" GridType=\"Uniform\">{nl}", file=tf) print(f" <Time Value=\"{time_step}\" />{nl}", file=tf) print(f" <Topology TopologyType=\"3DRectMesh\" NumberOfElements=\"{shape[0]} {shape[1]} {shape[2]}\"/>{nl}", file=tf) print(f" <Geometry GeometryType=\"VXVYVZ\">{nl}", file=tf) print(f" <DataItem Name=\"Xcoor\" Dimensions=\"{shape[2]}\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">{nl}", file=tf) print(f" {filename}.h5:/xcoor{nl}", file=tf) print(f" </DataItem>{nl}", file=tf) print(f" <DataItem Name=\"Ycoor\" Dimensions=\"{shape[1]}\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">{nl}", file=tf) print(f" {filename}.h5:/ycoor{nl}", file=tf) print(f" </DataItem>{nl}", file=tf) print(f" <DataItem Name=\"Zcoor\" Dimensions=\"{shape[0]}\"
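A minimal usage sketch for the libJHTDB wrapper defined above, relying only on the methods shown in the sample (initialize, getData, finalize); it assumes the compiled JHTDB library, network access, the package's default auth token, and that the class is exported at package level as `pyJHTDB.libJHTDB`:

```python
import numpy as np
import pyJHTDB

lJHTDB = pyJHTDB.libJHTDB()
lJHTDB.initialize()

# Ten random points inside the [0, 2*pi)^3 box; getData requires float32 coordinates.
points = (2 * np.pi * np.random.random((10, 3))).astype(np.float32)
velocity = lJHTDB.getData(
    0.1, points,
    data_set='isotropic1024coarse',   # defaults sinterp=0, tinterp=0 (no interpolation)
    getFunction='getVelocity')
print(velocity.shape)                 # (10, 3)

lJHTDB.finalize()
```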
i], (self.B, 1))) denom = np.tile(denom, (1, 1+self.B)) gradient += delta/denom gradient -= Delta # gradient -= 1. # Prior return gradient[0] if len(gradient) == 1 else gradient def loglik_hess(self, gamma, xi): """ Compute the Hessian matrix evaluated at gamma, xi. """ if not self.suff_stats_cached: self.update_suff_stats() hessian = np.zeros((1 + self.B, 1 + self.B)) for u in range(self.N): for v in range(self.N): for i in range(self.num_node_events(u, v)): delta = np.insert(self.delta[(u, v)][:, i], 0, 1.) temp = np.outer(delta, delta) denom = (gamma + np.dot(xi, self.delta[(u, v)][:, i]))**2 hessian -= temp/denom return hessian def mle(self, method='grad-ascent', **kwargs): """ Computes the MLE with all parameters tied. """ def neg_loglik_obj(x): """ Computes the negative log-likelihood value. Args: x: array; x[0] := gamma and x[1:] := xi[:-1] """ gamma, xi = self.unpack_params(x) return -self.loglik(gamma, xi) def neg_loglik_obj_grad(x): """ Computes the negative log-likelihood value. Args: x: array; x[0] := gamma and x[1:] := xi[:-1] """ gamma, xi = self.unpack_params(x) return -self.loglik_grad(gamma, xi) if method == 'grad-ascent': bounds = zip([_EPS] + [_EPS] * self.B, [None] + [None] * self.B) # 1-_EPS gamma_init = rand.uniform() xi_init = rand.uniform(size=self.B) x_init = np.hstack((gamma_init, xi_init)) res = minimize(neg_loglik_obj, jac=neg_loglik_obj_grad, x0=x_init, method='L-BFGS-B', bounds=bounds, **kwargs) assert res.success, "MLE optimization failed ..." x = res.x if res.success else None elif method == 'coord-ascent': x, _, _ = coord_descent(obj_fun=neg_loglik_obj, num_params=1+n, **kwargs) else: print "MLE %s method not understood!" % method mle_params = self.unpack_params(x) self.set_mle_params(mle_params) return mle_params # ----------------------------------------------------------------------- # # Book-keeping # ----------------------------------------------------------------------- # def unpack_params(self, x): """ Args: x: array; x[0] := gamma and x[1:] := xi[:-1] """ assert_equal(len(x), 1+self.B) gamma = x[0] xi = x[1:] assert_ge(gamma, 0) assert all_pos(xi) return gamma, xi def set_mle_params(self, res): """ Given an array containing the unpacked parameter set their values accordingly. """ gamma, xi = res self.gamma[:] = gamma self.xi[:] = xi return # ----------------------------------------------------------------------- # # Variational inference # ----------------------------------------------------------------------- # def elbo_mc(self, params, num_mc_iters=200): """ Computes the evidence lower bound for all pairs of nodes, assuming tied parameters with the priors gamma ~ Gamma(priors[0][:]) xi[b] ~ Gamma(priors[b][:]) for b = 1, ..., B, and posteriors gamma ~ Gamma(pvec[0], qvec[0]) xi[b] ~ Gamma(pvec[b], qvec[b]) for b = 1, ..., B, by evalutaing the intergal in the expected log-likelihood using Monte Carlo. Args: pvec: array of length B+1 containing the posterior shape params for the base rate and each kernel; pvec: array of length B+1 containing the posterior scale params for the base rate and each kernel. NOTE: The implementation supports vectorized computation. Hence, pvec and qvec can be 2-D arrays of shape (*, B+1), and the returned ELBO value will be a 1-D array of length *. 
""" if not self.suff_stats_cached: self.update_suff_stats() # Unroll params = np.array(params) if len(params.shape) == 1: # 0-D or 1-D array params = params[np.newaxis] assert_equal(params.dtype, float) assert_equal(params.shape[1], 2 * (1+self.B)) assert all_pos(params), params pvec = params[:, :(1+self.B)] # Shape params qvec = params[:, (1+self.B):] # Scale params # Monte Carlo estimate of log-likelihood logliks = np.zeros((params.shape[0], num_mc_iters)) for k in range(num_mc_iters): # Monte Carlo iteration if (k+1) % 20 == 0: print "Computing Monte Carlo estimate: %d / %d ..." % \ (k+1, num_mc_iters) # xi = rand.lognormal(mean=pvec, sigma=qvec) # Including gamma xi = rand.gamma(shape=pvec, scale=1/qvec) # Including gamma temp = 0 for u in range(self.N): for v in range(self.N): Delta = np.insert(self.Delta[u, v], 0, self.T) # (1+B)-dim temp -= np.dot(xi, Delta) for i in range(self.num_node_events(u, v)): delta = np.insert(self.delta[(u, v)][:, i], 0, 1.) temp += np.log(np.dot(xi, delta)) logliks[:, k] = temp exloglik = np.mean(logliks, axis=1) # Monte Carlo average print "Estimated expected loglik = %s, std.dev = %s" % \ (exloglik, np.std(logliks, axis=1)) # KL-divergence terms kl_terms = kl_gamma(pvec, qvec, np.tile(self.priors[:, 0], (pvec.shape[0], 1)), np.tile(self.priors[:, 1], (pvec.shape[0], 1))) kl_sum = np.sum(kl_terms, axis=1) res = exloglik - kl_sum return res[0] if len(res) == 1 else res def elbo(self, params): """ Computes the evidence lower bound for all pairs of nodes, assuming tied parameters with the priors gamma ~ Gamma(priors[0][:]) xi[b] ~ Gamma(priors[b][:]) for b = 1, ..., B, and posteriors gamma ~ Gamma(pvec[0], qvec[0]) xi[b] ~ Gamma(pvec[b], qvec[b]) for b = 1, ..., B. Args: pvec: array of length B+1 containing the posterior shape params for the base rate and each kernel; pvec: array of length B+1 containing the posterior scale params for the base rate and each kernel. NOTE: The implementation supports vectorized computation. Hence, pvec and qvec can be 2-D arrays of shape (*, B+1), and the returned ELBO value will be a 1-D array of length *. """ if not self.suff_stats_cached: self.update_suff_stats() # Unroll params = np.array(params) if len(params.shape) == 1: # 0-D or 1-D array params = params[np.newaxis] assert_equal(params.dtype, float) assert_equal(params.shape[1], 2 * (1+self.B)) assert all_pos(params), params pvec = params[:, :(1+self.B)] # Shape params qvec = params[:, (1+self.B):] # Scale params # Expected log-likelihood exloglik = 0. for u in range(self.N): for v in range(self.N): Delta = np.insert(self.Delta[u, v], 0, self.T) # (1+B)-dim term = -np.dot(pvec/qvec, Delta) lterm = 0. for i in range(self.num_node_events(u, v)): delta = np.insert(self.delta[(u, v)][:, i], 0, 1.) temp = np.exp(digamma(pvec) - np.log(qvec)) lterm += np.log(np.dot(temp, delta)) exloglik += term + lterm # Expected log-likelihood # KL-divergence terms kl_terms = kl_gamma(pvec, qvec, np.tile(self.priors[:, 0], (pvec.shape[0], 1)), np.tile(self.priors[:, 1], (qvec.shape[0], 1))) kl_sum = np.sum(kl_terms, axis=1) res = exloglik - kl_sum return res[0] if len(res) == 1 else res def coord_ascent(self, monte_carlo=False, **kwargs): """ Performs coordinate ascent to maximize the evidence lower bound. Returns: x: array of length 2 * (1+B), converged parameter values. x_vals: array of shape (1+max_iter, 2 * (1+B)), stores previous params values after each full coordinate descent iteration. 
obj_vals: array of length (1+max_iter), stores previous objective values after each full coordinate descent iteration. """ if not self.suff_stats_cached: self.update_suff_stats() elbo = self.elbo_mc if monte_carlo else self.elbo return coord_ascent(obj_fun=elbo, num_params=self.num_params, **kwargs) # ----------------------------------------------------------------------- # # MCMC # ----------------------------------------------------------------------- # def metropolis(self, num_samples=1000, burnin=500): """ Metropolis-Hastings sampling to infer gamma and xi. """ def log_exponential_pdf(x, l): """ Log pdf for the Exp(l) distribution evaluated at x. """ return np.log(l) - l * x def llik_func(x): gamma, xi = self.unpack_params(x) return self.loglik(gamma, xi) res = np.zeros((num_samples+1, 1+self.B)) res[0] = rand.normal(loc=.1, scale=.02, size=(1+self.B)) # Initialize # res[0] = rand.exponential(size=(1+self.B)) # Initialize for i in range(1, num_samples+1): if i > 0 and i % 50 == 0: print "M-H sampled %d samples ..." % i x_old = res[i-1] x_new = rand.normal(loc=x_old, scale=.02) # Proposal # x_new = rand.exponential(scale=1./x_old) # Proposal # # Acceptance ratio # temp = llik_func(x_new) - llik_func(x_old) # temp += np.sum(log_exponential_pdf(x_old, x_new)) # temp -= np.sum(log_exponential_pdf(x_new, x_old)) # ratio = np.exp(min(0, temp)) ratio = np.exp(min(0, llik_func(x_new) - llik_func(x_old))) \ if np.all(x_new > 0) else 0 # print x_old, x_new, ratio res[i] = x_new if rand.uniform() < ratio else x_old return res[(burnin+1):] def slice_sample(self, num_samples=1000): """ Slice sampling to infer gamma and xi. """ def llik_func(x): gamma, xi = self.unpack_params(x) return self.loglik(gamma, xi) res = np.zeros((num_samples+1, 1+self.B)) res[0] = rand.uniform(size=(1+self.B)) # Initialize for i in range(1, num_samples+1): if i > 0 and i % 50 == 0: print "Slice-sampled %d samples ..." % i res[i] = multivariate_slice_sample( x_init=res[i-1], ll_func=llik_func, window_size=1, L_bound=_EPS) return res[1:] # ----------------------------------------------------------------------- # # Simulation # ----------------------------------------------------------------------- # def set_params(self, num_nodes=None, events=None, end_time=None, gamma=None, xi=None): """ Manually set all (or a subset of) the parameters for the Hawkes-IRM. Args: See self.__init__() description. """ if num_nodes is not None: self.N = num_nodes if events is not None: self.node_events = dict() self.num_events = 0 self.process_node_events(events) self.T = max(flatten(self.node_events.values())) \ if self.node_events else 0 self.update_suff_stats() if end_time is not None: assert_ge(end_time, self.T) self.T = end_time if gamma is not None: self.gamma = gamma if xi is not None: self.xi = xi return def simulate_single(self, c): """ Simulate a single 1-d self-exciting Hawkes process with intensity \lambda_{cc}(t). Args: c: Integer, node index (in 0, ..., self.N).
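The mle() method above maximizes the log-likelihood by handing a negative-log-likelihood objective to SciPy's L-BFGS-B routine with positivity bounds on gamma and xi. The same pattern, reduced to a toy stand-in objective so it runs on its own:

```python
import numpy as np
from scipy.optimize import minimize

def neg_loglik(x):
    # Toy stand-in objective; the real loglik() depends on the observed events.
    gamma, xi = x[0], x[1:]
    return gamma ** 2 + np.sum((xi - 1.0) ** 2)

eps = 1e-8
x0 = np.random.uniform(size=3)      # gamma plus two xi components
bounds = [(eps, None)] * 3          # keep all parameters strictly positive
res = minimize(neg_loglik, x0=x0, method='L-BFGS-B', bounds=bounds)
print(res.x, res.success)
```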
index in range(len(structure)): i = structure[index] rotation = 0 if (i[0] in (3,7,9,11,13)): rotation = 90 f.write('<Block type="%s" material="wood" x="%s" y="%s" rotation="%s" />\n' % (block_names[str(i[0])], str(i[1]), str(i[2]), str(rotation))) for index in range(len(others)): i = others[index] rotation = 0 if (i[0] in (3,7,9,11,13)): rotation = 90 f.write('<Block type="%s" material="wood" x="%s" y="%s" rotation="%s" />\n' % (additional_objects[str(i[0])], str(i[1]), str(i[2]), str(rotation))) for index in range(len(pigs)): i = pigs[index] f.write('<Pig type="BasicSmall" material="" x="%s" y="%s" rotation="0" />\n' % (str(i[0]),str(i[1]))) for index in range(len(tnts)): i = tnts[index] f.write('<TNT type="" x="%s" y="%s" rotation="0" />\n' % (str(i[0]),str(i[1]))) f.write('</GameObjects>\n') f.write('</Level>\n') f.close() structure_num = structure_num + 1 # randomly swap some blocks with other blocks that have the same height # (and do not overlap other blocks and fulfill support requirements) def swap_blocks(complete_locations, final_pig_positions, final_platforms): if (block_swapping == True): total_swaps = 0 for i in range(len(complete_locations)): for j in range(len(complete_locations[i])): test_blocks = [] test_complete_locations = deepcopy(complete_locations) test_complete_locations[i].pop(j); for key,value in blocks.items(): if probability_table_blocks[key] > 0.0: if key != str(complete_locations[i][j][0]): if blocks[str(complete_locations[i][j][0])][1] == value[1]: test_block_temp = deepcopy(complete_locations[i][j]) test_block_temp[0] = int(key) test_blocks.append(test_block_temp) shuffle(test_blocks) total_prob_amount = 0 for block in test_blocks: total_prob_amount = total_prob_amount + probability_table_blocks[str(block[0])] for block in test_blocks: if uniform(0.0,1.0) < (probability_table_blocks[str(block[0])]/total_prob_amount): temp_block = deepcopy(block) test_blocks.remove(block) test_blocks.insert(0,block) swapped = 0 for test_block in test_blocks: # check no overlap if (swapped == 0): overlap = False valid = True pigs_supported = True error_buffer = 0.01 for structure in test_complete_locations: for block in structure: if ( round((test_block[1] - (blocks[str(test_block[0])][0]/2.0)) + error_buffer,10) <= round((block[1] + blocks[str(block[0])][0]/2),10) and round((test_block[1] + (blocks[str(test_block[0])][0]/2.0)) - error_buffer,10) >= round((block[1] - blocks[str(block[0])][0]/2),10) and round((test_block[2] + (blocks[str(test_block[0])][1]/2.0)) - error_buffer,10) >= round((block[2] - blocks[str(block[0])][1]/2),10) and round((test_block[2] - (blocks[str(test_block[0])][1]/2.0)) + error_buffer,10) <= round((block[2] + blocks[str(block[0])][1]/2),10)): overlap = True for platforms in final_platforms: for platform in platforms: if ( round((test_block[1] - (blocks[str(test_block[0])][0]/2.0)) + error_buffer,10) <= round((platform[0] + platform_distance_buffer + platform_size[0]/2),10) and round((test_block[1] + (blocks[str(test_block[0])][0]/2.0)) - error_buffer,10) >= round((platform[0] - platform_distance_buffer - platform_size[0]/2),10) and round((test_block[2] + (blocks[str(test_block[0])][1]/2.0)) - error_buffer,10) >= round((platform[1] - platform_distance_buffer - platform_size[1]/2),10) and round((test_block[2] - (blocks[str(test_block[0])][1]/2.0)) + error_buffer,10) <= round((platform[1] + platform_distance_buffer + platform_size[1]/2),10)): overlap = True for pig in final_pig_positions: if ( round((test_block[1] - (blocks[str(test_block[0])][0]/2.0)) + 
error_buffer,10) <= round((pig[0] + pig_size[0]/2),10) and round((test_block[1] + (blocks[str(test_block[0])][0]/2.0)) - error_buffer,10) >= round((pig[0] - pig_size[0]/2),10) and round((test_block[2] + (blocks[str(test_block[0])][1]/2.0)) - error_buffer,10) >= round((pig[1] - pig_size[1]/2),10) and round((test_block[2] - (blocks[str(test_block[0])][1]/2.0)) + error_buffer,10) <= round((pig[1] + pig_size[1]/2),10)): overlap = True # check that all stability requirements are still met for all blocks/pigs in rows above and below (and for self) above_blocks = find_above_blocks (complete_locations[i][j], complete_locations) below_blocks = find_below_blocks (complete_locations[i][j], complete_locations) blocks_to_test = above_blocks+below_blocks blocks_to_test.append(test_block) test_complete_locations2 = deepcopy(complete_locations) test_complete_locations2[i][j] = test_block for test_blockx in blocks_to_test: center = test_blockx[1] edge1 = test_blockx[1] - (blocks[str(test_blockx[0])][0])/2 + check_buffer edge2 = test_blockx[1] + (blocks[str(test_blockx[0])][0])/2 - check_buffer center_supported = False edge1_supported = False edge2_supported = False for block in find_below_blocks(test_blockx, test_complete_locations2): if ((block[1] - (blocks[str(block[0])][0])/2) <= center and (block[1] + (blocks[str(block[0])][0])/2) >= center): center_supported = True if ((block[1] - (blocks[str(block[0])][0])/2) <= edge1 and (block[1] + (blocks[str(block[0])][0])/2) >= edge1): edge1_supported = True if ((block[1] - (blocks[str(block[0])][0])/2) <= edge2 and (block[1] + (blocks[str(block[0])][0])/2) >= edge2): edge2_supported = True push_down = 0.01 for platforms in final_platforms: for platform in platforms: if ( round(test_blockx[1],10) <= round((platform[0] + platform_size[0]/2),10) and round(test_blockx[1],10) >= round((platform[0] - platform_size[0]/2),10) and (test_blockx[2] > platform[1]) and round((test_blockx[2] - push_down - (blocks[str(test_blockx[0])][1]/2.0)),10) <= round((platform[1] + platform_size[1]/2),10)): center_supported = True if ( round((test_blockx[1] - (blocks[str(test_blockx[0])][0]/2.0)),10) <= round((platform[0] + platform_size[0]/2),10) and round((test_blockx[1] - (blocks[str(test_blockx[0])][0]/2.0)),10) >= round((platform[0] - platform_size[0]/2),10) and (test_blockx[2] > platform[1]) and round((test_blockx[2] - push_down - (blocks[str(test_blockx[0])][1]/2.0)),10) <= round((platform[1] + platform_size[1]/2),10)): edge1_supported = True if ( round((test_blockx[1] + (blocks[str(test_blockx[0])][0]/2.0)),10) <= round((platform[0] + platform_size[0]/2),10) and round((test_blockx[1] + (blocks[str(test_blockx[0])][0]/2.0)),10) >= round((platform[0] - platform_size[0]/2),10) and (test_blockx[2] > platform[1]) and round((test_blockx[2] - push_down - (blocks[str(test_blockx[0])][1]/2.0)),10) <= round((platform[1] + platform_size[1]/2),10)): edge2_supported = True if (round((test_blockx[2] - push_down - (blocks[str(test_blockx[0])][1]/2.0)),10) <= absolute_ground): center_supported = True edge1_supported = True edge2_supported = True if robustness == 1: if center_supported == True or (edge1_supported == True and edge2_supported == True): continue else: valid = False if robustness == 2: if edge1_supported == True and edge2_supported == True: continue else: valid = False if robustness == 3: if center_supported == True and edge1_supported == True and edge2_supported == True: continue else: valid = False for pig in final_pig_positions: pig_supported = False for structure in 
test_complete_locations2: for block in structure: if ( round((block[1] - (blocks[str(block[0])][0]/2.0)) + error_buffer,10) <= round((pig[0]),10) and round((block[1] + (blocks[str(block[0])][0]/2.0)) - error_buffer,10) >= round((pig[0]),10) and round((block[2] + (blocks[str(block[0])][1]/2.0)) - error_buffer,10) >= round((pig[1] - pig_size[1]/2 - 0.01),10) and round((block[2] - (blocks[str(block[0])][1]/2.0)) + error_buffer,10) <= round((pig[1] - pig_size[1]/2 - 0.01),10)): pig_supported = True if pig_supported == False: pigs_supported = False if (overlap == False and valid == True and pigs_supported == True): ran_num = uniform(0.0,1.0) if ran_num < prob_swap: total_swaps = total_swaps + 1 swapped = 1 complete_locations[i][j] = test_block print("") print("total number of block swaps: ", total_swaps) return complete_locations # attempt to protect vulnerable blocks in structures def protect_vulnerable_blocks(complete_locations, complete_ground_locations, final_platforms, final_pig_positions, selected_other): vulnerable_blocks = [] if (vulnerability_analysis == True): vulnerable_blocks = find_vulnerable_blocks(complete_locations,final_pig_positions,selected_other,final_platforms) print("") print ("vulnerable blocks: ", vulnerable_blocks) temp_complete_locations = deepcopy(complete_locations) if (protection_method1 == True): complete_locations = protect_vulnerable_blocks1(complete_locations, complete_ground_locations, final_platforms, vulnerable_blocks, final_pig_positions, selected_other) if (vulnerable_blocks != []) and (temp_complete_locations != complete_locations): vulnerable_blocks = find_vulnerable_blocks(complete_locations,final_pig_positions,selected_other,final_platforms) print("") print ("vulnerable blocks: ", vulnerable_blocks) temp_complete_locations = deepcopy(complete_locations) if (protection_method2 == True): complete_locations = protect_vulnerable_blocks2(complete_locations,final_platforms,final_pig_positions,selected_other, vulnerable_blocks) if (vulnerable_blocks != []) and (temp_complete_locations != complete_locations): vulnerable_blocks = find_vulnerable_blocks(complete_locations,final_pig_positions,selected_other,final_platforms) print("") print ("vulnerable blocks: ", vulnerable_blocks) return vulnerable_blocks # set the material of each block def set_materials(complete_locations, final_pig_positions, vulnerable_blocks): final_materials = [] final_blocks = [] for ii in complete_locations: for jj in ii: final_blocks.append(jj) for i in final_blocks: final_materials.append(0) if (protection_method3 == True): for i in range(len(final_blocks)): if final_blocks[i] in vulnerable_blocks: final_materials[i] = 3 index = 0 blocks_in_way_dup = find_blocks_in_way(complete_locations,final_pig_positions,selected_other,final_platforms) blocks_in_way_merged = [] blocks_in_way = [] for trajectory in blocks_in_way_dup: found = 0 for pig_traj in blocks_in_way_merged: if trajectory[0] == pig_traj[0]: pig_traj[1] = pig_traj[1]+trajectory[1] found = 1 if found == 0: blocks_in_way_merged.append(trajectory) for traj_new in blocks_in_way_merged: blocks_in_way.append(traj_new[1]) for grouping in blocks_in_way: if (uniform(0.0,1.0) < trajectory_chance): material_choice = choose_item(probability_table_materials_trajectory) for block in grouping: for j in range(len(final_blocks)): if block == final_blocks[j]: if final_materials[j] == 0: final_materials[j] = material_choice for structure in complete_locations: if uniform(0.0,1.0) < cluster_chance: all_set = 0 current_point = 
randint(0,len(structure)-1) #current_point = 0 start_point = current_point material_choice = choose_item(probability_table_materials) while (all_set == 0): final_materials[index+current_point] = material_choice smallest_distance = 9999 for i in range(len(structure)): if final_materials[index+i] == 0: if sqrt( ((structure[i][1]-structure[start_point][1]) * (structure[i][1]-structure[start_point][1])) + ((structure[i][2]-structure[start_point][2]) * (structure[i][2]-structure[start_point][2])) ) < smallest_distance: smallest_distance = sqrt( ((structure[i][1]-structure[start_point][1]) * (structure[i][1]-structure[start_point][1])) + ((structure[i][2]-structure[start_point][2]) * (structure[i][2]-structure[start_point][2])) ) current_point = i if uniform(0.0,1.0) < cluster_swap_prob: material_choice = choose_item(probability_table_materials) start_point = current_point if smallest_distance == 9999: all_set = 1 index = index + len(structure) elif uniform(0.0,1.0) < random_chance: for block in structure: material_choice = choose_item(probability_table_materials) if final_materials[index] == 0: final_materials[index] = material_choice index = index + 1 elif len(structure) <= small_threshold: material_choice = choose_item(probability_table_materials) for block in structure: if final_materials[index] == 0: final_materials[index] = material_choice index = index + 1 else: current_y = 999 for block in structure: if block[2] != current_y: material_choice = choose_item(probability_table_materials) current_y = block[2] if final_materials[index] == 0: final_materials[index] = material_choice index = index + 1 return final_materials, final_blocks # selects the type and order of the birds, based on level properties def find_bird_order(complete_locations, final_pig_positions, final_platforms, selected_other, final_materials): number_wood = 0 number_ice = 0 number_stone = 0 number_protected = 0 number_unprotected = 0 total_number_blocks = len(final_materials) total_number_pigs = len(final_pig_positions) for i in final_materials: if i == 1: number_wood = number_wood + 1 if i == 2: number_ice = number_ice + 1 if i == 3: number_stone = number_stone + 1 hittable_dup = find_hittable_pigs(complete_locations,final_pig_positions,selected_other,final_platforms) hittable_final = [] for i in hittable_dup: if i not in hittable_final: hittable_final.append(i) number_protected = total_number_pigs-len(hittable_final) unprotected_dup = find_unprotected_pigs(complete_locations,final_pig_positions,selected_other,final_platforms) unprotected_final = [] for i in unprotected_dup: if i not in
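The generator code above only accepts a candidate block position if every affected block still passes the centre/edge support rule for the configured robustness level. Below is a minimal, self-contained sketch of that rule, assuming blocks are (type, x, y) tuples and a block_sizes dict maps type to (width, height); the names is_supported and block_sizes are illustrative, not taken from the generator itself.

def is_supported(block, below_blocks, block_sizes, check_buffer=0.0, robustness=1):
    """Return True if `block` meets the stability rule for the given robustness level."""
    width, _ = block_sizes[block[0]]
    center = block[1]
    edge1 = center - width / 2 + check_buffer
    edge2 = center + width / 2 - check_buffer

    def point_supported(x):
        # A point is supported if any block below spans it horizontally.
        return any(
            b[1] - block_sizes[b[0]][0] / 2 <= x <= b[1] + block_sizes[b[0]][0] / 2
            for b in below_blocks
        )

    center_ok = point_supported(center)
    edge1_ok = point_supported(edge1)
    edge2_ok = point_supported(edge2)

    if robustness == 1:        # centre alone, or both edges together
        return center_ok or (edge1_ok and edge2_ok)
    if robustness == 2:        # both edges
        return edge1_ok and edge2_ok
    return center_ok and edge1_ok and edge2_ok   # robustness 3: all three points

# Example: a narrow block resting on one wide block directly beneath it.
sizes = {"1": (2.0, 0.5), "2": (0.5, 0.5)}
print(is_supported(("2", 0.0, 1.0), [("1", 0.0, 0.5)], sizes, robustness=3))  # True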
""" Copyright (c) Microsoft Corporation. Licensed under the MIT license. VCR dataset """ import copy import json import torch from torch.nn.utils.rnn import pad_sequence from toolz.sandbox import unzip from cytoolz import concat from .data import (DetectFeatTxtTokDataset, TxtTokLmdb, DetectFeatLmdb, TxtLmdb, get_ids_and_lens, pad_tensors, get_gather_index) from utils.make_dict import make_dict def dict_slice(adict): start=0 end=5000 keys = adict.keys() dict_slice = {} index=0 for k in keys: if index>=start and index<=end: dict_slice[k] = adict[k] index+=1 if index>end: break return dict_slice def move_to_cuda(batch): if isinstance(batch, torch.Tensor): return batch.cuda(non_blocking=True) elif isinstance(batch, list): new_batch = [move_to_cuda(t) for t in batch] elif isinstance(batch, tuple): new_batch = tuple(move_to_cuda(t) for t in batch) elif isinstance(batch, dict): new_batch = {n: move_to_cuda(t) for n, t in batch.items()} else: return batch return new_batch class VcrTxtTokLmdb(TxtTokLmdb): def __init__(self, db_dir, max_txt_len=120, task="qa,qar"): # assert task == "qa" or task == "qar" or task == "qa,qar",\ # "VCR only support the following tasks: 'qa', 'qar' or 'qa,qar'" self.task = task if task == "qa,qar": id2len_task = "qar" else: id2len_task = task if max_txt_len == -1: self.id2len = json.load( open(f'{db_dir}/id2len_{id2len_task}.json')) else: self.id2len = { id_: len_ for id_, len_ in json.load( open(f'{db_dir}/id2len_{id2len_task}.json') ).items() if len_ <= max_txt_len } # self.id2len=dict_slice(self.id2len) self.db_dir = db_dir self.db = TxtLmdb(db_dir, readonly=True) meta = json.load(open(f'{db_dir}/meta.json', 'r')) self.cls_ = meta['CLS'] self.sep = meta['SEP'] self.mask = meta['MASK'] self.v_range = meta['v_range'] class VcrDetectFeatTxtTokDataset(DetectFeatTxtTokDataset): def __init__(self, txt_db, img_db_gt=None, img_db=None): # assert not (img_db_gt is None and img_db is None),\ # "img_db_gt and img_db cannot all be None" assert isinstance(txt_db, VcrTxtTokLmdb) assert img_db_gt is None or isinstance(img_db_gt, DetectFeatLmdb) assert img_db is None or isinstance(img_db, DetectFeatLmdb) self.txt_db = txt_db self.img_db = img_db self.img_db_gt = img_db_gt self.ls = img_db_gt self.task = self.txt_db.task txt_lens, self.ids = get_ids_and_lens(txt_db) txt2img = txt_db.txt2img if self.img_db and self.img_db_gt: self.lens = [tl+self.img_db_gt.name2nbb[txt2img[id_][0]] + self.img_db.name2nbb[txt2img[id_][1]] for tl, id_ in zip(txt_lens, self.ids)] elif self.img_db: self.lens = [tl+self.img_db.name2nbb[txt2img[id_][1]] for tl, id_ in zip(txt_lens, self.ids)] elif self.img_db_gt: self.lens = [tl+self.img_db_gt.name2nbb[txt2img[id_][0]] for tl, id_ in zip(txt_lens, self.ids)] else: self.lens = [tl for tl in txt_lens] def _get_img_feat(self, fname_gt, fname): if self.img_db and self.img_db_gt: img_feat_gt, bb_gt = self.img_db_gt[fname_gt] img_bb_gt = torch.cat([bb_gt, bb_gt[:, 4:5]*bb_gt[:, 5:]], dim=-1) img_feat, bb = self.img_db[fname] img_bb = torch.cat([bb, bb[:, 4:5]*bb[:, 5:]], dim=-1) img_feat = torch.cat([img_feat_gt, img_feat], dim=0) img_bb = torch.cat([img_bb_gt, img_bb], dim=0) num_bb = img_feat.size(0) elif self.img_db: img_feat, bb = self.img_db[fname] img_bb = torch.cat([bb, bb[:, 4:5]*bb[:, 5:]], dim=-1) num_bb = img_feat.size(0) elif self.img_db_gt: img_feat, bb = self.img_db_gt[fname_gt] img_bb = torch.cat([bb, bb[:, 4:5]*bb[:, 5:]], dim=-1) num_bb = img_feat.size(0) return img_feat, img_bb, num_bb class VcrDataset(VcrDetectFeatTxtTokDataset): def 
__init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # assert self.task != "qa,qar",\ # "loading training dataset with each task separately" def _get_input_ids(self, txt_dump): # text input input_ids_q = txt_dump['input_ids'] type_ids_q = [0]*len(input_ids_q) input_ids_as = txt_dump['input_ids_as'] if self.task == "qar": input_ids_rs = txt_dump['input_ids_rs'] answer_label = txt_dump['qa_target'] assert answer_label >= 0, "answer_label < 0" input_ids_gt_a = [self.txt_db.sep] + copy.deepcopy( input_ids_as[answer_label]) type_ids_gt_a = [2] * len(input_ids_gt_a) type_ids_q += type_ids_gt_a input_ids_q += input_ids_gt_a input_ids_for_choices = input_ids_rs else: input_ids_for_choices = input_ids_as return input_ids_q, input_ids_for_choices, type_ids_q def __getitem__(self, i): """ [[txt, img1], [txt, img2]] """ example = super().__getitem__(i) img_feat, img_pos_feat, num_bb = self._get_img_feat( example['img_fname'][0], example['img_fname'][1]) input_ids_q, input_ids_for_choices, type_ids_q = self._get_input_ids( example) label = example['%s_target' % (self.task)] outs = [] for index, input_ids_a in enumerate(input_ids_for_choices): if index == label: target = torch.tensor([1]).long() else: target = torch.tensor([0]).long() input_ids = [self.txt_db.cls_] + copy.deepcopy(input_ids_q) +\ [self.txt_db.sep] + input_ids_a + [self.txt_db.sep] # type_id # 0 -- question # 1 -- region # 2 -- answer # 3 -- rationale type_id_for_choice = 3 if type_ids_q[-1] == 2 else 2 txt_type_ids = [0] + type_ids_q + [type_id_for_choice]*( len(input_ids_a)+2) attn_masks = torch.ones( len(input_ids) + num_bb, dtype=torch.long) input_ids = torch.tensor(input_ids) txt_type_ids = torch.tensor(txt_type_ids) outs.append( (input_ids, txt_type_ids, img_feat, img_pos_feat, attn_masks, target)) return tuple(outs) def vcr_collate(inputs): (input_ids, txt_type_ids, img_feats, img_pos_feats, attn_masks, targets) = map(list, unzip(concat(inputs))) txt_lens = [i.size(0) for i in input_ids] input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0) txt_type_ids = pad_sequence( txt_type_ids, batch_first=True, padding_value=0) position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long ).unsqueeze(0) # image batches num_bbs = [f.size(0) for f in img_feats] img_feat = pad_tensors(img_feats, num_bbs) img_pos_feat = pad_tensors(img_pos_feats, num_bbs) attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0) targets = torch.stack(targets, dim=0) bs, max_tl = input_ids.size() out_size = attn_masks.size(1) gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size) batch = {'input_ids': input_ids, 'txt_type_ids': txt_type_ids, 'position_ids': position_ids, 'img_feat': img_feat, 'img_pos_feat': img_pos_feat, 'attn_masks': attn_masks, 'gather_index': gather_index, 'targets': targets} batch = move_to_cuda(batch) return batch class QRA_VcrDataset(VcrDetectFeatTxtTokDataset): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # assert self.task != "qa,qar",\ # "loading training dataset with each task separately" def _get_input_ids(self, txt_dump): # text input input_ids_q = txt_dump['input_ids'] type_ids_q = [0]*len(input_ids_q) input_ids_as = txt_dump['input_ids_as'] input_ids_rs = txt_dump['input_ids_rs'] ra_label = txt_dump['qar_target'] assert ra_label >= 0, "answer_label < 0" input_ids_gt_a = [self.txt_db.sep] + copy.deepcopy( input_ids_rs[ra_label]) type_ids_gt_a = [3] * len(input_ids_gt_a) type_ids_q += type_ids_gt_a input_ids_q += input_ids_gt_a 
input_ids_for_choices = input_ids_as return input_ids_q, input_ids_for_choices, type_ids_q def __getitem__(self, i): """ [[txt, img1], [txt, img2]] """ example = super().__getitem__(i) img_feat, img_pos_feat, num_bb = self._get_img_feat( example['img_fname'][0], example['img_fname'][1]) input_ids_q, input_ids_for_choices, type_ids_q = self._get_input_ids( example) label = example['qa_target'] outs = [] for index, input_ids_a in enumerate(input_ids_for_choices): if index == label: target = torch.tensor([1]).long() else: target = torch.tensor([0]).long() input_ids = [self.txt_db.cls_] + copy.deepcopy(input_ids_q) +\ [self.txt_db.sep] + input_ids_a + [self.txt_db.sep] # type_id # 0 -- question # 1 -- region # 2 -- answer # 3 -- rationale type_id_for_choice = 3 if type_ids_q[-1] == 2 else 2 txt_type_ids = [0] + type_ids_q + [type_id_for_choice]*( len(input_ids_a)+2) attn_masks = torch.ones( len(input_ids) + num_bb, dtype=torch.long) input_ids = torch.tensor(input_ids) txt_type_ids = torch.tensor(txt_type_ids) outs.append( (input_ids, txt_type_ids, img_feat, img_pos_feat, attn_masks, target)) return tuple(outs) def QRA_vcr_collate(inputs): (input_ids, txt_type_ids, img_feats, img_pos_feats, attn_masks, targets) = map(list, unzip(concat(inputs))) txt_lens = [i.size(0) for i in input_ids] input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0) txt_type_ids = pad_sequence( txt_type_ids, batch_first=True, padding_value=0) position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long ).unsqueeze(0) # image batches num_bbs = [f.size(0) for f in img_feats] img_feat = pad_tensors(img_feats, num_bbs) img_pos_feat = pad_tensors(img_pos_feats, num_bbs) attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0) targets = torch.stack(targets, dim=0) bs, max_tl = input_ids.size() out_size = attn_masks.size(1) gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size) batch = {'input_ids': input_ids, 'txt_type_ids': txt_type_ids, 'position_ids': position_ids, 'img_feat': img_feat, 'img_pos_feat': img_pos_feat, 'attn_masks': attn_masks, 'gather_index': gather_index, 'targets': targets} batch = move_to_cuda(batch) return batch class QR_VcrDataset(VcrDetectFeatTxtTokDataset): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # assert self.task != "qa,qar",\ # "loading training dataset with each task separately" def _get_input_ids(self, txt_dump): # text input input_ids_q = txt_dump['input_ids'] type_ids_q = [0]*len(input_ids_q) trg = txt_dump['input_ids_rs'][txt_dump['rationale_label']] input_ids_as = txt_dump['input_ids_as'] input_ids_as_all=[] #把所有答案拼在问题后面,用sep隔开 #用2作为type # type_id # 0 -- question # 1 -- region # 2 -- answer # 3 -- rationale for ans_ids in input_ids_as: input_ids_as_all+=[self.txt_db.sep] + copy.deepcopy( ans_ids) type_ids_as_all = [2] * len(input_ids_as_all) type_ids_q += type_ids_as_all input_ids_q += input_ids_as_all return input_ids_q, trg, type_ids_q def __getitem__(self, i): """ [[txt, img1], [txt, img2]] """ qid = self.ids[i] example = super().__getitem__(i) img_feat, img_pos_feat, num_bb = self._get_img_feat( example['img_fname'][0], example['img_fname'][1]) input_ids_q, trg, type_ids_q = self._get_input_ids( example) input_ids = [self.txt_db.cls_] + copy.deepcopy(input_ids_q) + [self.txt_db.sep] trg= [self.txt_db.cls_] + copy.deepcopy(trg) + [self.txt_db.sep] txt_type_ids = [0] + type_ids_q+[2] attn_masks = torch.ones( len(input_ids) + num_bb, dtype=torch.long) input_ids = torch.tensor(input_ids) txt_type_ids = 
torch.tensor(txt_type_ids) trg_len=len(trg) trg=torch.tensor(trg) label_raw=example['toked_rs'][example['qar_target']] outs = [] outs.append( (input_ids, txt_type_ids, img_feat, img_pos_feat, attn_masks, trg,trg_len,qid,label_raw)) return tuple(outs) def QR_vcr_collate(inputs): (input_ids, txt_type_ids, img_feats, img_pos_feats, attn_masks, targets,trg_len,qids,labels_raw) = map(list, unzip(concat(inputs))) txt_lens = [i.size(0) for i in input_ids] input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0) txt_type_ids = pad_sequence( txt_type_ids, batch_first=True, padding_value=0) position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long ).unsqueeze(0) targets=pad_sequence( targets, batch_first=True, padding_value=0) labels=targets[:,1:] pad = torch.zeros((labels.size(0),1), dtype=torch.int64) labels=torch.cat((labels,pad),dim=1) # image batches num_bbs = [f.size(0) for f in img_feats] img_feat = pad_tensors(img_feats, num_bbs) img_pos_feat = pad_tensors(img_pos_feats, num_bbs) attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0) bs, max_tl = input_ids.size() out_size = attn_masks.size(1) gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size) batch = {'input_ids': input_ids, 'txt_type_ids': txt_type_ids, 'position_ids': position_ids, 'img_feat': img_feat, 'img_pos_feat': img_pos_feat, 'attn_masks': attn_masks, 'gather_index': gather_index, 'targets': targets, 'trg_length':trg_len, 'labels':labels, 'qids':qids, 'label_raw':labels_raw} batch = move_to_cuda(batch) return batch class QR_gpt_VcrDataset(VcrDetectFeatTxtTokDataset): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # assert self.task != "qa,qar",\ # "loading training dataset with each task separately" def _get_input_ids(self, txt_dump): # text input input_ids_q = txt_dump['input_ids'] type_ids_q = [0]*len(input_ids_q) trg = txt_dump['input_ids_rs'][txt_dump['qar_target']] input_ids_as = txt_dump['input_ids_as'] input_ids_as_all=[] #把所有答案拼在问题后面,用sep隔开 #用2作为type # type_id # 0 -- question # 1 -- region
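The VCR loaders above turn each question into one (input_ids, target) pair per answer choice, and the *_vcr_collate functions pad those pairs into rectangular batches. The following is a minimal sketch of that multiple-choice collate pattern on toy token ids, using only pad_sequence and stack; toy_example and toy_collate are illustrative names and not part of the project.

import torch
from torch.nn.utils.rnn import pad_sequence

def toy_example(question, choices, label):
    # One dataset row: one entry per choice, target 1 only for the gold choice.
    return [
        (torch.tensor(question + choice), torch.tensor([1 if i == label else 0]).long())
        for i, choice in enumerate(choices)
    ]

def toy_collate(examples):
    # Flatten the per-choice entries from all examples, then pad to a rectangle.
    flat = [pair for ex in examples for pair in ex]
    input_ids = pad_sequence([p[0] for p in flat], batch_first=True, padding_value=0)
    targets = torch.stack([p[1] for p in flat], dim=0)
    position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long).unsqueeze(0)
    return {"input_ids": input_ids, "position_ids": position_ids, "targets": targets}

batch = toy_collate([toy_example([101, 7, 8], [[9], [10, 11]], label=1)])
print(batch["input_ids"].shape, batch["targets"].squeeze(-1).tolist())  # torch.Size([2, 5]) [0, 1]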
-4] harmed -2.1 0.83066 [-1, -4, -2, -2, -2, -2, -1, -2, -3, -2] harmfully -2.6 0.91652 [-3, -4, -3, -1, -3, -3, -3, -3, -1, -2] harmfulness -2.6 0.8 [-3, -1, -3, -3, -3, -2, -4, -2, -2, -3] harming -2.6 0.66332 [-3, -3, -2, -2, -3, -3, -2, -2, -2, -4] harmless 1.0 0.7746 [2, 1, 1, 1, 1, 0, 0, 2, 0, 2] harmlessly 1.4 1.2 [4, 0, 1, 0, 1, 1, 1, 2, 3, 1] harmlessness 0.8 1.16619 [1, 2, 0, 1, 0, 1, 2, 1, 2, -2] harmonic 1.8 0.87178 [2, 1, 2, 3, 0, 1, 3, 2, 2, 2] harmonica 0.6 0.8 [0, 1, 0, 0, 0, 2, 1, 0, 2, 0] harmonically 2.1 1.13578 [3, 3, 4, 1, 0, 1, 2, 2, 2, 3] harmonicas 0.1 0.3 [0, 0, 0, 0, 0, 0, 1, 0, 0, 0] harmonicist 0.5 0.92195 [0, 0, 0, 0, 1, 0, 0, 0, 1, 3] harmonicists 0.9 1.3 [2, 1, 0, 0, 2, 0, 0, 0, 4, 0] harmonics 1.5 1.0247 [2, 2, 2, 0, 0, 2, 3, 2, 0, 2] harmonies 1.3 0.9 [2, 0, 2, 1, 2, 2, 0, 2, 0, 2] harmonious 2.0 1.09545 [3, 4, 2, 2, 2, 0, 2, 3, 1, 1] harmoniously 1.9 0.9434 [4, 2, 2, 1, 2, 1, 1, 3, 2, 1] harmoniousness 1.8 0.6 [1, 2, 2, 2, 2, 1, 2, 3, 2, 1] harmonise 1.8 0.74833 [1, 1, 2, 3, 2, 1, 1, 3, 2, 2] harmonised 1.3 0.9 [2, 3, 2, 0, 2, 1, 1, 1, 1, 0] harmonising 1.4 0.66332 [1, 2, 1, 1, 2, 1, 1, 1, 1, 3] harmonium 0.9 1.22066 [0, 3, 0, 2, 3, 0, 0, 0, 0, 1] harmoniums 0.8 0.9798 [0, 0, 0, 2, 0, 2, 0, 2, 2, 0] harmonization 1.9 0.83066 [3, 1, 2, 2, 2, 2, 3, 0, 2, 2] harmonizations 0.9 0.9434 [0, 0, 2, 0, 2, 2, 0, 2, 1, 0] harmonize 1.7 0.78102 [3, 2, 2, 1, 2, 2, 0, 1, 2, 2] harmonized 1.6 0.91652 [1, 2, 1, 1, 2, 2, 3, 3, 0, 1] harmonizer 1.6 0.8 [1, 2, 1, 1, 2, 2, 2, 3, 0, 2] harmonizers 1.6 1.11355 [2, 2, 2, 1, 2, 0, 4, 0, 2, 1] harmonizes 1.5 0.92195 [0, 2, 2, 2, 0, 1, 1, 2, 3, 2] harmonizing 1.4 0.66332 [0, 1, 1, 2, 2, 1, 2, 2, 1, 2] harmony 1.7 0.45826 [2, 2, 2, 2, 1, 2, 2, 2, 1, 1] harms -2.2 1.6 [2, -3, -2, -2, -3, -2, -4, -4, -2, -2] harried -1.4 0.4899 [-1, -1, -2, -1, -2, -2, -1, -1, -1, -2] harsh -1.9 0.7 [-1, -1, -2, -2, -1, -3, -3, -2, -2, -2] harsher -2.2 0.6 [-2, -3, -2, -3, -2, -2, -1, -3, -2, -2] harshest -2.9 0.83066 [-4, -2, -2, -2, -2, -3, -3, -4, -4, -3] hate -2.7 1.00499 [-4, -3, -4, -4, -2, -2, -2, -2, -1, -3] hated -3.2 0.6 [-3, -3, -4, -3, -2, -3, -3, -4, -4, -3] hateful -2.2 1.249 [-3, 1, -3, -3, -1, -2, -2, -3, -3, -3] hatefully -2.3 0.78102 [-1, -3, -3, -3, -1, -2, -2, -2, -3, -3] hatefulness -3.6 0.4899 [-4, -4, -3, -3, -3, -4, -4, -4, -4, -3] hater -1.8 0.6 [-2, -1, -2, -2, -2, -1, -1, -2, -2, -3] haters -2.2 0.6 [-2, -1, -3, -2, -3, -2, -3, -2, -2, -2] hates -1.9 0.7 [-2, -1, -2, -2, -3, -1, -1, -2, -2, -3] hating -2.3 1.1 [-4, -3, -4, -1, -2, -2, -1, -2, -1, -3] hatred -3.2 0.9798 [-1, -3, -2, -4, -3, -3, -4, -4, -4, -4] haunt -1.7 1.00499 [-1, -1, -3, -1, -2, -2, -1, -4, -1, -1] haunted -2.1 0.7 [-2, -2, -1, -3, -3, -2, -2, -3, -1, -2] haunting -1.1 0.83066 [-3, 0, -2, -1, 0, -1, -1, -1, -1, -1] haunts -1.0 1.41421 [0, -2, -2, -2, -2, -1, 2, -2, 1, -2] havoc -2.9 0.7 [-2, -4, -4, -3, -2, -3, -3, -3, -2, -3] healthy 1.7 0.9 [1, 3, 1, 1, 3, 3, 1, 2, 1, 1] heartbreak -2.7 0.78102 [-1, -3, -3, -3, -2, -4, -2, -3, -3, -3] heartbreaker -2.2 1.07703 [-2, -3, 0, -3, -2, -1, -4, -3, -2, -2] heartbreakers -2.1 0.9434 [-3, -2, -3, -2, -1, -1, -4, -1, -2, -2] heartbreaking -2.0 1.73205 [-3, -1, -3, -3, -4, 2, -3, -2, 0, -3] heartbreakingly -1.8 2.08806 [-3, 3, 1, -3, -3, -2, -3, -3, -4, -1] heartbreaks -1.8 1.77764 [-2, 1, -3, -2, -3, -2, -3, 2, -4, -2] heartbroken -3.3 0.45826 [-4, -3, -3, -4, -3, -3, -4, -3, -3, -3] heartfelt 2.5 0.5 [3, 3, 2, 3, 2, 2, 3, 2, 2, 3] heartless -2.2 0.74833 [-2, -2, -2, -4, -2, -1, -2, -3, -2, -2] heartlessly -2.8 
0.6 [-3, -2, -3, -3, -2, -3, -4, -2, -3, -3] heartlessness -2.8 0.87178 [-3, -3, -2, -3, -4, -4, -1, -3, -2, -3] heartwarming 2.1 1.22066 [3, 2, 3, 3, 2, 2, 3, 3, -1, 1] heaven 2.3 1.18743 [1, 1, 2, 4, 3, 3, 3, 4, 1, 1] heavenlier 3.0 0.63246 [3, 2, 3, 3, 4, 3, 3, 4, 2, 3] heavenliest 2.7 1.1 [3, 2, 3, 4, 2, 4, 3, 0, 3, 3] heavenliness 2.7 0.9 [3, 2, 1, 4, 3, 2, 3, 4, 3, 2] heavenlinesses 2.3 2.2383 [4, 4, 4, 3, -2, 3, 3, 4, -2, 2] heavenly 3.0 0.63246 [3, 3, 3, 3, 2, 3, 3, 4, 2, 4] heavens 1.7 1.18743 [4, 0, 1, 2, 0, 3, 2, 2, 2, 1] heavenward 1.4 1.35647 [0, 3, 0, 4, 1, 2, 2, 0, 2, 0] heavenwards 1.2 1.32665 [1, 4, 0, 0, 2, 1, 1, 0, 3, 0] heavyhearted -2.1 0.83066 [-2, -3, -3, -2, -3, -1, -1, -1, -2, -3] heh -0.6 1.28062 [0, 1, -1, 1, -1, -2, -1, -3, 1, -1] hell -3.6 0.66332 [-4, -4, -4, -4, -4, -2, -3, -4, -3, -4] hellish -3.2 0.74833 [-3, -3, -2, -2, -4, -3, -4, -4, -3, -4] help 1.7 0.78102 [3, 2, 1, 2, 1, 2, 3, 1, 1, 1] helper 1.4 0.8 [1, 1, 0, 1, 1, 2, 1, 2, 3, 2] helpers 1.1 0.83066 [1, 1, 0, 2, 1, 1, 1, 1, 3, 0] helpful 1.8 0.87178 [2, 1, 3, 1, 1, 3, 1, 2, 3, 1] helpfully 2.3 0.9 [1, 2, 2, 3, 2, 3, 3, 2, 4, 1] helpfulness 1.9 1.13578 [1, 4, 1, 2, 2, 1, 1, 2, 4, 1] helping 1.2 0.6 [2, 1, 1, 2, 0, 1, 1, 1, 2, 1] helpless -2.0 0.63246 [-2, -3, -2, -2, -2, -3, -1, -2, -1, -2] helplessly -1.4 0.4899 [-1, -1, -2, -2, -1, -1, -1, -2, -2, -1] helplessness -2.1 0.9434 [-2, -4, -1, -2, -1, -3, -3, -2, -1, -2] helplessnesses -1.7 0.64031 [-2, -1, -2, -1, -2, -1, -3, -2, -1, -2] helps 1.6 0.4899 [1, 1, 1, 2, 2, 2, 1, 2, 2, 2] hero 2.6 0.8 [2, 3, 2, 2, 4, 4, 2, 3, 2, 2] heroes 2.3 0.9 [3, 4, 3, 1, 3, 2, 1, 2, 2, 2] heroic 2.6 0.8 [3, 3, 1, 4, 2, 3, 2, 3, 2, 3] heroical 2.9 1.04403 [4, 4, 2, 4, 2, 3, 1, 4, 2, 3] heroically 2.4 0.8 [2, 2, 2, 3, 3, 3, 4, 1, 2, 2] heroicomic 1.0 1.0 [1, 0, 1, 0, 2, 0, 3, 1, 2, 0] heroicomical 1.1 0.83066 [2, 0, 0, 2, 1, 2, 1, 2, 1, 0] heroics 2.4 0.8 [2, 1, 2, 2, 2, 3, 3, 3, 4, 2] heroin -2.2 1.83303 [0, -2, -4, 2, -4, -2, -2, -3, -3, -4] heroine 2.7 1.1 [0, 2, 4, 4, 3, 3, 3, 3, 2, 3] heroines 1.8 1.32665 [2, 1, 1, 4, 3, 1, 0, 3, 3, 0] heroinism -2.0 2.0 [-3, -4, 2, -2, -4, -3, -2, -1, 1, -4] heroism 2.8 0.6 [3, 3, 4, 3, 2, 2, 3, 3, 2, 3] heroisms 2.2 0.87178 [3, 1, 2, 4, 3, 2, 2, 2, 1, 2] heroize 2.1 0.7 [3, 2, 3, 1, 2, 2, 2, 3, 1, 2] heroized 2.0 1.18322 [1, 0, 3, 3, 2, 0, 3, 3, 2, 3] heroizes 2.2 0.9798 [1, 3, 2, 3, 4, 3, 2, 1, 2, 1] heroizing 1.9 1.64012 [2, 3, -2, 2, 4, 3, 2, 2, 3, 0] heron 0.1 0.3 [0, 0, 0, 0, 0, 1, 0, 0, 0, 0] heronries 0.7 1.1 [2, 0, 0, 0, 2, 0, 3, 0, 0, 0] heronry 0.1 0.9434 [0, 0, 0, 0, 0, 2, 0, 1, -2, 0] herons 0.5 1.0247 [0, 0, 0, 3, 0, 2, 0, 0, 0, 0] heros 1.3 1.18743 [3, 0, 0, 2, 0, 2, 2, 3, 1, 0] hesitance -0.9 0.3 [-1, -1, 0, -1, -1, -1, -1, -1, -1, -1] hesitancies -1.0 0.63246 [-1, -1, -1, -2, -1, -1, -2, 0, 0, -1] hesitancy -0.9 0.53852 [0, -1, 0, -2, -1, -1, -1, -1, -1, -1] hesitant -1.0 0.7746 [0, -1, 0, -1, -2, 0, -1, -2, -1, -2] hesitantly -1.2 0.4 [-1, -1, -2, -1, -1, -1, -1, -2, -1, -1] hesitate -1.1 0.53852 [-2, -1, -1, -1, -2, -1, -1, 0, -1, -1] hesitated -1.3 0.9 [-1, -2, -1, -2, -2, -2, -1, 1, -1, -2] hesitater -1.4 0.66332 [-1, -1, -1, -1, -1, -2, -1, -3, -2, -1] hesitaters -1.4 0.4899 [-1, -2, -1, -1, -1, -1, -2, -2, -2, -1] hesitates -1.4 0.4899 [-1, -1, -1, -2, -1, -2, -1, -2, -2, -1] hesitating -1.4 0.66332 [-1, -1, -2, -1, -1, -3, -2, -1, -1, -1] hesitatingly -1.5 0.80623 [-1, -1, -1, -1, -3, -2, -3, -1, -1, -1] hesitation -1.1 0.53852 [-2, 0, -1, -1, -1, -1, -1, -2, -1, -1] hesitations -1.1 0.53852 [-1, -1, -1, 0, 
-2, -1, -1, -2, -1, -1] hid -0.4 0.4899 [0, -1, 0, 0, -1, -1, -1, 0, 0, 0] hide -0.7 0.64031 [0, -1, -1, -1, -1, 0, 0, -2, -1, 0] hides -0.7 0.9 [-1, -2, -1, 0, -1, 0, 0, -2, 1, -1] hiding -1.2 0.4 [-1, -1, -2, -1, -1, -1, -1, -1, -2, -1] highlight 1.4 0.91652 [3, 0, 1, 1, 2, 1, 0, 2, 2, 2] hilarious 1.7 1.41774 [2, 2, 2, 3, 3, 1, -2, 2, 3, 1] hindrance -1.7 0.78102 [-2, -3, -2, -1, -1, -1, -3, -1, -2, -1] hoax -1.1 1.04403 [-3, -1, -2, -1, 1, -2, -1, -1, -1, 0] holiday 1.7 1.18743 [1, 3, 2, 2, 0, 0, 1, 2, 4, 2] holidays 1.6 1.0198 [2, 0, 1, 2, 3, 0, 1, 2, 3, 2] homesick -0.7 1.67631 [-2, -1, -1, -2, -1, -2, 2, -1, 3, -2] homesickness -1.8 1.249 [-3, -2, -1, -1, -3, -2, -1, 1, -3, -3] homesicknesses -1.8 0.6 [-1, -2, -2, -2, -1, -2, -1, -2, -3, -2] honest 2.3 0.9 [3, 2, 1, 2, 3, 1, 2, 3, 2, 4] honester 1.9 0.7 [2, 3, 2, 2, 1, 3, 1, 1, 2, 2] honestest 3.0 0.7746 [1, 3,
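Each lexicon entry above lists a word, its mean valence, the standard deviation, and the ten underlying human ratings. A small illustrative sketch (not part of the lexicon itself) showing that the two summary columns follow from the ratings, with the standard deviation matching the population form (ddof = 0):

import statistics

def lexicon_stats(ratings):
    mean = sum(ratings) / len(ratings)
    std = statistics.pstdev(ratings)  # population std dev, matching e.g. "harmless 1.0 0.7746"
    return round(mean, 1), round(std, 5)

print(lexicon_stats([2, 1, 1, 1, 1, 0, 0, 2, 0, 2]))            # (1.0, 0.7746)  -> harmless
print(lexicon_stats([-4, -3, -4, -4, -2, -2, -2, -2, -1, -3]))  # (-2.7, 1.00499) -> hate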
import tensorflow as tf from . import custom_layers class Discriminator(object): """Discriminator that takes image input and outputs logits. Attributes: name: str, name of `Discriminator`. kernel_regularizer: `l1_l2_regularizer` object, regularizar for kernel variables. bias_regularizer: `l1_l2_regularizer` object, regularizar for bias variables. params: dict, user passed parameters. alpha_var: variable, alpha for weighted sum of fade-in of layers. input_layers: list, `Input` layers for each resolution of image. from_rgb_conv_layers: list, `Conv2D` fromRGB layers. from_rgb_leaky_relu_layers: list, leaky relu layers that follow `Conv2D` fromRGB layers. conv_layers: list, `Conv2D` layers. leaky_relu_layers: list, leaky relu layers that follow `Conv2D` layers. growing_downsample_layers: list, `AveragePooling2D` layers for growing branch. shrinking_downsample_layers: list, `AveragePooling2D` layers for shrinking branch. minibatch_stddev_layer: `MiniBatchStdDev` layer, applies minibatch stddev to image to add an additional feature channel based on the sample. flatten_layer: `Flatten` layer, flattens image for logits layer. logits_layer: `Dense` layer, used for calculating logits. models: list, instances of discriminator `Model`s for each growth. """ def __init__( self, kernel_regularizer, bias_regularizer, name, params, alpha_var, num_growths ): """Instantiates and builds discriminator network. Args: kernel_regularizer: `l1_l2_regularizer` object, regularizar for kernel variables. bias_regularizer: `l1_l2_regularizer` object, regularizar for bias variables. name: str, name of discriminator. params: dict, user passed parameters. alpha_var: variable, alpha for weighted sum of fade-in of layers. num_growths: int, number of growth phases for model. """ # Set name of discriminator. self.name = name # Store regularizers. self.kernel_regularizer = kernel_regularizer self.bias_regularizer = bias_regularizer # Store parameters. self.params = params # Store reference to alpha variable. self.alpha_var = alpha_var # Store lists of layers. self.input_layers = [] self.from_rgb_conv_layers = [] self.from_rgb_leaky_relu_layers = [] self.conv_layers = [] self.leaky_relu_layers = [] self.growing_downsample_layers = [] self.shrinking_downsample_layers = [] self.minibatch_stddev_layer = None self.flatten_layer = None self.logits_layer = None # Instantiate discriminator layers. self._create_discriminator_layers() # Store list of discriminator models. self.models = self._create_models(num_growths) ########################################################################## ########################################################################## ########################################################################## def _create_input_layers(self): """Creates discriminator input layers for each image resolution. Returns: List of `Input` layers. """ height, width = self.params["generator_projection_dims"][0:2] # Create list to hold `Input` layers. input_layers = [ tf.keras.Input( shape=(height * 2 ** i, width * 2 ** i, self.params["depth"]), name="{}_{}x{}_inputs".format( self.name, height * 2 ** i, width * 2 ** i ) ) for i in range(len(self.params["discriminator_from_rgb_layers"])) ] return input_layers def _create_from_rgb_layers(self): """Creates discriminator fromRGB layers of 1x1 convs. Returns: List of fromRGB 1x1 conv layers and leaky relu layers. """ # Get fromRGB layer properties. 
from_rgb = [ self.params["discriminator_from_rgb_layers"][i][0][:] for i in range( len(self.params["discriminator_from_rgb_layers"]) ) ] # Create list to hold toRGB 1x1 convs. from_rgb_conv_layers = [ custom_layers.WeightScaledConv2D( filters=from_rgb[i][3], kernel_size=from_rgb[i][0:2], strides=from_rgb[i][4:6], padding="same", activation=None, kernel_initializer=( tf.random_normal_initializer(mean=0., stddev=1.0) if self.params["use_equalized_learning_rate"] else "he_normal" ), kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer, use_equalized_learning_rate=( self.params["use_equalized_learning_rate"] ), name="{}_from_rgb_layers_conv2d_{}_{}x{}_{}_{}".format( self.name, i, from_rgb[i][0], from_rgb[i][1], from_rgb[i][2], from_rgb[i][3] ) ) for i in range(len(from_rgb)) ] from_rgb_leaky_relu_layers = [ tf.keras.layers.LeakyReLU( alpha=self.params["discriminator_leaky_relu_alpha"], name="{}_from_rgb_layers_leaky_relu_{}".format(self.name, i) ) for i in range(len(from_rgb)) ] return from_rgb_conv_layers, from_rgb_leaky_relu_layers def _create_base_conv_layer_block(self): """Creates discriminator base conv layer block. Returns: List of base block conv layers and list of leaky relu layers. """ # Get conv block layer properties. conv_block = self.params["discriminator_base_conv_blocks"][0] # Create list of base conv layers. base_conv_layers = [ custom_layers.WeightScaledConv2D( filters=conv_block[i][3], kernel_size=conv_block[i][0:2], strides=conv_block[i][4:6], padding="same", activation=None, kernel_initializer=( tf.random_normal_initializer(mean=0., stddev=1.0) if self.params["use_equalized_learning_rate"] else "he_normal" ), kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer, use_equalized_learning_rate=( self.params["use_equalized_learning_rate"] ), name="{}_base_layers_conv2d_{}_{}x{}_{}_{}".format( self.name, i, conv_block[i][0], conv_block[i][1], conv_block[i][2], conv_block[i][3] ) ) for i in range(len(conv_block) - 1) ] # Have valid padding for layer just before flatten and logits. base_conv_layers.append( custom_layers.WeightScaledConv2D( filters=conv_block[-1][3], kernel_size=conv_block[-1][0:2], strides=conv_block[-1][4:6], padding="valid", activation=None, kernel_initializer=( tf.random_normal_initializer(mean=0., stddev=1.0) if self.params["use_equalized_learning_rate"] else "he_normal" ), kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer, use_equalized_learning_rate=( self.params["use_equalized_learning_rate"] ), name="{}_base_layers_conv2d_{}_{}x{}_{}_{}".format( self.name, len(conv_block) - 1, conv_block[-1][0], conv_block[-1][1], conv_block[-1][2], conv_block[-1][3] ) ) ) base_leaky_relu_layers = [ tf.keras.layers.LeakyReLU( alpha=self.params["discriminator_leaky_relu_alpha"], name="{}_base_layers_leaky_relu_{}".format(self.name, i) ) for i in range(len(conv_block)) ] return base_conv_layers, base_leaky_relu_layers def _create_growth_conv_layer_block(self, block_idx): """Creates discriminator growth conv layer block. Args: block_idx: int, the current growth block's index. Returns: List of growth block's conv layers and list of growth block's leaky relu layers. """ # Get conv block layer properties. conv_block = ( self.params["discriminator_growth_conv_blocks"][block_idx] ) # Create new growth convolutional layers. 
growth_conv_layers = [ custom_layers.WeightScaledConv2D( filters=conv_block[i][3], kernel_size=conv_block[i][0:2], strides=conv_block[i][4:6], padding="same", activation=None, kernel_initializer=( tf.random_normal_initializer(mean=0., stddev=1.0) if self.params["use_equalized_learning_rate"] else "he_normal" ), kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer, use_equalized_learning_rate=( self.params["use_equalized_learning_rate"] ), name="{}_growth_layers_conv2d_{}_{}_{}x{}_{}_{}".format( self.name, block_idx, i, conv_block[i][0], conv_block[i][1], conv_block[i][2], conv_block[i][3] ) ) for i in range(len(conv_block)) ] growth_leaky_relu_layers = [ tf.keras.layers.LeakyReLU( alpha=self.params["discriminator_leaky_relu_alpha"], name="{}_growth_layers_leaky_relu_{}_{}".format( self.name, block_idx, i ) ) for i in range(len(conv_block)) ] return growth_conv_layers, growth_leaky_relu_layers def _create_downsample_layers(self): """Creates discriminator downsample layers. Returns: Lists of AveragePooling2D layers for growing and shrinking branches. """ # Create list to hold growing branch's downsampling layers. growing_downsample_layers = [ tf.keras.layers.AveragePooling2D( pool_size=(2, 2), strides=(2, 2), name="{}_growing_average_pooling_2d_{}".format( self.name, i - 1 ) ) for i in range( 1, len(self.params["discriminator_from_rgb_layers"]) ) ] # Create list to hold shrinking branch's downsampling layers. shrinking_downsample_layers = [ tf.keras.layers.AveragePooling2D( pool_size=(2, 2), strides=(2, 2), name="{}_shrinking_average_pooling_2d_{}".format( self.name, i - 1 ) ) for i in range( 1, len(self.params["discriminator_from_rgb_layers"]) ) ] return growing_downsample_layers, shrinking_downsample_layers def _create_discriminator_layers(self): """Creates discriminator layers. Args: input_shape: tuple, shape of latent vector input of shape [batch_size, latent_size]. """ # Create input layers for each image resolution. 
self.input_layers = self._create_input_layers() (self.from_rgb_conv_layers, self.from_rgb_leaky_relu_layers) = self._create_from_rgb_layers() (base_conv_layers, base_leaky_relu_layers) = self._create_base_conv_layer_block() self.conv_layers.append(base_conv_layers) self.leaky_relu_layers.append(base_leaky_relu_layers) for block_idx in range( len(self.params["discriminator_growth_conv_blocks"]) ): (growth_conv_layers, growth_leaky_relu_layers ) = self._create_growth_conv_layer_block(block_idx) self.conv_layers.append(growth_conv_layers) self.leaky_relu_layers.append(growth_leaky_relu_layers) (self.growing_downsample_layers, self.shrinking_downsample_layers) = self._create_downsample_layers() self.minibatch_stddev_layer = custom_layers.MiniBatchStdDev( params={ "use_minibatch_stddev": self.params["discriminator_use_minibatch_stddev"], "group_size": self.params["discriminator_minibatch_stddev_group_size"], "use_averaging": self.params["discriminator_minibatch_stddev_use_averaging"] } ) self.flatten_layer = tf.keras.layers.Flatten() self.logits_layer = custom_layers.WeightScaledDense( units=1, activation=None, kernel_initializer=( tf.random_normal_initializer(mean=0., stddev=1.0) if self.params["use_equalized_learning_rate"] else "he_normal" ), kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer, use_equalized_learning_rate=( self.params["use_equalized_learning_rate"] ), name="{}_layers_dense_logits".format(self.name) ) ########################################################################## ########################################################################## ########################################################################## def _use_logits_layer(self, inputs): """Uses flatten and logits layers to get logits tensor. Args: inputs: tensor, output of last conv layer of discriminator. Returns: Final logits tensor of discriminator. """ # Set shape to remove ambiguity for dense layer. height, width = self.params["generator_projection_dims"][0:2] valid_kernel_size = ( self.params["discriminator_base_conv_blocks"][0][-1][0] ) inputs.set_shape( [ inputs.get_shape()[0], height - valid_kernel_size + 1, width - valid_kernel_size + 1, inputs.get_shape()[-1]] ) # Flatten final block conv tensor. flat_inputs = self.flatten_layer(inputs=inputs) # Final linear layer for logits. logits = self.logits_layer(inputs=flat_inputs) return logits def _create_base_block_and_logits(self, inputs): """Creates base discriminator block and logits. Args: block_conv: tensor, output of previous `Conv2D` block's layer. Returns: Final logits tensor of discriminator. """ # Only need the first conv layer block for base network. base_conv_layers = self.conv_layers[0] base_leaky_relu_layers = self.leaky_relu_layers[0] network = self.minibatch_stddev_layer(inputs=inputs) for i in range(len(base_conv_layers)): network = base_conv_layers[i](inputs=network) network = base_leaky_relu_layers[i](inputs=network) # Get logits now. logits = self._use_logits_layer(inputs=network) return logits def _create_growth_transition_weighted_sum(self, inputs, block_idx): """Creates growth transition img_to_vec weighted_sum. Args: inputs: tensor, input image to discriminator. block_idx: int, current block index of model progression. Returns: Tensor of weighted sum between shrinking and growing block paths. """ # Growing side chain. 
growing_from_rgb_conv_layer = self.from_rgb_conv_layers[block_idx] growing_from_rgb_leaky_relu_layer = ( self.from_rgb_leaky_relu_layers[block_idx] ) growing_downsample_layer = ( self.growing_downsample_layers[block_idx - 1] ) growing_conv_layers = self.conv_layers[block_idx] growing_leaky_relu_layers = self.leaky_relu_layers[block_idx] # Pass inputs through layer chain. network = growing_from_rgb_conv_layer(inputs=inputs) network = growing_from_rgb_leaky_relu_layer(inputs=network) for i in range(len(growing_conv_layers)): network = growing_conv_layers[i](inputs=network) network = growing_leaky_relu_layers[i](inputs=network) # Down sample from 2s X 2s to s X s image. growing_network = growing_downsample_layer(inputs=network) # Shrinking side chain. shrinking_from_rgb_conv_layer = ( self.from_rgb_conv_layers[block_idx - 1] ) shrinking_from_rgb_leaky_relu_layer = ( self.from_rgb_leaky_relu_layers[block_idx - 1] ) shrinking_downsample_layer = ( self.shrinking_downsample_layers[block_idx - 1] ) # Pass inputs through layer chain. # Down sample from 2s X 2s to s X s image. network = shrinking_downsample_layer(inputs=inputs) network = shrinking_from_rgb_conv_layer(inputs=network) shrinking_network = shrinking_from_rgb_leaky_relu_layer( inputs=network ) # Weighted sum. weighted_sum = tf.add( x=growing_network * self.alpha_var, y=shrinking_network * (1.0 - self.alpha_var), name="{}_growth_transition_weighted_sum_{}".format( self.name, block_idx ) ) return weighted_sum def _create_perm_growth_block_network(self, inputs, block_idx): """Creates discriminator permanent block network. Args: inputs: tensor, output of previous block's layer. block_idx: int, current block index of model progression. Returns: Tensor from final permanent block `Conv2D` layer. """ # Get permanent growth blocks, so skip the base block. permanent_conv_layers = self.conv_layers[1:block_idx] permanent_leaky_relu_layers = self.leaky_relu_layers[1:block_idx] permanent_downsample_layers = self.growing_downsample_layers[0:block_idx - 1] # Reverse order of blocks. permanent_conv_layers = permanent_conv_layers[::-1] permanent_leaky_relu_layers = permanent_leaky_relu_layers[::-1] permanent_downsample_layers = permanent_downsample_layers[::-1] # Pass inputs through layer chain. network = inputs # Loop through the permanent growth blocks. for i in range(len(permanent_conv_layers)): # Get layers from
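During a growth transition the discriminator above blends two paths at the old resolution: the new block's output (downsampled) and the old fromRGB output (applied after downsampling), weighted by alpha. Below is a minimal sketch of that fade-in using plain Keras layers; it omits the weight scaling and layer bookkeeping of the real model, and fade_in, growing_block and shrinking_from_rgb are illustrative names.

import tensorflow as tf

def fade_in(inputs, growing_block, shrinking_from_rgb, alpha):
    # Growing side: new high-resolution block, then downsample to the old resolution.
    growing = tf.keras.layers.AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(
        growing_block(inputs)
    )
    # Shrinking side: downsample first, then reuse the old fromRGB 1x1 conv.
    shrinking = shrinking_from_rgb(
        tf.keras.layers.AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(inputs)
    )
    # Weighted sum; alpha ramps from 0 to 1 over the transition phase.
    return alpha * growing + (1.0 - alpha) * shrinking

# Toy usage on an 8x8 RGB batch with matching channel counts on both branches.
x = tf.random.normal([2, 8, 8, 3])
growing_block = tf.keras.layers.Conv2D(16, 3, padding="same", activation="relu")
shrinking_from_rgb = tf.keras.layers.Conv2D(16, 1, padding="same")
print(fade_in(x, growing_block, shrinking_from_rgb, alpha=0.3).shape)  # (2, 4, 4, 16)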
purpose, it does not exist in bytes @staticmethod def process(data, process_kwargs=None): """ Adapted from: https://github.com/OSGeo/gdal/blob/2.0/gdal/apps/gdaldem.cpp#L481 Edges are not implemented, result clips one pixel from array. """ if process_kwargs is None: return data array = data["values"].copy() array[array == data["no_data_value"]] = process_kwargs["fill"] xres, yres = process_kwargs["resolution"] alt = math.radians(process_kwargs["altitude"]) az = math.radians(process_kwargs["azimuth"]) zsf = 1 / 8 # vertical scale factor square_zsf = zsf * zsf # gradient s0 = slice(None, None), slice(None, -2), slice(None, -2) s1 = slice(None, None), slice(None, -2), slice(1, -1) s2 = slice(None, None), slice(None, -2), slice(2, None) s3 = slice(None, None), slice(1, -1), slice(None, -2) s4 = slice(None, None), slice(1, -1), slice(1, -1) s5 = slice(None, None), slice(1, -1), slice(2, None) s6 = slice(None, None), slice(2, None), slice(None, -2) s7 = slice(None, None), slice(2, None), slice(1, -1) s8 = slice(None, None), slice(2, None), slice(2, None) # angle calculation y = np.empty(array.shape, dtype="f4") y[s4] = ( array[s0] + 2 * array[s1] + array[s2] - array[s6] - 2 * array[s7] - array[s8] ) / yres x = np.empty(array.shape, dtype="f4") x[s4] = ( array[s0] + 2 * array[s3] + array[s6] - array[s2] - 2 * array[s5] - array[s8] ) / xres with np.errstate(all="ignore"): xx_plus_yy = x * x + y * y aspect = np.arctan2(y, x) # shading cang = ( math.sin(alt) - math.cos(alt) * zsf * np.sqrt(xx_plus_yy) * np.sin(aspect - az) ) / np.sqrt(1 + square_zsf * xx_plus_yy) cang = cang[..., 1:-1, 1:-1] result = np.where(cang <= 0, 0, 255 * cang).astype("u1") return {"values": result, "no_data_value": 256} def get_sources_and_requests(self, **request): new_request = expand_request_pixels(request, radius=1) if new_request is None: # not an expandable request: do nothing return [(self.store, request)] # determine resolution bbox = request["bbox"] resolution = ( (bbox[2] - bbox[0]) / request["width"], (bbox[3] - bbox[1]) / request["height"], ) process_kwargs = dict( resolution=resolution, altitude=self.altitude, azimuth=self.azimuth, fill=self.fill, ) return [(self.store, new_request), (process_kwargs, None)] class Place(BaseSingle): """Place an input raster at given coordinates Note that if the store's projection is different from the requested one, the data will be reprojected before placing it at a different position. Args: store (RasterBlock): Raster that will be placed. place_projection (str): The projection in which this operation is done. This also specifies the projection of the ``anchor`` and ``coordinates`` args. anchor (list of 2 numbers): The anchor into the source raster that will be placed at given coordinates. coordinates (list of lists of 2 numbers): The target coordinates. The center of the bbox will be placed on each of these coordinates. statistic (str): What method to use to merge overlapping rasters. 
One of: {"last", "first", "count", "sum", "mean", "min", "max", "argmin", "argmax", "product", "std", "var", "p<number>"} Returns: RasterBlock with the source raster placed """ def __init__(self, store, place_projection, anchor, coordinates, statistic="last"): if not isinstance(store, RasterBlock): raise TypeError("'{}' object is not allowed".format(type(store))) try: get_sr(place_projection) except RuntimeError: raise ValueError( "'{}' is not a valid projection string".format(place_projection) ) anchor = list(anchor) if len(anchor) != 2: raise ValueError("Expected 2 numbers in the 'anchor' parameter") for x in anchor: if not isinstance(x, (int, float)): raise TypeError("'{}' object is not allowed".format(type(x))) if coordinates is None or len(coordinates) == 0: coordinates = [] else: coordinates = np.asarray(coordinates, dtype=float) if coordinates.ndim != 2 or coordinates.shape[1] != 2: raise ValueError( "Expected a list of lists of 2 numbers in the " "'coordinates' parameter" ) coordinates = coordinates.tolist() check_statistic(statistic) super().__init__(store, place_projection, anchor, coordinates, statistic) @property def place_projection(self): return self.args[1] @property def anchor(self): return self.args[2] @property def coordinates(self): return self.args[3] @property def statistic(self): return self.args[4] @property def projection(self): """The native projection of this block. Only returns something if the place projection equals the store projection""" store_projection = self.store.projection if store_projection is None: return if get_sr(self.place_projection).IsSame(get_sr(store_projection)): return store_projection @property def geo_transform(self): """The native geo_transform of this block Returns None if the store projection and place projections differ.""" if self.projection is not None: return self.store.geo_transform @property def extent(self): geometry = self.geometry if geometry is None: return if not geometry.GetSpatialReference().IsSame(EPSG4326): geometry = geometry.Clone() geometry.TransformTo(EPSG4326) x1, x2, y1, y2 = geometry.GetEnvelope() return x1, y1, x2, y2 @property def geometry(self): """Combined geometry in this block's native projection. 
""" store_geometry = self.store.geometry if store_geometry is None: return sr = get_sr(self.place_projection) if not store_geometry.GetSpatialReference().IsSame(sr): store_geometry = store_geometry.Clone() store_geometry.TransformTo(sr) _x1, _x2, _y1, _y2 = store_geometry.GetEnvelope() p, q = self.anchor P, Q = zip(*self.coordinates) x1, x2 = _x1 + min(P) - p, _x2 + max(P) - p y1, y2 = _y1 + min(Q) - q, _y2 + max(Q) - q return ogr.CreateGeometryFromWkt(POLYGON.format(x1, y1, x2, y2), sr) def get_sources_and_requests(self, **request): if request["mode"] != "vals": return ({"mode": request["mode"]}, None), (self.store, request) # transform the anchor and coordinates into the requested projection anchor = shapely_transform( Point(self.anchor), self.place_projection, request["projection"] ).coords[0] coordinates = [ shapely_transform( Point(coord), self.place_projection, request["projection"] ).coords[0] for coord in self.coordinates ] # transform the source's extent extent_geometry = self.store.geometry if extent_geometry is None: # no geometry means: no data return (({"mode": "null"}, None),) sr = get_sr(request["projection"]) if not extent_geometry.GetSpatialReference().IsSame(sr): extent_geometry = extent_geometry.Clone() extent_geometry.TransformTo(sr) xmin, xmax, ymin, ymax = extent_geometry.GetEnvelope() # compute the requested cellsize x1, y1, x2, y2 = request["bbox"] size_x = (x2 - x1) / request["width"] size_y = (y2 - y1) / request["height"] # point requests: never request the full source extent if size_x > 0 and size_y > 0: # check what the full source extent would require full_height = math.ceil((ymax - ymin) / size_y) full_width = math.ceil((xmax - xmin) / size_x) if full_height * full_width <= request["width"] * request["height"]: _request = request.copy() _request["width"] = full_width _request["height"] = full_height _request["bbox"] = ( xmin, ymin, xmin + full_width * size_x, ymin + full_height * size_y, ) process_kwargs = { "mode": "warp", "anchor": anchor, "coordinates": coordinates, "src_bbox": _request["bbox"], "dst_bbox": request["bbox"], "cellsize": (size_x, size_y), "statistic": self.statistic, } return [(process_kwargs, None), (self.store, _request)] # generate a new (backwards shifted) bbox for each coordinate sources_and_requests = [] filtered_coordinates = [] for _x, _y in coordinates: bbox = [ x1 + anchor[0] - _x, y1 + anchor[1] - _y, x2 + anchor[0] - _x, y2 + anchor[1] - _y, ] # check the overlap with the source's extent # Note that raster cells are defined [xmin, xmax) and (ymin, ymax] # so points precisely at xmax or ymin certainly do not have data. if bbox[0] >= xmax or bbox[1] > ymax or bbox[2] < xmin or bbox[3] <= ymin: continue filtered_coordinates.append((_x, _y)) _request = request.copy() _request["bbox"] = bbox sources_and_requests.append((self.store, _request)) if len(sources_and_requests) == 0: # No coordinates inside: we still need to return an array # of the correct shape. Send a time request to get the depth. 
_request = request.copy() _request["mode"] = "time" process_kwargs = { "mode": "empty", "dtype": self.dtype, "fillvalue": self.fillvalue, "width": request["width"], "height": request["height"], "statistic": self.statistic, } return [(process_kwargs, None), (self.store, _request)] process_kwargs = {"mode": "group", "statistic": self.statistic} return [(process_kwargs, None)] + sources_and_requests @staticmethod def process(process_kwargs, *multi): if process_kwargs["mode"] in {"meta", "time"}: return multi[0] if process_kwargs["mode"] == "null": return if process_kwargs["mode"] == "empty": data = multi[0] if data is None: return out_shape = ( len(data["time"]), process_kwargs["height"], process_kwargs["width"], ) out_no_data_value = process_kwargs["fillvalue"] out_dtype = process_kwargs["dtype"] stack = [] elif process_kwargs["mode"] == "group": # We have a bunch of arrays that are already shifted. Stack them. stack = [data for data in multi if data is not None] if len(stack) == 0: return # instead of returning nodata (because inputs are None) elif process_kwargs["mode"] == "warp": # There is a single 'source' raster that we are going to shift # multiple times into the result. The cellsize is already correct. data = multi[0] if data is None: return out_no_data_value = data["no_data_value"] source = data["values"] out_dtype = source.dtype # convert the anchor to pixels (indices inside 'source') anchor = process_kwargs["anchor"] src_bbox = process_kwargs["src_bbox"] size_x, size_y = process_kwargs["cellsize"] anchor_px = ( (anchor[0] - src_bbox[0]) / size_x, (anchor[1] - src_bbox[1]) / size_y, ) # compute the output shape x1, y1, x2, y2 = process_kwargs["dst_bbox"] coordinates
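Place.get_sources_and_requests above realises "put the source's anchor at each target coordinate" by shifting the requested bbox backwards by (anchor - coordinate) before forwarding it to the wrapped store. A tiny sketch of that shift (shifted_bbox is a hypothetical helper, not part of the library):

def shifted_bbox(bbox, anchor, coordinate):
    # Shift the request window opposite to the placement, as in get_sources_and_requests.
    x1, y1, x2, y2 = bbox
    dx = anchor[0] - coordinate[0]
    dy = anchor[1] - coordinate[1]
    return (x1 + dx, y1 + dy, x2 + dx, y2 + dy)

# Requesting (0, 0)-(10, 10) while placing anchor (2, 2) at coordinate (7, 3)
# means reading the source 5 units to the left and 1 unit down of the target window.
print(shifted_bbox((0, 0, 10, 10), anchor=(2, 2), coordinate=(7, 3)))  # (-5, -1, 5, 9)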
0.00435488, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228, 'Renaming Unit/Peak Dynamic': 3.58947, 'Renaming Unit/Runtime Dynamic': 0.356445, 'Renaming Unit/Subthreshold Leakage': 0.0552466, 'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461, 'Runtime Dynamic': 4.69907, 'Subthreshold Leakage': 6.16288, 'Subthreshold Leakage with power gating': 2.55328}, {'Area': 32.0201, 'Execution Unit/Area': 7.68434, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 0.0614842, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.250981, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 0.391445, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.120359, 'Execution Unit/Instruction Scheduler/Area': 1.66526, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.272128, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223, 'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.438933, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.221558, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.932619, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.251221, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak Dynamic': 
4.97023, 'Execution Unit/Register Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0739523, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0114143, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.103142, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0844156, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.177095, 'Execution Unit/Register Files/Runtime Dynamic': 0.0958299, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.232667, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.565191, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543, 'Execution Unit/Runtime Dynamic': 2.25, 'Execution Unit/Subthreshold Leakage': 1.79543, 'Execution Unit/Subthreshold Leakage with power gating': 0.688821, 'Gate Leakage': 0.368936, 'Instruction Fetch Unit/Area': 5.85939, 'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00162206, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00162206, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00143327, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch 
Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000566026, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00121264, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00589003, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0148215, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0589979, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0811509, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.16189, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.274289, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.275625, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 7.63092, 'Instruction Fetch Unit/Runtime Dynamic': 0.651776, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932286, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0229731, 
'L2/Runtime Dynamic': 0.0080628, 'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80901, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 3.02266, 'Load Store Unit/Data Cache/Runtime Dynamic': 0.866568, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0350888, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.0577665, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.0577665, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 3.29545, 'Load Store Unit/Runtime Dynamic': 1.20922, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 0.142442, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.284885, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store Unit/Subthreshold Leakage': 0.591321, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283293, 'Memory Management Unit/Area': 0.4339, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.0505533, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.050897, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00808595, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.320947, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.0449689, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.563898, 'Memory Management Unit/Runtime Dynamic': 0.0958659, 'Memory Management Unit/Subthreshold Leakage': 0.0766103, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333, 'Peak Dynamic': 20.0729, 'Renaming Unit/Area': 0.303608, 'Renaming Unit/FP Front End RAT/Area': 0.131045, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.194534, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885, 'Renaming Unit/Free List/Area': 0.0340654, 'Renaming Unit/Free List/Gate Leakage': 2.5481e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0306032, 'Renaming Unit/Free List/Runtime Dynamic': 0.0146451, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064, 'Renaming Unit/Gate Leakage': 0.00708398, 'Renaming Unit/Int Front End RAT/Area': 0.0941223, 'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.135756, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 
0.00435488,
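# The fragment above appears to be a flat, McPAT-style per-component power/area
# report: slash-separated component paths map to metrics such as 'Area',
# 'Peak Dynamic', 'Runtime Dynamic', and 'Subthreshold Leakage'. Below is a
# minimal sketch of how such a dict can be summarised; the helper name and the
# tiny sample dict are illustrative only (the sample values are copied from the
# fragment above).
from collections import defaultdict

def runtime_dynamic_by_unit(stats):
    """Sum top-level 'Runtime Dynamic' entries (e.g. 'L2/Runtime Dynamic') per unit."""
    totals = defaultdict(float)
    for key, value in stats.items():
        parts = key.split('/')
        # Two-part keys are top-level unit totals; deeper keys are sub-components.
        if len(parts) == 2 and parts[1] == 'Runtime Dynamic':
            totals[parts[0]] += value
    return dict(totals)

sample = {
    'Execution Unit/Runtime Dynamic': 2.25,
    'L2/Runtime Dynamic': 0.0080628,
    'Load Store Unit/Runtime Dynamic': 1.20922,
    'Load Store Unit/Data Cache/Runtime Dynamic': 0.866568,  # ignored: sub-component
}
print(runtime_dynamic_by_unit(sample))
# {'Execution Unit': 2.25, 'L2': 0.0080628, 'Load Store Unit': 1.20922}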
self.state = 408 self._errHandler.sync(self) _la = self._input.LA(1) while _la==visualgParser.VIRGULA: self.state = 401 self.match(visualgParser.VIRGULA) self.state = 404 self._errHandler.sync(self) token = self._input.LA(1) if token in [visualgParser.MATRIZ, visualgParser.INTEIRO, visualgParser.REAL, visualgParser.VARIAVEL]: self.state = 402 self.selecao_aritmetica() pass elif token in [visualgParser.BOOL]: self.state = 403 self.match(visualgParser.BOOL) pass else: raise NoViableAltException(self) self.state = 410 self._errHandler.sync(self) _la = self._input.LA(1) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class Lista_de_intervaloContext(ParserRuleContext): __slots__ = 'parser' def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def ABRE_COLCHETES(self): return self.getToken(visualgParser.ABRE_COLCHETES, 0) def intervalo(self): return self.getTypedRuleContext(visualgParser.IntervaloContext,0) def FECHA_COLCHETES(self): return self.getToken(visualgParser.FECHA_COLCHETES, 0) def getRuleIndex(self): return visualgParser.RULE_lista_de_intervalo def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterLista_de_intervalo" ): listener.enterLista_de_intervalo(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitLista_de_intervalo" ): listener.exitLista_de_intervalo(self) def lista_de_intervalo(self): localctx = visualgParser.Lista_de_intervaloContext(self, self._ctx, self.state) self.enterRule(localctx, 46, self.RULE_lista_de_intervalo) try: self.enterOuterAlt(localctx, 1) self.state = 411 self.match(visualgParser.ABRE_COLCHETES) self.state = 412 self.intervalo() self.state = 413 self.match(visualgParser.FECHA_COLCHETES) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class Tipo_da_variavelContext(ParserRuleContext): __slots__ = 'parser' def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def TIPO_DE_DADO(self): return self.getToken(visualgParser.TIPO_DE_DADO, 0) def tipo_vetor(self): return self.getTypedRuleContext(visualgParser.Tipo_vetorContext,0) def getRuleIndex(self): return visualgParser.RULE_tipo_da_variavel def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterTipo_da_variavel" ): listener.enterTipo_da_variavel(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitTipo_da_variavel" ): listener.exitTipo_da_variavel(self) def tipo_da_variavel(self): localctx = visualgParser.Tipo_da_variavelContext(self, self._ctx, self.state) self.enterRule(localctx, 48, self.RULE_tipo_da_variavel) try: self.state = 417 self._errHandler.sync(self) token = self._input.LA(1) if token in [visualgParser.TIPO_DE_DADO]: self.enterOuterAlt(localctx, 1) self.state = 415 self.match(visualgParser.TIPO_DE_DADO) pass elif token in [visualgParser.VETOR]: self.enterOuterAlt(localctx, 2) self.state = 416 self.tipo_vetor() pass else: raise NoViableAltException(self) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class Tipo_vetorContext(ParserRuleContext): __slots__ = 
'parser' def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def VETOR(self): return self.getToken(visualgParser.VETOR, 0) def lista_de_intervalo(self): return self.getTypedRuleContext(visualgParser.Lista_de_intervaloContext,0) def DE(self): return self.getToken(visualgParser.DE, 0) def TIPO_DE_DADO(self): return self.getToken(visualgParser.TIPO_DE_DADO, 0) def getRuleIndex(self): return visualgParser.RULE_tipo_vetor def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterTipo_vetor" ): listener.enterTipo_vetor(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitTipo_vetor" ): listener.exitTipo_vetor(self) def tipo_vetor(self): localctx = visualgParser.Tipo_vetorContext(self, self._ctx, self.state) self.enterRule(localctx, 50, self.RULE_tipo_vetor) try: self.enterOuterAlt(localctx, 1) self.state = 419 self.match(visualgParser.VETOR) self.state = 420 self.lista_de_intervalo() self.state = 421 self.match(visualgParser.DE) self.state = 422 self.match(visualgParser.TIPO_DE_DADO) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class IntervaloContext(ParserRuleContext): __slots__ = 'parser' def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def PONTO_PONTO(self, i:int=None): if i is None: return self.getTokens(visualgParser.PONTO_PONTO) else: return self.getToken(visualgParser.PONTO_PONTO, i) def INTEIRO(self, i:int=None): if i is None: return self.getTokens(visualgParser.INTEIRO) else: return self.getToken(visualgParser.INTEIRO, i) def VIRGULA(self, i:int=None): if i is None: return self.getTokens(visualgParser.VIRGULA) else: return self.getToken(visualgParser.VIRGULA, i) def getRuleIndex(self): return visualgParser.RULE_intervalo def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterIntervalo" ): listener.enterIntervalo(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitIntervalo" ): listener.exitIntervalo(self) def intervalo(self): localctx = visualgParser.IntervaloContext(self, self._ctx, self.state) self.enterRule(localctx, 52, self.RULE_intervalo) self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) self.state = 425 self._errHandler.sync(self) _la = self._input.LA(1) while True: self.state = 424 self.match(visualgParser.INTEIRO) self.state = 427 self._errHandler.sync(self) _la = self._input.LA(1) if not (_la==visualgParser.INTEIRO): break self.state = 429 self.match(visualgParser.PONTO_PONTO) self.state = 431 self._errHandler.sync(self) _la = self._input.LA(1) while True: self.state = 430 self.match(visualgParser.INTEIRO) self.state = 433 self._errHandler.sync(self) _la = self._input.LA(1) if not (_la==visualgParser.INTEIRO): break self.state = 445 self._errHandler.sync(self) _la = self._input.LA(1) while _la==visualgParser.VIRGULA: self.state = 435 self.match(visualgParser.VIRGULA) self.state = 437 self._errHandler.sync(self) _la = self._input.LA(1) while True: self.state = 436 self.match(visualgParser.INTEIRO) self.state = 439 self._errHandler.sync(self) _la = self._input.LA(1) if not (_la==visualgParser.INTEIRO): break self.state = 441 self.match(visualgParser.PONTO_PONTO) self.state = 442 self.match(visualgParser.INTEIRO) self.state = 447 self._errHandler.sync(self) _la = 
self._input.LA(1) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class Print_variavelContext(ParserRuleContext): __slots__ = 'parser' def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def calculo(self): return self.getTypedRuleContext(visualgParser.CalculoContext,0) def DOIS_PONTOS(self, i:int=None): if i is None: return self.getTokens(visualgParser.DOIS_PONTOS) else: return self.getToken(visualgParser.DOIS_PONTOS, i) def INTEIRO(self, i:int=None): if i is None: return self.getTokens(visualgParser.INTEIRO) else: return self.getToken(visualgParser.INTEIRO, i) def getRuleIndex(self): return visualgParser.RULE_print_variavel def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterPrint_variavel" ): listener.enterPrint_variavel(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitPrint_variavel" ): listener.exitPrint_variavel(self) def print_variavel(self): localctx = visualgParser.Print_variavelContext(self, self._ctx, self.state) self.enterRule(localctx, 54, self.RULE_print_variavel) self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) self.state = 448 self.calculo() self.state = 449 self.match(visualgParser.DOIS_PONTOS) self.state = 451 self._errHandler.sync(self) _la = self._input.LA(1) while True: self.state = 450 self.match(visualgParser.INTEIRO) self.state = 453 self._errHandler.sync(self) _la = self._input.LA(1) if not (_la==visualgParser.INTEIRO): break self.state = 461 self._errHandler.sync(self) _la = self._input.LA(1) if _la==visualgParser.DOIS_PONTOS: self.state = 455 self.match(visualgParser.DOIS_PONTOS) self.state = 457 self._errHandler.sync(self) _la = self._input.LA(1) while True: self.state = 456 self.match(visualgParser.INTEIRO) self.state = 459 self._errHandler.sync(self) _la = self._input.LA(1) if not (_la==visualgParser.INTEIRO): break except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class CalculoContext(ParserRuleContext): __slots__ = 'parser' def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def expressao_aritmetica(self): return self.getTypedRuleContext(visualgParser.Expressao_aritmeticaContext,0) def expressao_logica(self): return self.getTypedRuleContext(visualgParser.Expressao_logicaContext,0) def chamar_funcao(self): return self.getTypedRuleContext(visualgParser.Chamar_funcaoContext,0) def getRuleIndex(self): return visualgParser.RULE_calculo def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterCalculo" ): listener.enterCalculo(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitCalculo" ): listener.exitCalculo(self) def calculo(self): localctx = visualgParser.CalculoContext(self, self._ctx, self.state) self.enterRule(localctx, 56, self.RULE_calculo) try: self.enterOuterAlt(localctx, 1) self.state = 466 self._errHandler.sync(self) la_ = self._interp.adaptivePredict(self._input,53,self._ctx) if la_ == 1: self.state = 463 self.expressao_aritmetica() pass elif la_ == 2: self.state = 464 self.expressao_logica() pass elif la_ == 3: self.state = 465 self.chamar_funcao() pass except RecognitionException as re: localctx.exception = re 
self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class Expressao_aritmeticaContext(ParserRuleContext): __slots__ = 'parser' def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def selecao_aritmetica(self, i:int=None): if i is None: return self.getTypedRuleContexts(visualgParser.Selecao_aritmeticaContext) else: return self.getTypedRuleContext(visualgParser.Selecao_aritmeticaContext,i) def ABRE_PARENTESES(self, i:int=None): if i is None: return self.getTokens(visualgParser.ABRE_PARENTESES) else: return self.getToken(visualgParser.ABRE_PARENTESES, i) def OPERADOR_UNARIO(self, i:int=None): if i is None: return self.getTokens(visualgParser.OPERADOR_UNARIO) else: return self.getToken(visualgParser.OPERADOR_UNARIO, i) def FECHA_PARENTESES(self, i:int=None): if i is None: return self.getTokens(visualgParser.FECHA_PARENTESES) else: return self.getToken(visualgParser.FECHA_PARENTESES, i) def OP_SOM(self, i:int=None): if i is None: return self.getTokens(visualgParser.OP_SOM) else: return self.getToken(visualgParser.OP_SOM, i) def OP_SUB(self, i:int=None): if i is None: return self.getTokens(visualgParser.OP_SUB) else: return self.getToken(visualgParser.OP_SUB, i) def OP_MUL(self, i:int=None): if i is None: return self.getTokens(visualgParser.OP_MUL) else: return self.getToken(visualgParser.OP_MUL, i) def OP_DIV(self, i:int=None): if i is None: return self.getTokens(visualgParser.OP_DIV) else: return self.getToken(visualgParser.OP_DIV, i) def OP_RES(self, i:int=None): if i is None: return self.getTokens(visualgParser.OP_RES) else: return self.getToken(visualgParser.OP_RES, i) def OP_POT(self, i:int=None): if i is None: return self.getTokens(visualgParser.OP_POT) else: return self.getToken(visualgParser.OP_POT, i) def OP_DIV_INT(self, i:int=None): if i is None: return self.getTokens(visualgParser.OP_DIV_INT) else: return self.getToken(visualgParser.OP_DIV_INT, i) def getRuleIndex(self): return visualgParser.RULE_expressao_aritmetica def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterExpressao_aritmetica" ): listener.enterExpressao_aritmetica(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitExpressao_aritmetica" ): listener.exitExpressao_aritmetica(self) def expressao_aritmetica(self): localctx = visualgParser.Expressao_aritmeticaContext(self, self._ctx, self.state) self.enterRule(localctx, 58, self.RULE_expressao_aritmetica) self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) self.state = 469 self._errHandler.sync(self) _la = self._input.LA(1) if _la==visualgParser.ABRE_PARENTESES: self.state = 468 self.match(visualgParser.ABRE_PARENTESES) self.state = 472 self._errHandler.sync(self) _la = self._input.LA(1) if _la==visualgParser.OPERADOR_UNARIO: self.state = 471 self.match(visualgParser.OPERADOR_UNARIO) self.state = 474 self.selecao_aritmetica() self.state = 488 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,59,self._ctx) while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER: if _alt==1: self.state = 475 _la = self._input.LA(1) if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << visualgParser.OP_SOM) | (1 << visualgParser.OP_SUB) | (1 << visualgParser.OP_MUL) | (1 << visualgParser.OP_DIV) | (1 << visualgParser.OP_RES) | (1 << visualgParser.OP_POT) | (1 << visualgParser.OP_DIV_INT))) != 0)): self._errHandler.recoverInline(self) else: 
self._errHandler.reportMatch(self) self.consume() self.state = 477 self._errHandler.sync(self) _la = self._input.LA(1) if _la==visualgParser.ABRE_PARENTESES: self.state = 476 self.match(visualgParser.ABRE_PARENTESES) self.state = 480 self._errHandler.sync(self) _la = self._input.LA(1) if _la==visualgParser.OPERADOR_UNARIO: self.state = 479 self.match(visualgParser.OPERADOR_UNARIO) self.state = 482 self.selecao_aritmetica() self.state = 484 self._errHandler.sync(self) la_ = self._interp.adaptivePredict(self._input,58,self._ctx) if la_ == 1: self.state = 483 self.match(visualgParser.FECHA_PARENTESES) self.state = 490 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,59,self._ctx) self.state = 492 self._errHandler.sync(self) la_ = self._interp.adaptivePredict(self._input,60,self._ctx) if la_ == 1: self.state = 491 self.match(visualgParser.FECHA_PARENTESES) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class Expressao_logicaContext(ParserRuleContext): __slots__ = 'parser' def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def selecao_logica(self, i:int=None): if i is None: return self.getTypedRuleContexts(visualgParser.Selecao_logicaContext) else: return self.getTypedRuleContext(visualgParser.Selecao_logicaContext,i) def ABRE_PARENTESES(self, i:int=None): if i is None: return self.getTokens(visualgParser.ABRE_PARENTESES) else: return self.getToken(visualgParser.ABRE_PARENTESES, i)
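# A minimal driver sketch for the generated visualgParser above. Only the parser
# rules are visible in this fragment, so the companion lexer module/class name
# (visualgLexer) and the exact token definitions it provides are assumptions.
from antlr4 import InputStream, CommonTokenStream

from visualgLexer import visualgLexer    # assumed ANTLR-generated lexer
from visualgParser import visualgParser

# A VisuAlg vector type declaration, matching the tipo_vetor rule:
# VETOR lista_de_intervalo DE TIPO_DE_DADO
source = "vetor[1..10] de inteiro"

lexer = visualgLexer(InputStream(source))
tokens = CommonTokenStream(lexer)
parser = visualgParser(tokens)

# tipo_da_variavel() is one of the rule methods defined above; it chooses between
# a plain TIPO_DE_DADO and the tipo_vetor alternative.
tree = parser.tipo_da_variavel()
print(tree.toStringTree(recog=parser))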
<reponame>idevopscloud/python-kubernetes<gh_stars>0 #!/usr/bin/env python # # Copyright 2014 tigmi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from kubernetes import simplejson class PullPolicy(object): '''PullPolicy describes a policy for if/when to pull a container image ''' '''Always attempt to pull the latest image. Container will fail If the pull fails. ''' PullAlways = "PullAlways" '''Never pull an image, only use a local image. Container will fail if the image isn't present ''' PullNever = "PullNever" '''Pull if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. ''' PullIfNotPresent = "PullIfNotPresent" class Container(object): """A Class representing the Container structure used by the kubernetes API Container represents a single container that is expected to be run on the host. The Container structure exposes the following properties: Container.Name Container.Image Container.Command Container.WorkingDir Container.Ports Container.Env Container.Memory Container.CPU Container.VolumeMounts Container.LivenessProb Container.Lifecycle Container.Privileged Container.ImagePullPolicy """ def __init__(self, **kwargs): '''An object to hold a Kubernete Container. Arg: Name: Required: This must be a DNS_LABEL. Each container in a pod must have a unique name. Image: Required. Command: Optional: Defaults to whatever is defined in the image. WorkingDir: Optional: Defaults to Docker's default. Ports: Env: Memory: Optional: Defaults to unlimited. CPU: Optional: Defaults to unlimited. VolumeMounts: LivenessProb: Lifecycle: Privileged: Optional: Default to false. ImagePullPolicy: Optional: Policy for pulling images for this container ''' param_defaults = { 'Name': None, 'Image': None, 'Command': None, 'WorkingDir': None, 'Ports': None, 'Env': None, 'Memory': None, 'CPU': None, 'VolumeMounts': None, 'LivenessProb': None, 'Lifecycle': None, 'Privileged': None, 'ImagePullPolicy': None} for (param, default) in param_defaults.iteritems(): setattr(self, param, kwargs.get(param, default)) def __ne__(self, other): return not self.__eq__(other) def __eq__(self, other): try: return other and \ self.Name == other.Name and \ self.Image == other.Image and \ self.Command == other.Command and \ self.WorkingDir == other.WorkingDir and \ self.Ports == other.Ports and \ self.Env == other.Env and \ self.Memory == other.Memory and \ self.CPU == other.CPU and \ self.VolumeMounts == other.VolumeMounts and \ self.LivenessProb == other.LivenessProb and \ self.Lifecycle == other.Lifecycle and \ self.Privileged == other.Privileged and \ self.ImagePullPolicy == other.ImagePullPolicy except AttributeError: return False def __str__(self): '''A string representation of this Kubernetes.Container instance. The return value is the same as the JSON string representation. Returns: A string representation of this kubernetes.Container instance. ''' return self.AsJsonString() def AsJsonString(self): '''A JSON string representation of this kubernetes.Container instance. 
Returns: A JSON string representation of this kubernetes.Container instance. ''' return simplejson.dumps(self.AsDict(), sort_keys=True) def AsDict(self): ''' A dic representation of this kubernetes.Container instance. The return values uses the same key names as the JSON representation. Returns: A dict representing this kubernetes.Container instance ''' data = {} if self.Name: data['name'] = self.Name if self.Image: data['image'] = self.Image if self.Command: data['command'] = self.Command if self.WorkingDir: data['workingDir'] = self.WorkingDir if self.Ports: data['ports'] = [port.AsDict() for port in self.Ports] if self.Env: data['env'] = [env.AsDict() for env in self.Env] if self.Memory: data['memory'] = self.Memory if self.CPU: data['cpu'] = self.CPU if self.VolumeMounts: data['volumeMounts'] = [volumeMount.AsDict() for volumeMount in self.VolumeMounts] if self.LivenessProb: data['livenessProb'] = self.LivenessProb if self.Lifecycle: data['lifecycle'] = self.Lifecycle if self.Privileged: data['privileged'] = self.Privileged if self.ImagePullPolicy: data['imagePullPolicy'] = self.ImagePullPolicy return data @staticmethod def NewFromJsonDict(data): '''Create a new instance base on a JSON dict Args: data: A JSON dict, as converted from the JSON in the kubernetes API Returns: A kubernetes.Container instance ''' command = None ports = None env = None volumeMounts = None livenessProb = None lifecyle = None if 'command' in data: command = [c for c in data['command']] if 'ports' in data: from kubernetes import Port ports = [Port.NewFromJsonDict(port) for port in data['ports']] if 'env' in data: from kubernetes import EnvVar env = [EnvVar.NewFromJsonDict(e) for e in data['env']] if 'volumeMounts' in data: from kubernetes import VolumeMount volumeMounts = [VolumeMount.NewFromJsonDict(volumeMount) for volumeMount in data['volumeMounts']] if 'livenessProb' in data: from kubernetes import LivenessProb livenessProb = LivenessProb.NewFromJsonDict(data['livenessProb']) if 'lifecyle' in data: from kubernetes import Lifecyle lifecyle = Lifecyle.NewFromJsonDict(data['lifecyle']) return Container(Name=data.get('name', None), Image=data.get('image', None), Command=command, WorkingDir=data.get('workingDir', None), Ports=ports, Env=env, Memory=data.get('memory', None), CPU=data.get('cpu', None), VolumeMounts=volumeMounts, LivenessProb=livenessProb, Lifecyle=lifecyle, Privileged=data.get('privileged', False), ImagePullPolicy=data.get('imagePullPolicy', None)) class Handler(object): '''A Class representing the Handler structure used by the kubernetes API Handler defines a specific action that should be taken TODO: pass structured data to these actions, and document that data here. The Handler structure exposes the following properties: Handler.Exec Handler.HTTPGet ''' def __init__(self, **kwargs): '''An object to hold a Kubernetes Handler. One and only one of the following should be specified. Arg: Exec: Exec specifies the action to take. HTTPGet: HTTPGet specifies the http request to perform. ''' param_defaults = { 'Exec': None, 'HTTPGet': None} for (param, default) in param_defaults.iteritems(): setattr(self, param, kwargs.get(param, default)) def __ne__(self, other): return not self.__eq__(other) def __eq__(self, other): try: return other and \ self.Exec == other.Exec and \ self.HTTPGet == other.HTTPGet except AttributeError: return False def __str__(self): '''A string representation of this kubernetes.Handler instance. The return value is the same as the JSON string representation. 
Returns: A string representation of this kubernetes.Handler instance. ''' return self.AsJsonString() def AsJsonString(self): '''A JSON string representation of this kubernetes.Handler instance. Returns: A JSON string representation of this kubernetes.Handler instance. ''' return simplejson.dumps(self.AsDict(), sort_keys=True) def AsDict(self): ''' A dic representation of this kubernetes.Handler instance. The return values uses the same key names as the JSON representation. Returns: A dict representing this kubernetes.Handler instance ''' data = {} if self.Exec: data['exec'] = self.Exec.AsDict() if self.HTTPGet: data['httpGet'] = self.HTTPGet.AsDict() return data @staticmethod def NewFromJsonDict(data): '''Create a new instance base on a JSON dict Args: data: A JSON dict, as converted from the JSON in the kubernetes API Returns: A kubernetes.Handler instance ''' Exec = None httpGet = None if 'exec' in data: from kubernetes import ExecAction Exec = ExecAction.NewFromJsonDict(data['exec']) if 'httpGet' in data: from kubernetes import HTTPGetAction httpGet = HTTPGetAction.NewFromJsonDict(data['httpGet']) return Handler(Exec=Exec, HTTPGet=httpGet) class Lifecyle(object): '''A Class representing the Lifecyle structure used by the kubernetes API Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted. The Lifecyle structure exposes the following properties: Lifecyle.PostStart Lifecyle.PreStop ''' def __init__(self, **kwargs): '''An object to hold a Kubernetes Lifecyle. Arg: PostStart: PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted. PreStop: PreStop is called immediately before a container is terminated. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. ''' param_defaults = { 'PostStart': None, 'PreStop': None} for (param, default) in param_defaults.iteritems(): setattr(self, param, kwargs.get(param, default)) def __ne__(self, other): return not self.__eq__(other) def __eq__(self, other): try: return other and \ self.PostStart == other.PostStart and \ self.PreStop == other.PreStop except AttributeError: return False def __str__(self): '''A string representation of this kubernetes.Lifecyle instance. The return value is the same as the JSON string representation. Returns: A string representation of this kubernetes.Lifecyle instance. ''' return self.AsJsonString() def AsJsonString(self): '''A JSON string representation of this kubernetes.Lifecyle instance. Returns: A JSON string representation of this kubernetes.Lifecyle instance. ''' return simplejson.dumps(self.AsDict(), sort_keys=True) def AsDict(self): ''' A dic representation of this kubernetes.Lifecyle instance. The return values uses the same key names as the JSON representation. Returns: A dict representing this kubernetes.Lifecyle instance ''' data = {} if self.PostStart: data['postStart'] = self.PostStart.AsDict() if self.PreStop: data['preStop'] = self.PreStop.AsDict() return data @staticmethod def NewFromJsonDict(data): '''Create a new instance base on
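# A small round-trip sketch for the Container helpers defined above (note the
# classes as written target Python 2, e.g. dict.iteritems() in __init__).
# The concrete image name and resource values are illustrative only.
container_json = {
    'name': 'web',
    'image': 'nginx:1.7',
    'command': ['nginx', '-g', 'daemon off;'],
    'workingDir': '/usr/share/nginx/html',
    'cpu': 250,
    'memory': 134217728,
    'imagePullPolicy': PullPolicy.PullIfNotPresent,
}

container = Container.NewFromJsonDict(container_json)
print(container.Name)           # 'web'
print(container.AsJsonString()) # JSON using the same key names as the input dict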
ratio of the disk polling time to memory polling time. disk_to_memory_ratio = int(repy_constants.DISK_POLLING_HDD / memory_check_interval) # Which cycle number we're on counter = 0 # Elevate our priority, above normal is higher than the usercode, and is enough for disk/mem windows_api.set_current_thread_priority(windows_api.THREAD_PRIORITY_ABOVE_NORMAL) # need my pid to get a process handle... mypid = os.getpid() # run forever (only exit if an error occurs) while True: try: # Increment the interval counter counter += 1 # Check memory use, get the WorkingSetSize or RSS memused = windows_api.process_memory_info(mypid)['WorkingSetSize'] if memused > nanny.get_resource_limit("memory"): # We will be killed by the other thread... raise Exception, "Memory use '"+str(memused)+"' over limit '"+str(nanny.get_resource_limit("memory"))+"'" # Check if we should check the disk if (counter % disk_to_memory_ratio) == 0: # Check diskused diskused = compute_disk_use(repy_constants.REPY_CURRENT_DIR) if diskused > nanny.get_resource_limit("diskused"): raise Exception, "Disk use '"+str(diskused)+"' over limit '"+str(nanny.get_resource_limit("diskused"))+"'" # Sleep until the next iteration of checking the memory time.sleep(memory_check_interval) except windows_api.DeadProcess: # Process may be dead, or die while checking memory use # In any case, there is no reason to continue running, just exit harshexit.harshexit(99) except: tracebackrepy.handle_exception() print >> sys.stderr, "Nanny died! Trying to kill everything else" harshexit.harshexit(20) # Windows specific CPU Nanny Stuff winlastcpuinfo = [0,0] # Enforces CPU limit on Windows and Windows CE def win_check_cpu_use(cpulim, pid): global winlastcpuinfo # get use information and time... now = getruntime() # Get the total cpu time usertime = windows_api.get_process_cpu_time(pid) useinfo = [usertime, now] # get the previous time and cpu so we can compute the percentage oldusertime = winlastcpuinfo[0] oldnow = winlastcpuinfo[1] if winlastcpuinfo == [0,0]: winlastcpuinfo = useinfo # give them a free pass if it's their first time... return 0 # save this data for next time... winlastcpuinfo = useinfo # Get the elapsed time... elapsedtime = now - oldnow # This is a problem if elapsedtime == 0: return -1 # Error condition # percent used is the amount of change divided by the time... 
percentused = (usertime - oldusertime) / elapsedtime # Calculate amount of time to sleep for stoptime = nanny.calculate_cpu_sleep_interval(cpulim, percentused,elapsedtime) if stoptime > 0.0: # Try to timeout the process if windows_api.timeout_process(pid, stoptime): # Log the stoptime process_stopped_timeline.append((now, stoptime)) # Drop the first element if the length is greater than the maximum entries if len(process_stopped_timeline) > process_stopped_max_entries: process_stopped_timeline.pop(0) # Return how long we slept so parent knows whether it should sleep return stoptime else: # Process must have been making system call, try again next time return -1 # If the stop time is 0, then avoid calling timeout_process else: return 0.0 # Dedicated Thread for monitoring CPU, this is run as a part of repy class WinCPUNannyThread(threading.Thread): # Thread variables pid = 0 # Process pid def __init__(self): self.pid = os.getpid() threading.Thread.__init__(self,name="CPUNannyThread") def run(self): # Elevate our priority, set us to the highest so that we can more effectively throttle success = windows_api.set_current_thread_priority(windows_api.THREAD_PRIORITY_HIGHEST) # If we failed to get HIGHEST priority, try above normal, else we're still at default if not success: windows_api.set_current_thread_priority(windows_api.THREAD_PRIORITY_ABOVE_NORMAL) # Run while the process is running while True: try: # Get the frequency frequency = repy_constants.CPU_POLLING_FREQ_WIN # Base amount of sleeping on return value of # win_check_cpu_use to prevent under/over sleeping slept = win_check_cpu_use(nanny.get_resource_limit("cpu"), self.pid) if slept == -1: # Something went wrong, try again pass elif (slept < frequency): time.sleep(frequency-slept) except windows_api.DeadProcess: # Process may be dead harshexit.harshexit(97) except: tracebackrepy.handle_exception() print >> sys.stderr, "CPU Nanny died! Trying to kill everything else" harshexit.harshexit(25) ############## *nix specific functions (may include Mac) ############### # This method handles messages on the "diskused" channel from # the external process. When the external process measures disk used, # it is piped in and cached for calls to getresources. def IPC_handle_diskused(bytes): cached_disk_used = bytes # This method handles messages on the "repystopped" channel from # the external process. When the external process stops repy, it sends # a tuple with (TOS, amount) where TOS is time of stop (getruntime()) and # amount is the amount of time execution was suspended. def IPC_handle_stoptime(info): # Push this onto the timeline process_stopped_timeline.append(info) # Drop the first element if the length is greater than the max if len(process_stopped_timeline) > process_stopped_max_entries: process_stopped_timeline.pop(0) # Use a special class of exception for when # resource limits are exceeded class ResourceException(Exception): pass # Armon: Method to write a message to the pipe, used for IPC. # This allows the pipe to be multiplexed by sending simple dictionaries def write_message_to_pipe(writehandle, channel, data): """ <Purpose> Writes a message to the pipe <Arguments> writehandle: A handle to a pipe which can be written to. channel: The channel used to describe the data. Used for multiplexing. data: The data to send. <Exceptions> As with os.write() EnvironmentError will be thrown if os.write() sends 0 bytes, indicating the pipe is broken. 
""" # Construct the dictionary mesg_dict = {"ch":channel,"d":data} # Convert to a string mesg_dict_str = marshal.dumps(mesg_dict) # Make a full string mesg = str(len(mesg_dict_str)) + ":" + mesg_dict_str # Send this index = 0 while index < len(mesg): bytes = os.write(writehandle, mesg[index:]) if bytes == 0: raise EnvironmentError, "Write send 0 bytes! Pipe broken!" index += bytes # Armon: Method to read a message from the pipe, used for IPC. # This allows the pipe to be multiplexed by sending simple dictionaries def read_message_from_pipe(readhandle): """ <Purpose> Reads a message from a pipe. <Arguments> readhandle: A handle to a pipe which can be read from <Exceptions> As with os.read(). EnvironmentError will be thrown if os.read() returns a 0-length string, indicating the pipe is broken. <Returns> A tuple (Channel, Data) where Channel is used to multiplex the pipe. """ # Read until we get to a colon data = "" index = 0 # Loop until we get a message while True: # Read in data if the buffer is empty if index >= len(data): # Read 8 bytes at a time mesg = os.read(readhandle,8) if len(mesg) == 0: raise EnvironmentError, "Read returned empty string! Pipe broken!" data += mesg # Increment the index while there is data and we have not found a colon while index < len(data) and data[index] != ":": index += 1 # Check if we've found a colon if len(data) > index and data[index] == ":": # Get the message length mesg_length = int(data[:index]) # Determine how much more data we need more_data = mesg_length - len(data) + index + 1 # Read in the rest of the message while more_data > 0: mesg = os.read(readhandle, more_data) if len(mesg) == 0: raise EnvironmentError, "Read returned empty string! Pipe broken!" data += mesg more_data -= len(mesg) # Done, convert the message to a dict whole_mesg = data[index+1:] mesg_dict = marshal.loads(whole_mesg) # Return a tuple (Channel, Data) return (mesg_dict["ch"],mesg_dict["d"]) # This dictionary defines the functions that handle messages # on each channel. E.g. when a message arrives on the "repystopped" channel, # the IPC_handle_stoptime function should be invoked to handle it. IPC_HANDLER_FUNCTIONS = {"repystopped":IPC_handle_stoptime, "diskused":IPC_handle_diskused } # This thread checks that a process is alive and invokes # delegate methods when messages arrive on the pipe. class monitor_process_checker(threading.Thread): def __init__(self, readhandle): """ <Purpose> Terminates harshly if the monitor process dies before we do. <Arguments> readhandle: A file descriptor to the handle of a pipe to the monitor process. """ # Name our self threading.Thread.__init__(self, name="ProcessChecker") # Store the handle self.readhandle = readhandle def run(self): # Run forever while True: # Read a message try: mesg = read_message_from_pipe(self.readhandle) except Exception, e: break # Check for a handler function if mesg[0] in IPC_HANDLER_FUNCTIONS: # Invoke the handler function with the data handler = IPC_HANDLER_FUNCTIONS[mesg[0]] handler(mesg[1]) # Print a message if there is a message on an unknown channel else: print "[WARN] Message on unknown channel from monitor process:", mesg[0] ### We only leave the loop on a fatal error, so we need to exit now #
<filename>python/opscore/RO/TkUtil.py #!/usr/bin/env python """Tkinter utilities History: 2004-10-08 ROwen 2004-10-12 ROwen Modified getWindowingSystem to handle versions of Tk < ~8.4 2005-06-17 ROwen Added getButtonNumbers. 2005-07-07 ROwen Added TclFunc 2005-08-24 ROwen Expanded the docstring for TclFunc and made the tcl name a bit clearer. 2005-08-25 ROwen Removed useless __del__ from TclFunc and updated the documentation. 2005-09-12 ROwen Added EvtNoProp. 2006-10-25 ROwen Added addColors (based on scaleColor from opscore.RO.Wdg.WdgPrefs). Modified colorOK to use winfo_rgb. 2010-05-04 ROwen Added Geometry, including the ability to constrain a window's geometry to fit on screen. 2010-05-21 ROwen Bug fix: Geometry.toTkStr could include extent when it shouldn't. 2010-07-20 ROwen Added Timer class. 2011-06-16 ROwen Ditched obsolete "except (SystemExit, KeyboardInterrupt): raise" code 2012-07-09 ROwen Added Timer to __all__. 2012-11-16 ROwen Added getTclVersion function. 2013-10-07 ROwen Timer.start accepts keyword arguments for the callback function. 2014-07-21 ROwen Timer.__init__ accepts keyword arguments for the callback function. 2015-09-24 ROwen Replace "== None" with "is None" to modernize the code. """ __all__ = ['addColors', 'colorOK', 'EvtNoProp', 'getWindowingSystem', 'getTclVersion', 'TclFunc', 'Geometry', 'Timer', 'WSysAqua', 'WSysX11', 'WSysWin'] import re import sys import traceback from six.moves import tkinter import opscore.RO.OS # windowing system constants WSysAqua = "aqua" WSysX11 = "x11" WSysWin = "win32" # internal globals g_tkWdg = None g_winSys = None g_tkVersion = None def addColors(*colorMultPairs): """Add colors or scale a color. Inputs: - A list of one or more (color, mult) pairs. Returns sum of (R, G, B) * mult for each (color, mult) pair, with R, G, and B individually limited to range [0, 0xFFFF]. """ netRGB = [0, 0, 0] for color, mult in colorMultPairs: colorRGB = _getTkWdg().winfo_rgb(color) netRGB = [netRGB[ii] + (mult * colorRGB[ii]) for ii in range(3)] truncRGB = [max(min(int(val), 0xFFFF), 0) for val in netRGB] retColor = "#%04x%04x%04x" % tuple(truncRGB) #print "mixColors(%r); netRGB=%s; truncRGB=%s; retColor=%r" % (colorMultPairs, netRGB, truncRGB, retColor) return retColor def colorOK(colorStr): """Return True if colorStr is a valid tk color, False otherwise. """ tkWdg = _getTkWdg() try: tkWdg.winfo_rgb(colorStr) except tkinter.TclError: return False return True class EvtNoProp(object): """Function wrapper that prevents event propagation. Input: function to bind """ def __init__(self, func): self.func = func def __call__(self, *args, **kargs): self.func(*args, **kargs) return "break" def getButtonNumbers(): """Return the button numbers corresponding to the left, middle and right buttons. """ winSys = getWindowingSystem() if winSys == WSysAqua: return (1, 3, 2) else: return (1, 2, 3) def getTclVersion(): """Return the Tcl/Tk version as a string Returns the result of tcl command "info patchlevel". Some representative return values (from tcl documentation for tcl_patchLevel): 8.4.16 8.5b3 """ global g_tkVersion if g_tkVersion is None: tkWdg = _getTkWdg() g_tkVersion = tkWdg.tk.call("info", "patchlevel") return g_tkVersion def getWindowingSystem(): """Return the Tk window system. Returns one of: - WSysAqua: the MacOS X native system - WSysX11: the unix windowing system - WSysWin: the Windows windowing system Other values might also be possible. Please don't call this until you have started Tkinter with Tkinter.Tk(). 
Warning: windowingsystem is a fairly recent tk command; if it is not available then this code does its best to guess but will not guess aqua. """ global g_winSys if not g_winSys: tkWdg = _getTkWdg() try: g_winSys = tkWdg.tk.call("tk", "windowingsystem") except tkinter.TclError: # windowingsystem not supported; take a best guess if opscore.RO.OS.PlatformName == "win": g_winSys = "win32" else: g_winSys = "x11" return g_winSys #class TkAdapter: #_tkWdg = None #def __init__(self): #if self._tkWdg is None: #self._tkWdg = self._getTkWdg() #self.funcDict = {} #def after(*args): #self._tkWdg.after(*args) #def register(self, func): #"""Register a function as a tcl function. #Returns the name of the tcl function. #Be sure to deregister the function when done #or delete the TkAdapter #""" #funcObj = TclFunc(func) #funcName = funcObj.tclFuncName #self.funcDict[funcName] = funcObj #return funcName #def deregister(self, funcName): #"""Deregister a tcl function. #Raise KeyError if function not found. #""" #func = self.funcDict.pop(funcName) #func.deregister() #def eval(self, *args): #"""Evaluate an arbitrary tcl expression and return the result""" #return self._tkWdg.tk.eval(*args) #def call(self, *args): #"""Call a tcl function""" #return self._tkWdg.tk.call(*args) class TclFunc: """Register a python function as a tcl function. Based on Tkinter's _register method (which, being private, I prefer not to use explicitly). If the function call fails, a traceback is printed. Please call deregister when you no longer want the tcl function to exist. """ tkApp = None def __init__(self, func, debug=False): if self.tkApp is None: self.tkApp = _getTkWdg().tk self.func = func self.tclFuncName = "pyfunc%s" % (id(self),) self.debug = bool(debug) try: self.tclFuncName += str(func.__name__) except AttributeError: pass if self.debug: print("registering tcl function %s for python function %s" % (self.tclFuncName, func)) self.tkApp.createcommand(self.tclFuncName, self) def __call__(self, *args): try: self.func(*args) except Exception as e: sys.stderr.write("tcl function %s failed: %s\n" % (self.tclFuncName, e)) traceback.print_exc(file=sys.stderr) def deregister(self): """Deregister callback and delete reference to python function. Safe to call if already deregistered. """ if self.debug: print("%r.deregister()" % (self,)) if not self.func: if self.debug: print("already deregistered") return try: self.tkApp.deletecommand(self.tclFuncName) except tkinter.TclError as e: if self.debug: print("deregistering failed: %r" % (e,)) pass self.func = None def __repr__(self): return "%s(%s)" % (self.__class__.__name__, self.tclFuncName) def __str__(self): return self.tclFuncName class Geometry(object): """A class representing a tk geometry Fields include the following two-element tuples: - offset: x,y offset of window relative to screen; see also offsetFlipped - offsetFlipped: is the meaning of x,y offset flipped? if False (unflipped) then offset is the distance from screen top/left to window top/left if True (flipped) offset is the distance from window bottom/right to screen bottom/right - extent: x,y extent; always positive or (None, None) if extent is unknown System constants: - minCorner: minimum visible offset position (platform-dependent) - screenExtent: x,y extent of all screens put together (if the screens are not the same size and arranged side by side then the area will include pixels that are not visible) WARNING: on some platforms offsetFlipped < 0 is not handled properly. 
In particular on Mac OS X with Tk 8.4: - the offset is actually relative to the top or right offset of the window, which is dead wrong - setting the geometry for a window with ngeative offset offset may simply not work, resulting in a geometry that is not what you asked for (I have particularly seen this for windows nearly as large as the screen) That is why the constrainToGeomStr method always returns a tk geometry string with positive corners. """ if opscore.RO.OS.PlatformName == "mac": minCorner = (0, 22) else: minCorner = (0, 0) _root = None _geomRE = re.compile( r"((?P<width>\d+)x(?P<height>\d+))?(?P<xsign>[+-])(?P<x>[-]?\d+)(?P<ysign>[+-])(?P<y>[-]?\d+)$", re.IGNORECASE) def __init__(self, offset, offsetFlipped, extent): """Create a new Geometry Inputs (each is a sequence of two values): - offset: x,y offset of window relative to screen; see also offsetFlipped - offsetFlipped: is the meaning of x,y offset flipped? if False (unflipped) then offset is the distance from screen top/left to window top/left if True (flipped) offset is the distance from window bottom/right to screen bottom/right - extent: x,y extent; you may specify None or (None, None) if the extent is unknown; however, you may not specify an integer for one axis and None for the other raise RuntimeError if any input does not have two elements (except that extent may be None) """ if len(offset) != 2: raise RuntimeError("offset=%r does not have two values" % (offset,)) self.offset = tuple(int(val) for val in offset) if len(offsetFlipped) != 2: raise RuntimeError("offsetFlipped=%r does not have two values" % (offsetFlipped,)) self.offsetFlipped = tuple(bool(val) for val in offsetFlipped) if extent is None: self.extent = (None, None) else: if len(extent) != 2: raise RuntimeError("extent=%r does not have two values" % (extent,)) if None in extent: self.extent = (None, None) else: self.extent = tuple(int(val) for val in extent) @classmethod def fromTkStr(cls, geomStr): """Create a Geometry from a tk geometry string Inputs: - geomStr: tk geometry string """ match = cls._geomRE.match(geomStr) if not match: raise RuntimeError("Could not parse geomStr string %r" % (geomStr,)) groupDict = match.groupdict() return cls( offset = tuple(groupDict[name] for name in ("x", "y")), offsetFlipped = tuple(cls._flippedFromChar(groupDict[name]) for name in ("xsign", "ysign")), extent = tuple(groupDict[name] for name in ("width", "height")), ) def constrained(self, constrainExtent=True, defExtent=50): """Return a geometry that is constrain to lie entirely within the screen(s) Inputs: - constrainExtent: if True then the extent and offset position are both constrained
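# A short usage sketch for two helpers defined above. addColors needs a live Tk
# interpreter (it resolves color names through winfo_rgb), so a root window is
# created first; Geometry.fromTkStr only parses the geometry string.
from six.moves import tkinter

root = tkinter.Tk()

# Sum of (R, G, B) * mult per pair, each channel clipped to 0..0xFFFF,
# returned as a '#rrrrggggbbbb' 16-bit-per-channel color string.
blended = addColors(("gray", 0.75), ("red", 0.25))
print(blended)

geom = Geometry.fromTkStr("800x600+50-20")
print(geom.extent)         # (800, 600)
print(geom.offsetFlipped)  # (False, True): per the docstring, '-' means measured from the bottom edge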
labels[i].name + ' - The columns: ' + el + 'is missing. The columns allowed are: label, usecase.' return message if len(list_db_col) != len(cols): message = 'LABELS FILE - ' + labels[i].name + ' - The columns allowed are: label, usecase. If you inserted more (less) columns please, remove (add) them.' return message if df.shape[0] == 0: message = 'LABELS FILE - You must provide at least a row.' return message else: df_dup = df[df.duplicated(subset=['label', 'usecase'], keep=False)] if df_dup.shape[0] > 0: message = 'WARNING LABELS FILE - ' + labels[i].name + ' - The rows: ' + str(df_dup.index.to_list()) + ' are duplicated. The duplicates will be ignored.' for ind in range(df.shape[0]): cursor.execute('SELECT COUNT(*) FROM annotation_label WHERE label = %s AND name = %s', [str(df.loc[ind, 'label']), str(df.loc[ind, 'usecase'])]) num = cursor.fetchone() if num[0] > 0: message = 'WARNING LABELS FILE - ' + labels[i].name + ' - The label: ' + str(df.loc[ind, 'label']) + ' for the use case: ' + str(df.loc[ind, 'usecase']) + ' is already present in the database. It will be ignored.' el = '' if None in df['usecase'].tolist(): el = 'usecase' elif None in df['label'].tolist(): el = 'label' if el != '': lista = df[el].tolist() ind = lista.index(None) message = 'LABELS FILE - ' + labels[i].name + ' - The column ' + el + ' is empty at the row: ' + str(ind) + ' .' return message distinct_label_usecase = df['usecase'].unique() for el in distinct_label_usecase: if el in ['colon', 'uterine cervix', 'lung']: bool_arr = check_exa_lab_conc_only(str(el)) if bool_arr[0] == True: message = ' WARNING LABELS FILE - ' + labels[i].name + ' - You are using EXAMODE labels for the use case ' + str(el) +\ '. Uploading new labels will remove the existing ones together with all the annotatiions. The action is irreversible.' if el not in distinct_uc_report: message = 'WARNING LABELS FILE - ' + labels[i].name + ' - The file contains the labels for ' + el + ' which has 0 reports associated.' return message elif len(pubmedfiles) > 0: message = '' for i in range(len(pubmedfiles)): if not pubmedfiles[i].name.endswith('csv'): message = 'PUBMED FILE - ' + pubmedfiles[i].name + ' - The file must be .csv' return message try: df = pd.read_csv(pubmedfiles[i]) df = df.where(pd.notnull(df), None) df = df.reset_index(drop=True) except Exception as e: message = 'PUBMED FILE - ' + reports[ i].name + ' - An error occurred while parsing the csv. Check if it is well formatted. ' return message else: cols = list(df.columns) if 'usecase' in cols: df['usecase']=df['usecase'].str.lower() list_db_col = ['ID', 'usecase'] for el in list_db_col: if el not in cols: message = 'PUBMED FILE - ' + pubmedfiles[i].name + ' - The column: ' + str( el) + ' must be present.' return message for el in cols: if el not in list_db_col: message = 'PUBMED FILE - ' + pubmedfiles[i].name + ' - The column: ' + str(el) + ' is not allowed.' return message if df.shape[0] == 0: message = 'PUBMED FILE - ' + pubmedfiles[i].name + ' - You must provide at least a report.' return message else: df_dup = df[df.duplicated(subset=['ID', 'usecase'], keep=False)] if df_dup.shape[0] > 0: message = 'WARNING PUBMED FILE - ' + pubmedfiles[i].name + ' - The rows: ' + str(df_dup.index.to_list()) + ' are duplicated. The duplicates are ignored.' 
for ind in range(df.shape[0]): found = False id_report = 'PUBMED_'+str(df.loc[ind, 'ID']) cursor.execute('SELECT COUNT(*) FROM report WHERE id_report = %s AND language in %s', [str(id_report), tuple(languages)]) num = cursor.fetchone() if num[0] > 0: message = 'WARNING PUBMED FILE - ' + pubmedfiles[i].name + ' - The report: ' + str(id_report) + ' is already present in the database. It will be ignored.' for el in list_db_col: if df.loc[ind, el] is not None: found = True break if found == False: message = 'PUBMED FILE - ' + pubmedfiles[i].name + ' - The report at row ' + str(ind) + ' has the columns: ' + ', '.join(list_db_col) + ' empty. Provide a value for at least one of these columns.' return message el = '' if None in df['usecase'].tolist(): el = 'usecase' elif None in df['ID'].tolist(): el = 'institute' if el != '': lista = df[el].tolist() ind = lista.index(None) message = 'PUBMED FILE - ' + pubmedfiles[ i].name + ' - The column ' + el + ' is empty at the row: ' + str(ind) + '.' return message elif len(reports) > 0: message = '' for i in range(len(reports)): if not reports[i].name.endswith('csv'): message = 'REPORTS FILE - ' + reports[i].name + ' - The file must be .csv' return message try: df = pd.read_csv(reports[i]) df = df.where(pd.notnull(df), None) df = df.reset_index(drop=True) except Exception as e: message = 'REPORTS FILE - ' + reports[ i].name + ' - An error occurred while parsing the csv. Check if it is well formatted. ' return message else: cols = list(df.columns) count = 0 if 'usecase' in cols: df['usecase']=df['usecase'].str.lower() if 'institute' in cols: df['institute']=df['institute'].str.lower() if 'language' in cols: df['language']=df['language'].str.lower() # print(cols) list_db_col = ['id_report', 'institute', 'usecase', 'language'] for el in list_db_col: if el not in cols: message = 'REPORTS FILE - ' + reports[i].name + ' - The column: ' + str(el) + ' must be present.' return message if 'usecase' in cols: df['usecase']=df['usecase'].str.lower() if 'institute' in cols: df['institute']=df['institute'].str.lower() if 'language' in cols: df['language']=df['language'].str.lower() list_not_db_col = [] for el in cols: if el not in list_db_col: list_not_db_col.append(el) if jsonDispUp is not None and jsonAnnUp is not None: if len(disp) > 0 or len(ann) > 0: ann_intersect = list(set(ann) & set(list_not_db_col)) for el in list_not_db_col: if (el not in disp and el not in ann) and (el not in jsonDispUp and el not in jsonAnnUp): count = count + 1 if count == len(list_not_db_col): message = 'REPORT FIELDS - Please, provide at least one field to display in file: ' + \ reports[i].name + '. Be careful that if you do not provide one field to annotate you will not be able to perform mention annotation and linking.' return message elif len(ann_intersect) == 0 and (jsonAnnUp[0]) == '': message = 'WARNING REPORT FIELDS - file: ' + reports[ i].name + ' Please, provide at least one field to annotate if you want to find mentions and perform linking.' if len(list_not_db_col) == 0: message = 'REPORTS FILE - ' + reports[i].name + ' - You must provide at least one column other than institute, usecase, language, id_report' return message if df.shape[0] == 0: message = 'REPORTS FILE - ' + reports[i].name + ' - You must provide at least a report.' return message else: df_dup = df[df.duplicated(subset=['id_report', 'language'], keep=False)] if df_dup.shape[0] > 0: message = 'WARNING REPORTS FILE - ' + reports[i].name + ' - The rows: ' + str( df_dup.index.to_list()) + ' are duplicated. 
The duplicates are ignored.' for ind in range(df.shape[0]): found = False cursor.execute('SELECT COUNT(*) FROM report WHERE id_report = %s AND language = %s', [str(df.loc[ind, 'id_report']), str(df.loc[ind, 'language'])]) num = cursor.fetchone() if num[0] > 0: message = 'WARNING REPORT FILE - ' + reports[i].name + ' - The report: ' + str( df.loc[ind, 'id_report']) + ' for the language: ' + str(df.loc[ind, 'language']) + ' is already present in the database. It will be ignored.' for el in list_db_col: if df.loc[ind, el] is not None: found = True break if found == False: message = 'REPORTS FILE - ' + reports[i].name + ' - The report at row ' + str( ind) + ' has the columns: ' + ', '.join( list_db_col) + ' empty. Provide a value for at least one of these columns.' return message found = False count_both = 0 not_none_cols = [] for el in list_not_db_col: if df.loc[ind, el] is not None: found = True not_none_cols.append(el) if found == False: message = 'REPORTS
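# A minimal standalone sketch of the duplicate check used repeatedly above:
# with keep=False, pandas flags every copy of a duplicated (id_report, language)
# pair, and the row indexes are collected for the warning message.
import pandas as pd

df = pd.DataFrame({
    'id_report': ['r1', 'r2', 'r1', 'r3'],
    'language':  ['en', 'en', 'en', 'it'],
    'text':      ['first copy', 'unique', 'second copy', 'unique'],
})

df_dup = df[df.duplicated(subset=['id_report', 'language'], keep=False)]
if df_dup.shape[0] > 0:
    # Mirrors the warning built above; rows 0 and 2 share the same (id_report, language).
    print('WARNING - duplicated rows: ' + str(df_dup.index.to_list()))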
0.0, px_amt) return True def kp_pan_px_center(self, viewer, event, data_x, data_y, msg=True): """This pans so that the cursor is over the center of the current pixel.""" if not self.canpan: return False self.pan_center_px(viewer) return True def kp_center(self, viewer, event, data_x, data_y): if self.canpan: viewer.center_image() return True def kp_zoom_out(self, viewer, event, data_x, data_y, msg=True): if self.canzoom: msg = self.settings.get('msg_zoom', msg) viewer.zoom_out() if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=1.0) return True def kp_zoom_in(self, viewer, event, data_x, data_y, msg=True): if self.canzoom: msg = self.settings.get('msg_zoom', msg) viewer.zoom_in() if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=1.0) return True def kp_zoom(self, viewer, event, data_x, data_y, msg=True): if self.canzoom: msg = self.settings.get('msg_zoom', msg) keylist = self.settings.get('kp_zoom') try: zoomval = (keylist.index(event.key)) except IndexError: return False viewer.zoom_to(zoomval) if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=1.0) return True def kp_zoom_inv(self, viewer, event, data_x, data_y, msg=True): if self.canzoom: msg = self.settings.get('msg_zoom', msg) keylist = self.settings.get('kp_zoom_inv') try: zoomval = - (keylist.index(event.key)) except IndexError: return False viewer.zoom_to(zoomval) if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=1.0) return True def kp_zoom_fit(self, viewer, event, data_x, data_y, msg=True): if self.canzoom: msg = self.settings.get('msg_zoom', msg) viewer.zoom_fit() if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=1.0) return True def kp_autozoom_toggle(self, viewer, event, data_x, data_y, msg=True): if self.canzoom: msg = self.settings.get('msg_zoom', msg) val = viewer.get_settings().get('autozoom') if val == 'off': val = 'on' else: val = 'off' viewer.enable_autozoom(val) if msg: viewer.onscreen_message('Autozoom %s' % val, delay=1.0) return True def kp_autozoom_override(self, viewer, event, data_x, data_y, msg=True): if self.canzoom: msg = self.settings.get('msg_zoom', msg) viewer.enable_autozoom('override') if msg: viewer.onscreen_message('Autozoom Override', delay=1.0) return True def kp_cut_255(self, viewer, event, data_x, data_y, msg=True): if self.cancut: msg = self.settings.get('msg_cuts', msg) viewer.cut_levels(0.0, 255.0, no_reset=True) return True def kp_cut_minmax(self, viewer, event, data_x, data_y, msg=True): if self.cancut: msg = self.settings.get('msg_cuts', msg) image = viewer.get_image() mn, mx = image.get_minmax(noinf=True) viewer.cut_levels(mn, mx, no_reset=True) return True def kp_cut_auto(self, viewer, event, data_x, data_y, msg=True): if self.cancut: msg = self.settings.get('msg_cuts', msg) if msg: viewer.onscreen_message("Auto cut levels", delay=1.0) viewer.auto_levels() return True def kp_autocuts_toggle(self, viewer, event, data_x, data_y, msg=True): if self.cancut: msg = self.settings.get('msg_cuts', msg) val = viewer.get_settings().get('autocuts') if val == 'off': val = 'on' else: val = 'off' viewer.enable_autocuts(val) if msg: viewer.onscreen_message('Autocuts %s' % val, delay=1.0) return True def kp_autocuts_override(self, viewer, event, data_x, data_y, msg=True): if self.cancut: msg = self.settings.get('msg_cuts', msg) viewer.enable_autocuts('override') if msg: viewer.onscreen_message('Autocuts Override', delay=1.0) return True def kp_autocuts_alg_prev(self, viewer, event, data_x, data_y, msg=True): self._cycle_cuts_alg(viewer, msg, 
direction='up') return True def kp_autocuts_alg_next(self, viewer, event, data_x, data_y, msg=True): self._cycle_cuts_alg(viewer, msg, direction='down') return True def kp_autocenter_toggle(self, viewer, event, data_x, data_y, msg=True): if self.canpan: msg = self.settings.get('msg_pan', msg) val = viewer.get_settings().get('autocenter') if val == 'off': val = 'on' else: val = 'off' viewer.set_autocenter(val) if msg: viewer.onscreen_message('Autocenter %s' % val, delay=1.0) return True def kp_autocenter_override(self, viewer, event, data_x, data_y, msg=True): if self.canpan: msg = self.settings.get('msg_pan', msg) viewer.set_autocenter('override') if msg: viewer.onscreen_message('Autocenter Override', delay=1.0) return True def kp_contrast_restore(self, viewer, event, data_x, data_y, msg=True): if self.cancmap: msg = self.settings.get('msg_cmap', msg) self.restore_contrast(viewer, msg=msg) return True def kp_flip_x(self, viewer, event, data_x, data_y, msg=True): if self.canflip: msg = self.settings.get('msg_transform', msg) flipX, flipY, swapXY = viewer.get_transforms() if event.key == '[': flipx = not flipX else: flipx = False viewer.transform(flipx, flipY, swapXY) if msg: viewer.onscreen_message("Flip X=%s" % flipx, delay=1.0) return True def kp_flip_y(self, viewer, event, data_x, data_y, msg=True): if self.canflip: msg = self.settings.get('msg_transform', msg) flipX, flipY, swapXY = viewer.get_transforms() if event.key == ']': flipy = not flipY else: flipy = False viewer.transform(flipX, flipy, swapXY) if msg: viewer.onscreen_message("Flip Y=%s" % flipy, delay=1.0) return True def kp_swap_xy(self, viewer, event, data_x, data_y, msg=True): if self.canflip: msg = self.settings.get('msg_transform', msg) flipX, flipY, swapXY = viewer.get_transforms() if event.key == 'backslash': swapxy = not swapXY else: swapxy = False viewer.transform(flipX, flipY, swapxy) if msg: viewer.onscreen_message("Swap XY=%s" % swapxy, delay=1.0) return True def kp_transform_reset(self, viewer, event, data_x, data_y): if self.canflip: viewer.transform(False, False, False) return True def kp_dist(self, viewer, event, data_x, data_y, msg=True): self._cycle_dist(viewer, msg) return True def kp_dist_reset(self, viewer, event, data_x, data_y, msg=True): self._reset_dist(viewer, msg) return True def kp_dist_prev(self, viewer, event, data_x, data_y, msg=True): self._cycle_dist(viewer, msg, direction='up') return True def kp_dist_next(self, viewer, event, data_x, data_y, msg=True): self._cycle_dist(viewer, msg, direction='down') return True def kp_cmap_reset(self, viewer, event, data_x, data_y, msg=True): self._reset_cmap(viewer, msg) return True def kp_cmap_restore(self, viewer, event, data_x, data_y, msg=True): self.restore_colormap(viewer, msg) return True def kp_cmap_invert(self, viewer, event, data_x, data_y, msg=True): self._invert_cmap(viewer, msg) return True def kp_cmap_prev(self, viewer, event, data_x, data_y, msg=True): self._cycle_cmap(viewer, msg, direction='up') return True def kp_cmap_next(self, viewer, event, data_x, data_y, msg=True): self._cycle_cmap(viewer, msg, direction='down') return True def kp_toggle_cbar(self, viewer, event, data_x, data_y, msg=True): canvas = viewer.get_private_canvas() # canvas already has a color bar? 
objs = list(canvas.get_objects_by_kinds(('colorbar', 'drawablecolorbar'))) tf = (len(objs) == 0) viewer.show_color_bar(tf) return True def kp_imap_reset(self, viewer, event, data_x, data_y, msg=True): self._reset_imap(viewer, msg) return True def kp_imap_prev(self, viewer, event, data_x, data_y, msg=True): self._cycle_imap(viewer, msg, direction='up') return True def kp_imap_next(self, viewer, event, data_x, data_y, msg=True): self._cycle_imap(viewer, msg, direction='down') return True def kp_rotate_reset(self, viewer, event, data_x, data_y): if self.canrotate: viewer.rotate(0.0) return True def kp_rotate_inc90(self, viewer, event, data_x, data_y, msg=True): if self.canrotate: self._rotate_inc(viewer, 90.0, msg=msg) return True def kp_rotate_dec90(self, viewer, event, data_x, data_y, msg=True): if self.canrotate: self._rotate_inc(viewer, -90.0, msg=msg) return True def kp_orient_lh(self, viewer, event, data_x, data_y, msg=True): if self.canrotate: self._orient(viewer, righthand=False, msg=msg) return True def kp_orient_rh(self, viewer, event, data_x, data_y, msg=True): if self.canrotate: self._orient(viewer, righthand=True, msg=msg) return True def kp_reset(self, viewer, event, data_x, data_y): self.reset(viewer) return True def _toggle_lock(self, viewer, mode_type): bm = viewer.get_bindmap() # toggle default mode type to locked/oneshot dfl_modetype = bm.get_default_mode_type() # get current mode mode_name, cur_modetype = bm.current_mode() if dfl_modetype in ('locked', 'softlock'): if mode_type == dfl_modetype: mode_type = 'oneshot' # install the lock type bm.set_default_mode_type(mode_type) bm.set_mode(mode_name, mode_type=mode_type) def kp_lock(self, viewer, event, data_x, data_y): self._toggle_lock(viewer, 'locked') return True def kp_softlock(self, viewer, event, data_x, data_y): self._toggle_lock(viewer, 'softlock') return True def kp_save_profile(self, viewer, event, data_x, data_y, msg=True): viewer.checkpoint_profile() if msg: viewer.onscreen_message("Profile saved", delay=0.5) return True ##### MOUSE ACTION CALLBACKS ##### ## def ms_none(self, viewer, event, data_x, data_y): ## return False ## def ms_cursor(self, viewer, event, data_x, data_y): ## return False ## def ms_wheel(self, viewer, event, data_x, data_y): ## return False ## def ms_draw(self, viewer, event, data_x, data_y): ## return False def ms_zoom(self, viewer, event, data_x, data_y, msg=True): """Zoom the image by dragging the cursor left or right. """ if not self.canzoom: return True msg = self.settings.get('msg_zoom', msg) x, y = self.get_win_xy(viewer) if event.state == 'move': self._zoom_xy(viewer, x, y) elif event.state == 'down': if msg: viewer.onscreen_message("Zoom (drag mouse L-R)", delay=1.0) self._start_x, self._start_y = x, y else: viewer.onscreen_message(None) return True def _scale_adjust(self, factor, event_amt, zoom_accel, max_limit=None): # adjust scale by factor, amount encoded in event and zoom acceleration value amount = factor - ((factor - 1.0) * (1.0 - min(event_amt, 15.0) / 15.0) * zoom_accel) amount = max(1.000000001, amount) if max_limit is not None: amount = min(amount, max_limit) return amount def ms_zoom_in(self, viewer, event, data_x, data_y, msg=False): """Zoom in one level by a mouse click. 
""" if not self.canzoom: return True if not (event.state == 'down'): return True with viewer.suppress_redraw: viewer.panset_xy(data_x, data_y) if self.settings.get('scroll_zoom_direct_scale', True): zoom_accel = self.settings.get('scroll_zoom_acceleration', 1.0) # change scale by 100% amount = self._scale_adjust(2.0, 15.0, zoom_accel, max_limit=4.0) self._scale_image(viewer, 0.0, amount, msg=msg) else: viewer.zoom_in() if hasattr(viewer, 'center_cursor'): viewer.center_cursor() if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=1.0) return True def ms_zoom_out(self, viewer, event, data_x, data_y, msg=False): """Zoom out one level by a mouse click. """ if not self.canzoom: return True if not (event.state == 'down'): return True with viewer.suppress_redraw: # TODO: think about whether it is the correct behavior to # set the pan position when zooming out #viewer.panset_xy(data_x, data_y) if self.settings.get('scroll_zoom_direct_scale', True): zoom_accel = self.settings.get('scroll_zoom_acceleration', 1.0) # change scale by 100% amount = self._scale_adjust(2.0, 15.0, zoom_accel, max_limit=4.0) self._scale_image(viewer, 180.0, amount, msg=msg) else: viewer.zoom_out() if hasattr(viewer, 'center_cursor'): viewer.center_cursor() if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=1.0) return True def ms_rotate(self, viewer, event, data_x, data_y, msg=True): """Rotate the image by dragging the cursor left or right. """ if not self.canrotate: return True msg = self.settings.get('msg_rotate', msg) x, y = self.get_win_xy(viewer) if event.state == 'move': self._rotate_xy(viewer, x, y) elif event.state == 'down': if msg: viewer.onscreen_message("Rotate (drag around center)", delay=1.0) self._start_x, self._start_y = x, y self._start_rot = viewer.get_rotation() else: viewer.onscreen_message(None) return True def ms_rotate_reset(self, viewer, event, data_x, data_y,
from pandac.PandaModules import loadPrcFileData loadPrcFileData('', 'win-size 640 480') # Window size loadPrcFileData('', 'win-fixed-size #t') # Window is a fixed size loadPrcFileData('', 'textures-auto-power-2 1') loadPrcFileData('', 'textures-power-2 up') loadPrcFileData('', 'load-file-type p3assimp') from direct.showbase.ShowBase import ShowBase from panda3d.core import CollisionTraverser, CollisionNode from panda3d.core import CollisionHandlerQueue, CollisionRay, TransparencyAttrib from panda3d.core import AmbientLight, DirectionalLight, Vec4, Vec3, Point2 from panda3d.core import CardMaker, Texture, PTAUchar, CPTAUchar, BitMask32 from panda3d.vision import ARToolKit from direct.showbase.DirectObject import DirectObject from direct.task.Task import Task from pieces import create_piece from config import * import Ray import multiprocessing import numpy as np import sys import cv2 import time class ChessboardDemo(ShowBase): def __init__(self): ShowBase.__init__(self) self.disableMouse() # Setting up webcam image self.webcam = Webcam() self.ar2 = ARToolKit.make(self.cam, "data/camera_para.dat", 1) self.cam.node().getDisplayRegion(0).setSort(20) # Creating the anchor to the marker self.anchor = self.render.attachNewNode("Anchor node") self.anchor.reparent_to(render) self.ar2.attachPattern("data/marker.patt", self.anchor) # Setting up lighting alight = AmbientLight('ambientLight') alight.setColor(Vec4(0.4, 0.4, 0.4, 1)) alightNP = render.attachNewNode(alight) dlight = DirectionalLight('directionalLight') dlight.setDirection(Vec3(-1, 1, -1)) alight.setColor(Vec4(0.4, 0.4, 0.4, 1)) dlightNP = render.attachNewNode(dlight) render.setLightOff() render.setLight(alightNP) render.setLight(dlightNP) # Setting up players self.humans = HUMANS self.ais = AIS self.ai_queue = multiprocessing.Queue() # 1 = white, -1 = black self.turn = 1 # 0 = no check, 1 = white, -1 = black self.check = 0 self.can_move = True self.gameover = False self.texture_black = self.loader.loadTexture(TEXTURE_BLACK) self.texture_white = self.loader.loadTexture(TEXTURE_WHITE) self.setup_collision() self.create_board() self.moves = self.get_valid_moves() # Currently highlighted square (list [x,y,z]) self.hiSq = False # Piece we are currently selecting self.dragging = False # Events taskMgr.add(self.update_webcam, 'cam') taskMgr.add(self.ai_move, 'ai') taskMgr.add(self.mouseover, 'mouseover') self.accept("mouse1", self.left_click) self.accept("mouse3", self.right_click) self.accept('escape', sys.exit) # Escape closes the window def create_board(self): # C++ object containing the actual chessboard self.board = Ray.Chess_AI(np.ascontiguousarray(np.array(BOARD)), self.turn, PAWN_2STEP) # Array containing the piece objects we are going to draw self.board_array = np.transpose(np.array(BOARD)) self.draw_pieces = np.full_like(self.board_array, None, dtype=np.object) self.draw_squares = np.zeros_like(self.board_array, dtype=np.object) max_x, max_y, max_z = BOARD_SIZE # Creates the 3D objects (from the file pieces.py) and the squares for z in range(max_z): for y in range(max_y): for x in range(max_x): if self.board_array[x,y,z] != 0: self.draw_pieces[x,y,z] = create_piece(self.board_array[x,y,z], [x,y,z], self) # Load, parent, color, and position the model (a single square polygon) self.draw_squares[x,y,z] = loader.loadModel("models/square") self.draw_squares[x,y,z].reparentTo(self.anchor) self.draw_squares[x,y,z].setScale(SCALE) self.draw_squares[x,y,z].setPos(square_position(x,y,z, BOARD_SIZE)) 
self.draw_squares[x,y,z].setColor(square_color(x,y,z)) # The bottom one is solid, the rest are a little translucid if z > 0: self.draw_squares[x,y,z].setTransparency(TransparencyAttrib.MAlpha) self.draw_squares[x,y,z].setAlphaScale(0.75) # Set the model itself to be collideable with the ray. self.draw_squares[x,y,z].find("**/polygon").node().setIntoCollideMask(BitMask32.bit(1)) # Set a tag on the square's node so we can look up what square this self.draw_squares[x,y,z].find("**/polygon").node().setTag('square', ','.join([str(dim) for dim in (x,y,z)])) def setup_collision(self): self.picker = CollisionTraverser() # Make a traverser self.pq = CollisionHandlerQueue() # Make a handler # Make a collision node for our picker ray self.pickerNode = CollisionNode('mouseRay') # Attach that node to the camera since the ray will need to be positioned # relative to it self.pickerNP = camera.attachNewNode(self.pickerNode) # Everything to be picked will use bit 1. This way if we were doing other # collision we could separate it self.pickerNode.setFromCollideMask(BitMask32.bit(1)) self.pickerRay = CollisionRay() # Add it to the collision node self.pickerNode.addSolid(self.pickerRay) # Register the ray as something that can cause collisions self.picker.addCollider(self.pickerNP, self.pq) def ai_move(self, task): if self.turn in self.ais and not self.gameover: if self.can_move is True: # Start the thinking process self.can_move = False recursions = 1 if len(self.moves) < 30: recursions = 2 if len(self.moves) < 12: recursions = 3 if TEST is True: print(f'doing {recursions} recursions') self.start = time.time() make_ai_think = multiprocessing.Process(target=ai, args=(self.board, self.ai_queue, recursions)) make_ai_think.start() else: # The AI function will put the move in this queue when it figures it out if not self.ai_queue.empty(): if TEST is True: print(f'Took {time.time()-self.start}.') piece, move = self.ai_queue.get() self.can_move = False self.move_pieces(piece, move) self.turn = 1 if self.turn == -1 else -1 new_array = np.ascontiguousarray(np.transpose(self.board_array)) self.board.set_board(new_array, self.turn) # This hides the check on the player's king if there was one self.hide_possible_moves() self.moves = self.get_valid_moves() self.can_move = True if TEST is True: print('AI moved') return Task.cont def update_webcam(self, task): self.can_get_image = False self.webcam_texture = self.webcam.step() self.ar2.analyze(self.webcam_texture) self.can_get_image = True return Task.cont def move_pieces(self, a, b, move_model=True): # Move the 3D model of the piece and update its square variable # Also delete the other one... 
if move_model is True: # If there is a piece on the new location if self.draw_pieces[b[0], b[1], b[2]] is not None: # We delete it self.draw_pieces[b[0], b[1], b[2]].obj.removeNode() # We move the piece to its new location self.draw_pieces[b[0], b[1], b[2]] = self.draw_pieces[a[0], a[1], a[2]] self.draw_pieces[b[0], b[1], b[2]].move([b[0], b[1], b[2]]) # Remove the piece from the old location self.draw_pieces[a[0], a[1], a[2]] = None # Move one to other's position self.board_array[b[0], b[1], b[2]] = self.board_array[a[0], a[1], a[2]] # Replace one's position with empty self.board_array[a[0], a[1], a[2]] = 0 def mouseover(self, task): # If we have a mouse if self.mouseWatcherNode.hasMouse(): mpos = self.mouseWatcherNode.getMouse() # Set the position of the ray based on the mouse position self.pickerRay.setFromLens(self.camNode, mpos.getX(), mpos.getY()) # Do the actual collision pass self.picker.traverse(self.anchor) if self.pq.getNumEntries() <= 0: if self.hiSq is not False: self.square_default_color(self.hiSq) self.hiSq = False else: # Sort the hits so the closest is first, and highlight that node self.pq.sortEntries() dims = self.pq.getEntry(0).getIntoNode().getTag('square').split(',') x,y,z = [int(dim) for dim in dims] # Remove highlight from previous square if self.hiSq is not False and self.hiSq != [x,y,z]: # Turn square back to its normal, non highlighted color self.square_default_color(self.hiSq) # Set the highlight on the current square self.draw_squares[x,y,z].setColor(HIGHLIGHT) self.hiSq = [x,y,z] return Task.cont def right_click(self): # Drop the piece if self.dragging is not False: # Hide the green/red squares showing where we can move self.hide_possible_moves() tmp = self.dragging self.dragging = False self.square_default_color(tmp) def left_click(self): if self.gameover is False and self.turn in self.humans: # MOVING SELECTED PIECE if self.dragging is not False: # If we have a piece selected and we are hovering over a square if self.hiSq is not False: # If the square we are clicking is a possible move, we move if (self.dragging, self.hiSq) in self.moves: self.can_move = False self.move_pieces(self.dragging, self.hiSq) self.turn = 1 if self.turn == -1 else -1 # Moving the object new_array = np.ascontiguousarray(np.transpose(self.board_array)) self.board.set_board(new_array, self.turn) self.hide_possible_moves() self.moves = self.get_valid_moves() self.can_move = True # Hide the green/red squares showing where we can move self.hide_possible_moves() # Drop the piece tmp = self.dragging self.dragging = False self.square_default_color(tmp) # SELECTING PIECE if self.hiSq is not False: # If we pick the piece of the side whose turn it is if self.turn * self.board_array[self.hiSq[0],self.hiSq[1],self.hiSq[2]] > 0: # Hide the old green/red squares showing where we could move self.hide_possible_moves() # Select it self.dragging = self.hiSq self.show_possible_moves() def get_valid_moves(self): moves = self.board.get_moves() check_found = self.board.is_in_check() if check_found is True: self.check = self.turn kings = np.argwhere(self.board_array == 6*self.turn) for king in kings: self.draw_squares[king[0], king[1], king[2]].setColor(HIGHLIGHT_ATTACK) if not moves: print('CHECKMATE') self.gameover = True for king in kings: self.draw_pieces[king[0], king[1], king[2]].obj.setColor(HIGHLIGHT_ATTACK) else: print('CHECK') else: self.check = 0 if not moves: self.gameover = True print('DRAW') return moves def square_default_color(self, pos): 'Colors a specific square' # If we have a piece 
selected if self.dragging is not False: # If it's a move by a selected piece, it's green or red if (self.dragging, pos) in self.moves: if self.board_array[pos[0], pos[1], pos[2]] == 0: self.draw_squares[pos[0], pos[1], pos[2]].setColor(HIGHLIGHT_MOVE) else: self.draw_squares[pos[0], pos[1], pos[2]].setColor(HIGHLIGHT_ATTACK) # If it's a selected piece, it's blue elif self.dragging == pos: self.draw_squares[pos[0], pos[1], pos[2]].setColor(HIGHLIGHT) # If it isn't then it's just black or white else: self.draw_squares[pos[0], pos[1], pos[2]].setColor(square_color(*pos)) # If we don't have a piece selected, it's just black or white else: self.draw_squares[pos[0], pos[1], pos[2]].setColor(square_color(*pos)) # Mark king in red if in check if self.check: if self.board_array[pos[0], pos[1], pos[2]]*self.turn == 6 and self.dragging != pos: self.draw_squares[pos[0], pos[1], pos[2]].setColor(HIGHLIGHT_ATTACK) def show_possible_moves(self): # Changes the color of the squares the selected piece can move to for piece, move in self.moves: if piece == [self.dragging[0],self.dragging[1],self.dragging[2]]: if self.board_array[move[0],move[1],move[2]]*self.turn < 0: self.draw_squares[move[0],move[1],move[2]].setColor(HIGHLIGHT_ATTACK) else: self.draw_squares[move[0],move[1],move[2]].setColor(HIGHLIGHT_MOVE) def hide_possible_moves(self): # When we unselect a piece, we remove the coloring from the squares we can move to for piece, move in self.moves: self.draw_squares[move[0],move[1],move[2]].setColor(square_color(*move)) class Webcam(DirectObject): def __init__(self): 'This object deals with obtaining the image from the webcam and processing it' base.setBackgroundColor(0.5,0.5,0.5) self.cap
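# Illustrative sketch of the board bookkeeping done by move_pieces() above:
# the destination cell takes the moving piece's value (capturing by
# overwriting) and the source cell is cleared. This uses a bare numpy array;
# the 3D model handling of the real method is omitted.
import numpy as np

def move_on_board(board, src, dst):
    board[tuple(dst)] = board[tuple(src)]   # capture/overwrite destination
    board[tuple(src)] = 0                   # empty the source square
    return board

board = np.zeros((3, 3, 1), dtype=int)
board[0, 0, 0] = 6    # a king
board[2, 2, 0] = -1   # an enemy pawn
move_on_board(board, (0, 0, 0), (2, 2, 0))
print(board[2, 2, 0], board[0, 0, 0])   # 6 0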
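# Illustrative sketch: each square's collision polygon carries its board
# coordinates as a comma-joined 'square' tag (set in create_board above) and
# mouseover() parses them back. The round-trip, stripped of Panda3D:
def encode_square_tag(x, y, z):
    return ','.join(str(dim) for dim in (x, y, z))

def decode_square_tag(tag):
    return [int(dim) for dim in tag.split(',')]

tag = encode_square_tag(3, 0, 1)
print(tag)                      # '3,0,1'
print(decode_square_tag(tag))   # [3, 0, 1]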
<reponame>Dusk-Argentum/TomeSeeker-BETA<filename>bot.py """ Written by @zhu.exe#4211 (187421759484592128). """ import asyncio import os import aiohttp from discord.ext import commands from discord.ext.commands import CommandInvokeError from funcs import * OWNER_ID = "97153790897045504" PREFIX = "," # This is the prefix you call commands with in Discord. For example: ".help" will call the the "Help" command, but only if your prefix is ".". DESCRIPTION = "A bot to look up homebrew info from a internet source. Written by zhu.exe#4211, modified by Dusk-Argentum#6530 and silverbass#2407." # Keep this the same. TOKEN = os.environ["TOKEN"] UPDATE_DELAY = 600 # Delay is measured in seconds. "120" is 2 minutes, "360" is 6 minutes, "600" is 10 minutes, etc. discordping = 1 # This is where your sources go. EXAMPLE_CLASS_SOURCE = "" # Put your source URL between the quotes. Remember to use the RAW version if you're using GitHub. EXAMPLE_FEAT_SOURCE = "" EXAMPLE_ITEM_SOURCE = "" EXAMPLE_MONSTER_SOURCE = "" EXAMPLE_RACE_SOURCE = "" EXAMPLE_SPELL_SOURCE = "" # Don't worry if you don't use them all; you can leave any one blank as long as you don't call it later. # Source SOURCE = "https://raw.githubusercontent.com/Dusk-Argentum/TomeSeeker-BETA/master/Sources.txt" # Misadventures In Lyyth Sources MIL_CLASS_SOURCE = "https://raw.githubusercontent.com/Dusk-Argentum/TomeSeeker/master/Misadventures%20in%20Lyyth/classes.txt" MIL_FEAT_SOURCE = "https://raw.githubusercontent.com/Dusk-Argentum/TomeSeeker/master/Misadventures%20in%20Lyyth/feats.txt" MIL_ITEM_SOURCE = "https://raw.githubusercontent.com/Dusk-Argentum/TomeSeeker/master/Misadventures%20in%20Lyyth/items.txt" MIL_MONSTER_SOURCE = "https://raw.githubusercontent.com/Dusk-Argentum/TomeSeeker/master/Misadventures%20in%20Lyyth/monsters.txt" MIL_RACE_SOURCE = "https://raw.githubusercontent.com/Dusk-Argentum/TomeSeeker/master/Misadventures%20in%20Lyyth/races.txt" MIL_SPELL_SOURCE = "https://raw.githubusercontent.com/Dusk-Argentum/TomeSeeker/master/Misadventures%20in%20Lyyth/spells.txt" # Planar Recovery And Improvement Mission Agency Sources PRIMA_CLASS_SOURCE = "https://raw.githubusercontent.com/Dusk-Argentum/TomeSeeker/master/Planar%20Recovery%20and%20Improvement%20Mission%20Agency/classes.txt" PRIMA_FEAT_SOURCE = "https://raw.githubusercontent.com/Dusk-Argentum/TomeSeeker/master/Planar%20Recovery%20and%20Improvement%20Mission%20Agency/feats.txt" PRIMA_ITEM_SOURCE = "https://raw.githubusercontent.com/Dusk-Argentum/TomeSeeker/master/Planar%20Recovery%20and%20Improvement%20Mission%20Agency/items.txt" PRIMA_MONSTER_SOURCE = "https://raw.githubusercontent.com/Dusk-Argentum/TomeSeeker/master/Planar%20Recovery%20and%20Improvement%20Mission%20Agency/monsters.txt" PRIMA_RACE_SOURCE = "https://raw.githubusercontent.com/Dusk-Argentum/TomeSeeker/master/Planar%20Recovery%20and%20Improvement%20Mission%20Agency/races.txt" PRIMA_SPELL_SOURCE = "https://raw.githubusercontent.com/Dusk-Argentum/TomeSeeker/master/Planar%20Recovery%20and%20Improvement%20Mission%20Agency/spells.txt" # Keep these the same if you're following the example sources. DIVIDER = "***" # a string that divides distinct items. IGNORED_ENTRIES = 1 # a number of entries to ignore (in case of an index, etc) META_LINES = 0 # the number of lines of meta info each feat has bot = commands.Bot(command_prefix=commands.when_mentioned_or(PREFIX), description=DESCRIPTION, pm_help=False) # Change "pm_help" to True if you want the help to be PMed instead of printed in the channel where the command is called. 
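# Illustrative sketch (not part of the original bot) of how a
# DIVIDER-separated source file is turned into {"name", "meta", "desc"}
# entries by the update code below: split on the divider, drop
# IGNORED_ENTRIES leading chunks, and treat the first line of each chunk as
# the name. The sample text is made up.
def parse_source(text, divider=DIVIDER, ignored=IGNORED_ENTRIES):
    entries = []
    for chunk in [t.strip() for t in text.split(divider)][ignored:]:
        lines = chunk.split("\n")
        name = lines[0].strip("# ")
        entries.append({"name": name,
                        "meta": "\n".join(lines[1:]),
                        "desc": "\n".join(lines)})
    return entries

_sample = "Index\n***\n# Fireball\n3rd-level evocation\n***\n# Misty Step\n2nd-level conjuration"
for entry in parse_source(_sample):
    print(entry["name"])   # Fireball, Misty Step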
client = discord.Client() # Leave this alone. # If you ever decide to add more sources for different things, be sure to declare them here or else your bot will error out. # Source #sources = [] # MIL mil_classes = [] mil_feats = [] mil_items = [] mil_monsters = [] mil_races = [] mil_spells = [] # PRIMA prima_classes = [] prima_feats = [] prima_items = [] prima_monsters = [] prima_races = [] prima_spells = [] @bot.event async def on_ready(): # What happens in this block happens upon startup. Be sure to include code to update your sources here. async with aiohttp.ClientSession() as session: async with session.get(MIL_CLASS_SOURCE) as resp: text = await resp.text() if 399 < resp.status < 600: raise Exception(f"Failed to update MIL classes: {text}") raw_mil_classes = [t.strip() for t in text.split(DIVIDER)][IGNORED_ENTRIES:] for mil_class in raw_mil_classes: lines = mil_class.split("\n") name = lines[0].strip("# ") meta = "\n".join(lines[1::]) desc = "\n".join(lines) for dup in [i for i in mil_classes if name.lower() == i["name"].lower()]: mil_classes.remove(dup) mil_classes.append({"name": name, "meta": meta, "desc": desc}) print(f"Indexed class {name} from MIL.") async with aiohttp.ClientSession() as session: async with session.get(MIL_FEAT_SOURCE) as resp: text = await resp.text() if 399 < resp.status < 600: raise Exception(f"Failed to update MIL feats: {text}") raw_mil_feats = [t.strip() for t in text.split(DIVIDER)][IGNORED_ENTRIES:] for mil_feat in raw_mil_feats: lines = mil_feat.split("\n") name = lines[0].strip("# ") meta = "\n".join(lines[1::]) desc = "\n".join(lines) for dup in [i for i in mil_feats if name.lower() == i["name"].lower()]: mil_feats.remove(dup) mil_feats.append({"name": name, "meta": meta, "desc": desc}) print(f"Indexed feat {name} from MIL.") async with aiohttp.ClientSession() as session: async with session.get(MIL_ITEM_SOURCE) as resp: text = await resp.text() if 399 < resp.status < 600: raise Exception(f"Failed to update MIL items: {text}") raw_mil_items = [t.strip() for t in text.split(DIVIDER)][IGNORED_ENTRIES:] for mil_item in raw_mil_items: lines = mil_item.split("\n") name = lines[0].strip("# ") meta = "\n".join(lines[1::]) desc = "\n".join(lines) for dup in [i for i in mil_items if name.lower() == i["name"].lower()]: mil_items.remove(dup) mil_items.append({"name": name, "meta": meta, "desc": desc}) print(f"Indexed item {name} from MIL.") async with aiohttp.ClientSession() as session: async with session.get(MIL_MONSTER_SOURCE) as resp: text = await resp.text() if 399 < resp.status < 600: raise Exception(f"Failed to update MIL monsters: {text}") raw_mil_monsters = [t.strip() for t in text.split(DIVIDER)][IGNORED_ENTRIES:] for mil_monster in raw_mil_monsters: lines = mil_monster.split("\n") name = lines[0].strip("# ") meta = "\n".join(lines[1::]) desc = "\n".join(lines) for dup in [i for i in mil_monsters if name.lower() == i["name"].lower()]: mil_monsters.remove(dup) mil_monsters.append({"name": name, "meta": meta, "desc": desc}) print(f"Indexed monster {name} from MIL.") async with aiohttp.ClientSession() as session: async with session.get(MIL_RACE_SOURCE) as resp: text = await resp.text() if 399 < resp.status < 600: raise Exception(f"Failed to update MIL races: {text}") raw_mil_races = [t.strip() for t in text.split(DIVIDER)][IGNORED_ENTRIES:] for mil_race in raw_mil_races: lines = mil_race.split("\n") name = lines[0].strip("# ") meta = "\n".join(lines[1::]) desc = "\n".join(lines) for dup in [i for i in mil_races if name.lower() == i["name"].lower()]: 
mil_races.remove(dup) mil_races.append({"name": name, "meta": meta, "desc": desc}) print(f"Indexed race {name} from MIL.") async with aiohttp.ClientSession() as session: async with session.get(MIL_SPELL_SOURCE) as resp: text = await resp.text() if 399 < resp.status < 600: raise Exception(f"Failed to update MIL spells: {text}") raw_mil_spells = [t.strip() for t in text.split(DIVIDER)][IGNORED_ENTRIES:] for mil_spell in raw_mil_spells: lines = mil_spell.split("\n") name = lines[0].strip("# ") meta = "\n".join(lines[1::]) desc = "\n".join(lines) for dup in [i for i in mil_spells if name.lower() == i["name"].lower()]: mil_spells.remove(dup) mil_spells.append({"name": name, "meta": meta, "desc": desc}) print(f"Indexed spell {name} from MIL.") async with aiohttp.ClientSession() as session: async with session.get(PRIMA_CLASS_SOURCE) as resp: text = await resp.text() if 399 < resp.status < 600: raise Exception(f"Failed to update PRIMA classes: {text}") raw_prima_classes = [t.strip() for t in text.split(DIVIDER)][IGNORED_ENTRIES:] for prima_class in raw_prima_classes: lines = prima_class.split("\n") name = lines[0].strip("# ") meta = "\n".join(lines[1::]) desc = "\n".join(lines) for dup in [i for i in prima_classes if name.lower() == i["name"].lower()]: prima_classes.remove(dup) prima_classes.append({"name": name, "meta": meta, "desc": desc}) print(f"Indexed class {name} from PRIMA.") async with aiohttp.ClientSession() as session: async with session.get(PRIMA_FEAT_SOURCE) as resp: text = await resp.text() if 399 < resp.status < 600: raise Exception(f"Failed to update PRIMA feats: {text}") raw_prima_feats = [t.strip() for t in text.split(DIVIDER)][IGNORED_ENTRIES:] for prima_feat in raw_prima_feats: lines = prima_feat.split("\n") name = lines[0].strip("# ") meta = "\n".join(lines[1::]) desc = "\n".join(lines) for dup in [i for i in prima_feats if name.lower() == i["name"].lower()]: prima_feats.remove(dup) prima_feats.append({"name": name, "meta": meta, "desc": desc}) print(f"Indexed feat {name} from PRIMA.") async with aiohttp.ClientSession() as session: async with session.get(PRIMA_ITEM_SOURCE) as resp: text = await resp.text() if 399 < resp.status < 600: raise Exception(f"Failed to update PRIMA items: {text}") raw_prima_items = [t.strip() for t in text.split(DIVIDER)][IGNORED_ENTRIES:] for prima_item in raw_prima_items: lines = prima_item.split("\n") name = lines[0].strip("# ") meta = "\n".join(lines[1::]) desc = "\n".join(lines) for dup in [i for i in prima_items if name.lower() == i["name"].lower()]: prima_items.remove(dup) prima_items.append({"name": name, "meta": meta, "desc": desc}) print(f"Indexed item {name} from PRIMA.") async with aiohttp.ClientSession() as session: async with session.get(PRIMA_MONSTER_SOURCE) as resp: text = await resp.text() if 399 < resp.status < 600: raise Exception(f"Failed to update PRIMA monsters: {text}") raw_prima_monsters = [t.strip() for t in text.split(DIVIDER)][IGNORED_ENTRIES:] for prima_monster in raw_prima_monsters: lines = prima_monster.split("\n") name = lines[0].strip("# ") meta = "\n".join(lines[1::]) desc = "\n".join(lines) for dup in [i for i in prima_monsters if name.lower() == i["name"].lower()]: prima_monsters.remove(dup) prima_monsters.append({"name": name, "meta": meta, "desc": desc}) print(f"Indexed monster {name} from PRIMA.") async with aiohttp.ClientSession() as session: async with session.get(PRIMA_RACE_SOURCE) as resp: text = await resp.text() if 399 < resp.status < 600: raise Exception(f"Failed to update PRIMA races: {text}") 
raw_prima_races = [t.strip() for t in text.split(DIVIDER)][IGNORED_ENTRIES:] for prima_race in raw_prima_races: lines = prima_race.split("\n") name = lines[0].strip("# ") meta = "\n".join(lines[1::]) desc = "\n".join(lines) for dup in [i for i in prima_races if name.lower() == i["name"].lower()]: prima_races.remove(dup) prima_races.append({"name": name, "meta": meta, "desc": desc}) print(f"Indexed race {name} from PRIMA.") async with aiohttp.ClientSession() as session: async with session.get(PRIMA_SPELL_SOURCE) as resp: text = await resp.text() if 399 < resp.status < 600: raise Exception(f"Failed to update PRIMA spells: {text}") raw_prima_spells = [t.strip() for t in text.split(DIVIDER)][IGNORED_ENTRIES:] for prima_spell in raw_prima_spells: lines = prima_spell.split("\n") name = lines[0].strip("# ") meta = "\n".join(lines[1::]) desc = "\n".join(lines) for dup in [i for i in prima_spells if name.lower() == i["name"].lower()]: prima_spells.remove(dup) prima_spells.append({"name": name, "meta": meta, "desc": desc}) print(f"Indexed spell {name} from PRIMA.") await bot.change_presence(game=discord.Game(name="D&D 5e | .help"), status=discord.Status("online")) # This line sets the bot's presence upon startup. Change the prefix to match the one above, or change the whole message entirely. It's up to you. bot.loop.create_task(update_sources_loop()) await bot.change_presence(game=discord.Game(name="D&D 5e | ,help"), status=discord.Status("online")) # This line sets the bot's presence upon startup. Change the prefix to match the one above, or change the whole message entirely. It's up to you. bot.loop.create_task(update_sources_loop()) @bot.event # This sends errors when necessary. async def on_command_error(error, ctx): if isinstance(error, commands.CommandNotFound): return if isinstance(error, CommandInvokeError): error = error.original await bot.send_message(ctx.message.channel, error) @bot.event # This updates the sources at the interval mentioned at the beginning. async def update_sources_loop(): try: await bot.wait_until_ready() while not bot.is_closed: await update_sources() await asyncio.sleep(UPDATE_DELAY) except asyncio.CancelledError: pass async def update_sources(): # This is required to update your sources at a regular interval so you don't have to restart your bot/force an update via the ".update" command every time you add something new. Be sure to change everything to your sources. async with aiohttp.ClientSession() as session: async with session.get(MIL_CLASS_SOURCE) as resp: text = await resp.text() if 399 < resp.status < 600: raise Exception(f"Failed to update MIL classes: {text}") mil_classes.clear() raw_mil_classes = [t.strip() for t in text.split(DIVIDER)][IGNORED_ENTRIES:] for mil_class in raw_mil_classes: lines = mil_class.split("\n") name = lines[0].strip("# ") meta = "\n".join(lines[1::]) desc = "\n".join(lines) for dup in [i for i in mil_classes if name.lower() == i["name"].lower()]: mil_classes.remove(dup) mil_classes.append({"name": name, "meta": meta, "desc": desc}) print(f"Indexed class {name} from MIL.") async with aiohttp.ClientSession() as session: async with session.get(MIL_FEAT_SOURCE) as resp: text
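# Illustrative refactoring sketch (not part of the original bot): every
# source above is fetched and indexed with the same dozen lines, so one
# helper coroutine can stand in for the repetition. It reuses DIVIDER and
# IGNORED_ENTRIES from this module; the coroutine name is an assumption.
async def fetch_and_index(url, target_list, label):
    """Download one source file and (re)index its entries into target_list."""
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            text = await resp.text()
            if 399 < resp.status < 600:
                raise Exception(f"Failed to update {label}: {text}")
    for chunk in [t.strip() for t in text.split(DIVIDER)][IGNORED_ENTRIES:]:
        lines = chunk.split("\n")
        name = lines[0].strip("# ")
        for dup in [i for i in target_list if i["name"].lower() == name.lower()]:
            target_list.remove(dup)
        target_list.append({"name": name,
                            "meta": "\n".join(lines[1:]),
                            "desc": "\n".join(lines)})
        print(f"Indexed {name} from {label}.")

# usage inside an async context, e.g.:
# await fetch_and_index(MIL_SPELL_SOURCE, mil_spells, "MIL spells")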
self.fit_history['score'].append(score) self.fit_history['dep_params'].append( self.cov_struct.dep_params) # Don't exit until the association parameters have been # updated at least once. if del_params < ctol and num_assoc_updates > 0: break if self._do_cov_update and (itr % params_niter) == 0\ and (itr >= first_dep_update): self._update_assoc(mean_params) num_assoc_updates += 1 if del_params >= ctol: warnings.warn("Iteration limit reached prior to convergence", IterationLimitWarning) if mean_params is None: warnings.warn("Unable to estimate GEE parameters.", ConvergenceWarning) return None bcov, ncov, bc_cov, _ = self._covmat() if bcov is None: warnings.warn("Estimated covariance structure for GEE " "estimates is singular", ConvergenceWarning) return None if self.constraint is not None: mean_params, bcov = self._handle_constraint(mean_params, bcov) if mean_params is None: warnings.warn("Unable to estimate constrained GEE " "parameters.", ConvergenceWarning) return None scale = self.estimate_scale() # The superclass constructor will multiply the covariance # matrix argument bcov by scale, which we don't want, so we # divide bcov by the scale parameter here results = GEEResults(self, mean_params, bcov / scale, scale) results.covariance_type = covariance_type results.fit_history = self.fit_history results.naive_covariance = ncov results.robust_covariance_bc = bc_cov results.score_norm = del_params results.converged = (del_params < ctol) results.cov_struct = self.cov_struct return results def _handle_constraint(self, mean_params, bcov): """ Expand the parameter estimate `mean_params` and covariance matrix `bcov` to the coordinate system of the unconstrained model. Parameters: ----------- mean_params : array-like A parameter vector estimate for the reduced model. bcov : array-like The covariance matrix of mean_params. Returns: -------- mean_params : array-like The input parameter vector mean_params, expanded to the coordinate system of the full model bcov : array-like The input covariance matrix bcov, expanded to the coordinate system of the full model """ # The number of variables in the full model red_p = len(mean_params) full_p = self.constraint.lhs.shape[1] mean_params0 = np.r_[mean_params, np.zeros(full_p - red_p)] # Get the score vector under the full model. 
save_exog_li = self.exog_li self.exog_li = self.constraint.exog_fulltrans_li import copy save_cached_means = copy.deepcopy(self.cached_means) self.update_cached_means(mean_params0) _, score = self._update_mean_params() if score is None: warnings.warn("Singular matrix encountered in GEE score test", ConvergenceWarning) return None, None _, ncov1, _, cmat = self._covmat() scale = self.estimate_scale() cmat = cmat / scale**2 score2 = score[len(mean_params):] * scale amat = np.linalg.inv(ncov1) bmat_11 = cmat[0:red_p, 0:red_p] bmat_22 = cmat[red_p:, red_p:] bmat_12 = cmat[0:red_p, red_p:] amat_11 = amat[0:red_p, 0:red_p] amat_12 = amat[0:red_p, red_p:] score_cov = bmat_22 - \ np.dot(amat_12.T, np.linalg.solve(amat_11, bmat_12)) score_cov -= np.dot(bmat_12.T, np.linalg.solve(amat_11, amat_12)) score_cov += np.dot(amat_12.T, np.dot(np.linalg.solve(amat_11, bmat_11), np.linalg.solve(amat_11, amat_12))) from scipy.stats.distributions import chi2 score_statistic = np.dot(score2, np.linalg.solve(score_cov, score2)) score_df = len(score2) score_pvalue = 1 - chi2.cdf(score_statistic, score_df) self.score_test_results = {"statistic": score_statistic, "df": score_df, "p-value": score_pvalue} mean_params = self.constraint.unpack_param(mean_params) bcov = self.constraint.unpack_cov(bcov) self.exog_li = save_exog_li self.cached_means = save_cached_means self.exog = self.constraint.restore_exog() return mean_params, bcov def _update_assoc(self, params): """ Update the association parameters """ self.cov_struct.update(params) def _derivative_exog(self, params, exog=None, transform='dydx', dummy_idx=None, count_idx=None): """ For computing marginal effects returns dF(XB) / dX where F(.) is the predicted probabilities transform can be 'dydx', 'dyex', 'eydx', or 'eyex'. Not all of these make sense in the presence of discrete regressors, but checks are done in the results in get_margeff. """ #note, this form should be appropriate for ## group 1 probit, logit, logistic, cloglog, heckprob, xtprobit if exog is None: exog = self.exog margeff = self.mean_deriv_exog(exog, params) # lpr = np.dot(exog, params) # margeff = (self.mean_deriv(exog, lpr) / exog) * params # margeff = np.dot(self.pdf(np.dot(exog, params))[:, None], # params[None,:]) if 'ex' in transform: margeff *= exog if 'ey' in transform: margeff /= self.predict(params, exog)[:, None] if count_idx is not None: from statsmodels.discrete.discrete_margins import ( _get_count_effects) margeff = _get_count_effects(margeff, exog, count_idx, transform, self, params) if dummy_idx is not None: from statsmodels.discrete.discrete_margins import ( _get_dummy_effects) margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform, self, params) return margeff def setup_ordinal(self): """ Restructure ordinal data as binary indicators so that they can be analysed using Generalized Estimating Equations. """ self.endog_orig = self.endog.copy() self.exog_orig = self.exog.copy() self.groups_orig = self.groups.copy() self.exog_names_orig = list(self.exog_names) # The unique outcomes, except the greatest one. 
self.endog_values = np.unique(self.endog) endog_cuts = self.endog_values[0:-1] ncut = len(endog_cuts) nrows = ncut * len(self.endog) exog = np.zeros((nrows, self.exog.shape[1]), dtype=self.exog.dtype) endog = np.zeros(nrows, dtype=self.endog.dtype) intercepts = np.zeros((nrows, ncut), dtype=np.float64) groups = np.zeros(nrows, dtype=self.groups.dtype) time = np.zeros((nrows, self.time.shape[1]), dtype=np.float64) offset = np.zeros(nrows, dtype=np.float64) jrow = 0 zipper = zip(self.exog, self.endog, self.groups, self.time, self.offset) for (exog_row, endog_value, group_value, time_value, offset_value) in zipper: # Loop over thresholds for the indicators for thresh_ix, thresh in enumerate(endog_cuts): exog[jrow, :] = exog_row endog[jrow] = (int(endog_value > thresh)) intercepts[jrow, thresh_ix] = 1 groups[jrow] = group_value time[jrow] = time_value offset[jrow] = offset_value jrow += 1 exog = np.concatenate((intercepts, exog), axis=1) icept_names = ["I(%s > %.0f)" % (self.endog_names, x) for x in endog_cuts] exog = pd.DataFrame(exog, columns=icept_names + self.exog_names) self.ordinal = True self._reset(endog, exog, groups, time=time, family=self.family, cov_struct=self.cov_struct, missing=self.missing, offset=offset, dep_data=self.dep_data, constraint=self.constraint) def setup_nominal(self): """ Restructure nominal data as binary indicators so that they can be analysed using Generalized Estimating Equations. """ self.endog_orig = self.endog.copy() self.exog_orig = self.exog.copy() self.groups_orig = self.groups.copy() self.exog_names_orig = list(self.exog_names) # The unique outcomes, except the greatest one. self.endog_values = np.unique(self.endog) endog_cuts = self.endog_values[0:-1] ncut = len(endog_cuts) nrows = len(endog_cuts) * self.exog.shape[0] ncols = len(endog_cuts) * self.exog.shape[1] exog = np.zeros((nrows, ncols), dtype=np.float64) endog = np.zeros(nrows, dtype=np.float64) groups = np.zeros(nrows, dtype=np.float64) time = np.zeros((nrows, self.time.shape[1]), dtype=np.float64) offset = np.zeros(nrows, dtype=np.float64) jrow = 0 zipper = zip(self.exog, self.endog, self.groups, self.time, self.offset) for (exog_row, endog_value, group_value, time_value, offset_value) in zipper: # Loop over thresholds for the indicators for thresh_ix, thresh in enumerate(endog_cuts): u = np.zeros(len(endog_cuts), dtype=np.float64) u[thresh_ix] = 1 exog[jrow, :] = np.kron(u, exog_row) endog[jrow] = (int(endog_value == thresh)) groups[jrow] = group_value time[jrow] = time_value offset[jrow] = offset_value jrow += 1 names = [] for v in self.endog_values[0:-1]: names.extend(["%s [%s]" % (name, v) for name in self.exog_names]) exog = pd.DataFrame(exog, columns=names) self.nominal = True self._reset(endog, exog, groups, time=time, family=self.family, cov_struct=self.cov_struct, missing=self.missing, offset=offset, dep_data=self.dep_data, constraint=self.constraint) class GEEResults(base.LikelihoodModelResults): ''' Class to contain GEE results. GEEResults inherits from statsmodels.LikelihoodModelResults Parameters ---------- See statsmodels.LikelihoodModelReesults Returns ------- **Attributes** naive_covariance : ndarray covariance of the parameter estimates that is not robust to correlation or variance misspecification robust_covariance_bc : ndarray covariance of the parameter estimates that is robust and bias reduced converged : bool indicator for convergence of the optimization. 
True if the norm of the score is smaller than a threshold covariance_type : string string indicating whether a "robust", "naive" or "bias_ reduced" covariance is used as default fit_history : dict Contains information about the iterations. fittedvalues : array Linear predicted values for the fitted model. dot(exog, params) model : class instance Pointer to GEE model instance that called fit. normalized_cov_params : array See GEE docstring params : array The coefficients of the fitted model. Note that interpretation of the coefficients often depends on the distribution family and the data. scale : float The estimate of the scale / dispersion for the model fit. See GEE.fit for more information. score_norm : float norm of the score at the end of the iterative estimation. bse : array The standard errors of the fitted GEE parameters. See Also -------- statsmodels.LikelihoodModelResults GEE ''' # Default covariance type covariance_type = "robust" def __init__(self, model, params, cov_params, scale): super(GEEResults, self).__init__(model, params, normalized_cov_params=cov_params, scale=scale) def standard_errors(self, covariance_type="robust"): """ This is a convenience function that returns the standard errors for any covariance type. The value of `bse` is the standard errors for whichever covariance type is specified as an argument to `fit` (defaults to "robust"). Arguments: ---------- covariance_type : string One of "robust", "naive", or "bias_reduced". Determines the covariance used to compute standard errors. Defaults to "robust". """ # Check covariance_type covariance_type = covariance_type.lower() allowed_covariances = ["robust", "naive", "bias_reduced"] if covariance_type not in allowed_covariances: msg = "GEE: `covariance_type` must be one of " +\ ", ".join(allowed_covariances) raise ValueError(msg) if covariance_type == "robust": return np.sqrt(np.diag(self.cov_params())) elif covariance_type == "naive": return np.sqrt(np.diag(self.naive_covariance)) elif covariance_type == "bias_reduced": return np.sqrt(np.diag(self.robust_covariance_bc)) # Need to override to allow for different covariance types. @cache_readonly def bse(self): return self.standard_errors(self.covariance_type) @cache_readonly def resid(self): """ Returns the residuals, the endogeneous
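# Illustrative sketch of the score (Lagrange multiplier) test assembled in
# _handle_constraint above: given the score vector for the constrained
# parameters and its covariance, the statistic is s' V^{-1} s and the
# p-value comes from a chi-square with len(s) degrees of freedom. The toy
# numbers below are made up.
import numpy as np
from scipy.stats.distributions import chi2

def score_test(score2, score_cov):
    stat = np.dot(score2, np.linalg.solve(score_cov, score2))
    df = len(score2)
    return stat, df, 1 - chi2.cdf(stat, df)

s = np.array([0.8, -0.3])
V = np.array([[0.5, 0.1], [0.1, 0.4]])
print(score_test(s, V))   # (statistic, df, p-value)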
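# Illustrative sketch of the restructuring performed by setup_ordinal()
# above, with the exog/groups/time bookkeeping stripped out: an ordinal
# outcome y with cut points c (all unique values except the largest) becomes
# one binary indicator row I(y > c) per (observation, threshold) pair, each
# carrying its own threshold-specific intercept column.
import numpy as np

def expand_ordinal(endog):
    values = np.unique(endog)
    cuts = values[:-1]                       # thresholds: all values but the largest
    rows = []
    for obs_ix, y in enumerate(endog):
        for thresh_ix, c in enumerate(cuts):
            intercepts = np.zeros(len(cuts), dtype=int)
            intercepts[thresh_ix] = 1
            rows.append((obs_ix, int(y > c), intercepts))
    return cuts, rows

y = np.array([0, 1, 2])
cuts, rows = expand_ordinal(y)
for obs_ix, indicator, intercepts in rows:
    print(obs_ix, indicator, intercepts)
# obs 0 gives I(y>0)=0, I(y>1)=0; obs 2 gives I(y>0)=1, I(y>1)=1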
asn_descriptions.description, ' 'asn_assignments.start_hex, asn_assignments.next_start_hex ' 'FROM asn_descriptions JOIN asn_assignments ON ' 'asn_assignments.as_num = asn_descriptions.as_num ' 'WHERE num_type = ? AND start_hex >= ? AND next_start_hex <= ?') self.cursor.execute(sql, (num_type, lookup_start_hex, lookup_end_hex)) row = self.cursor.fetchall() if row: return row def _concatenate_and_write( self, records, write_function=None, record_filter=None, bits=32): netblocks = [] for row in records: try: start_hex, next_start_hex, record = \ long(row[0], 16), long(row[1], 16), str(row[2]) nb = bits - int(log(next_start_hex - start_hex, 2)) net = ipaddr.IPNetwork("%s/%d" % (ipaddr.IPAddress(start_hex), nb)) if callable(record_filter): record = record_filter(record) except ValueError: continue # Concatenate adjacent blocks of the same country if netblocks and netblocks[-1][1] == record: pn = netblocks[-1][0] nb = bits - int(log(int(net.network) + int(net.numhosts) - int(pn.network), 2)) netblocks[-1] = (ipaddr.IPNetwork("%s/%d" % (pn.network, nb)), record) # if the adjacent blocks aren't the same country, # write the last block out to csv and add the new block # to the list for possible concatenation elif netblocks: prev_n, prev_record = netblocks.pop() if write_function: write_function(prev_n, prev_record) netblocks.append((net, record)) # this is the base case else: netblocks.append((net, record)) def export_asn(self, filename, num_type): """ Export assignments to the CSV format used to build the geoip-database asn lookup """ sql = ('SELECT start_hex, next_start_hex, as_num ' 'FROM asn_assignments WHERE num_type = ? ORDER BY start_hex') self.cursor.execute(sql, (num_type,)) try: f = open(filename, 'w') except IOError: print("Unable to open %s" % filename) return def write_csv_line(network, asn): # XXX: wild guess f.write(""""%s","%s","%d","%d","%s"\n""" % (network.network, network.broadcast, int(network.network), int(network.broadcast), asn)) if num_type == 'ipv6': ip_bits = 128 elif num_type == 'ipv4': ip_bits = 32 else: return self._concatenate_and_write(self.cursor, write_function=write_csv_line, bits=ip_bits) f.close() def export_geoip(self, lookup, filename, num_type): """ Export assignments to the CSV format used to build the geoip-database package """ sql = ('SELECT start_hex, next_start_hex, country_code ' 'FROM assignments WHERE num_type = ? 
ORDER BY start_hex') self.cursor.execute(sql, (num_type,)) try: f = open(filename, 'w') except IOError: print("Unable to open %s" % filename) return def write_csv_line(network, country_code): country_name = lookup.get_name_from_country_code(country_code) if country_name: country_name = country_name.split( "#")[0].strip() # Drop comments f.write(""""%s","%s","%d","%d","%s","%s"\n""" % ( network.network, network.broadcast, int(network.network), int(network.broadcast), country_code, country_name)) if num_type == 'ipv6': ip_bits = 128 elif num_type == 'ipv4': ip_bits = 32 else: return self._concatenate_and_write(self.cursor, write_function=write_csv_line, record_filter=str.upper, bits=ip_bits) f.close() class DownloaderParser(object): def __init__(self, cache_dir, database_cache, user_agent, verbose=False): self.cache_dir = cache_dir self.database_cache = database_cache self.user_agent = user_agent self.verbose = verbose MAXMIND_URLS = """ http://geolite.maxmind.com/download/geoip/database/GeoIPCountryCSV.zip http://geolite.maxmind.com/download/geoip/database/GeoIPv6.csv.gz """ RIR_URLS = """ ftp://ftp.arin.net/pub/stats/arin/delegated-arin-extended-latest ftp://ftp.ripe.net/ripe/stats/delegated-ripencc-latest ftp://ftp.afrinic.net/pub/stats/afrinic/delegated-afrinic-latest ftp://ftp.apnic.net/pub/stats/apnic/delegated-apnic-latest ftp://ftp.lacnic.net/pub/stats/lacnic/delegated-lacnic-latest """ LIR_URLS = """ ftp://ftp.ripe.net/ripe/dbase/split/ripe.db.inetnum.gz ftp://ftp.ripe.net/ripe/dbase/split/ripe.db.inet6num.gz """ COUNTRY_CODE_URL = ("http://www.iso.org/iso/home/standards/country_codes/" "country_names_and_code_elements_txt-temp.htm") ASN_DESCRIPTION_URL = "http://www.cidr-report.org/as2.0/autnums.html" ASN_ASSIGNMENT_URLS = [ ('http://archive.routeviews.org/oix-route-views/' 'oix-full-snapshot-latest.dat.bz2'), ] def download_maxmind_files(self): """ Download all LIR delegation urls. """ for maxmind_url in self.MAXMIND_URLS.split(): self._download_to_cache_dir(maxmind_url) def download_rir_files(self): """ Download all RIR delegation files including md5 checksum. """ for rir_url in self.RIR_URLS.split(): rir_md5_url = rir_url + '.md5' self._download_to_cache_dir(rir_url) self._download_to_cache_dir(rir_md5_url) def download_lir_files(self): """ Download all LIR delegation urls. """ for lir_url in self.LIR_URLS.split(): self._download_to_cache_dir(lir_url) def download_country_code_file(self): """ Download and save the latest semicolon-separated open country codes file. """ self._download_to_cache_dir(self.COUNTRY_CODE_URL) def download_asn_description_file(self): """ Download and save the latest ASN to Name report from cidr-report.org""" self._download_to_cache_dir(self.ASN_DESCRIPTION_URL) def download_asn_assignment_files(self): """ Download and save the latest routing snapshots. """ for assignment_url in self.ASN_ASSIGNMENT_URLS: self._download_to_cache_dir(assignment_url) def _download_to_cache_dir(self, url): """ Fetch a resource (with progress bar) and store contents to the local cache directory under the file name given in the URL. """ if not os.path.exists(self.cache_dir): if self.verbose: print("Initializing the cache directory...") os.mkdir(self.cache_dir) filename = url.split('/')[-1] if self.verbose: print(url) req = Request(url) if self.user_agent: req.add_header('User-Agent', self.user_agent) # TODO Allow use of a proxy. 
# req.set_proxy(host, type) try: fetcher = urlopen(req) except URLError as err: msg = "An error occurred while attempting to cache file from:" print(("%s\n\t%s\n\t%s" % (msg, url, str(err)))) return length_header = fetcher.headers.get("Content-Length") expected_bytes = -1 if length_header: expected_bytes = int(length_header) print(("Fetching %d kilobytes" % round(float(expected_bytes / 1024), 2))) download_started = time.time() output_file = open(os.path.join(self.cache_dir, filename), "wb") received_bytes, seconds_elapsed = 0, 0 while True: seconds_elapsed = time.time() - download_started if expected_bytes >= 0: self._update_progress_bar(received_bytes, expected_bytes, seconds_elapsed) chunk = fetcher.read(1024) if len(chunk) == 0: if expected_bytes >= 0 and received_bytes != expected_bytes: print(("Expected %s bytes, only received %s" % (expected_bytes, received_bytes))) print("") break received_bytes += len(chunk) output_file.write(chunk) output_file.close() def _update_progress_bar(self, received_bytes, expected_bytes, seconds_elapsed): """ Write a progress bar to the console. """ if is_win32: rows = 100 # use some WinCon function for these? columns = 80 # but not really important. EOL = "\r" else: rows, columns = list(map(int, os.popen('stty size', 'r' ).read().split())) EOL = "\x1b[G" if seconds_elapsed == 0: seconds_elapsed = 1 percent_done = float(received_bytes) / float(expected_bytes) caption = "%.2f K/s" % (received_bytes / 1024 / seconds_elapsed) width = columns - 4 - len(caption) sys.stdout.write("[%s>%s] %s%s" % ( "=" * int(percent_done * width), "." * (width - int(percent_done * width)), caption, EOL)) sys.stdout.flush() def check_rir_file_mtimes(self): """ Return True if the mtime of any RIR file in our cache directory is > 24 hours, False otherwise. """ if not os.path.exists(self.cache_dir): return False for rir_url in self.RIR_URLS.split(): rir_path = os.path.join(self.cache_dir, rir_url.split('/')[-1]) if os.path.exists(rir_path): rir_stat = os.stat(rir_path) if (time.time() - rir_stat.st_mtime) > 86400: return True return False def verify_rir_files(self): """ Compute md5 checksums of all RIR files, compare them to the provided .md5 files, and return True if the two checksums match, or False otherwise. """ for rir_url in self.RIR_URLS.split(): rir_path = os.path.join(self.cache_dir, rir_url.split('/')[-1]) rir_md5_path = os.path.join(self.cache_dir, rir_url.split('/')[-1] + '.md5') if not os.path.exists(rir_md5_path) or \ not os.path.exists(rir_path): continue rir_md5_file = open(rir_md5_path, 'r') expected_checksum = rir_md5_file.read() rir_md5_file.close() if "=" in expected_checksum: expected_checksum = expected_checksum.split("=")[-1].strip() elif expected_checksum == "": if self.verbose: print("No checksum... skipping verification...") continue else: regex = re.compile("[a-f0-9]{32}") regres = regex.findall(expected_checksum) if len(regres) > 1: print("Error: mutiple checksum found") elif len(regres) < 1: print("Error: no checksum found") else: expected_checksum = regres[0] computed_checksum = "" rir_file = open(rir_path, 'rb') rir_data = rir_file.read() rir_file.close() computed_checksum = str(hashlib.md5(rir_data).hexdigest()) if expected_checksum != computed_checksum: print(("The computed md5 checksum of %s, %s, does *not* " "match the provided checksum %s!" 
% (rir_path, computed_checksum, expected_checksum))) def parse_maxmind_files(self, maxmind_urls=None): """ Parse locally cached MaxMind files and insert assignments to the local database cache, overwriting any existing MaxMind assignments. """ if not maxmind_urls: maxmind_urls = self.MAXMIND_URLS.split() self.database_cache.delete_assignments('maxmind') for maxmind_url in maxmind_urls: maxmind_path = os.path.join(self.cache_dir, maxmind_url.split('/')[-1]) if not os.path.exists(maxmind_path): print("Unable to find %s." % maxmind_path) continue if maxmind_path.endswith('.zip'): maxmind_zip_path = zipfile.ZipFile(maxmind_path) for contained_filename in maxmind_zip_path.namelist(): content = maxmind_zip_path.read(contained_filename) self._parse_maxmind_content(content, 'maxmind', 'maxmind') maxmind_zip_path.close() elif maxmind_path.endswith('.gz'): gzip_file = gzip.open(maxmind_path) content = gzip_file.read() self._parse_maxmind_content(content, 'maxmind', 'maxmind') gzip_file.close() self.database_cache.commit_changes() def import_maxmind_file(self, maxmind_path): self.database_cache.delete_assignments(maxmind_path) if not os.path.exists(maxmind_path): print("Unable to find %s." % maxmind_path) return with open(maxmind_path, 'r') as f: content = f.read() self._parse_maxmind_content(content, maxmind_path, maxmind_path) self.database_cache.commit_changes() def _parse_maxmind_content(self, content, source_type, source_name): keys = ['start_str', 'end_str', 'start_num', 'end_num', 'country_code', 'country_name'] for line in content.decode('utf-8').split('\n'): if len(line.strip()) == 0 or line.startswith("#"): continue line = line.replace('"', '').replace(' ', '').strip() parts = line.split(',') entry = dict((k, v) for k, v in zip(keys, parts)) start_num = int(entry['start_num']) end_num = int(entry['end_num']) country_code = str(entry['country_code']) start_ipaddr = ipaddr.ip_address(entry['start_str']) if isinstance(start_ipaddr, ipaddr.IPv4Address): num_type = 'ipv4' else: num_type = 'ipv6' self.database_cache.insert_assignment( start_num, end_num, num_type, country_code, source_type, source_name) def parse_rir_files(self, rir_urls=None): """ Parse locally cached RIR files and insert assignments to the local database cache, overwriting any existing RIR assignments. """ if not rir_urls: rir_urls = self.RIR_URLS.split() self.database_cache.delete_assignments('rir') keys = "registry country_code type start value date status" for rir_url in rir_urls: rir_path = os.path.join(self.cache_dir, rir_url.split('/')[-1]) if not os.path.exists(rir_path): print("Unable to find %s." 
% rir_path) continue rir_file = open(rir_path, 'r') for line in rir_file: if line.startswith("#"): continue entry = dict((k, v) for k, v in zip(keys.split(), line.strip().split("|"))) source_name = str(entry['registry']) country_code = str(entry['country_code']) if source_name.replace( ".", "", 1).isdigit() or country_code == "*": continue num_type = entry['type'] if num_type == 'asn': start_num = end_num = int(entry['start']) elif num_type == 'ipv4': start_num = int(ipaddr.IPv4Address(entry['start'])) end_num = start_num + int(entry['value']) - 1 elif num_type == 'ipv6': network_str = entry['start'] + '/' + entry['value'] network_ipaddr = ipaddr.IPv6Network(network_str) start_num = int(network_ipaddr.network_address) end_num = int(network_ipaddr.broadcast_address) self.database_cache.insert_assignment( start_num, end_num, num_type, country_code, 'rir', source_name) rir_file.close() self.database_cache.commit_changes() def parse_lir_files(self, lir_urls=None): """ Parse
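# Illustrative sketch of the prefix-length arithmetic used by
# _concatenate_and_write() above: a block of `size` addresses starting at
# integer `start` becomes a start/prefix pair with prefix = bits - log2(size).
# The original uses the old `ipaddr` module and int(log(size, 2)); the
# standard-library `ipaddress` module and bit_length() are used here instead
# so the power-of-two arithmetic stays exact.
import ipaddress

def block_to_network(start, size, bits=32):
    prefix = bits - (size.bit_length() - 1)
    return ipaddress.ip_network("%s/%d" % (ipaddress.ip_address(start), prefix))

print(block_to_network(int(ipaddress.ip_address("192.0.2.0")), 256))                  # 192.0.2.0/24
print(block_to_network(int(ipaddress.ip_address("2001:db8::")), 2 ** 96, bits=128))   # 2001:db8::/32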
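# Illustrative sketch of the checksum comparison in verify_rir_files()
# above: compute the md5 of a cached delegation file and compare it to the
# digest published in the matching ".md5" sidecar. Paths are placeholders.
import hashlib

def md5_matches(data_path, expected_checksum):
    with open(data_path, 'rb') as f:
        computed = hashlib.md5(f.read()).hexdigest()
    return computed == expected_checksum.strip().lower()

# Typical sidecar formats are either a bare digest or "MD5 (...) = <digest>";
# the original extracts the digest with a split on "=" or a 32-hex-char regex.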
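# Illustrative sketch of parsing one pipe-separated line of an RIR
# delegated-stats file, as parse_rir_files() above does: ipv4 records give a
# start address plus an address count, ipv6 records give a prefix length,
# and asn records give a single number. The sample line is made up.
import ipaddress

FIELDS = "registry country_code type start value date status".split()

def parse_delegated_line(line):
    entry = dict(zip(FIELDS, line.strip().split("|")))
    num_type = entry['type']
    if num_type == 'asn':
        start_num = end_num = int(entry['start'])
    elif num_type == 'ipv4':
        start_num = int(ipaddress.IPv4Address(entry['start']))
        end_num = start_num + int(entry['value']) - 1
    elif num_type == 'ipv6':
        net = ipaddress.IPv6Network(entry['start'] + '/' + entry['value'])
        start_num, end_num = int(net.network_address), int(net.broadcast_address)
    else:
        raise ValueError(num_type)
    return entry['registry'], entry['country_code'], num_type, start_num, end_num

print(parse_delegated_line("ripencc|NL|ipv4|193.0.0.0|2048|19930901|allocated"))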
data['buff']['3']['second_shell'], second_img='./base/other/etc_2.png', third_img='./base/other/etc_3.png', cutoff=8) # click back to mission board # open daily mission board self.make_sure_loaded('./base/other/daily.png', device, data['daily']['dms'], data['daily']['second_shell'], cutoff=8, shell_first=True, sleep_duration=0.5) claim() text = device.serial+': opened and claimed rewards (and exp/gold buff) on daily mission board for the first time' logging.info(text) print(text) # get game language im, device = self.update_cache(device) first_misison = crop(im, data['first mission']) image = filter(first_misison) text_lang = image_to_string(image).splitlines()[0].lower().replace('♀', '') while True: try: lang = detect(text_lang) break except: device.shell(data['daily']['second_shell']) slp(1) claim() slp(5) continue if lang == 'en' or lang == 'da' or lang == 'fr': lang = 'eng' elif lang == 'ja': lang = 'jpn' elif lang == 'vi': lang = 'vie' else: with open('./languages.json', encoding='utf-8') as j: langs = json.load(j) lang = None missions_ = [] langs_ = [] _langs_ = {} for lang__ in langs: langs_.append(lang__) for _lang_ in langs[lang__]: missions_.append(_lang_) _langs_[_lang_] = lang__ for lang__ in langs_: text_lang = image_to_string(image, lang__).splitlines()[0].lower().replace('♀', '') if lang__ == 'jpn': text_lang = text_lang.replace(' ', '') lang_ = extractOne(text_lang, missions_) print(lang_[1]) if lang_[1] > 85: lang = _langs_[lang_[0]] if lang is None: text = device.serial+': language not supported or cannot recognized (supported languages: english, japanese, vietnamese)' logging.info(text) print(text) if self.launched is not None: text = device.serial+': because launched from config so closing after done' logging.info(text) print(text) run_(path+f' quit --index {str(self.launched)}') exit() # check for undone missions not_done = [] not_done_ = [] count = 0 while True: im, device = self.update_cache(device) # get 4 visible missions on mission board visible_missions = [crop(im, data['first mission']), crop(im, data['second mission']), \ crop(im, data['third mission']), crop(im, data['fourth mission'])] if not_done_ == not_done: if count >= 20: self.weekly(device, data) if self.gb_cf['mails'] == True: self.mails(device, data) if self.gb_cf['loh'] == True: re = self.loh(device, data, lang) if re != 'success': text = device.serial+': loh not enough currency or unavailable' logging.info(text) print(text) text = device.serial+': all avalible missions has been completed, script ended' logging.info(text) print(text) if self.launched is not None: text = device.serial+': because launched from config so closing after done' logging.info(text) print(text) run_(path+f' quit --index {str(self.launched)}') exit() count+=1 not_done_ = not_done count_ = 0 for mission in visible_missions: pil_image = mission text = image_to_string(pil_image, lang).splitlines()[0].lower().replace('♀', '') if text == ' ': img = filter(pil_image) text = image_to_string(img, lang).splitlines()[0].lower().replace('♀', '') re = self.do_mission(text, device, data['shortcut'][str(count_)], data, size_, lang) if re == 'not': if text not in not_done: not_done.append(text) else: self.make_sure_loaded('./base/other/daily.png', device, data['daily']['dms'], data['daily']['shell'], cutoff=8) claim() logging.info(device.serial+': opened and claimed rewards on daily mission board') break count_+=1 def do_mission(self, mission, device, pos, data, res, lang): with open('./languages.json', encoding='utf-8') as j: lang_data = 
json.load(j)[lang] lst = [] for name in lang_data: lst.append(name) ext = extractOne(mission, lst) re = lang_data[ext[0]] if re == 'dragon': if self.gb_cf['dragon'] == False: return 'not' if self.dragon_ == True: return 'not' return self.dragon(device, pos, data, lang) elif re == 'friendship': if self.gb_cf['friendship'] == False: return 'not' if self.friendship_ == True: return 'not' return self.friendship(device, pos, data) elif re == 'inn': if self.gb_cf['inn'] == False: return 'not' if self.inn_ == True: return 'not' return self.inn(device, pos, data) elif re == 'lov': if self.gb_cf['lov'] == False: return 'not' if self.lov_ == True: return 'not' return self.lov(device, pos, data) elif re == 'shop': if self.gb_cf['shop'] == False: return 'not' if self.shop_ == True: return 'not' return self.shop(device, pos, data) elif re == 'stockage': if self.gb_cf['stockage'] == False: return 'not' if self.stockage_ == True: return 'not' return self.stockage(device, pos, data) elif re == 'tower': if self.gb_cf['tower'] == False: return 'not' if self.tower_ == True: return 'not' return self.tower(device, pos, data, lang) elif re == 'wb': if self.gb_cf['wb'] == False: return 'not' if self.wb_ == True: return 'not' return self.wb(device, pos, data) elif re == 'lil': if self.gb_cf['lil'] == False: return 'not' if self.lil_ == True: return 'not' return self.lil(device, pos, data, res) elif re == 'dungeons': return 'not' elif re == 'stamina': return 'not' elif re == 'login': return 'not' def dragon(self, device, position, data, lang): print(device.serial+': hunting dragon...') logging.info(device.serial+': hunting dragon') # click mission shortcut shortcut = self.make_sure_loaded('./base/dragon/raid_list.png', device, data['dragon']['1']['dms'], data['dragon']['1']['shell']+position, cutoff=20, loop=20, sleep_duration=10) if shortcut == 'loop': self.dragon_ = True return 'not' logging.info(device.serial+': loaded from mission shortcut') # click create red dragon raid self.make_sure_loaded('./base/dragon/red_dra.png', device, data['dragon']['2']['dms'], data['dragon']['2']['shell']) logging.info(device.serial+': clicked create dragon raid') with open('./languages.json', encoding='utf-8') as j: dragon_text = json.load(j)[lang]['dragon'] # change hard level to t6 stage 1 while True: im, device = self.update_cache(device) pil_image = crop(im, data['dragon']['3']['dms']) img = filter(pil_image) text = image_to_string(img, lang).replace('♀', '') if lang == 'jpn': text = text.replace(' ', '') text_ = text.splitlines()[0].lower().replace(' ', '') if SequenceMatcher(None, dragon_text, text_).ratio() > 0.9: device.shell(data['dragon']['3']['shell']) break else: device.shell(data['dragon']['4']['shell']) logging.info(device.serial+': changed to dragon t6 stage 1') # click single raid self.make_sure_loaded('./base/dragon/single_raid.png', device, data['dragon']['5']['dms'], data['dragon']['5']['shell'], shell_first=True) logging.info(device.serial+': clicked single raid') # click enter raid self.make_sure_loaded('./base/dragon/party.png', device, data['dragon']['6']['dms'], data['dragon']['6']['shell'], sleep_duration=0.5, cutoff=20) logging.info(device.serial+': clicked enter raid') # check avalible party # slot 1 self.make_sure_loaded('./base/dragon/party_4.png', device, data['dragon']['7']['dms'], data['dragon']['7']['shell'], oposite=True, sleep_duration=1) # slot 2 self.make_sure_loaded('./base/dragon/party_3.png', device, data['dragon']['8']['dms'], data['dragon']['8']['shell'], oposite=True, sleep_duration=1) 
# slot 3 self.make_sure_loaded('./base/dragon/party_2.png', device, data['dragon']['9']['dms'], data['dragon']['9']['shell'], oposite=True, sleep_duration=1) # slot 4 self.make_sure_loaded('./base/dragon/party_1.png', device, data['dragon']['10']['dms'], data['dragon']['10']['shell'], oposite=True, sleep_duration=1) # slot 5 self.make_sure_loaded('./base/dragon/party_6.png', device, data['dragon']['11']['dms'], data['dragon']['11']['shell'], oposite=True, sleep_duration=1) # slot 6 self.make_sure_loaded('./base/dragon/party_5.png', device, data['dragon']['12']['dms'], data['dragon']['12']['shell'], oposite=True, sleep_duration=1) logging.info(device.serial+': checked all avalible slots') # click start battle self.make_sure_loaded('./base/dragon/battle.png', device, data['dragon']['13']['dms'], data['dragon']['13']['shell'], cutoff=30) logging.info(device.serial+': clicked start battle') # wait until finish self.make_sure_loaded('./base/dragon/end.png', device, data['dragon']['14']['dms'], sleep_duration=15, cutoff=10, ck=False, loop=4) logging.info(device.serial+': battle completed') # click exit battle self.make_sure_loaded('./base/dragon/party.png', device, data['dragon']['15']['dms'], data['dragon']['15']['shell'], sleep_duration=0.5) logging.info(device.serial+': exited battle') # click exit self.make_sure_loaded('./base/dragon/my_info.png', device, data['dragon']['16']['dms'], data['dragon']['16']['shell'], sleep_duration=0.5) device.shell(data['dragon']['17']['shell']) logging.info(device.serial+': successfully did dragon mission') self.dragon_ = True return 'success' def friendship(self, device, position, data): print(device.serial+': exchanging friendship points...') logging.info(device.serial+': exchanging friendship points') # click mission shortcut shortcut = self.make_sure_loaded('./base/friendship/friends.png', device, data['friendship']['1']['dms'], data['friendship']['1']['shell']+position, loop=20, cutoff=20, sleep_duration=10) if shortcut == 'loop': self.friendship_ = True return 'not' logging.info(device.serial+': loaded from mission shortcut') # click exchange friendship points self.make_sure_loaded('./base/friendship/exchange.png', device, data['friendship']['2']['dms'], data['friendship']['2']['shell'], cutoff=10, shell_first=True, loop=30) logging.info(device.serial+': clicked exchange friendship points') # click exit self.make_sure_loaded('./base/friendship/my_info.png', device, data['friendship']['3']['dms'], data['friendship']['3']['shell'], sleep_duration=0.5) device.shell(data['friendship']['4']['shell']) logging.info(device.serial+': successfully did friendship mission') self.friendship_ = True return 'success' def inn(self, device, position, data): print(device.serial+': doing stuffs in inn...') logging.info(device.serial+': doing stuffs in inn') # click mission shortcut shortcut = self.make_sure_loaded('./base/inn/visit_inn.png', device, data['inn']['1']['dms'], data['inn']['1']['shell']+position, cutoff=20, loop=20, sleep_duration=10) if shortcut == 'loop': self.inn_ = True return 'not' logging.info(device.serial+': loaded from mission shortcut') # open inn self.make_sure_loaded('./base/inn/inn.png', device, data['inn']['2']['dms'], data['inn']['2']['shell'], second_img='./base/inn/inn_.png', cutoff=15) logging.info(device.serial+': opened inn') # give gifts def gift(): slp(2) self.make_sure_loaded('./base/inn/greet.png', device, data['inn']['3']['dms'], data['inn']['3']['shell'], second_shell=data['inn']['2']['shell'], cutoff=10, \ 
second_img='./base/inn/greet_.png', third_img='./base/inn/greet__.png', loop=5, shell_first=True) self.make_sure_loaded('./base/inn/start_conversation.png', device, data['inn']['4']['dms'], data['inn']['4']['shell'], second_shell=data['inn']['2']['shell'], cutoff=10, \ second_img='./base/inn/start_conversation_.png', third_img='./base/inn/start_conversation__.png', loop=5, shell_first=True) self.make_sure_loaded('./base/inn/send_gift.png', device, data['inn']['5']['dms'], data['inn']['5']['shell'], second_shell=data['inn']['2']['shell'], cutoff=10, \ second_img='./base/inn/send_gift_.png', third_img='./base/inn/send_gift__.png', loop=5, shell_first=True) # choose hero in inn def choose_hero(tap1, tap2): self.make_sure_loaded('./base/inn/inn.png', device, data['inn']['6']['dms'], data['inn']['6']['shell']+str(tap1)+' '+str(tap2), shell_first=True, second_img='./base/inn/inn_.png', cutoff=25, second_shell=data['inn']['2']['shell']) # give gifts to first hero gift() logging.info(device.serial+': gave gifts to first hero') # give gifts to second hero choose_hero(data['inn']['7']['shell'][0], data['inn']['7']['shell'][1]) gift() logging.info(device.serial+': gave gifts to second hero') # give gifts to third hero choose_hero(data['inn']['8']['shell'][0], data['inn']['8']['shell'][1]) gift() logging.info(device.serial+': gave gifts to third hero') # give gifts to fourth hero choose_hero(data['inn']['9']['shell'][0], data['inn']['9']['shell'][1]) gift() logging.info(device.serial+': gave gifts to fourth hero') # give gifts to fifth hero choose_hero(data['inn']['10']['shell'][0], data['inn']['10']['shell'][1]) gift() logging.info(device.serial+': gave gifts to fifth hero') # give gifts to sixth hero choose_hero(data['inn']['11']['shell'][0], data['inn']['11']['shell'][1]) gift() logging.info(device.serial+': gave gifts to sixth hero') # click 'Mini Game' count = 0 while True: if count == 6: break self.make_sure_loaded('./base/inn/mini_game.png', device, data['inn']['12']['dms'], data['inn']['12']['shell']) slp(0.5) device.shell(data['inn']['13']['shell']) slp(0.5) self.make_sure_loaded('./base/inn/inn.png', device, data['inn']['14']['dms'], data['inn']['14']['shell'], cutoff=20, second_img='./base/inn/inn_.png') slp(1) count+=1 logging.info(device.serial+': played minigames') # click exit self.make_sure_loaded('./base/inn/visit_inn.png', device, data['inn']['15']['dms'], data['inn']['15']['shell'], cutoff=20, sleep_duration=3) self.make_sure_loaded('./base/inn/my_info.png', device, data['inn']['16']['dms'], data['inn']['16']['shell'], sleep_duration=0.5) device.shell(data['inn']['17']['shell']) logging.info(device.serial+': successfully did some stuffs in inn mission') self.inn_ = True return 'success' def lov(self, device, position, data): print(device.serial+': suiciding in lov...') logging.info(device.serial+': suiciding in lov') # click mission shortcut shortcut = self.make_sure_loaded('./base/lov/arena.png', device, data['lov']['1']['dms'], data['lov']['1']['shell']+position, loop=20, cutoff=20, sleep_duration=10) if shortcut == 'loop': self.lov_ = True return 'not' logging.info(device.serial+': loaded from mission shortcut') # click select arena self.make_sure_loaded('./base/lov/arenas.png', device,
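# --- Illustrative sketch only: do_mission() above dispatches on OCR'd mission text
# by fuzzy-matching it against the entries of languages.json (via fuzzywuzzy's
# extractOne and a score cutoff). This standalone version shows the same idea with
# the stdlib difflib; SAMPLE_LANG_DATA is a made-up stand-in for one language block
# of languages.json, not its real contents.
from difflib import get_close_matches

SAMPLE_LANG_DATA = {
    'defeat the red dragon': 'dragon',
    'exchange friendship points': 'friendship',
    'visit the inn': 'inn',
}

def match_mission(ocr_text, lang_data, cutoff=0.6):
    """Return the internal mission key closest to the OCR'd text, or None."""
    hits = get_close_matches(ocr_text.lower().strip(), list(lang_data),
                             n=1, cutoff=cutoff)
    return lang_data[hits[0]] if hits else None

# match_mission('defeat the red drag0n', SAMPLE_LANG_DATA) -> 'dragon'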
''' TIEGCM Kamodo reader, adapted to new structure for satellite flythrough software Initial version - <NAME> (?) Initial version of model_varnames contributed by <NAME> New code: <NAME> (June 2021 and on) NOTE: The current logic for variables that depend on imlev slices off self._imlev coordinate This only works because there is one variable that depends on imlev: H_imlev The logic on lines 311-313 will have to be reworked a bit if other variables depend on imlev later. Remaining tasks: - check variable dictionary for proper naming, and use of units in Kamodo ''' from numpy import vectorize from datetime import datetime, timezone, timedelta ### Make a dict of the possible variable names in TIEGCM ### the intended format is: "Output VarName":['Latex_Varname', 'Latex_Unit' ] ### The constituent species are output in units of mass mixing ratio (mmr). ### Denoted by \psi_i, mmr is the fractional contribution of ### a species i to the total mass density \rho_{total}, and ### is calculated as \psi_i = \rho_i / \rho_{total}, where ### \rho_i is the mass density of a constituent species. model_varnames={ ### 4D Variables, vertical coordinate on midpoint levels (lev) "ZGMID" : ["H_ilev",'variable description',0,'GDZ','sph',['time','lon','lat','ilev'],"cm"], # geometric height- interpolated to the mid points "TN" : ["T_n",'variable description',1,'GDZ','sph',['time','lon','lat','ilev'],"K"], # neutral temperature "O2" : ["psi_O2",'variable description',2,'GDZ','sph',['time','lon','lat','ilev'],""], # molecular oxygen, mmr "O1" : ["psi_O",'variable description',3,'GDZ','sph',['time','lon','lat','ilev'],""], # atomic oxygen , mmr "N2" : ["psi_N2",'variable description',4,'GDZ','sph',['time','lon','lat','ilev'],""], # molecular nitrogen,mmr "HE" : ["psi_He",'variable description',5,'GDZ','sph',['time','lon','lat','ilev'],""], # helium , mmr "NO" : ["psi_NO",'variable description',6,'GDZ','sph',['time','lon','lat','ilev'],""], # nitric oxide , mmr "N4S" : ["psi_N4S",'variable description',7,'GDZ','sph',['time','lon','lat','ilev'],""], # N4S ?,mmr "N2D" : ["psi_N2D", 'variable description',8,'GDZ','sph',['time','lon','lat','ilev'],""], # N(2D) mmr "TE" : ["T_e",'variable description',9,'GDZ','sph',['time','lon','lat','ilev'],"K"], # ELECTRON TEMPERATURE, "TI" : ["T_i",'variable description',10,'GDZ','sph',['time','lon','lat','ilev'],"K"], # ION TEMPERATURE "O2P" : ["N_O2plus",'variable description',11,'GDZ','sph',['time','lon','lat','ilev'],"1/cm**3"], # O2+ ION "OP" : ["N_Oplus",'variable description',12,'GDZ','sph',['time','lon','lat','ilev'],"1/cm**3"], # O+ ION "N2N" : ["N_N2",'variable description',13,'GDZ','sph',['time','lon','lat','ilev'],"1/cm**3"], # molecular nitrogen (maybe number density),mmr "CO2_COOL" : ["Q_CO2cool",'variable description',14,'GDZ','sph',['time','lon','lat','ilev'],"erg/g/s"], # CO2 cooling rates "NO_COOL" : ["Q_NOcool",'variable description',15,'GDZ','sph',['time','lon','lat','ilev'],"erg/g/s"], # NO cooling rates "UN" : ["u_n",'variable description',16,'GDZ','sph',['time','lon','lat','ilev'],"cm/s"], # neutral ZONAL wind (+EAST) "VN" : ["v_n",'variable description',17,'GDZ','sph',['time','lon','lat','ilev'],"cm/s"], # neutral MERIDIONAL wind (+NORTH) "O2P_ELD" : ['O2P_ELD','variable description',18,'GDZ','sph',['time','lon','lat','ilev'],''], #NO DESCRIPTION GIVEN "N2P_ELD" :['N2P_ELD','variable description',19,'GDZ','sph',['time','lon','lat','ilev'],''], #NO DESCRIPTION GIVEN "NPLUS" :['N_Nplus','variable 
description',20,'GDZ','sph',['time','lon','lat','ilev'],'1/cm**3'], #GUESS ONLY based on other number densities "NOP_ELD" :['NOP_ELD','variable description',21,'GDZ','sph',['time','lon','lat','ilev'],''], #NO DESCRIPTION GIVEN "SIGMA_PED" :['Sigma_P','variable description',22,'GDZ','sph',['time','lon','lat','ilev'],'S/m'], #Pedersen Conductivity "SIGMA_HAL" :['Sigma_H','variable description',23,'GDZ','sph',['time','lon','lat','ilev'],'S/m'], #Hall Conductivity "QJOULE" :['Q_Joule','variable description',24,'GDZ','sph',['time','lon','lat','ilev'],'erg/g/s'], #Joule Heating "O_N2" :['psi_ON2','variable description',25,'GDZ','sph',['time','lon','lat','ilev'],''], #O/N2 RATIO "N2D_ELD" :['N2D_ELD','variable description',26,'GDZ','sph',['time','lon','lat','ilev'],''], #NO DESCRIPTION GIVEN "O2N" :['r_OtoN','variable description',27,'GDZ','sph',['time','lon','lat','ilev'],'1/cm**3'], #GUESS ONLY # ### 4D Variables, vertical coordinate on interface levels (ilev) "DEN" :["rho",'variable description',28,'GDZ','sph',['time','lon','lat','ilev1'],"g/cm**3"], # total neutral mass density "ZG" :["H_ilev1",'variable description',29,'GDZ','sph',['time','lon','lat','ilev1'],"cm"], # geometric height "Z" :["H_geopot",'variable description',30,'GDZ','sph',['time','lon','lat','ilev1'],"cm"], # geopotential height (cm) "NE" : ["N_e",'variable description',31,'GDZ','sph',['time','lon','lat','ilev1'],"1/cm**3"], # ELECTRON DENSITY "OMEGA" : ["omega",'variable description',32,'GDZ','sph',['time','lon','lat','ilev1'],"1/s"], # VERTICAL MOTION "POTEN" : ["V",'variable description',33,'GDZ','sph',['time','lon','lat','ilev1'],"V"], # ELECTRIC POTENTIAL "UI_ExB" : ["u_iExB",'variable description',34,'GDZ','sph',['time','lon','lat','ilev1'],'cm/s'], #Zonal ExB Velocity "VI_ExB" :["v_iExB",'variable description',35,'GDZ','sph',['time','lon','lat','ilev1'],'cm/s'], #Meridional ExB Velocity "WI_ExB" :["w_iExB", 'variable description',36,'GDZ','sph',['time','lon','lat','ilev1'], 'cm/s'], #Vertical ExB Velocity ### 4D Variables, vertical coordinate on interface mag levels (imlev) "ZMAG" : ["H_mag",'variable description',37,'MAG','sph',['time','mlon','mlat','milev'],"km"], # Geopotential Height on Geomagnetic Grid # ### 3D Variables, (time, lat, lon) "TEC" : ["TEC",'variable description',38,'GDZ','sph',['time','lon','lat'],"1/cm**2"], # Total Electron Content "TLBC" : ["T_nLBC",'variable description',39,'GDZ','sph',['time','lon','lat'],"K"], # Lower boundary condition for TN "ULBC" : ["u_nLBC",'variable description',40,'GDZ','sph',['time','lon','lat'],"cm/s"], # Lower boundary condition for UN "VLBC" : ["v_nLBC",'variable description',41,'GDZ','sph',['time','lon','lat'],"cm/s"], # Lower boundary condition for VN "TLBC_NM" : ["T_nLBCNM",'variable description',42,'GDZ','sph',['time','lon','lat'],"K"], # Lower boundary condition for TN (TIME N-1) "ULBC_NM" : ["u_nLBCNM",'variable description',43,'GDZ','sph',['time','lon','lat'],"cm/s"], # Lower boundary condition for UN (TIME N-1) "VLBC_NM" : ["v_nLBCNM",'variable description',44,'GDZ','sph',['time','lon','lat'],"cm/s"], # Lower boundary condition for VN (TIME N-1) "QJOULE_INTEG":["W_Joule",'variable description',45,'GDZ','sph',['time','lon','lat'],'erg/cm**2/s'], #Height-integrated Joule Heating "EFLUX" :['Eflux_aurora','variable description',46,'GDZ','sph',['time','lon','lat'],'erg/cm**2/s'], #Aurora Energy Flux "HMF2" :['HmF2','variable description',47,'GDZ','sph',['time','lon','lat'],'km'], # Height of the F2 Layer "NMF2" :['NmF2','variable 
description',48,'GDZ','sph',['time','lon','lat'],'1/cm**3'], #Peak Density of the F2 Layer } #####-------------------------------------------------------------------------------------- ##### Define some helpful functions for dealing with time systems def dts_to_ts(file_dts): '''Get datetime timestamp in UTC from datetime string''' return datetime.timestamp(datetime.strptime(file_dts, '%Y-%m-%d %H:%M:%S' ).replace(tzinfo=timezone.utc)) def year_mtime_todt0(year, mtime): #self.filedate '''Convert year and day to datetime object in UTC at midnight''' day, hour, minute = mtime #unpack mtime values return datetime(int(year),1,1).replace(tzinfo=timezone.utc)+\ timedelta(days=int(day-1)) def year_mtime_todt(year, mtime): '''Convert year and [day,hour,minute] to datetime object in UTC''' day, hour, minute = mtime #unpack mtime values return datetime(int(year),1,1).replace(tzinfo=timezone.utc)+\ timedelta(days=int(day-1),hours=int(hour),minutes=int(minute)) def year_mtime_todts(year, mtime): '''Convert year and mtime to a datetime string''' return datetime.strftime(year_mtime_todt(year, mtime), '%Y-%m-%d %H:%M:%S') def year_mtime_todate(year, mtime): '''Use year and mtime to determine the date in the file. Returns a datetime object.''' date_string = datetime.strftime(year_mtime_todt(year, mtime), '%Y-%m-%d') #'YYYY-MM-DD' return datetime.strptime(date_string, '%Y-%m-%d').replace(tzinfo=timezone.utc) @vectorize def year_mtime_tohrs(year, day, hour, minute, filedate): '''Convert year and mtime to hours since midnight using predetermined datetime object.''' mtime = [day, hour, minute] return (year_mtime_todt(year, mtime)-filedate).total_seconds()/3600. def ts_to_hrs(time_val, filedate): '''Convert utc timestamp to hours since midnight on filedate.''' return (datetime.utcfromtimestamp(time_val).replace(tzinfo=timezone.utc)-filedate).total_seconds()/3600. 
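# Illustrative check of the time helpers above (not used by the reader itself).
# A TIEGCM 'mtime' triple is [day-of-year, hour, minute]; the sample values below
# are made up.
def _example_time_conversion():
    '''Sketch: year + mtime -> file date, datetime string, hours since midnight.'''
    sample_year, sample_mtime = 2015, [80, 6, 30]               # day 80, 06:30 UT
    filedate = year_mtime_todt0(sample_year, sample_mtime)      # 2015-03-21 00:00 UTC
    date_string = year_mtime_todts(sample_year, sample_mtime)   # '2015-03-21 06:30:00'
    hrs = year_mtime_tohrs(sample_year, 80, 6, 30, filedate)    # 6.5 hours
    return filedate, date_string, hrs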
def MODEL(): from time import perf_counter from os.path import basename from numpy import zeros, transpose, array, append, insert, where, unique from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum from numpy import pi as nppi from netCDF4 import Dataset from kamodo import Kamodo #print('KAMODO IMPORTED!') from kamodo.readers.reader_utilities import regdef_3D_interpolators, regdef_4D_interpolators class MODEL(Kamodo): '''TIEGCM model data reader.''' def __init__(self, full_filename, variables_requested=[], runname="noname", filetime=False, verbose=False, gridded_int=True, printfiles=False, fulltime=True, **kwargs): #filename should include the full path #### Use a super init so that your class inherits any methods from Kamodo super(MODEL, self).__init__() #store time information for satellite flythrough layer to choose the right file t0 = perf_counter() filename = basename(full_filename) file_dir = full_filename.split(filename)[0] cdf_data = Dataset(full_filename, 'r') #calculate time information year = array(cdf_data.variables['year']) mtime = array(cdf_data.variables['mtime']) day, hour, minute = mtime.T #only matters for the vectorized function self.filedate = year_mtime_todt0(year[0], mtime[0]) #datetime object for file date at midnight UTC self.datetimes = [year_mtime_todts(y, m) for y, m \ in zip([year[0], year[-1]],[mtime[0],mtime[-1]])] #strings in format = YYYY-MM-DD HH:MM:SS self.filetimes=[dts_to_ts(file_dts) for file_dts in self.datetimes] #timestamps in UTC time = year_mtime_tohrs(year, day, hour, minute, self.filedate) #time = array([year_mtime_tohrs(y, m, self.filedate) for y, m in \ # zip(year, mtime)]) #hours since midnight of self.filedate self.dt = diff(time).max()*3600. #time is in hours if filetime and not fulltime: #(used when searching for neighboring files below) return #return times as is to prevent recursion #if variables are given as integers, convert to standard names if len(variables_requested)>0: if isinstance(variables_requested[0], int): tmp_var = [value[0] for key, value in model_varnames.items()\ if value[2] in variables_requested] variables_requested = tmp_var if fulltime: #add boundary time (default value) #find other files with same pattern from glob import glob file_pattern = file_dir+'s*.nc' #returns a string for tiegcm files = sorted(glob(file_pattern)) filenames = unique([basename(f) for f in files]) #find closest file by utc timestamp #tiegcm has an open time at the beginning, so need an end time from the previous file #files are automatically sorted by YYMMDD, so previous file is previous in the list current_idx = where(filenames==filename)[0] if current_idx==0: print('No earlier file available.') filecheck = False if filetime: return else: min_filename = file_dir+filenames[current_idx-1][0] #-1 for adding a beginning time kamodo_test = MODEL(min_filename, filetime=True, fulltime=False) time_test = abs(kamodo_test.filetimes[1]-self.filetimes[0]) if time_test<=self.dt: #if nearest file time at least within one timestep (hrs) filecheck = True #time only version if returning time for searching if filetime: kamodo_neighbor = MODEL(min_filename, fulltime=False, filetime=True) self.datetimes[0] = kamodo_neighbor.datetimes[1] self.filetimes[0] = kamodo_neighbor.filetimes[1] return #return object with additional time (for SF code) #get kamodo object with same requested variables to add to each array below if verbose: print(f'Took {perf_counter()-t0:.3f}s to find closest file.') kamodo_neighbor = MODEL(min_filename, 
variables_requested=variables_requested, fulltime=False) self.datetimes[0] = kamodo_neighbor.datetimes[1] self.filetimes[0] = kamodo_neighbor.filetimes[1] short_data = kamodo_neighbor.short_data if verbose: print(f'Took {perf_counter()-t0:.3f}s to get data from previous file.') else: print(f'No earlier file found within {self.dt:.1f}s') filecheck = False if filetime: return #These lists need to be the standardized variable name to match that above, #not the names from the data file. self.ilev1_list = [value[0] for key, value in model_varnames.items() if value[5][-1]=='ilev1'] self.ilev_list = [value[0] for key, value in model_varnames.items() if value[5][-1]=='ilev'] self.milev_list
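# --- Illustrative sketch only (standalone, helper name is an assumption): the
# boundary-time handling above hinges on locating the file that precedes the
# current one in the sorted directory listing, then checking whether its last
# timestep lies within one model timestep (self.dt) of this file's first timestep.
# The directory walk itself reduces to something like this.
from glob import glob
from os.path import basename

def find_previous_file(file_dir, filename, pattern='s*.nc'):
    '''Return the path of the file directly before `filename` in sorted order,
    or None when `filename` is the earliest file in the directory.'''
    names = sorted(basename(f) for f in glob(file_dir + pattern))
    idx = names.index(filename)
    return None if idx == 0 else file_dir + names[idx - 1]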
None, 4, None, None, "StorageQuotaLimit"), 0x3ff6: (0x0003, None, None, 4, None, None, "ExcessStorageUsed"), 0x3ff7: (0x001f, None, None, 4, None, None, "SvrGeneratingQuotaMsg"), 0x3fc2: (0x000b, None, None, 4, None, None, "PrimaryMbxOverQuota"), 0x65c6: (0x0003, None, None, 4, None, None, "SecureSubmitFlags"), 0x673e: (0x0102, None, None, 4, None, None, "PropertyGroupInformation"), 0x6784: (0x0102, None, None, 4, None, None, "SearchRestriction"), 0x67b0: (0x00fd, None, None, 4, None, None, "ViewRestriction"), 0x6788: (0x0003, None, None, 4, None, None, "LCIDRestriction"), 0x676e: (0x0003, None, None, 4, None, None, "LCID"), 0x67f3: (0x0040, None, None, 4, None, None, "ViewAccessTime"), 0x689e: (0x0003, None, None, 4, None, None, "CategCount"), 0x6819: (0x000b, None, None, 4, None, None, "SoftDeletedFilter"), 0x681b: (0x000b, None, None, 4, None, None, "ConversationsFilter"), 0x689c: (0x0003, None, None, 4, None, None, "DVUIdLowest"), 0x689d: (0x0003, None, None, 4, None, None, "DVUIdHighest"), 0x6880: (0x101f, None, None, 4, None, None, "ConversationMvFrom"), 0x6881: (0x101f, None, None, 4, None, None, "ConversationMvFromMailboxWide"), 0x6882: (0x101f, None, None, 4, None, None, "ConversationMvTo"), 0x6883: (0x101f, None, None, 4, None, None, "ConversationMvToMailboxWide"), 0x6884: (0x0040, None, None, 4, None, None, "ConversationMsgDeliveryTime"), 0x6885: (0x0040, None, None, 4, None, None, "ConversationMsgDeliveryTimeMailboxWide"), 0x6886: (0x101f, None, None, 4, None, None, "ConversationCategories"), 0x6887: (0x101f, None, None, 4, None, None, "ConversationCategoriesMailboxWide"), 0x6888: (0x0003, None, None, 4, None, None, "ConversationFlagStatus"), 0x6889: (0x0003, None, None, 4, None, None, "ConversationFlagStatusMailboxWide"), 0x688a: (0x0040, None, None, 4, None, None, "ConversationFlagCompleteTime"), 0x688b: (0x0040, None, None, 4, None, None, "ConversationFlagCompleteTimeMailboxWide"), 0x688c: (0x000b, None, None, 4, None, None, "ConversationHasAttach"), 0x688d: (0x000b, None, None, 4, None, None, "ConversationHasAttachMailboxWide"), 0x688e: (0x0003, None, None, 4, None, None, "ConversationContentCount"), 0x688f: (0x0003, None, None, 4, None, None, "ConversationContentCountMailboxWide"), 0x6893: (0x0003, None, None, 4, None, None, "ConversationMessageSizeMailboxWide"), 0x6894: (0x101f, None, None, 4, None, None, "ConversationMessageClasses"), 0x6895: (0x101f, None, None, 4, None, None, "ConversationMessageClassesMailboxWide"), 0x6896: (0x0003, None, None, 4, None, None, "ConversationReplyForwardState"), 0x6897: (0x0003, None, None, 4, None, None, "ConversationReplyForwardStateMailboxWide"), 0x6898: (0x0003, None, None, 4, None, None, "ConversationImportance"), 0x6899: (0x0003, None, None, 4, None, None, "ConversationImportanceMailboxWide"), 0x689a: (0x101f, None, None, 4, None, None, "ConversationMvFromUnread"), 0x689b: (0x101f, None, None, 4, None, None, "ConversationMvFromUnreadMailboxWide"), 0x68a0: (0x1102, None, None, 4, None, None, "ConversationMvItemIds"), 0x68a1: (0x1102, None, None, 4, None, None, "ConversationMvItemIdsMailboxWide"), 0x68a2: (0x000b, None, None, 4, None, None, "ConversationHasIrm"), 0x68a3: (0x000b, None, None, 4, None, None, "ConversationHasIrmMailboxWide"), 0x682c: (0x0040, None, None, 4, None, None, "TransportSyncSubscriptionListTimestamp"), 0x3690: (0x0102, None, None, 4, None, None, "TransportRulesSnapshot"), 0x3691: (0x0048, None, None, 4, None, None, "TransportRulesSnapshotId"), 0x7c05: (0x0040, None, None, 4, None, None, 
"DeletedMessageSizeExtendedLastModificationTime"), 0x0082: (0x001f, None, None, 4, None, None, "ReportOriginalSender"), 0x0083: (0x001f, None, None, 4, None, None, "ReportDispositionToNames"), 0x0084: (0x001f, None, None, 4, None, None, "ReportDispositionToEmailAddress"), 0x0085: (0x001f, None, None, 4, None, None, "ReportDispositionOptions"), 0x0086: (0x0002, None, None, 4, None, None, "RichContent"), 0x0100: (0x101f, None, None, 4, None, None, "AdministratorEMail"), 0x0c24: (0x0102, None, None, 4, None, None, "ParticipantSID"), 0x0c25: (0x0102, None, None, 4, None, None, "ParticipantGuid"), 0x0c26: (0x001f, None, None, 4, None, None, "ToGroupExpansionRecipients"), 0x0c27: (0x001f, None, None, 4, None, None, "CcGroupExpansionRecipients"), 0x0c28: (0x001f, None, None, 4, None, None, "BccGroupExpansionRecipients"), 0x0e0b: (0x0102, None, None, 4, None, None, "ImmutableEntryId"), 0x0e2e: (0x0003, None, None, 4, None, None, "MessageIsHidden"), 0x0e33: (0x001f, None, None, 4, None, None, "OlcPopId"), 0x0e38: (0x0003, None, None, 4, None, None, "ReplFlags"), 0x0e40: (0x0102, None, None, 4, None, None, "SenderGuid"), 0x0e41: (0x0102, None, None, 4, None, None, "SentRepresentingGuid"), 0x0e42: (0x0102, None, None, 4, None, None, "OriginalSenderGuid"), 0x0e43: (0x0102, None, None, 4, None, None, "OriginalSentRepresentingGuid"), 0x0e44: (0x0102, None, None, 4, None, None, "ReadReceiptGuid"), 0x0e45: (0x0102, None, None, 4, None, None, "ReportGuid"), 0x0e46: (0x0102, None, None, 4, None, None, "OriginatorGuid"), 0x0e47: (0x0102, None, None, 4, None, None, "ReportDestinationGuid"), 0x0e48: (0x0102, None, None, 4, None, None, "OriginalAuthorGuid"), 0x0e49: (0x0102, None, None, 4, None, None, "ReceivedByGuid"), 0x0e4a: (0x0102, None, None, 4, None, None, "ReceivedRepresentingGuid"), 0x0e4b: (0x0102, None, None, 4, None, None, "CreatorGuid"), 0x0e4c: (0x0102, None, None, 4, None, None, "LastModifierGuid"), 0x0e4d: (0x0102, None, None, 4, None, None, "SenderSID"), 0x0e4e: (0x0102, None, None, 4, None, None, "SentRepresentingSID"), 0x0e4f: (0x0102, None, None, 4, None, None, "OriginalSenderSid"), 0x0e50: (0x0102, None, None, 4, None, None, "OriginalSentRepresentingSid"), 0x0e51: (0x0102, None, None, 4, None, None, "ReadReceiptSid"), 0x0e52: (0x0102, None, None, 4, None, None, "ReportSid"), 0x0e53: (0x0102, None, None, 4, None, None, "OriginatorSid"), 0x0e54: (0x0102, None, None, 4, None, None, "ReportDestinationSid"), 0x0e55: (0x0102, None, None, 4, None, None, "OriginalAuthorSid"), 0x0e56: (0x0102, None, None, 4, None, None, "ReceivedBySid"), 0x0e57: (0x0102, None, None, 4, None, None, "ReceivedRepresentingSid"), 0x0e58: (0x0102, None, None, 4, None, None, "CreatorSID"), 0x0e59: (0x0102, None, None, 4, None, None, "LastModifierSid"), 0x0e5a: (0x0102, None, None, 4, None, None, "RecipientCAI"), 0x0e5b: (0x0102, None, None, 4, None, None, "ConversationCreatorSID"), 0x0e5d: (0x000b, None, None, 4, None, None, "IsUserKeyDecryptPossible"), 0x0e5e: (0x0003, None, None, 4, None, None, "MaxIndices"), 0x0e5f: (0x0014, None, None, 4, None, None, "SourceFid"), 0x0e60: (0x0102, None, None, 4, None, None, "PFContactsGuid"), 0x0e61: (0x0003, None, None, 4, None, None, "UrlCompNamePostfix"), 0x0e62: (0x000b, None, None, 4, None, None, "URLCompNameSet"), 0x0e64: (0x0003, None, None, 4, None, None, "DeletedSubfolderCount"), 0x0e68: (0x0003, None, None, 4, None, None, "MaxCachedViews"), 0x0e6b: (0x001f, None, None, 4, None, None, "AdminNTSecurityDescriptorAsXML"), 0x0e6c: (0x001f, None, None, 4, None, None, 
"CreatorSidAsXML"), 0x0e6d: (0x001f, None, None, 4, None, None, "LastModifierSidAsXML"), 0x0e6e: (0x001f, None, None, 4, None, None, "SenderSIDAsXML"), 0x0e6f: (0x001f, None, None, 4, None, None, "SentRepresentingSidAsXML"), 0x0e70: (0x001f, None, None, 4, None, None, "OriginalSenderSIDAsXML"), 0x0e71: (0x001f, None, None, 4, None, None, "OriginalSentRepresentingSIDAsXML"), 0x0e72: (0x001f, None, None, 4, None, None, "ReadReceiptSIDAsXML"), 0x0e73: (0x001f, None, None, 4, None, None, "ReportSIDAsXML"), 0x0e74: (0x001f, None, None, 4, None, None, "OriginatorSidAsXML"), 0x0e75: (0x001f, None, None, 4, None, None, "ReportDestinationSIDAsXML"), 0x0e76: (0x001f, None, None, 4, None, None, "OriginalAuthorSIDAsXML"), 0x0e77: (0x001f, None, None, 4, None, None, "ReceivedBySIDAsXML"), 0x0e78: (0x001f, None, None, 4, None, None, "ReceivedRepersentingSIDAsXML"), 0x0e7a: (0x0102, None, None, 4, None, None, "MergeMidsetDeleted"), 0x0e7b: (0x0102, None, None, 4, None, None, "ReserveRangeOfIDs"), 0x0e97: (0x001f, None, None, 4, None, None, "AddrTo"), 0x0e98: (0x001f, None, None, 4, None, None, "AddrCc"), 0x0e9f: (0x101f, None, None, 4, None, None, "EntourageSentHistory"), 0x0ea2: (0x0003, None, None, 4, None, None, "ProofInProgress"), 0x0ea5: (0x001f, None, None, 4, None, None, "SearchAttachmentsOLK"), 0x0ea6: (0x001f, None, None, 4, None, None, "SearchRecipEmailTo"), 0x0ea7: (0x001f, None, None, 4, None, None, "SearchRecipEmailCc"), 0x0ea8: (0x001f, None, None, 4, None, None, "SearchRecipEmailBcc"), 0x0eaa: (0x0003, None, None, 4, None, None, "SFGAOFlags"), 0x0ece: (0x000b, None, None, 4, None, None, "SearchIsPartiallyIndexed"), 0x0ecf: (0x001f, None, None, 4, None, None, "SearchUniqueBody"), 0x0ed0: (0x0003, None, None, 4, None, None, "SearchErrorCode"), 0x0ed1: (0x0040, None, None, 4, None, None, "SearchReceivedTime"), 0x0ed2: (0x0003, None, None, 4, None, None, "SearchNumberOfTopRankedResults"), 0x0ed3: (0x0003, None, None, 4, None, None, "SearchControlFlags"), 0x0ed4: (0x001f, None, None, 4, None, None, "SearchRankingModel"), 0x0ed5: (0x0003, None, None, 4, None, None, "SearchMinimumNumberOfDateOrderedResults"), 0x0ed6: (0x001f, None, None, 4, None, None, "SearchSharePointOnlineSearchableProps"), 0x0ed7: (0x0002, None, None, 4, None, None, "SearchRelevanceRankedResults"), 0x0edd: (0x0102, None, None, 4, None, None, "MailboxSyncState"), 0x0f01: (0x0040, None, None, 4, None, None, "RenewTime"), 0x0f02: (0x0040, None, None, 4, None, None, "DeliveryOrRenewTime"), 0x0f03: (0x0102, None, None, 4, None, None, "ConversationThreadId"), 0x0f04: (0x0003, None, None, 4, None, None, "LikeCount"), 0x0f05: (0x0002, None, None, 4, None, None, "RichContentDeprecated"), 0x0f06: (0x0003, None, None, 4, None, None, "PeopleCentricConversationId"), 0x0f07: (0x0040, None, None, 4, None, None, "ReturnTime"), 0x0f08: (0x0040, None, None, 4, None, None, "LastAttachmentsProcessedTime"), 0x0f0a: (0x0040, None, None, 4, None, None, "LastActivityTime"), 0x100a: (0x0102, None, None, 4, None, None, "AlternateBestBody"), 0x100c: (0x0102, None, None, 4, None, None, "IsIntegJobCorruptions"), 0x100e: (0x0002, None, None, 4, None, None, "IsIntegJobPriority"), 0x100f: (0x0005, None, None, 4, None, None, "IsIntegJobTimeInServer"), 0x1017: (0x0102, None, None, 4, None, None, "AnnotationToken"), 0x1030: (0x001f, None, None, 4, None, None, "InternetApproved"), 0x1033: (0x001f, None, None, 4, None, None, "InternetFollowupTo"), 0x1036: (0x001f, None, None, 4, None, None, "InetNewsgroups"), 0x103d: (0x0102, None, None, 4, None, None, 
"PostReplyFolderEntries"), 0x1040: (0x001f, None, None, 4, None, None, "NNTPXRef"), 0x1084: (0x0003, None, None, 4, None, None, "Relevance"), 0x1092: (0x0003, None, None, 4, None, None, "FormatPT"), 0x10c0: (0x0102, None, None, 4, None, None, "SMTPTempTblData"), 0x10c1: (0x0003, None, None, 4, None, None, "SMTPTempTblData2"), 0x10c2: (0x0102, None, None, 4, None, None, "SMTPTempTblData3"), 0x10f0: (0x0102, None,
'loss {:5.2f} | ppl {:8.2f}' .format(model_id, epoch, internal_epoch, batch,train_data.size(0) // helper.params['bptt'], helper.params['lr'], elapsed * 1000 / helper.params['log_interval'], cur_loss, math.exp(cur_loss) if cur_loss < 30 else -1.)) total_loss = 0 start_time = time.time() # logger.info(f'model {model_id} distance: {helper.model_dist_norm(model, target_params_variables)}') epoch_loss, epoch_acc = test(helper=helper, epoch=epoch, data_source=helper.test_data, model=model, is_poison=False, visualize=False) # Save benign model helper.local_models_epoch[current_data_model] = copy.deepcopy(model) #data, targets = next(iter(helper.test_data)); data = data.cuda() #activations = model(helper.original_input) #helper.local_activations_epoch[current_data_model] = activations helper.save_local_model(model_id=current_data_model, model=model, epoch=epoch, val_loss=epoch_loss, val_acc=epoch_acc, adversary=False) if helper.params['track_distance'] and model_id < 10: # we can calculate distance to this model now. distance_to_global_model = helper.model_dist_norm(model, target_params_variables) logger.info( f'MODEL {model_id}. P-norm is {helper.model_global_norm(model):.4f}. ' f'Distance to the global model: {distance_to_global_model:.4f}. ' f'Dataset size: {train_data.size(0)}') helper.local_models_weight_delta[current_data_model] = {} pooled_array = np.array([]) mp_2d = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, return_indices=False, ceil_mode=False) # Track local model's distance to global model distance_to_global_model = helper.model_dist_norm(model, target_params_variables) local_global_dist_norms.append(round(distance_to_global_model,2)) for name, data in model.state_dict().items(): #### don't scale tied weights: if helper.params.get('tied', False) and name == 'decoder.weight' or '__'in name: continue weight_accumulator[name].add_(data - target_model.state_dict()[name]) helper.local_models_weight_delta[current_data_model][name] = data - target_model.state_dict()[name] ## Save pooled arrays for MAD outlier detection if name in helper.params["mad_layer_names"]: if helper.params["pool"]: if(len(data.shape) == 4): p = mp_2d(data.cpu()).reshape((1, 1, -1)) else: mp_1d = nn.MaxPool1d(kernel_size=data.shape[0], stride=data.shape[0], padding=0) p = mp_1d(data.cpu().reshape((1, 1, -1))) arr = np.array(p) pooled_array = np.append(pooled_array, arr) if not(helper.params["pool"]) or helper.params["global_model_aggregation"] == "foolsgold": output_weights = torch.cat([model.state_dict()[x].view(-1) for x in helper.params["mad_layer_names"]]) pooled_array = np.array(output_weights.cpu()) helper.pooled_arrays[current_data_model] = pooled_array # Foolsgold: Aggregate historical vector of the ouput layer if helper.params["global_model_aggregation"] == "foolsgold": target_weights = torch.cat([target_model.state_dict()[x].view(-1) for x in helper.params["mad_layer_names"]]) helper.historical_output_weights[current_data_model] += (output_weights - target_weights) logger.info(f'Finish training all local clients.') if helper.params["fake_participants_save"]: torch.save(weight_accumulator, f"{helper.params['fake_participants_file']}_" f"{helper.params['s_norm']}_{helper.params['no_models']}") elif helper.params["fake_participants_load"]: fake_models = helper.params['no_models'] - helper.params['number_of_adversaries'] fake_weight_accumulator = torch.load( f"{helper.params['fake_participants_file']}_{helper.params['s_norm']}_{fake_models}") logger.info(f"Faking data for {fake_models}") 
for name in target_model.state_dict().keys(): #### don't scale tied weights: if helper.params.get('tied', False) and name == 'decoder.weight' or '__'in name: continue weight_accumulator[name].add_(fake_weight_accumulator[name]) # Take the average distance to global models helper.median_distance_to_global.append(np.median(local_global_dist_norms)) return weight_accumulator ## Get loss and acc on data_source def test(helper, epoch, data_source, model, is_poison=False, visualize=True): model.eval() total_loss = 0 correct = 0 total_test_words = 0 if helper.params['type'] == 'text': hidden = model.init_hidden(helper.params['test_batch_size']) random_print_output_batch = \ random.sample(range(0, (data_source.size(0) // helper.params['bptt']) - 1), 1)[0] data_iterator = range(0, data_source.size(0)-1, helper.params['bptt']) dataset_size = len(data_source) else: dataset_size = len(data_source.dataset) data_iterator = data_source for batch_id, batch in enumerate(data_iterator): data, targets = helper.get_batch(data_source, batch, evaluation=True) if helper.params['type'] == 'text': output, hidden = model(data, hidden) output_flat = output.view(-1, helper.n_tokens) total_loss += len(data) * criterion(output_flat, targets).data hidden = helper.repackage_hidden(hidden) pred = output_flat.data.max(1)[1] correct += pred.eq(targets.data).sum().to(dtype=torch.float) total_test_words += targets.data.shape[0] ### output random result :) if batch_id == random_print_output_batch * helper.params['bptt'] and \ helper.params['output_examples'] and epoch % 5 == 0: expected_sentence = helper.get_sentence(targets.data.view_as(data)[:, 0]) expected_sentence = f'*EXPECTED*: {expected_sentence}' predicted_sentence = helper.get_sentence(pred.view_as(data)[:, 0]) predicted_sentence = f'*PREDICTED*: {predicted_sentence}' score = 100. 
* pred.eq(targets.data).sum() / targets.data.shape[0] logger.info(expected_sentence) logger.info(predicted_sentence) else: output = model(data) total_loss += nn.functional.cross_entropy(output, targets, reduction='sum').item() # sum up batch loss pred = output.data.max(1)[1] # get the index of the max log-probability correct += pred.eq(targets.data.view_as(pred)).cpu().sum().item() if helper.params['type'] == 'text': acc = 100.0 * (correct / total_test_words) total_l = total_loss.item() / (dataset_size-1) logger.info('___Test {} poisoned: {}, epoch: {}: Average loss: {:.4f}, ' 'Accuracy: {}/{} ({:.4f}%)'.format(model.name, is_poison, epoch, total_l, correct, total_test_words, acc)) acc = acc.item() total_l = total_l.item() else: acc = 100.0 * (float(correct) / float(dataset_size)) total_l = total_loss / dataset_size if is_poison: logger.info('___Test {} on poison test set, epoch: {}: Average loss: {:.4f}, ' 'Accuracy: {}/{} ({:.4f}%)'.format(model.name, epoch, total_l, correct, dataset_size, acc)) else: logger.info('___Test {} on benign test set, epoch: {}: Average loss: {:.4f}, ' 'Accuracy: {}/{} ({:.4f}%)'.format(model.name, epoch, total_l, correct, dataset_size, acc)) model.train() return (total_l, acc) # Test the model on poisoned images with labels swapped def test_poison(helper, epoch, data_source, model, is_poison=False, visualize=True): # Set the evaluation mode model.eval() total_loss = 0.0 correct = 0.0 total_test_words = 0.0 batch_size = helper.params['test_batch_size'] # Testing on text data if helper.params['type'] == 'text': ntokens = len(helper.corpus.dictionary) hidden = model.init_hidden(batch_size) data_iterator = range(0, data_source.size(0) - 1, helper.params['bptt']) dataset_size = len(data_source) for batch_id, batch in enumerate(data_iterator): data, targets = helper.get_batch(data_source, batch, evaluation=True) output, hidden = model(data, hidden) output_flat = output.view(-1, ntokens) total_loss += 1 * criterion(output_flat[-batch_size:], targets[-batch_size:]).data hidden = helper.repackage_hidden(hidden) ### Look only at predictions for the last words. 
# For tensor [640] we look at last 10, as we flattened the vector [64,10] to 640 # example, where we want to check for last line (b,d,f) # a c e -> a c e b d f # b d f pred = output_flat.data.max(1)[1][-batch_size:] correct_output = targets.data[-batch_size:] correct += pred.eq(correct_output).sum() total_test_words += batch_size acc = 100.0 * (correct / total_test_words) total_l = total_loss.item() / dataset_size # Testing on image data elif helper.params['type'] == 'image': data_iterator = data_source dataset_size = 1000 for batch_id, batch in enumerate(data_iterator): data, targets = helper.get_batch(data_source, batch, evaluation=True) output = model(data) total_loss += nn.functional.cross_entropy(output, targets, reduction='sum').data.item() # sum up batch loss pred = output.data.max(1)[1] # get the index of the max log-probability correct += pred.eq(targets.data.view_as(pred)).cpu().sum().to(dtype=torch.float) acc = 100.0 * (correct / dataset_size) total_l = total_loss / dataset_size logger.info('Test {} poisoned: {}, epoch: {}: Average loss: {:.4f}, ' 'Accuracy: {}/{} ({:.0f}%)'.format(model.name, is_poison, epoch, total_l, correct, dataset_size, acc)) model.train() return total_l, acc if __name__ == '__main__': print('Start federated training') time_start_load_everything = time.time() parser = argparse.ArgumentParser(description='PPDL') parser.add_argument('--params', dest='params') args = parser.parse_args() with open(f'./{args.params}', 'r') as f: params_loaded = yaml.load(f) current_time = datetime.datetime.now().strftime('%b.%d_%H.%M.%S') # Initialize the helper if params_loaded['type'] == "image": helper = ImageHelper(current_time=current_time, params=params_loaded, name=params_loaded.get('name', 'image')) else: helper = TextHelper(current_time=current_time, params=params_loaded, name=params_loaded.get('name', 'text')) # Load train/test data helper.load_data() helper.create_model() if helper.params["global_model_aggregation"] == "diff_input": diff_input_logger.info("epoch, img, label, loss1, loss2, final_loss, outliers") # Decide the adversary list if helper.params['is_poison']: # Determine the adversary list: 0 is the fixed adversary helper.params['adversary_list'] = [0]+ \ random.sample(range(helper.params['number_of_total_participants']), helper.params['number_of_adversaries']-1) logger.info(f"Poisoned following participants: {len(helper.params['adversary_list'])}") else: helper.params['adversary_list'] = list() best_loss = float('inf') participant_ids = range(len(helper.train_data)) mean_acc = list() results = {'poison': list(), 'number_of_adversaries': helper.params['number_of_adversaries'], 'poison_type': helper.params['poison_type'], 'current_time': current_time, 'sentence': helper.params.get('poison_sentences', False), 'random_compromise': helper.params['random_compromise'], 'baseline': helper.params['baseline']} weight_accumulator = None # FoolsGold: initialize historical weight vectors if helper.params["global_model_aggregation"] == "foolsgold": vector_len = 0 for layer_name in helper.params["mad_layer_names"]: vector_len += helper.target_model.state_dict()[layer_name].view(-1).shape[-1] zero_tensor = torch.zeros((vector_len)) for pcp_id in participant_ids: helper.historical_output_weights[pcp_id] = zero_tensor.cuda() # save parameters: with open(f'{helper.folder_path}/params.yaml', 'w') as f: yaml.dump(helper.params, f) dist_list = list() for epoch in range(helper.start_epoch, helper.params['epochs'] + 1): start_time = time.time() # Random compromise - randomly 
select clients based on no_models if helper.params["random_compromise"] and epoch > 1: subset_data_chunks = random.sample(participant_ids, helper.params['no_models']) if len(set(subset_data_chunks) & set(helper.params['adversary_list'])) > 0: helper.params["poison_epochs"].append(epoch) ### As we assume that compromised attackers can coordinate ### Then a single attacker will just submit scaled weights by # ### of attackers in selected round. Other attackers won't submit. # already_poisoning = False # for pos, loader_id in enumerate(subset_data_chunks): # if loader_id in helper.params['adversary_list']: # if already_poisoning: # logger.info(f'Compromised: {loader_id}. Skipping.') # subset_data_chunks[pos] = -1 # else: # logger.info(f'Compromised: {loader_id}') # already_poisoning = True # helper.params["poison_epochs"].append(epoch) ## Only sample non-poisoned participants until poisoned_epoch else: if epoch in helper.params['poison_epochs']: ### For poison epoch we put one adversary and other adversaries just stay quiet benign_ids = list(set(participant_ids) - set(helper.params['adversary_list'])) subset_data_chunks = helper.params['adversary_list'] + random.sample(benign_ids, helper.params['no_models'] - helper.params['number_of_adversaries']) # subset_data_chunks = [participant_ids[0]] + [-1] * ( # helper.params['number_of_adversaries'] - 1) + \ # random.sample(benign_ids, # helper.params['no_models'] - helper.params[ # 'number_of_adversaries']) else: benign_ids = list(set(participant_ids) - set(helper.params['adversary_list'])) subset_data_chunks = random.sample(benign_ids, helper.params['no_models']) logger.info(f'Selected models: {subset_data_chunks}') t=time.time() ## ====== Train all selected local clients ======== ## weight_accumulator = train(helper=helper, epoch=epoch, train_data_sets=[(pos, helper.train_data[pos]) for pos in subset_data_chunks], local_model=helper.local_model, target_model=helper.target_model, is_poison=helper.params['is_poison']) logger.info(f'time spent on training: {time.time() - t}') # Global model aggregation # Baseline agg_start = time.time() if helper.params["global_model_aggregation"] == "avg": logger.info("aggregate model updates with baseline averaging") helper.average_shrink_models(target_model=helper.target_model, weight_accumulator=weight_accumulator, epoch=epoch) # Aggregate MAD inlier weight updates if helper.params["global_model_aggregation"] == "mad": logger.info("aggregate model updates with MAD outlier detection") #weight_accumulator2 = helper.accumulate_inliers_weight_delta() weight_accumulator2 = helper.accumulate_inliers_weight_delta(ind_features=helper.params["mad_ind_features"]) helper.average_shrink_models(target_model=helper.target_model, weight_accumulator=weight_accumulator2, epoch=epoch) # Krum Aggregate if helper.params["global_model_aggregation"] == "krum": logger.info("aggregate model updates with Krum") weight_accumulator2 = helper.krum_aggregate() helper.average_shrink_models(target_model=helper.target_model, weight_accumulator=weight_accumulator2, epoch=epoch) # Aggregate based on coordinate wise median if helper.params["global_model_aggregation"] ==
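# --- Illustrative sketch only (not the project's average_shrink_models): the
# weight_accumulator returned by train() holds the summed per-client deltas
# (local_state - global_state). A plain FedAvg-style aggregation then scales that
# sum by eta / no_models and applies it to the global model; eta and no_models are
# assumed parameter names here.
import torch

def fedavg_update(global_model, weight_accumulator, no_models, eta=1.0):
    '''Apply the averaged client deltas to the global model in place.'''
    for name, param in global_model.state_dict().items():
        update = weight_accumulator[name] * (eta / float(no_models))
        param.add_(update.to(param.dtype))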
the normalized representations of the given embeddings. :returns: normalized :raises: :py:exc:`ValueError` if :math:`mean\left(\frac{n_j}{|n_j|}\right)` of the scaled normal vectors :math:`n_j` is 0 """ # Value checks: # Check embeddings length cls._validate_embedding_list(embeddings) # Get normalized versions of embeddings with positive scaling factor normed_embs = [e.normalize().to_pos_scaling() for e in embeddings] scaling_factors = [float(e.scaling_factor) for e in normed_embs] # Set scaling factors to 1. to make .scale() have no effect normed_unscaled_embs: List[ConceptEmbedding] = [e.forget_scaling() for e in normed_embs] # Now calculate distance mean and normalize normal vector mean_embedding: ConceptEmbedding = cls.mean_by_distance( normed_unscaled_embs).normalize() # The scaling_factor should be the mean of the given scaling factors: mean_embedding.scaling_factor = np.mean(scaling_factors) return mean_embedding @staticmethod def _validate_embedding_list(embeddings: Sequence['ConceptEmbedding'] ) -> None: """Check whether given embeddings list is suitable for calculating a mean. :raises: :py:exc:`ValueError` if any condition is not fulfilled. """ # Check embeddings length if len(embeddings) == 0: raise ValueError("Got empty list of embeddings for calculating " "mean.") # Check that all use the same concept concept: Concept = embeddings[0].concept for emb in embeddings: if not concept.name == emb.concept.name: raise ValueError(("Called mean on embeddings of different " "concepts ({} and {})" ).format(concept.name, emb.concept.name)) @classmethod def mean_by_distance(cls, # TODO: optional weighting? embeddings: Sequence['ConceptEmbedding'] ) -> 'ConceptEmbedding': r"""Get embedding with distance measure being the mean of given embs. This routine only works if the mean of the scaled embeddings normal vectors is non-zero. The distance of a point :math:`x` from a hyperplane :math:`(n, b)` with normal vector :math:`n` and support vector :math:`b\cdot n` is defined as .. math:: d_{n,b}(x) = \left((x - b\cdot n) \circ n\right) = x \circ n - b \cdot |n|^2 For an embedding :math:`(n, b, s)` with scaling factor s the distance measure is the one of its scaled version :math:`(s n, \frac{b}{s}, 1)`, which turns out to be .. math:: d_{s n, \frac{b}{s}} = s \cdot d_{n,b} This routine determines the "average" hyperplane for the given embeddings, where here average hyperplane :math:`(n, b)` means the one with the following property: .. math:: d_{n,b} = mean(d_{n_j,b_j}) = \frac 1 N \sum_{j=1}^{N} d_{n_j,b_j} i.e. at any point :math:`x` in space the distance of the average hyperplane to :math:`x` is the mean of the distances of all N given hyperplanes :math:`(n_j,b_j)` to :math:`x`. It is unique (the points on the plane are those with distance 0 and thus all the same), and given by the following combination (with scaling factor 1): .. math:: n &= mean(n_j) \\ b &= \frac{1}{|n|^2} mean(b_j \cdot |n_j|^2) Possible problems: This will weight the contribution of the given embeddings by their confidence, i.e. their scaling factor. To avoid this, the mean can be taken over the normalized versions with scaling factor set to one and the scaling factor of the mean can be determined by confidence calibration. 
:returns: embedding describing the hyperplane with above properties :raises: ValueError if the mean of the scaled normal vectors of the given embeddings is 0 """ # Value checks: cls._validate_embedding_list(embeddings) meta_info: Dict[str, Any] = embeddings[0].meta_info # First apply the scaling to all embeddings scaled_embeddings: List[ConceptEmbedding] = \ [e.scale() for e in embeddings] normal_vecs: List[np.ndarray] = \ [e.normal_vec for e in scaled_embeddings] support_factors: List[np.ndarray] = \ [e.support_factor for e in scaled_embeddings] # Normal vector: mean(n_j) with n_j scaled normal vectors mean_normal_vec: np.ndarray = np.mean(normal_vecs, axis=0) # Get normal vector norm; must not be zero! squared_mean_normal_vec_norm: float = float( np.sum(mean_normal_vec * mean_normal_vec)) if np.allclose(squared_mean_normal_vec_norm, 0): raise ValueError("Mean of scaled embedding normal vectors is zero; " "cannot calculate mean embedding") # Support factor: - b = mean(b_j * |n_j|**2) / (|n|**2) # with b_j scaled support factor, n mean normal vector mean_support_factor: np.ndarray = \ (np.mean([b * np.sum(n_j * n_j) for b, n_j in zip(support_factors, normal_vecs)]) / squared_mean_normal_vec_norm) mean_embedding: ConceptEmbedding = cls( normal_vec=mean_normal_vec, support_factor=mean_support_factor, scaling_factor=1., **meta_info ) return mean_embedding @classmethod def variance(cls, embeddings: Sequence['ConceptEmbedding'], ddof: int = 1 ) -> Tuple[np.ndarray, float, float]: r"""Get the variance of a list of embeddings (by default unbiased). The variances are calculated on the unique normalized representations of the embeddings, and encompass variance of: - the normal vector - the support vector factor (= distance to 0) - the scaling factor (= length of the normal vector). :param embeddings: sequence of embeddings to take variance of :param ddof: delta degrees of freedom: the divisor used in calculations is :math:`\text{num_embeddings} - \text{ddof}`; if ``ddof=1`` (default), the unbiased variance is obtained :returns: Tuple of variance of ``(normal vecs, support factors, scaling factors)`` for normalized representations of given embeddings """ # First norm all embeddings and bring them to the same hemisphere to # compare them normed_embs = [e.normalize().to_pos_scaling() for e in embeddings] # Now calculate the variances of the embedding specifiers var_normal_vec: np.ndarray = \ np.var([e.normal_vec for e in normed_embs], axis=0, ddof=ddof) var_supp_factor: float = float( np.var([e.support_factor for e in normed_embs], ddof=ddof)) var_scale_factor: float = float( np.var([e.scaling_factor for e in normed_embs], ddof=ddof)) return var_normal_vec, var_supp_factor, var_scale_factor @classmethod def std_deviation(cls, embeddings: Sequence['ConceptEmbedding'], ddof: int = 1 ) -> Tuple[np.ndarray, float, float]: r"""Get the (by default unbiased) standard deviation of a list of embs. The standard deviations are calculated on the unique normalized representations of the embeddings, and encompass standard deviation of: - the normal vector - the support vector factor (= distance to 0) - the scaling factor (= length of the normal vector). The deviations are calculated as the square root of the variances (see :py:meth:`variance`). 
:param embeddings: sequence of embeddings :param ddof: delta degrees of freedom: the divisor used in calculations is :math:`\text{num_embeddings} - \text{ddof}`; if ``ddof=1`` (default), the unbiased standard deviation is obtained :returns: Tuple of standard deviation of ``(normal vecs, support factors, scaling factors)`` for normalized representations of given embeddings """ var_normal_vec, var_supp_factor, var_scale_factor = \ cls.variance(embeddings, ddof=ddof) return (np.sqrt(var_normal_vec), np.sqrt(var_supp_factor), np.sqrt(var_scale_factor)) @classmethod def mean_by_angle(cls, embeddings: Sequence['ConceptEmbedding']): r"""Get embedding where distance to the given hyperplanes at each point sums up to 0. **The Math Behind** This routine approximates an "average" hyperplane from the given embeddings where here average hyperplane means the one for which the following holds: Given a point :math:`x` on the average hyperplane, the signed distances to all hyperplanes along the average hyperplane's normal vector sum up to zero. The signed distance from :math:`x` to a hyperplane H non-orthogonal to the average hyperplane is .. math:: \left(\left( (R\cdot n + x) \cap H \right) - x \right) \circ n, where - :math:`n` is the normalized normal vector of the average hyperplane, - :math:`(R \cdot n + x)` is the 1-dim affine sub-space through :math:`x` in the direction of :math:`n`, and - :math:`((R \cdot n + x) \cap H)` is the unique intersection of above line with :math:`H`. The average hyperplane has the following properties: - The average hyperplane is unique. - The average normal vector only depends on the normal vectors of the hyperplanes, not their supports/biases. - Given the normalized normal vector n of the average hyperplane, a support vector is given by: .. math:: \frac{1}{N} \sum_{j=1}^{N} \frac{|b_j|^2}{n \circ b_j} \cdot n where the sum goes over the N hyperplanes, :math:`n` is a normalized normal vector of the average hyperplane and :math:`b_j` is the orthogonal support vector of the jth hyperplane (i.e. a support vector which is a multiple of the normal vector). - Assume normalized normal vectors of the hyperplanes which all lie in the same hypersphere and are given in angle coordinates of the 1-hypersphere. An entry in the average normal vector in angle coordinates is the mean of the entries in the other hyperplane's normal vectors. **Implementation Notes** Normal vector: The normal vector is computationally expensive to calculate (should be the spherical barycenter of the normed normal vectors in one hemisphere) and can be approximated by the normalized barycenter of the normalized normal vectors which lie in the same hemisphere. Support: If the normal vectors do not
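# ---------------------------------------------------------------------------
# A minimal, self-contained sketch of the distance-based mean described above,
# assuming each embedding is reduced to a plain (normal_vec, support_factor)
# pair of numpy values (the helper name `mean_hyperplane` is illustrative and
# not part of the ConceptEmbedding API). It implements
#     n = mean(n_j),    b = mean(b_j * |n_j|^2) / |n|^2
# which is the combination used by mean_by_distance above.
import numpy as np

def mean_hyperplane(normal_vecs, support_factors):
    """Return (n, b) of the hyperplane whose distance measure is the mean."""
    normal_vecs = [np.asarray(n, dtype=float) for n in normal_vecs]
    mean_n = np.mean(normal_vecs, axis=0)
    sq_norm = float(np.dot(mean_n, mean_n))
    if np.allclose(sq_norm, 0.0):
        raise ValueError("Mean of normal vectors is zero; mean hyperplane undefined")
    mean_b = np.mean([b * float(np.dot(n, n))
                      for b, n in zip(support_factors, normal_vecs)]) / sq_norm
    return mean_n, mean_b

# Example: the planes x0 = 1 and x0 = 3 (both with normal (1, 0)) average to x0 = 2.
n, b = mean_hyperplane([[1.0, 0.0], [1.0, 0.0]], [1.0, 3.0])
assert np.allclose(n, [1.0, 0.0]) and np.isclose(b, 2.0)
# ---------------------------------------------------------------------------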
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- import uuid from msrest.pipeline import ClientRawResponse from msrestazure.azure_exceptions import CloudError from msrest.polling import LROPoller, NoPolling from msrestazure.polling.arm_polling import ARMPolling from .. import models class PagingOperations(object): """PagingOperations operations. You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.config = config def get_single_pages( self, custom_headers=None, raw=False, **operation_config): """A paging operation that finishes on the first call without a nextlink. :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: An iterator like instance of Product :rtype: ~paging.models.ProductPaged[~paging.models.Product] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ def prepare_request(next_link=None): if not next_link: # Construct URL url = self.get_single_pages.metadata['url'] # Construct parameters query_parameters = {} else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) return request def internal_paging(next_link=None): request = prepare_request(next_link) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response # Deserialize response header_dict = None if raw: header_dict = {} deserialized = models.ProductPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized get_single_pages.metadata = {'url': '/paging/single'} def get_multiple_pages( self, client_request_id=None, paging_get_multiple_pages_options=None, custom_headers=None, raw=False, **operation_config): """A paging operation that includes a nextLink that has 10 pages. 
:param client_request_id: :type client_request_id: str :param paging_get_multiple_pages_options: Additional parameters for the operation :type paging_get_multiple_pages_options: ~paging.models.PagingGetMultiplePagesOptions :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: An iterator like instance of Product :rtype: ~paging.models.ProductPaged[~paging.models.Product] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ maxresults = None if paging_get_multiple_pages_options is not None: maxresults = paging_get_multiple_pages_options.maxresults timeout = None if paging_get_multiple_pages_options is not None: timeout = paging_get_multiple_pages_options.timeout def prepare_request(next_link=None): if not next_link: # Construct URL url = self.get_multiple_pages.metadata['url'] # Construct parameters query_parameters = {} else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if client_request_id is not None: header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') if maxresults is not None: header_parameters['maxresults'] = self._serialize.header("maxresults", maxresults, 'int') if timeout is not None: header_parameters['timeout'] = self._serialize.header("timeout", timeout, 'int') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) return request def internal_paging(next_link=None): request = prepare_request(next_link) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response # Deserialize response header_dict = None if raw: header_dict = {} deserialized = models.ProductPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized get_multiple_pages.metadata = {'url': '/paging/multiple'} def get_odata_multiple_pages( self, client_request_id=None, paging_get_odata_multiple_pages_options=None, custom_headers=None, raw=False, **operation_config): """A paging operation that includes a nextLink in odata format that has 10 pages. :param client_request_id: :type client_request_id: str :param paging_get_odata_multiple_pages_options: Additional parameters for the operation :type paging_get_odata_multiple_pages_options: ~paging.models.PagingGetOdataMultiplePagesOptions :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: An iterator like instance of Product :rtype: ~paging.models.ProductPaged1[~paging.models.Product] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ maxresults = None if paging_get_odata_multiple_pages_options is not None: maxresults = paging_get_odata_multiple_pages_options.maxresults timeout = None if paging_get_odata_multiple_pages_options is not None: timeout = paging_get_odata_multiple_pages_options.timeout def prepare_request(next_link=None): if not next_link: # Construct URL url = self.get_odata_multiple_pages.metadata['url'] # Construct parameters query_parameters = {} else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if client_request_id is not None: header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') if maxresults is not None: header_parameters['maxresults'] = self._serialize.header("maxresults", maxresults, 'int') if timeout is not None: header_parameters['timeout'] = self._serialize.header("timeout", timeout, 'int') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) return request def internal_paging(next_link=None): request = prepare_request(next_link) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response # Deserialize response header_dict = None if raw: header_dict = {} deserialized = models.ProductPaged1(internal_paging, self._deserialize.dependencies, header_dict) return deserialized get_odata_multiple_pages.metadata = {'url': '/paging/multiple/odata'} def get_multiple_pages_with_offset( self, paging_get_multiple_pages_with_offset_options, client_request_id=None, custom_headers=None, raw=False, **operation_config): """A paging operation that includes a nextLink that has 10 pages. :param paging_get_multiple_pages_with_offset_options: Additional parameters for the operation :type paging_get_multiple_pages_with_offset_options: ~paging.models.PagingGetMultiplePagesWithOffsetOptions :param client_request_id: :type client_request_id: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: An iterator like instance of Product :rtype: ~paging.models.ProductPaged[~paging.models.Product] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ maxresults = None if paging_get_multiple_pages_with_offset_options is not None: maxresults = paging_get_multiple_pages_with_offset_options.maxresults offset = None if paging_get_multiple_pages_with_offset_options is not None: offset = paging_get_multiple_pages_with_offset_options.offset timeout = None if paging_get_multiple_pages_with_offset_options is not None: timeout = paging_get_multiple_pages_with_offset_options.timeout def prepare_request(next_link=None): if not next_link: # Construct URL url = self.get_multiple_pages_with_offset.metadata['url'] path_format_arguments = { 'offset': self._serialize.url("offset", offset, 'int') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if client_request_id is not None: header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') if maxresults is not None: header_parameters['maxresults'] = self._serialize.header("maxresults", maxresults, 'int') if timeout is not None: header_parameters['timeout'] = self._serialize.header("timeout", timeout, 'int') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) return request def internal_paging(next_link=None): request = prepare_request(next_link) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response # Deserialize response header_dict = None if raw: header_dict = {} deserialized = models.ProductPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized get_multiple_pages_with_offset.metadata = {'url': '/paging/multiple/withpath/{offset}'} def get_multiple_pages_retry_first( self, custom_headers=None, raw=False, **operation_config): """A paging operation that fails on the first call with 500 and then retries and then get a response including a nextLink that has 10 pages. :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: An iterator like instance of Product :rtype: ~paging.models.ProductPaged[~paging.models.Product] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ def prepare_request(next_link=None): if not next_link: # Construct URL url = self.get_multiple_pages_retry_first.metadata['url'] # Construct parameters query_parameters = {} else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) return request def internal_paging(next_link=None): request = prepare_request(next_link) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response # Deserialize response header_dict = None if raw: header_dict = {} deserialized = models.ProductPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized get_multiple_pages_retry_first.metadata = {'url': '/paging/multiple/retryfirst'} def get_multiple_pages_retry_second( self, custom_headers=None, raw=False, **operation_config): """A paging operation that includes a nextLink that has 10 pages, of which the 2nd call fails first with 500. The client should retry and finish all 10 pages eventually. :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: An
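# ---------------------------------------------------------------------------
# A hedged, library-free sketch of the nextLink-following pattern that the
# prepare_request/internal_paging helpers above implement: request a page,
# yield its items, and keep requesting whatever nextLink the service returns
# until there is none. `fetch_page` is a stand-in for prepare_request plus
# self._client.send, and the toy URLs below are invented for the example;
# this is not the msrest ProductPaged implementation itself.
def iterate_pages(fetch_page, first_url):
    next_link = first_url
    while next_link:
        items, next_link = fetch_page(next_link)
        for item in items:
            yield item

# Toy example with two "pages":
_pages = {
    '/paging/multiple': (['product-1', 'product-2'], '/paging/multiple?page=2'),
    '/paging/multiple?page=2': (['product-3'], None),
}
assert list(iterate_pages(_pages.get, '/paging/multiple')) == \
    ['product-1', 'product-2', 'product-3']
# In the generated client the same behaviour is exposed lazily through the
# returned models.ProductPaged iterator, e.g. iterating the result of
# get_multiple_pages() yields Product instances across all pages (client
# construction is omitted here and depends on the generated package).
# ---------------------------------------------------------------------------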
<gh_stars>10-100 #!/usr/bin/env python from themis.utils import isvalidtype from themis import ( FEATURES_CUSTOM_CALLBACK, METADATA_CUSTOM_CALLBACK, Features, ThemisMetaData, AI, Policy, RateLimiter ) from traceback import format_exc from netaddr import IPNetwork, IPAddress from datetime import datetime, timedelta from math import ceil from email.utils import parseaddr from multiprocessing import Process as Thread, Queue from themis.marshalredis import MarshalRedis import Milter, time, logging.config import re, sys, yaml, os, spf # TODO: Count receive messages by header # TODO: Implement greylist functionality # TODO: Rest interface for configuring # TODO: Send UDP requests containing statistics # TODO: Test performance: Anex, list, alias, severeal rcpts # TODO: Log inverted destination -> OK # TODO: RCPT and SENDER objects from postfix could be UPPER cases, should match insensitive -> OK class ThemisMilter(Milter.Base): REDIS, LOGGER, FEATS = [None, None, None] def __init__(self): self.id = Milter.uniqueID() # Integer incremented with each call. self.resync_config() self.redis = ThemisMilter.REDIS self.log = ThemisMilter.LOGGER self.gconf = ThemisMilter.FEATS self.policy = Policy(self.redis) self.log = logging.getLogger(__name__) self.policies = None def resync_config(self): config_file = ThemisMilter.REDIS.hget('config:themis:resync', 'config_file') if config_file: with open(config_file) as f: main_config, global_config, logger_config = yaml.load_all(f) redis_server = os.getenv('THEMIS_REDIS') or main_config['redis_server'] redis_password = os.getenv('THEMIS_REDISPASSWD') or main_config['redis_password'] ThemisMilter.REDIS = MarshalRedis(redis_server, redis_password) redis_version = ThemisMilter.REDIS.info().get('redis_version') if not redis_version: raise RuntimeError('Could not find redis version') redis_version = float('.'.join(redis_version.split('.')[:2])) if redis_version < 2.8: raise RuntimeError('Old redis version, should be at least 2.8+') global_config = ThemisMilter.REDIS.hgetall('config:themis:features:global', FEATURES_CUSTOM_CALLBACK) or global_config ThemisMilter.FEATS = Features(**global_config) logging.config.dictConfig(logger_config['logger']) ThemisMilter.LOGGER = logging.getLogger(__name__) if ThemisMilter.REDIS.exists('config:themis:features:global'): ThemisMilter.LOGGER.warning('RESYNC - Config file "%s" will be ignored. Using redis server config.' 
% config_file) ThemisMilter.REDIS.delete('config:themis:resync') @Milter.noreply def connect(self, IPname, family, hostaddr): # (self, 'ip068.subnet71.example.com', AF_INET, ('172.16.58.3', 4720) ) # (self, 'ip6.mxout.example.com', AF_INET6, # ('fc00:e968:6179::de52:7100', 4720, 1, 0) ) self.log.debug('CONNECT - ID %s From %s at %s ' % (self.id, IPname, hostaddr[0])) #self.log("connect from %s at %s in ID: %s" % (IPname, hostaddr, self.milter.id) ) self.from_ipaddress = hostaddr[0] return Milter.CONTINUE def hello(self, heloname): self.heloname = heloname return Milter.CONTINUE def envfrom(self, mailfrom, *str): try: self.mta_hostname = None if self.gconf.policiesByServerPoolFeature: self.mta_hostname = self.getsymval('{j}') self.log.debug('CONNECT - ID %s policiesByServerPoolFeature enabled, mta_hostname: %s' % (self.id, self.mta_hostname)) if self.gconf.messagesBySecFeature: now = datetime.now() storedays = self.gconf.messagesBySecStoreDays now_in_seconds = now.hour * 60 * (storedays * 60) + now.minute * (storedays * 60) + now.second self.redis.hincrby('requestsbysec:global', 'second:%s' % now_in_seconds) self.policies = self.policy.get_all_data_policies(self.mta_hostname, self.gconf.fallbackToNonPoolPolicies) self.milter_headers, self.recipients, self.subject = [dict(), list(), None] self.saslauth = self.getsymval('{auth_authen}') # authenticated user self.saslauth_domain = None if self.saslauth: self.saslauth = self.saslauth.lower() if not '@' in self.saslauth: self.log.info('ENVFROM - ID %s Got a broken SASLUsername: %s' % (self.id, self.saslauth)) # TODO: Try to fix the broken sasl accounts self.saslauth_domain = '@broken_sasl.tld' self.saslauth = self.saslauth + self.saslauth_domain else: self.saslauth_domain = '@' + self.saslauth.split('@')[1] # ('From Name', '<EMAIL>') _, self.mailfrom = parseaddr(mailfrom) self.mailfrom = self.mailfrom.lower() if not self.mailfrom: # Blank mailfrom self.mailfrom = '<EMAIL>' elif '@' not in self.mailfrom: self.mailfrom = self.mailfrom + '@mailfrom.tld' self.mailfrom_domain = '@' + self.mailfrom.split('@')[1] except Exception, e: self.log.error('ENVRCPT - BYPASS - %s Error processing envfrom' % self.id) self.log.exception(e) return Milter.ACCEPT return Milter.CONTINUE @Milter.noreply def envrcpt(self, to, *str): try: # This state happens a "loop" for each recipient rcptinfo = to, Milter.dictfromlist(str) # ('To Name', '<EMAIL>') _, rcpt = parseaddr(rcptinfo[0]) self.recipients.append(rcpt) except Exception, e: self.log.error('ENVRCPT - BYPASS - %s Error processing recipients' % self.id) self.log.exception(e) return Milter.ACCEPT return Milter.CONTINUE @Milter.noreply def header(self, name, hval): # If has key headers repeated the last one will prevail try: self.milter_headers[name] = hval if name == 'Subject': self.subject = hval or ':::blank_subject:::' except Exception, e: self.log.warning('HEADER - %s Error processing headers' % self.id) self.log.exception(e) return Milter.CONTINUE @Milter.noreply def eoh(self): self.queue_id = self.getsymval('{i}') self.queue_id = ':'.join((self.queue_id, str(self.id))) self.log.debug('ENVFROM - %s - Mail from: %s SASLUsername: %s' % (self.queue_id, self.mailfrom, self.saslauth or 'NULL')) try: for hdr, hdr_value in self.milter_headers.items(): self.log.debug('HEADER - %s - %s | %s' % (self.queue_id, hdr, hdr_value)) except Exception, e: self.log.warning('EOH - %s Could not debug headers. 
%s' % (self.queue_id, e)) self.log.exception(e) rcptlog = ', '.join(self.recipients) self.log.debug('ENVRCPT - %s - RCPT(s): %s' % (self.queue_id, rcptlog)) return Milter.CONTINUE @Milter.nocallback def body(self, chunk): # TODO: dont need body return Milter.CONTINUE def eom(self): eom_log_header = 'EOM - %s - %s - ' % (self.mta_hostname, self.queue_id) try: if not self.policies: self.log.warning(eom_log_header + 'BYPASS - Could not find any policies') return Milter.ACCEPT for pdata in self.policies: self.namespace = self.gconf.global_namespace if pdata.spf: try: spfresult, spfcode, spftext = spf.check(i=self.from_ipaddress, s=self.mailfrom, h=self.heloname) self.addheader('Received-SPF', spfresult) process_action = False if self.gconf.spfStrictRejectFeature: if spfresult in ['softfail', 'fail', 'neutral', '', 'none']: process_action = True else: if spfresult == 'fail': process_action = True self.log.info(eom_log_header + 'SPFCHECK - Result: %s, Code: %s, Explanation: %s' % (spfresult, spfcode, spftext)) if process_action: return self.milter_action(pdata, log_header=eom_log_header) except Exception, e: self.log.exception(e) # Custom Headers are included if a header is found and the value has been matched with a regular expression if pdata.actionheaders: for hdr_key, hdr_value in self.milter_headers.items(): try: if not hdr_key in pdata.actionheaders: continue rgxp = pdata.actionheaders[hdr_key].pop(0) if re.match(r'%s' % rgxp, hdr_value): self.log.info(eom_log_header + 'MATCH, regexp %s value %s' % (rgxp, hdr_value)) for actionheader in pdata.actionheaders[hdr_key]: new_hdr, new_hdr_value = actionheader self.log.info(eom_log_header + 'Adding header %s with value %s' % (new_hdr, new_hdr_value)) self.addheader(new_hdr, new_hdr_value) else: self.log.info(eom_log_header + 'NOT MATCH, regexp %s value %s' % (rgxp, hdr_value)) except Exception, e: self.log.error(eom_log_header + 'Error processing action headers: %s' % e) self.log.exception(e) break if pdata.onlyheaders: self.log.info(eom_log_header + 'BYPASS - Accepting connection, policy validate only headers') return Milter.ACCEPT jailby_namespace = pdata.jailby # Features and config by pool servers if self.gconf.featuresByServerPool and self.gconf.policiesByServerPoolFeature: global_config = self.redis.hgetall(':'.join(('config:themis:features', pdata.policy_name)), FEATURES_CUSTOM_CALLBACK) if global_config: self.gconf = Features(**global_config) if pdata.pool_policy: self.namespace = ':'.join((self.namespace, pdata.pool_name)) self.gconf.ai_namespace = self.namespace jailby_namespace = ':'.join((pdata.pool_name, jailby_namespace)) self.log.info(eom_log_header + 'Pool policy Name: %s namespace: %s jailby_namespace: %s' % (pdata.pool_name, self.namespace, jailby_namespace) ) self.log.info(eom_log_header + 'Executing policy: %s Pool Policy: %s' % (pdata.policy_name, pdata.pool_policy)) if pdata.requestsmon: # Monitoring requests by sec of a policy now = datetime.now() storedays = self.gconf.messagesBySecStoreDays now_in_seconds = now.hour * 60 * (storedays * 60) + now.minute * (storedays * 60) + now.second self.redis.hincrby('requestsbysec:%s' % pdata.policy_name, 'second:%s' % now_in_seconds) if '+' in pdata.jailby: # Jail by sasl @domain or user@domain if self.saslauth: self.mailfrom_domain = self.saslauth_domain self.mailfrom = self.saslauth self.milter_from_object = None if re.match(r'^SenderDomain\+$|^SenderDomain$', pdata.jailby): # Dont jail plus because sasl_auth may be None self.milter_from_object = self.mailfrom_domain elif 
re.match(r'^Sender\+$|^Sender$', pdata.jailby): self.milter_from_object = self.mailfrom elif re.match(r'^SenderIP$', pdata.jailby): self.milter_from_object = self.from_ipaddress elif 'SASLUsername' == pdata.jailby: if not self.saslauth: self.log.warning(eom_log_header + 'NEXT - Empty saslusername skipping policy: %s' % pdata.policy_name) continue self.milter_from_object = self.saslauth else: self.log.warning(eom_log_header + 'NEXT - Could NOT match jailby key: %s for policy: %s' % (pdata.jailby, pdata.policy_name)) continue # Evaluate to True if only one recipient match # If the destination match for domain destination, ACCEPT! Here it is a simple match of destination, only by @domain is_dest_match, recipient_bypass_match = False, False if pdata.is_destination_any: is_dest_match = True self.log.debug(eom_log_header + "DEST_MATCH - 'any' found") else: for rcpt in self.recipients: rcpt_domain = '@' + rcpt.split('@')[1] if self.policy.hasmember(pdata.destination, [rcpt_domain], pdata.inverted_destination): self.log.debug(eom_log_header + 'DEST_MATCH - Recipient: %s Policy: %s Inverted: %s' % (rcpt_domain, pdata.policy_name, pdata.inverted_destination)) # Bypass complex, only bypass if source and destination match if pdata.type == 'bypass+': recipient_bypass_match = True break # We break because we need only one match to start rate limiting is_dest_match = True break else: self.log.debug(eom_log_header + 'DEST_NOT_MATCH - RCPT: %s Policy: %s' % (rcpt, pdata.policy_name)) # Check if the objects are in a specific redis SET. This prevents unnecessary looping is_source_match = pdata.is_source_any if not is_source_match: objects = [self.mailfrom_domain, self.mailfrom, self.from_ipaddress] is_source_match = self.policy.hasmember(pdata.source, objects, invert=pdata.inverted_source) if not is_source_match: # This is only necessary to validate if an ipaddress belongs to a CIDR for group_src_member in self.policy.getgroupips(pdata.source): self.log.debug(eom_log_header + 'Looping through groups. group: %s member(s): %s' % (pdata.source, group_src_member)) try: if IPNetwork is type(isvalidtype(group_src_member)): is_source_match = self.match(group_src_member, invert=pdata.inverted_source) except Exception, e: self.log.warning(eom_log_header + 'Source check error. Policy: %s Error: %s' % (pdata.policy_name, e)) continue if is_source_match: local_milter_from_object = self.milter_from_object self.log.debug(eom_log_header + 'SOURCE_MATCH - group_source_name: %s milter_from_object: %s from_ipaddress: %s mailfrom_domain: %s mailfrom: %s invert: %s' % (pdata.source, local_milter_from_object, self.from_ipaddress, self.mailfrom_domain, self.mailfrom, pdata.inverted_source)) #
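# ---------------------------------------------------------------------------
# A standalone sketch of the jailby selection shown above (illustrative only,
# not a method of ThemisMilter): given a policy's `jailby` value, pick the
# object that will be rate limited. A '+' suffix means "prefer the SASL login
# over the envelope sender when the client authenticated", mirroring the
# branches in eom() above.
import re

def select_jail_object(jailby, mailfrom, mailfrom_domain, from_ip,
                       saslauth=None, saslauth_domain=None):
    if '+' in jailby and saslauth:
        mailfrom, mailfrom_domain = saslauth, saslauth_domain
    if re.match(r'^SenderDomain\+$|^SenderDomain$', jailby):
        return mailfrom_domain
    if re.match(r'^Sender\+$|^Sender$', jailby):
        return mailfrom
    if re.match(r'^SenderIP$', jailby):
        return from_ip
    if jailby == 'SASLUsername':
        return saslauth          # the milter skips the policy when this is empty
    return None                  # unmatched jailby keys are skipped as well

assert select_jail_object('SenderDomain+', 'a@x.tld', '@x.tld', '192.0.2.1',
                          saslauth='user@corp.tld',
                          saslauth_domain='@corp.tld') == '@corp.tld'
assert select_jail_object('SenderIP', 'a@x.tld', '@x.tld', '192.0.2.1') == '192.0.2.1'
# ---------------------------------------------------------------------------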
'Zn1nuBos3cdJLNAxpbEYYvaDH187eW01': 'No ba, nie widzę specjalnej różnicy, z napojem czy bez.', 'fB3TcaVCh61x8X6a4TVAtRzWK9VHgJIR': 'Obeliks?', 'yR0P3o5c7TrdCpzBE5JpmFuEMdfMhSKu': 'Panoramiks?', '6SaT6C5Qnff5ZkXkxWtexrf6mFpCElQc': 'Taak?', 'FPMj95dYFbe9l9ElZdYru5nRDJgduwpm': 'Panoramiks?', 'pFJR67zhzmwlVZFSqDLUYls0VHQ5WMDG': 'Obeliks?', 'OAH4xnuZIeQroxaDkGmQDGvSQE6biBY6': 'Asterix? Uuu?', 'OtYmh6EZiNrDyEPxKci2LARH34JhKQLC': 'Nie, Panoramix.', 'LSWW8Wm69wV6LBYdfo2dtQpcNTMgv4JF': 'Tu jestem.', 'SHk9OgSHlmaRMGB21SSQZDinG17tWvmr': 'Oooo', 'ftslZ4WOeynRSKenD4K4CTINcOlxTjcT': 'Dziesiąty raz już się tu spotykamy', 'yOQGrIsefvVtMXgt4kTGHmsbJAkGEW4b': 'Niezłych tu mają architektów, nie ma co', 'azAK9yopIiA14FEuJ45J5J4xnAw3esGI': 'Robi się nieciekawie', 'tGPOhBo1SkTuvGgs4fggdj4xLAcGCVO2': 'Nawet bardzo', 'JoB4taN0wVNwAjeEGg4FRBHVBfNHjPa1': 'Zaczynam być głodny <3', 'abPSiUqll6TwjY0YppMOYa4CKHnagDAa': 'Zaczynam być głodny', 'plsyatfFH50k0xoX3ZGi6MOqT665NeM1': 'o kurde to ja odwołuje te moje wcześniejsze :O moja mea kulpa, wybaczcie', 'oiX8wHltMbUhg3lIMzX4FedmuntdYAPK': 'Wybaczam xD', '8B7N16Wdw0MpiX6fB3ipL85aB7x8lR1O': 'Ciekawe co te wszystkie rysunki znaczą?', 'NDpEgFlRJbLyU1z3MqCCsIIaPfhuf9nc': 'Obelix. Jest czas na zabawę z hieroglifami i czas na wyjście z piramidy', '3O65IVVKyKfQhFDUoc4tQ49EECiaU6wD': 'Yy? Może w końcu zrozumiesz, że są jakieś priorytety?', 'ReXnoNzLUdlvS8i5otmtXv7lcLBGmLhb': 'Informuję pana Asterixa, że jestem głodny i staram się jakoś zapomnieć o głodzie', 'SzgxWqwkRgxabPxJrh1i1N3z1TGCoP1Y': 'Oo, pardon. Zapomniałem, że żołądek Paana Obelixa jest najważniejszy i że micha i kilku Rzymian do lania to wszystko, czego Panu Obelixowi potrzeba.', 'HUU6EAxjbphzQ6AtPsBG6gvsOT2z9cC5': 'Och, ale oczywiście pan Asterix zawsze ma przecież rację.', '0xb3gLCjMbsOohLBQrqgricHzGzjKAfB': 'A mam!', 'gLWVAkWCK5uEug9avAi6HF8GETS0dyg5': 'Informuję pana Asterixa, który koniecznie chciał zwiedzać tę piramidę, że w menhirze to by nas nie zamknęli!', 'KeR3YHJPZKDwLv5Mc8zIRAQvgHqi44FF': 'Ooooo, no argument nie do podważenia, ooohoho.', 'fnRayddOUwowSB2OfUkFcxluxbISGgrE': 'Co oni robią? To już półtora piasku, kamienie załadowane, no co jest? Ech...', 'cbx0PIlfzXe2H2j9pc65sRikSTs7gHXP': 'Zły byłem, to naopowiadałem bzdur. Mea culpa.', 'C1MFm9mTVGrXG76L00cfNglbAi1ZXKeW': 'Nie, moja mea culpa. W menhirze też mogliby nas zamknąć.', '3Q0HLbkLty3ZoVMHUs3lzecPBNtRthcb': 'Moje czary niestety nie wystarczą, żeby nas stąd wyciągnąć. Obawiam się moi drodzy, że to może być koniec naszych przygód.', 'LwobGhYwHbl2620KjkF9HDexIEP5NaE3': 'Uuu... O.', 'dIhqpAZWWOVkrFWjh9DCSDOUsdzNU5Jr': 'Mamy czym podpalić pochodnię?', 'kzOSn5AqwSGD5Mh8I2J5oic7BIYYIJXH': 'Nie, niestety. Mamy tylko te talizmany od Numernabisa, ale to nie wypali.', 'tY6SOz4eAXf6Fhf8d6pnf5GtaAcGWMv4': 'Właśnie, biedny Numernabis.', 'bZfJZnEl8O7ZGND7gPBZcyVsVqbKbWF6': 'A ja się martwię o mojego Idefixa.', 'WfoS0lmXSgQzE0C8tsCQEtt8XQWtC3Pp': 'Prawda, Idefix?', '8oo5tfOdBQz6zuN8nk3ilVMdm3iohpyj': 'Hau!', '3FGHk99ybJcfekFfS2bQ1XAsrBCwne8S': 'IDEFIX?!', 'Q2trC8HeQNPpejk9vGl93RQ6zlZk1Ak5': 'No tak, Idefix. Tylko się nie czepiajcie, że go zabrałem, sam przyszedł!', 'aSgSOTLXeBLQZY8ZFQMkcDHffR14kB7Z': 'No właśnie, jak znalazł nas po zapachu, to teraz może nas poprowadzić z powrotem!', '1wpAXzpgq14BNhsOnhtpE2eyb6dqOBZp': 'Ojej, prawda. Idefix, skup się. Jak nas wyprowadzisz na zewnątrz to dostaniesz karuzelę kości.', 'zNoCASc7ySZWx0NQBTKJZLNYuPs3bPwH': 'HAU! 
hau hau', 'CNAYgSWW4nwviTtIDlrTsiOAJNUEOSe9': '<muzyka z <NAME> przy akompaniamencie której bohaterowie uciekają z piramidy>', 'DmfoQhBQ3VMEetNOkw0x5A7xA6IrQkUk': 'Hyhyhyhyhy oj tak! Dzielny piesek tak', 'ephr4AwAJ7atVlKtf3FuHD99xYX6wPgo': 'Bardzo dzielny, a Ty miałeś rację, że go zabrałeś!', 'dDv8rT5BmjAWmMGgyyKh44XSd7r6RuYK': 'Czasem to... ja myślę że on wszystko rozumie no', 'roZU49xKOMxCelqgWBEcyenU4ASTZkA0': 'Tak oto nasi bohaterowie wracają na statkach po burty załadowanych budulcem.', 'bkePATiUTPx0y2H9hQ9iWk0k1HO8Obhx': 'Wolne te statki', 'aknYjvXHv2kVErvefwH62ErprBiR1yUM': 'Za wolne. W takim tempie nigdy nie wrócimy do Galii na ucztę z dzikami. A soczewica to mi się trochę przejadła.', '7hdmh1d0o8WIHAdSPp5OxpGFKSj1rKxd': 'Widzicie... Nil to kapryśna rzeka: bywa, że rwąca i gniewna, innym znów razem senna. Nie wiem, czy wiecie, ale jest taki bardzo piękny poemat. O Nilu oczywiście. A brzmi tak:', 'G0gwkSYG8O3W9kpJVLiKkVHSbCFK18sQ': '"Nilu, Nilu, Nilu..."', 'Se2XCk34ecmijp03bW1rT2CcS2dbxMa5': '<<plusk>> A co on przepraszam robi?', 'fLfNvLEkGtPY73hSro1pQJlRG3nuNe8g': 'Chłopcy. Po raz kolejny stanęliśmy na nogi. Hip hip hip!', 'MViuvei9bwSMM3aCu8hr0tRjMQLROBUU': 'huła...', 'kHHDUjRUqTG4vxVF6EiQLX8KomLojXNz': 'Hip hip hip no', 'o4thTT7QOWIN1V5dvv9zE7crvbex3U3M': 'huła', 'F6f7MFqJioNt2KA7X4G2WV5LNhcIW6HH': 'Ostrożnie na zapleczu!', 'yhD6JtHGWIORKJrNCd7f9QlxeGHEt6Rf': '*jeb w statek* Juhułuuuuuu', 'pHfYgjxLmUM4bLSHe1QnFuKdZqDewtdk': 'Na Tutatisa! Niby wiem, ze jak był mały wpadł do kociołka z magicznym napojem, a mimo to wciąż mnie zaskakuje!', '7VFPUGFI9bjpQctqAxdE0IXIPAsRhnEm': 'Hip hip hip! Hułaa...', 'b8IQFV1kfgT7Eu84g1lScAShpWn8SsUU': 'Przestań, dobrze?', 'ibnKdEYA4ZzQ9d9SpqbVizWh2iicgcZq': 'Prace znów mogły ruszyć pełną parą, a Numernabis mógł być pewien, że dotrzyma terminu. A to w budownictwie rzadki przypadek.', 'MEcnDJZO2bZNIfSv6PORG5CLrJfLVC5B': 'Łyk dla każdego! Tylko łyk! Ktoś rozprowadza na obiekcie podróbkę napoju magicznego. Wygląd ten sam, smak ten sam, ale to nie napój magiczny, to zupa jest z brukselek. Prawdziwy, skuteczny napój to ten, który daje wam Panoramix, o tam, przy kotle. Pół godziny czekania, ale warto.', 's6C5AUlsNTIR45ZWhbVUh4VQ19ijMAgc': '*No tak, rozumie się, nie ma sprawy*', 'hoYW1eoBlLhBsdCEm40RqMdqM2y3GDZb': 'A wtedy zamknąłem drzwi i mówię: Ten grobowiec będzie waszym grobowcem!', 'qZ58LoQRp6tLJ1pADVCBVXS0k2yUkXQe': 'Hmm, to dobre', 'vFtzBuCl7V0zXDUbkfCj0nzko4FjythK': 'He?', 'FwObUgjOnfuGDRLpaMSPxD5i2HcLxDFf': 'Hm.', 'LlPUkE3p39eQI7JxefPe15NfgO0tXYkY': 'Myślałem czy by nie "bando szakali, zdechniecie tu jak szakale" ehehee... eee... ale by było dwa razy "szakale" więc...', 'jQUgW0VFBB6xHuepaI9Oqj0lxvnq8lGp': 'Co? Jak szakalowie?', '5hUvdvKSg9kT3YqMGuhrpuUrviLFAYBy': 'Zamknięci w piramidzie, tak? Tssss', 'Kc1FBXxQYMcYnzH6QsoTSsoEYljReX0O': 'To niemożliwe! To jakaś magia, albo jacyś bogowie żeby znaleźli wyjście z piramidy', 'euD9FQtXDpQCNo5SR4PJDUvfSsWEDnNO': 'Nieźli są, nie ma co. Ale czekaj, wymyśle coś mmmm no i wymyśliłem, uknułem iście szatańską intrygę, nie, dwie, właściwie trzy ojoj ojojoj ale natłok myśli jestem bogiem no, ssssss tatatatatatatatatata', 'WyT8M5R0F6bB3QK9wV5nwW0ms6HOF8dt': 'Nie lubię jak tak się śmiejesz szefie... się boję', 'r8JFJbvQVuZyQc73eIarofhitwFMponx': 'Do roboty', 'kptObIcEIIU2YPXtDZXXcSAr8sBpPQoM': 'Hmm... Trochę skromnie jak na królewskie ogrody. A mamy tylko dwa tygodnie.', 'XRcTgjxxJEXDkerytdWgevKriFsBlaJU': 'Paranoixie? 
a nie masz może czegoś co by roślinkom szybciej się rosło no wiesz yy wiesz na-na-na na przykład takie nasionka co jak się zasieje to od razu palma odbija', 'gzM2ONtk1oal3smsYNF6hkn8Cew5Qhc8': 'Nie, Numernabisie, nie na wszystko są magiczne napoje. Są rzeczy niezmienne. Naturze potrzebny jest czas. Trzeba lat i miesięcy, żeby z małego nasionka, dzięki deszczom i słońcu, wykluła się łodyżka. A wtedy... z tej łodyżki...', 'BNTYgKgThbnG8IF553Pj4RmW4V37liMc': 'Ptosie...', 'T7BwYOdxIabFRP3VTRn3HDJvDaTDMLyM': 'Ptosie...', 'DmcEF6bmpWTOmoeyhmfYGvvSkVWJZgMu': '<zasadzona palma rośnie w kilka sekund> Szybciej nie da rady', 'HoPk9LsNHuZs3o7fpX0G8478VX8OgOth': 'Ale moc! Alle moc! haha aaa Może mnie ktoś ściągnąć?', '3qxcucXanPLpRtK373p1lXMys0ROMEKt': 'Jak to wznowiono budowę?', 'YgsuP2tnb7ousPnrSDBEuvXgfxsCAOmI': 'Mnie też Szlachetny Cezarze nie cieszy postęp tych prac. Z powodów, które przemilczę, chciałbym, by <NAME> poniósł klęskę i pomyślałem, że może dojdziemy do porozumienia, pod warunkiem oczywiście, że zechcesz, o panie.', '9govELRPNbyLP4CmJctTJNXxahbA929D': 'Nie tutaj, w namiocie', 'y6TJaOYL779eeajaAjBS4sj9ja3fiRY3': 'Sssss...', 'mouSJgFF5tNvPBamZSIRa5SQGKFXFnb8': 'Więc mów', 'ZZl1aB76mzuVDKZlkzHQIzWgasLHTlm2': 'Boski Cezarze, jeżeli uda mi się powstrzymać budowę tego pałacu, powierzysz ją mnie, a ja dokończę ją dwa miesiące po czasie. Wtedy ty wygrasz swój zakład, a Kleopatra się wścieknie. Ssssss...', 'AaZLZdTIzNGmyc0IkV4LU7uvt0mmJfQT': 'Zgoda ale skończ cztery miesiące po czasie', 'Qsa6ip2T8DqQ31OlF8hE5cHHPfm7MlAs': 'Trzy, więcej nie mogę', 'iKGMiKe6GTCLJy9boIRBzskmhtfGkzPl': 'Niech będą trzy, ale nie mniej albo zapłacisz odsetki za każdy dzień.', 'zex25IC0GwWUbtXAmlKGwJPEmkvZ4pWE': 'Iżby. Umowa stoi, bossski Cezarze.', 'aXtvhEV2RvozkC7DOnR3IO6dXKHB0eyx': 'Świetnie. A jak właściwie zamierzasz opóźnić prace?', 'AyQDyTraZ8FWRY94nw7dJjpZyIapue1U': 'Dzięki temu.', 'wO1V3bTIth171MhBOzA1lwXNZ7QRzvTs': 'HehehehehahahahahaHAHAHAHAHAHA', 'dQ4vD2wQ3GZn9I1EMBOpLNSbuPZN4hDo': 'Ssssssssssssssss', 'j4vpuX8HDlWlniEbkV34AHjD1OvBnXRx': 'Mmmmmm, w co by się tu ubrać...', 'MSTUEOHUk3xdzNzuCjbLHAmbOTs6fILh': 'Ostrożnie, mleko ledwie, ledwie letnie', 'OOHPQWarpaCMGFC4zy67x14sfJ9fiqdW': 'Akado pula, rende ren', 'Nrru7zJnmLI7qsqg6C2051K411Ul9MOf': 'W hołdzie dla piękna najjaśniejszej podarek od trzech Galów - Panoramixa, Obelixa i Asterixa. Mm, to bardzo miłe.', 'EOmE5l4dhh6o0Y4eMRnwT0murWDPKbT5': 'Mogę otworzyć?', 'db9icRi4JjICpb48j0ZxFvmCWBHSM5MY': 'internet.. już pousuwałem', 'CPtZfmsfFMBLmA3TnpbmDSsDjUyKBY6M': 'Fff, ojej. Letnie', 'pE9rwuh5ckV9Lbi8oedEXcdEWjFG7JxZ': 'ooo, ciasto! Wieczorem podamy je na uczcie...kameralnie, 40 tancerzy, 60 tancerek i 300 lekkostrawnych dań', 'TdEgP5mJOk8kf5kFEddOLePr2vaMbrBa': 'Ja też wolę jak jest mniej osób.', 'CtCjbsO0c5YdBf7q83lXRHZapLwumJCr': '*ciam ciam ciam ciam*', 'kwEh5Cqy7Y8RkewwqJOecuXdyyZOMdvF': 'Nareszcie normalny posiłek', 'Vs16GPmovOxtIpkzplhCcywaggWBKIZK': 'A tak!', 'rFUIgdOU04tnnItZEzRaFfZk3bCCuFvB': 'Jak to dzik?!', '8bsK7h49BzLW9cC35cL7dKT84jgsiY7D': 'Z soczewicą !', 'U0gp9PIttooGh3kZiU388LjHjmVztg8M': 'Prz-prz-przecież to zabronione, co Wy, nie wiecie? O ile mi wiadomo to prawie wieprzowina.', '4lhqHrtqwJMTFSI1RQRobiXhQ7SWBc5E': 'No to obejdziemy trochę przepisy.', 'dKbNSVaGT9bUE9MjtQVr3XfWWUUt0Ojt': '<tup-tup-tup> Nie ruszać się. Przyszliśmy zatrzymać Galów.', '9yn6gECvRYbdxlQl03rxKHaUzqo0MLho': 'Rozkaz królowej.', '5DIiiYi2FgWNFTR7FT3qf8aAAxYide3e': 'No... No już zjeść nie można? Dobrze. 
To ja im spuszczę lanie, a potem jemy.', 'uBVDmp12Uark6TJX6YvH08SHjixX61iJ': '-Aaaa!', 'NmWKAY1KjXPorBeNd7fgJndA5tyxUTwx': 'No, a co?', 'ee2XzoaGjUBCP86pMztqhZNrWqHTy0cH': 'No co? To tez tu jest zabronione? To co tu u was wolno?', 'e6yQ0hEqpYjolYFcc6dTNer9RcUodkz7': 'Numernabis ma rację, dowiedzmy się, o co chodzi.', 'uJUfO5X7snGsHIv1QIvUav8JasfGL0yB': 'Tak, tak, to świetna okazja żeby odwiedzić pałac', 'Y45LMaSDEKHf7vfUSZhlo0ToBFCh9mCN': 'A moje dziki?', 'gRMhDK7MVyxOEO7pF3j2M5p04HK5LLmS': 'To ja tu zostanę. Popilnuje ognia. Bo jeszcze się coś zajmie... Aż strach. To idę za-za-za-zagaszać.', 'QKLJ8IVzHgeUO4GPXTTphWpI4e9J6YyM': 'Puuuf pufff puf', '4ptkk8sOXCydHTunFVUMLM21VxcFjarS': 'A, a, a, a, a, a...', 'OGcD7mGZ3Jdv5KFzMAxGE2JIx0P8zcWm': 'Nigdy niczego nie jem, póki on nie spróbuje. Wasz plan zawiódł Galowie. Nie ja, a mój niewolnik zatruł się ciastem, które mi przysłaliście.', 'eiTj3oXWxBcD9YVW9dLJSBHMaghX7d2Q': 'My nic nie przysyłaliśmy. Jakie ciasto?', 'LfxytvfDZ2epPQhSllO3yysWAyu6WVGB': 'To ciasto oo, widzisz tu jakieś inne?', 'olN7V4sTktwtEmVbEf8CeVoGAMkccvbs': 'Jeśli chodzi o dziki to owszem, ale...', 'tlwSaKz2xRuRMi7DyP8v9AUo4MCUafsB': 'MILCZEĆ!', 'XYeiSUM7UqtwKQrkz25T7HMTR8XCdCXI': 'Próbowaliście mnie zgładzić, zapłacicie za to skórą.', 'YxhqlPLYLDMp140TaRHChuN1A9jDmHEn': 'To bzdura, niczego nie... Jesteśmy niewinni!', 'CAG6CflAJizUHikoJB1Nr3k0KgGo62Fx': 'Co za tupet!', 'iKDpRHAtqnLv4T85SCpnUaeEsCAkGVnB': 'Przecież to nie możliwe, od rana do wieczora jesteśmy na budowie, nic tylko łykamy piasek', '1j4rA68IHDlLaf8x3v7sdS8Sm6rPJS9P': 'I soczewicę.', 'S7z8O9qtUzqb6QYP02B3LjjyCxZxlvbW': 'No wiesz, to akurat teraz nieważne.', 'thzTzMxGrYZacoH8ZqPvoNXuH324xsUj': 'Ale to prawda', '2RlPbJW8MwMtrKUu8oDlSp9NQdQg5dd1': 'No dobra, ale to kiedy indziej... Nie my, nie my, to nie my przysłaliśmy to zatrute ciasto!', 'URolU5HI9onJYU8inhACNVZfHUkEE8qI': 'Jakbym miał takie ciasto to sam bym je zjadł', '3ESGXoV3PquCdLs98kEfFz1uherWb5k2': 'Heh, myślmy logicznie.', 'KRbu707ytJAbV1zDxQdNv9Yxz7hlbIIR': 'No ba', 'S6GuxyyfT3CR4CZ6tBdRvbbDHfP5xbQo': 'DOSYĆ TEGO!', '4ZcMNjkO4RA7rco0ZDIytiYZ5hg23ECJ': 'Ale przecież to jakiś absurd...', 'TWXTl5f8DleMJizPRViNxivcAN9WdCE9': 'Asterixie, jeśli królowa
<filename>babelscan/hdf.py """ Subclass data holder for .hdf and .nxs files """ import os import re import datetime import numpy as np import h5py from . import functions as fn from .babelscan import Scan from .volume import ImageVolume, DatasetVolume "----------------------------LOAD FUNCTIONS---------------------------------" def load(filename): """Load a hdf5 or nexus file""" try: return h5py.File(filename, 'r') except OSError: raise Exception('File does not exist: %s' % filename) def reload(hdf): """Reload a hdf file, hdf = reload(hdf)""" filename = hdf.filename return load(filename) def load_hdf_values(files, address, default=None): """ Load single dataset value (metadata) from hdf files Will return str or float value as per dataset. Array datsets will be averaged to return a single float. :param files: str or list of str file names :param address: str hdf dataset address :param default: value to return if dataset not in file :return: array of floats or strings """ files = fn.liststr(files) values = np.empty(len(files), dtype=object) for n, file in enumerate(files): with load(file) as hdf: if address in hdf: dataset = hdf.get(address) if dataset.ndim > 0: values[n] = np.mean(dataset) else: values[n] = hdf.get(address)[()] else: values[n] = default return values "--------------------------DATASET FUNCTIONS--------------------------------" def address_name(address): """Convert hdf address to name""" return os.path.basename(address) def address_group(address, group_name=None): """ Return part of address upto group_name :param address: str hdf address :param group_name: str name of group :return: reduced str """ if group_name is None: names = address.replace('\\', '/').split('/') return '/'.join(names[:-1]) return re.findall(r'(.+?%s.*?)(?:\/|$)' % group_name, address, re.IGNORECASE)[0] def address_group_name(address): """ Return name of dataset group /entry/[group]/name :param address: str hdf address :return: str """ names = address.replace('\\', '/').split('/') return names[-2] def is_dataset(dataset): """ Check if input is a hdf dataset e.g. 
is_dataset(hdf_group.get(address)) """ return hasattr(dataset, 'size') def is_group(dataset): """ Check if input is a hdf group :param dataset: :return: True/ False """ return hasattr(dataset, 'keys') def dataset_name(dataset): """ Return name of the dataset the name is the final part of the hdf dataset address equivalent to: dataset_name = dataset.name.split('/')[-1] Warning - dataset.name is not always stored as the correct value """ return address_name(dataset.name) def dataset_data(dataset): """Get data from dataset, return float, array or str""" # convert arrays of length 1 to values if not dataset: return None if dataset.size == 1 and len(dataset.shape) == 1: data = np.asarray(dataset)[0] else: data = dataset[()] # Handle bytes strings to return string try: data = data.decode(fn.BYTES_DECODER) except (UnicodeDecodeError, AttributeError): pass return data def dataset_string(dataset): """Generate string from dataset""" data = dataset_data(dataset) try: # single value return fn.VALUE_FORMAT % data except TypeError: # array if dataset.size > 1: return fn.data_string(data) # probably a string return fn.shortstr('%s' % data) def dataset_datetime(dataset, input_format=None, output_format=None): """ Read time stamps from hdf file at specific address If input is a string (or bytes), input_format is used to parse the string If input is a float, it is assumed to be a timestamp from the Unix Epoch (1970-01-01 00:00:00) Useful Format Specifiers (https://strftime.org/): %Y year %m month %d day %H hours %M minutes %S seconds %f microseconds %y year (short) %b month name %a day name %I 12-hour %p AM or PM %z UTC offset :param dataset: hdf dataset :param input_format: str datetime.strptime format specifier to parse dataset :param output_format: str datetime.strftime format specifier to generate output string (if None, returns datetime) :return datetime or list of datetime """ if input_format is None: input_format = fn.DATE_FORMAT data = dataset_data(dataset) data = np.asarray(data, dtype=str).reshape(-1) try: # str date passed, e.g. start_time: '2020-10-22T09:33:11.894+01:00' dates = np.array([datetime.datetime.strptime(date, input_format) for date in data]) except ValueError: # float timestamp passed, e.g. 
TimeFromEpoch: 1603355594.96 dates = np.array([datetime.datetime.fromtimestamp(float(time)) for time in data]) if output_format: if len(data) == 1: return dates[0].strftime(output_format) else: return [date.strftime(output_format) for date in dates] else: if len(data) == 1: return dates[0] return dates def show_attrs(dataset): """Return formatted string of attributes for hdf object""" out = '%s with %d attrs\n' % (dataset, len(dataset.attrs)) out += '%s\n' % dataset.name for key, value in dataset.attrs.items(): out += '%30s : %s\n' % (key, value) return out "-------------------------HDF ADDRESS FUNCTIONS-------------------------------" def dataset_addresses(hdf_group, addresses='/', recursion_limit=100, get_size=None, get_ndim=None): """ Return list of addresses of datasets, starting at each address :param hdf_group: hdf5 File or Group object :param addresses: list of str or str : time_start in this / these addresses :param recursion_limit: Limit on recursivley checking lower groups :param get_size: None or int, if int, return only datasets with matching size :param get_ndim: None or int, if int, return only datasets with matching ndim :return: list of str """ addresses = np.asarray(addresses, dtype=str).reshape(-1) out = [] for address in addresses: data = hdf_group.get(address) if data and is_dataset(data): # address is dataset if (get_size is None and get_ndim is None) or (get_size is not None and data.size == get_size) or ( get_ndim is not None and data.ndim == get_ndim): out += [address] elif data and recursion_limit > 0: # address is Group new_addresses = ['/'.join([address, d]).replace('//', '/') for d in data.keys()] out += dataset_addresses(hdf_group, new_addresses, recursion_limit - 1, get_size, get_ndim) #elif recursion_limit > 0: # # address is None, search for group address and iterate # new_address = get_address(hdf_group, address, return_group=True) # this goes forever if a group fails to load # if new_address: # out += dataset_addresses(hdf_group, new_address, recursion_limit - 1, get_size, get_ndim) return out def find_name(name, address_list, match_case=False, whole_word=False): """ Find datasets using field name :param name: str : name to match in dataset field name :param address_list: list of str: list of str to search in :param match_case: if True, match case of name :param whole_word: if True, only return whole word matches :return: list of str matching dataset addresses """ out = [] if not match_case: name = name.lower() for address in address_list: a_name = (address_name(address) if whole_word else address) a_name = (a_name if match_case else a_name.lower()) if whole_word and name == a_name: out += [address] elif not whole_word and name in a_name: out += [address] return out def find_cascade(name, address_list, exact_only=False, find_any=False): """ Find dataset using field name in a cascading fashion: 1. Find exact match (matching case, whole_word) 2. any case, whole_word 3. any case, anywhere in address 4. Return None otherwise :param name: str : name to match in dataset field name :param address_list: list of str: list of str to search in :param exact_only: return list of exact matches only (may be length 0) :param find_any: if True, return matches where string appears anywhere in address :return: list of str addresses matching name """ # fast return of full address if address_list.count(name) == 1: return [name] if '/' in name: # address, or part of address given. 
# Addresses are unique but exact match not found, return closest match return [address for address in address_list if address.lower().endswith(name.lower())] # only match the address name name_list = [address_name(address) for address in address_list] # Exact match exact_match = [address for idx, address in enumerate(address_list) if name == name_list[idx]] if exact_match or exact_only: return exact_match # If not found, try matching lower case lower_match = [address for idx, address in enumerate(address_list) if name.lower() == name_list[idx].lower()] if lower_match: return lower_match # If not found, try matching any if find_any: any_match = [address for address in address_list if address.lower().endswith(name.lower())] if any_match: return any_match # If not found, try matching group group_match = [address for address in address_list if name == address_group_name(address)] return group_match def tree(hdf_group, detail=False, groups=False, recursion_limit=100): """ Return str of the full tree of data in a hdf object :param hdf_group: hdf5 File or Group object :param detail: False/ True - provide further information about each group and dataset :param groups: Fasle/ True - only display group level structure :param recursion_limit: int max number of levels :return: str """ if recursion_limit < 1: return '' outstr = '%s\n' % hdf_group.name if detail: for attr, val in hdf_group.attrs.items(): outstr += ' @%s: %s\n' % (attr, val) try: for branch in hdf_group.keys(): new_group = hdf_group.get(branch) if new_group: outstr += tree(new_group, detail, groups, recursion_limit-1) return outstr except AttributeError: # doesn't have .keys(), hdf_group = dataset, should have .name, .size, .shape if groups: out = "" elif
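# ---------------------------------------------------------------------------
# A self-contained illustration of what dataset_addresses() above produces:
# every dataset address reachable from a starting group. The sketch builds a
# small temporary HDF5 file with h5py (group and dataset names are invented
# for the example) and walks it recursively in the same spirit as the helper
# above; it is not the babelscan implementation itself.
import os
import tempfile
import h5py

def list_dataset_addresses(group, prefix=''):
    out = []
    for key in group.keys():
        item = group[key]
        address = '%s/%s' % (prefix, key)
        if hasattr(item, 'keys'):            # group -> recurse (cf. is_group)
            out += list_dataset_addresses(item, address)
        else:                                # dataset leaf (cf. is_dataset)
            out.append(address)
    return out

_path = os.path.join(tempfile.mkdtemp(), 'example.nxs')
with h5py.File(_path, 'w') as hdf:
    hdf.create_dataset('entry/title', data='test scan')
    hdf.create_dataset('entry/measurement/eta', data=[1.0, 2.0, 3.0])

with h5py.File(_path, 'r') as hdf:
    print(list_dataset_addresses(hdf))       # ['/entry/measurement/eta', '/entry/title']
# The addresses found this way could then be filtered by field name with
# find_name()/find_cascade(), or read across many files with
# load_hdf_values(files, address, default).
# ---------------------------------------------------------------------------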
#! /usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models from django.forms.models import model_to_dict from appconf.models import Project, Product, Supply import ast import enum import datetime ASSET_STATUS = ( (-3, u"库存"), (-2, u"待确认"), (-1, u"离线"), (1, u"在用"), (2, u"闲置"), (3, u"出库"), (4, u"维修"), (5, u"报废"), ) ASSET_TYPE = ( (1, u"固资"), (2, u"借货"), (3, u'供应商') ) FITTING_STATUS = ( (-3, u"库存"), (-1, u"待确认"), (1, u"在用"), (2, u"闲置"), (3, u"出库"), (4, u"维修"), (5, u"报废") ) DEVICE_STATUS = ( (-3, u"库存"), (-2, u"待确认"), (-1, u"离线"), (1, u"在用"), (2, u"闲置"), (3, u"出库"), (4, u"维修"), (5, u"报废"), (6, u"在用(非自动抓取)") ) DEVICE_TYPE = ( (u'Switch', u"交换机"), (u'Router', u"路由器"), (u'FW', u"防火墙"), (u'AP', u"无线接入点"), (u'WAPC', u"无线控制器"), (u'Other', u"其它设备"), ) DEVICE_ASSET_TYPE = ( (1, u"固资"), (2, u"借货"), (3, u'供应商') ) IDC_PDU_TYPE = ( (1, u"欧标"), (2, u"国标"), (3, u'C14-13 * 2') ) IDC_BEAR_MEDIA_TYPE = ( (1, u"角铁"), (2, u"托盘"), (3, u'其它') ) IDC_CARRIER_TYPE = ( (1, u"联通"), (2, u"移动"), (3, u'电信'), (4, u'BGP'), (5, u'其它') ) REPAIR_STATUS = ( (1, u"维护中"), (2, u"维护完成"), ) OPER_STATUS = ( (1, u"ADD"), (2, u"UPDATE"), (3, u"DELETE"), ) ''' 数据来源 - EXCEL更新、AGENT更新、人工修改 ''' SOURCE_STATUS = ( (1, u"EXCEL"), (2, u"AGENT"), (3, u"人工"), ) ''' 数据来源 - EXCEL更新、AGENT更新、人工修改 ''' class SourceStatus(enum.Enum): excel = 1 agent = 2 manual = 3 ''' 操作 - 新增、修改、删除 ''' class OperStatus(enum.Enum): add = 1 update = 2 delete = 3 class FittingStatus(enum.Enum): # 库存 store_room = -3 # 待确认 un_confirmed = -1 # 在用 online = 1 # 闲置 idle_server = 2 # 闲置 out_store = 3 # 故障 on_repair = 4 # 报废 out_server = 5 class UserInfo(models.Model): username = models.CharField(max_length=100, null=True) password = models.CharField(max_length=100, null=True) def __unicode__(self): return self.username class Idc(models.Model): ids = models.CharField(u"机房标识", max_length=255, unique=True) name = models.CharField(u"机房名称", max_length=255, unique=True) address = models.CharField(u"机房地址", max_length=100, blank=True) tel = models.CharField(u"机房电话", max_length=30, blank=True) contact = models.CharField(u"客户经理", max_length=30, blank=True) contact_phone = models.CharField(u"移动电话", max_length=30, blank=True) jigui = models.CharField(u"机柜信息", max_length=30, blank=True) ip_range = models.CharField(u"IP范围", max_length=30, blank=True) bandwidth = models.CharField(u"接入带宽", max_length=30, blank=True) memo = models.TextField(u"备注信息", max_length=200, blank=True) pdu_type = models.IntegerField(u"PDU类型", choices=IDC_PDU_TYPE, null=True) pdu_num = models.CharField(u"插口数量", max_length=100, blank=True) bear_media = models.IntegerField(u"承重介质", choices=IDC_BEAR_MEDIA_TYPE, null=True) bear_weight = models.CharField(u"承重公斤", max_length=100, blank=True) workday_response = models.CharField(u"工作日响应时间", max_length=100, blank=True) holiday_response = models.CharField(u"节假日响应时间", max_length=100, blank=True) use_cabinet_num = models.CharField(u"使用机柜总数", max_length=100, blank=True) free_cabinet_num = models.CharField(u"空闲机柜总数", max_length=100, blank=True) maintain_service = models.CharField(u"代维服务商", max_length=100, blank=True) maintain_contact = models.CharField(u"代维联系方式", max_length=100, blank=True) delivery_address = models.CharField(u"收货地址", max_length=100, blank=True) data_center = models.CharField(u"数据中心仓库", max_length=100, blank=True) carrier = models.IntegerField(u"运营商名称", choices=IDC_CARRIER_TYPE, null=True) extra_service = models.CharField(u"附加服务", max_length=100, blank=True) search_path = models.CharField(u"查询路径", max_length=200, 
null=True) def __unicode__(self): return self.name class Meta: verbose_name = u'数据中心' verbose_name_plural = verbose_name class Host(models.Model): hostname = models.CharField(max_length=50, verbose_name=u"主机名", blank=True, null=True) ip = models.GenericIPAddressField(u"业务IP", max_length=15, null=True) ip_mac = models.CharField(u"业务网卡MAC", max_length=50, blank=True, null=True) idc = models.ForeignKey(Idc, verbose_name=u"所在机房", on_delete=models.SET_NULL, null=True, blank=True) other_ip = models.CharField(u"其它IP", max_length=255, blank=True, null=True) domain = models.CharField(u"域名", max_length=255, blank=True, null=True) asset_no = models.CharField(u"资产编号", max_length=50, blank=True, null=True) asset_type = models.IntegerField(u"设备类型", choices=ASSET_TYPE, null=True) status = models.IntegerField(u"设备状态", choices=ASSET_STATUS, null=True) os = models.CharField(u"操作系统", max_length=100, blank=True, null=True) vendor = models.CharField(u"设备厂商", max_length=50, blank=True, null=True) model = models.CharField(u"设备型号", max_length=50, blank=True, null=True) cpu_model = models.CharField(u"CPU型号", max_length=100, blank=True, null=True) cpu_num = models.CharField(u"CPU数量", max_length=100, blank=True, null=True) memory = models.CharField(u"内存大小", max_length=30, blank=True, null=True) disk = models.CharField(u"硬盘信息", max_length=255, blank=True, null=True) ssd = models.CharField(u"SSD信息", max_length=255, blank=True, null=True) raid = models.CharField(u"Raid卡", max_length=100, blank=True, null=True) sn = models.CharField(u"SN号码", max_length=60, blank=True) sn_mac = models.CharField(u"设备唯一编号", max_length=100, blank=True, null=True) bmc_address = models.CharField(u"带外管理", max_length=100, blank=True, null=True) nic = models.CharField(u"网卡", max_length=100, blank=True, null=True) manage_nic_mac = models.CharField(u"管理网卡Mac", max_length=100, blank=True, null=True) arrive_time = models.DateTimeField(u"到货时间", blank=True, null=True) launched_time = models.DateTimeField(u"上架时间", blank=True, null=True) order_no = models.CharField(u"订单编号", max_length=100, blank=True, null=True) contract_no = models.CharField(u"合同编号", max_length=100, blank=True, null=True) warranty_period = models.DateTimeField(u"保修期", blank=True, null=True) power = models.CharField(u"电源", max_length=200, blank=True, null=True) floor = models.CharField(u"楼层", max_length=100, blank=True, null=True) room = models.CharField(u"房间", max_length=100, blank=True, null=True) cabinet_col = models.CharField(u"机柜列", max_length=100, blank=True, null=True) cabinet_num = models.CharField(u"机柜号", max_length=100, blank=True, null=True) position = models.CharField(u"具体U位", max_length=100, blank=True, null=True) pod = models.CharField(u"POD", max_length=50, blank=True) height = models.CharField(u"设备高度", max_length=100, blank=True) memo = models.TextField(u"备注信息", max_length=200, blank=True, null=True) created_at = models.DateTimeField(u"创建时间", blank=True, null=True) updated_at = models.DateTimeField(u"更新时间", blank=True, null=True) deleted_at = models.DateTimeField(u"删除时间", blank=True, null=True) is_wrong = models.BooleanField(default=False) wrong_info = models.TextField(u"异常对比", max_length=255, blank=True) is_confirmed = models.BooleanField(default=False) role = models.CharField(u"旷视角色", max_length=50, blank=True, null=True) kernel_version = models.CharField(u"内核版本", max_length=100, blank=True, null=True) os_disk = models.CharField(u"OS内硬盘", max_length=100, blank=True, null=True) usage_person = models.CharField(u"使用人", max_length=100, blank=True, null=True) 
host_statistics = models.TextField(u"硬件统计信息", max_length=255, blank=True, null=True) rapair_status = models.IntegerField(u"维护状态", choices=REPAIR_STATUS, null=True) uuid = models.CharField(u"UUID", max_length=50, blank=True) department = models.ForeignKey( Product, null=True, blank=True, on_delete=models.SET_NULL, verbose_name=u"所属部门" ) project = models.ForeignKey( Project, null=True, blank=True, on_delete=models.SET_NULL, verbose_name=u"所属项目" ) supply = models.ForeignKey( Supply, null=True, blank=True, on_delete=models.SET_NULL, verbose_name=u"所属供应商" ) def __unicode__(self): return self.sn def to_dict(self): result = model_to_dict(self) for k, v in result.items(): if isinstance(v, datetime.datetime): result[k] = v.strftime('%Y-%m-%d %H:%M:%S') if v is None: result[k] = '' return result class Cabinet(models.Model): idc = models.ForeignKey(Idc, verbose_name=u"所在机房", on_delete=models.SET_NULL, null=True, blank=True) name = models.CharField(u"机柜", max_length=100) desc = models.CharField(u"描述", max_length=100, blank=True) serverList = models.ManyToManyField( Host, blank=True, verbose_name=u"所在服务器" ) def __unicode__(self): return self.name class HostGroup(models.Model): name = models.CharField(u"服务器组名", max_length=30, unique=True) desc = models.CharField(u"描述", max_length=100, blank=True) serverList = models.ManyToManyField( Host, blank=True, verbose_name=u"所在服务器" ) def __unicode__(self): return self.name class IpSource(models.Model): net = models.CharField(max_length=30) subnet = models.CharField(max_length=30, null=True) describe = models.CharField(max_length=30, null=True) def __unicode__(self): return self.net class InterFace(models.Model): name = models.CharField(max_length=30) vendor = models.CharField(max_length=30, null=True) bandwidth = models.CharField(max_length=30, null=True) tel = models.CharField(max_length=30, null=True) contact = models.CharField(max_length=30, null=True) startdate = models.DateField() enddate = models.DateField() price = models.IntegerField(verbose_name=u'价格') def __unicode__(self): return self.name class NetDevice(models.Model): ip = models.CharField(u"管理IP", max_length=100, blank=True, null=True) mac = models.CharField(u"管理Mac", max_length=100, blank=True, null=True) sn = models.CharField(u"SN号码", max_length=60, blank=True) model = models.CharField(u"型号", max_length=50, blank=True) device_type = models.CharField(u"类型", choices=DEVICE_TYPE, max_length=50, blank=True) idc = models.ForeignKey(Idc, verbose_name=u"所在机房", on_delete=models.SET_NULL, null=True, blank=True) asset_no = models.CharField(u"资产编号", max_length=50, blank=True, null=True) asset_type = models.IntegerField(u"资产类型", choices=DEVICE_ASSET_TYPE, null=True) floor = models.CharField(u"楼层", max_length=100, blank=True, null=True) room = models.CharField(u"房间", max_length=100, blank=True, null=True) cabinet_col = models.CharField(u"机柜列", max_length=100, null=True, blank=True) cabinet_num = models.CharField(u"机柜号", max_length=100, null=True, blank=True) position = models.CharField(u"机架位", max_length=100, blank=True, null=True) power = models.CharField(u"电源", max_length=200, blank=True, null=True) height = models.CharField(u"设备高度", max_length=100, blank=True, null=True) os = models.CharField(u"操作系统", max_length=255, blank=True, null=True) hostname = models.CharField(u"主机名", max_length=50, blank=True, null=True) port_num = models.CharField(u"端口数量", max_length=50, blank=True, null=True) vendor = models.CharField(u"厂商", max_length=100, blank=True, null=True) arch_type = models.CharField(u"架构类型", 
max_length=100, blank=True, null=True) usage = models.CharField(u"用途", max_length=100, blank=True, null=True) sw_version = models.CharField(u"软件版本号", max_length=100, blank=True, null=True) status = models.IntegerField(u"设备状态", choices=DEVICE_STATUS, null=True) arrive_time = models.DateTimeField('到货时间', blank=True, null=True) warranty_period = models.DateTimeField(u'保修期', blank=True, null=True) order_no = models.CharField(u"订单编号", max_length=100, blank=True, null=True) contract_no = models.CharField(u"合同编号", max_length=100, blank=True, null=True) memo = models.TextField(u"备注信息", max_length=200, blank=True, null=True) created_at = models.DateTimeField(blank=True, null=True) updated_at = models.DateTimeField(blank=True, null=True) deleted_at = models.DateTimeField(blank=True, null=True) launched_time = models.DateTimeField(u"上架时间", blank=True, null=True) is_wrong = models.BooleanField(default=False) wrong_info = models.TextField(u"异常对比", blank=True, null=True) is_confirmed = models.BooleanField(default=False) usage_person = models.CharField(u"使用人", max_length=100, blank=True, null=True) department = models.ForeignKey( Product, null=True, blank=True, on_delete=models.SET_NULL, verbose_name=u"所属部门" ) project = models.ForeignKey( Project, null=True, blank=True, on_delete=models.SET_NULL, verbose_name=u"所属项目" ) supply = models.ForeignKey( Supply, null=True, blank=True, on_delete=models.SET_NULL, verbose_name=u"所属供应商" ) def __unicode__(self): return self.sn def to_dict(self): result = model_to_dict(self) for k, v in result.items(): if isinstance(v, datetime.datetime): result[k] = v.strftime('%Y-%m-%d %H:%M:%S') if v is None: result[k] = '' return result class FittingBase(): ''' 配件类型基类 ''' def get_import_attributes(self): ''' 获取导出属性 :return: ''' pass def get_specify_col_list(self, col_lambda=None): return filter(col_lambda, self.get_import_attributes()) def get_judge_rule_by_col(self, col): ''' 根据字段获取规则 :return: ''' # 获取导出属性 col_attributes = self.get_import_attributes() # 获取列规则 col_rules = filter(lambda x: x['name'] == col, col_attributes) return col_rules def get_judge_func_by_rule(self, col_rule): ''' 根据规则 获取需要判定的方法 :param col_rules: :return: ''' # 列方法验证前缀 col_rule_func_prefix = 'judge_attribute_is_' for col_rule_key, col_rule_vakue in col_rule.items(): # 属性需要验证标识 1-需要验证 0-不用验证 if col_rule_vakue == 1: col_rule_name = '{}{}'.format(col_rule_func_prefix, col_rule_key) judge_func = getattr(self, col_rule_name, '') if judge_func: yield judge_func def judge_attribute_is_required(self, value): ''' 判断属性是否填写 :return: ''' return True if value else False, value if value else '不能为空' def judge_attribute_is_int(self, value): ''' 判断属性是否为整数 :return: ''' try: return True, int(value) except Exception, e: return False, '属性不为整数类型' def judge_attribute_is_string(self, value): ''' 判断属性是否为字符串 :return: ''' try: if isinstance(value, str): return True, value elif isinstance(value, int): return True, str(value) elif isinstance(value, float): return True, str(int(value)) else: return True, str(value) except Exception, e: return False, '属性无法转换成字符串类型' def judge_attribute_is_float(self, value): ''' 判断属性是否为浮点数 :return: ''' try: return True, float(value) except Exception, e: return False, '属性不为浮点类型' def judge_attribute_is_fitting_status(self, value): ''' 判断属性是否为fitting_status类型 :param value: :return: ''' try: if value: fitting_status_value = int(value.split('-')[0]) return True, fitting_status_value else: return True, FittingStatus.un_confirmed.value except Exception, e: return False, '属性不为fitting_status类型' def set_attr(self, 
*args, **kwargs): ''' Assign the given keyword values to the model instance. :param args: :param kwargs: :return: ''' for item in kwargs: if
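# --- Hedged usage sketch (not part of the original file) ---
# A minimal illustration of how the FittingBase rule machinery above appears
# intended to be driven: a subclass declares its importable columns with
# flag-style rules (1 = validate, 0 = skip), and each cell value is passed
# through every judge_attribute_is_* function that get_judge_func_by_rule()
# selects for that column. The DiskFitting class, its column names and the
# validate_row helper are assumptions for illustration only.

class DiskFitting(FittingBase):
    def get_import_attributes(self):
        # Each dict maps a column name to the validation flags it needs.
        return [
            {'name': 'sn', 'required': 1, 'string': 1},
            {'name': 'capacity_gb', 'required': 1, 'int': 1},
            {'name': 'status', 'fitting_status': 1},
        ]


def validate_row(fitting, row):
    """Run every configured judge function for each column of one import row."""
    errors = {}
    for col, value in row.items():
        for rule in fitting.get_judge_rule_by_col(col):
            for judge in fitting.get_judge_func_by_rule(rule):
                ok, result = judge(value)
                if not ok:
                    errors.setdefault(col, []).append(result)
    return errors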
# SPDX-License-Identifier: Apache-2.0 # -*- coding: utf-8 -*- """ Code used to update legacy files for naming / organization Material Names old: {asset_name}_Mat new: {texture_name} """ import bpy import os from pathlib import Path import xrs.log import xrs.convert import xrs.collection import xrs.filename import xrs.object import xrs.render import xrs.select import time def add_emission_node(matName, colorVal): """ Adds emission node to a given material """ if "Emission" not in bpy.data.materials[matName].node_tree.nodes: bpy.data.materials[matName].node_tree.nodes.new("ShaderNodeEmission") bpy.data.materials[matName].node_tree.nodes["Emission"].inputs["Color"].default_value = (colorVal, colorVal, colorVal, 1) def apply_checkerboard(): """ Applies Blender checkerboard to check UV layout of selected object """ obj = bpy.context.active_object mat = bpy.data.materials.get("uv_checker") if mat == None: mat = bpy.data.materials.new(name="uv_checker") mat.use_nodes = True xrs.material.new_image_texture("uv_checker", "uv_checker", resolution = bpy.data.scenes['Scene'].render.resolution_x) bpy.data.images["uv_checker"].source = 'GENERATED' bpy.data.images["uv_checker"].generated_type = 'COLOR_GRID' img_out = mat.node_tree.nodes['uv_checker'].outputs[0] main_bsdf = mat.node_tree.nodes['Principled BSDF'] xrs.material.link_output_to_slot_named(mat, img_out, main_bsdf, 'Base Color') if obj.data.materials: obj.data.materials[0] = mat else: obj.data.materials.append(mat) def assign_to_object(material_name, object_name, slot_index = 0): """ Assign the selected material to the given material slot """ mat = bpy.data.materials[material_name] obj = bpy.data.objects[object_name] if (len(obj.material_slots) <= slot_index): obj.data.materials.append(mat) else: obj.material_slots[slot_index].material = mat def bake_all(obj): """ Goes through and bakes out ao, diffuse, roughness, metallic, and normal maps for a selected object """ xrs.material.bake_selected_ao() xrs.log.info("AO map baked. Beginning baking diffuse map now.") xrs.material.bake_selected_diffuse() xrs.log.info("Diffuse map baked. Beginning baking roughness map now.") xrs.material.bake_selected_roughness() xrs.log.info("Roughness map baked. Beginning baking metallic map now.") xrs.material.bake_selected_metallic() xrs.log.info("Metallic map baked. 
Beginning baking normal map now.") xrs.material.bake_selected_normal() xrs.log.info("All maps for " + obj + " have been baked.") def bake_ao(collection): """ Bakes the AO of all the objects in a collection """ xrs.collection.join_objects_into_one(collection) bpy.ops.object.transform_apply() aoName = bpy.context.active_object.name + "_4k_ao.png" obj = bpy.context.active_object xrs.render.set_bake_render(bpy.data.scenes['Scene'].render.resolution_x) xrs.material.make_material() path = xrs.filename.get_sibling_dir("textures") + xrs.filename.get_filename() + "_4k_ao.png" bpy.data.scenes['Scene'].render.filepath = path bpy.context.active_object.location = [100, 100, 100] bpy.ops.object.bake(type="AO",filepath=path, save_mode='EXTERNAL') bpy.data.images[1].save_render(filepath=bpy.context.scene.render.filepath) bpy.context.active_object.location = [0, 0, 0] # For Testing With Macros def bake_ao(): working_dir = xrs.filename.get_parent_dir() bpy.ops.object.bake(type="AO",filepath=working_dir + "textures", save_mode='EXTERNAL') return {'FINISHED'} def bake_lighting(): """ Bakes the lighting setup onto an object """ xrs.render.set_bake_render(bpy.data.scenes['Scene'].render.resolution_x) matName = bpy.context.active_object.active_material.name imageTexName = xrs.filename.get_filename() + "_2k_diffuse" new_image_texture(matName,imageTexName,color_array=(0, 0, 0, 0), resolution = bpy.data.scenes['Scene'].render.resolution_x) bpy.context.scene.cycles.bake_type = 'COMBINED' bpy.context.active_object.select_set(True) working_dir = xrs.filename.get_parent_dir() bpy.ops.object.bake("INVOKE_DEFAULT",type="DIFFUSE",filepath=working_dir + "textures", save_mode='EXTERNAL') def bake_selected_ao(): """ Bakes the seleted AO on an object """ xrs.render.set_bake_render(bpy.data.scenes['Scene'].render.resolution_x) aoName = bpy.context.active_object.name + "_4k_ao" make_material() activeMaterialName = bpy.context.active_object.active_material.name bpy.data.objects["dimensions_cube"].hide_render = True bpy.data.collections["reference"].hide_render = True bpy.data.objects["front"].hide_render = True nodes = bpy.data.materials[bpy.context.active_object.active_material.name].node_tree.nodes # bsdf = get_one_node_of_type(bpy.data.materials[bpy.context.active_object.active_material.name].node_tree.nodes, "BSDF_PRINCIPLED") currentColor = 0 working_dir = xrs.filename.get_parent_dir() bpy.data.scenes['Scene'].render.filepath = working_dir + "textures/" + aoName + ".png" print("File path set") if len(bpy.context.selected_objects) == 1: bpy.data.objects[bpy.context.active_object.name].location[0] = 20 for allObj in bpy.data.objects: if allObj != bpy.context.active_object: allObj.hide_render = True for eachObject in bpy.data.collections['web'].all_objects: eachObject.select_set(True) print("Baking ambient occlusion data now.") new_image_texture_float(activeMaterialName, aoName, resolution = bpy.data.scenes['Scene'].render.resolution_x) # if get_one_node_of_type(bpy.data.materials[bpy.context.active_object.active_material.name].node_tree.nodes, "BSDF_PRINCIPLED") != None: # bsdf = bpy.data.materials[activeMaterialName].node_tree.nodes[bsdf.name] # imgTex = bpy.data.materials[activeMaterialName].node_tree.nodes[aoName].outputs[0] # link_output_to_slot_named(bpy.data.materials[activeMaterialName], imgTex, bsdf, "Base Color") bpy.context.scene.render.bake.use_selected_to_active = False bpy.ops.object.bake("INVOKE_DEFAULT", type="AO",filepath=working_dir + "textures", save_mode='EXTERNAL') print("Ambient Occlusion Bake Done.") 
#bpy.data.images[aoName].save_render(filepath=bpy.context.scene.render.filepath) print(aoName + " has been saved.") bpy.data.objects[bpy.context.active_object.name].location[0] = 0 return {'FINISHED'} else: print("There is more than 1 object selected.") for allObj in bpy.data.objects: allObj.hide_render = False bpy.context.scene.render.bake.use_selected_to_active = True for eachObject in bpy.data.collections['master'].all_objects: eachObject.select_set(True) for eachObject in bpy.data.collections['web'].all_objects: eachObject.select_set(True) print("Baking ambient occlusion data now.") new_image_texture_float(activeMaterialName, aoName, resolution = bpy.data.scenes['Scene'].render.resolution_x) # if get_one_node_of_type(bpy.data.materials[bpy.context.active_object.active_material.name].node_tree.nodes, "BSDF_PRINCIPLED") != None: # bsdf = bpy.data.materials[activeMaterialName].node_tree.nodes[bsdf.name] # imgTex = bpy.data.materials[activeMaterialName].node_tree.nodes[aoName].outputs[0] # link_output_to_slot_named(bpy.data.materials[activeMaterialName], imgTex, bsdf, "Base Color") bpy.ops.object.bake("INVOKE_DEFAULT",type="AO",filepath=working_dir + "textures", save_mode='EXTERNAL') print("Ambient Occlusion Bake Done.") #bpy.data.images[aoName].save_render(filepath=bpy.context.scene.render.filepath) print(aoName + " has been saved.") return {'FINISHED'} def bake_selected_diffuse(): xrs.render.set_bake_render(bpy.data.scenes['Scene'].render.resolution_x) xrs.render.disable_direct_indirect_for_bake() diffuseName = bpy.context.active_object.name + "_4k_diffuse" make_material() activeMaterialName = bpy.context.active_object.active_material.name nodes = bpy.data.materials[activeMaterialName].node_tree.nodes # bsdf = get_one_node_of_type(nodes, "BSDF_PRINCIPLED") # if get_one_node_of_type(bpy.data.materials[bpy.context.active_object.active_material.name].node_tree.nodes, "BSDF_PRINCIPLED") != None: # currentColor = bpy.data.materials[activeMaterialName].node_tree.nodes[bsdf.name].inputs[0].default_value working_dir = xrs.filename.get_parent_dir() bpy.data.scenes['Scene'].render.filepath = working_dir + "textures/" + diffuseName + ".png" print("File path set") # for anyMat in bpy.data.materials: # print(anyMat.name) # if get_one_node_of_type(bpy.data.materials[bpy.context.active_object.active_material.name].node_tree.nodes, "BSDF_PRINCIPLED") != None: # if anyMat.node_tree.nodes[bsdf.name].inputs[4].default_value == 1.0: # print(anyMat) # anyMat.tag = True # anyMat.node_tree.nodes[bsdf.name].inputs[4].default_value = 0 if len(bpy.context.selected_objects) == 1: for allObj in bpy.data.objects: allObj.hide_render = False bpy.context.scene.render.bake.use_selected_to_active = False print("There is 1 object selected.") new_image_texture(activeMaterialName, diffuseName, resolution = bpy.data.scenes['Scene'].render.resolution_x) # if get_one_node_of_type(bpy.data.materials[bpy.context.active_object.active_material.name].node_tree.nodes, "BSDF_PRINCIPLED") != None: # if check_node_link(activeMaterialName, bsdf.name, "Base Color") == False: # new_image_texture(activeMaterialName, diffuseName, currentColor) # bpy.data.materials[activeMaterialName].node_tree.nodes[diffuseName].location = (-500, 200) # bsdf = bpy.data.materials[activeMaterialName].node_tree.nodes[bsdf.name] # imgTex = bpy.data.materials[activeMaterialName].node_tree.nodes[diffuseName].outputs[0] # link_output_to_slot_named(bpy.data.materials[activeMaterialName], imgTex, bsdf, "Base Color") 
#bpy.data.images[diffuseName].save_render(filepath=bpy.context.scene.render.filepath) #print(diffuseName + " has been saved.") print("Baking diffuse data now.") # if get_one_node_of_type(bpy.data.materials[bpy.context.active_object.active_material.name].node_tree.nodes, "BSDF_PRINCIPLED") != None: # new_image_texture(activeMaterialName, diffuseName, currentColor) # else: # new_image_texture(activeMaterialName, diffuseName, (0, 0, 0, 0)) bpy.ops.object.bake("INVOKE_DEFAULT",type="DIFFUSE",filepath=working_dir + "textures", save_mode='EXTERNAL') print("Diffuse Bake Done.") #bpy.data.images[diffuseName].save_render(filepath=bpy.context.scene.render.filepath) print(diffuseName + " has been saved.") # for anyMat in bpy.data.materials: # if get_one_node_of_type(bpy.data.materials[bpy.context.active_object.active_material.name].node_tree.nodes, "BSDF_PRINCIPLED") != None: # if anyMat.tag == True: # anyMat.node_tree.nodes[bsdf.name].inputs[4].default_value = 1 # anyMat.tag == False else: print("There is more than 1 object selected.") for allObj in bpy.data.objects: allObj.hide_render = False bpy.context.scene.render.bake.use_selected_to_active = True for eachObject in bpy.data.collections['master'].all_objects: eachObject.select_set(True) for eachObject in bpy.data.collections['web'].all_objects: eachObject.select_set(True) print("Baking diffuse data now.") new_image_texture(activeMaterialName, diffuseName, resolution = bpy.data.scenes['Scene'].render.resolution_x) # if get_one_node_of_type(bpy.data.materials[bpy.context.active_object.active_material.name].node_tree.nodes, "BSDF_PRINCIPLED") != None: # new_image_texture(activeMaterialName, diffuseName, currentColor) # else: # new_image_texture(activeMaterialName, diffuseName, (0, 0, 0, 0)) bpy.ops.object.bake("INVOKE_DEFAULT",type="DIFFUSE",filepath=working_dir + "textures", save_mode='EXTERNAL') print("Diffuse Bake Done.") #bpy.data.images[diffuseName].save_render(filepath=bpy.context.scene.render.filepath) print(diffuseName + " has been saved.") # for anyMat in bpy.data.materials: # if get_one_node_of_type(bpy.data.materials[bpy.context.active_object.active_material.name].node_tree.nodes, "BSDF_PRINCIPLED") != None: # if anyMat.tag == True: # anyMat.node_tree.nodes[bsdf.name].inputs[4].default_value = 1 # anyMat.tag == False return {'FINISHED'} # Full Functionality Dependent on Synchonous Baking def bake_selected_metallic(): """ Bakes metallic map, needs materials with BSDF to work """ xrs.render.set_bake_render(bpy.data.scenes['Scene'].render.resolution_x) metallicName = bpy.context.active_object.name + "_4k_metallic" make_material() activeMaterialName = bpy.context.active_object.active_material.name nodes = bpy.data.materials[activeMaterialName].node_tree.nodes if check_if_bsdf() == False: return {'FINISHED'} bsdf = get_one_node_of_type(nodes, "BSDF_PRINCIPLED") if bsdf == None: print("Function not grabbing node correctly.") return outputNode = get_one_node_of_type(nodes, "OUTPUT_MATERIAL") # if get_one_node_of_type(bpy.data.materials[bpy.context.active_object.active_material.name].node_tree.nodes, "BSDF_PRINCIPLED") != None: currentColor = bpy.data.materials[activeMaterialName].node_tree.nodes[bsdf.name].inputs[4].default_value bsdfMatName = bpy.data.materials[activeMaterialName].node_tree.nodes[bsdf.name] matOut = bpy.data.materials[activeMaterialName].node_tree.nodes[outputNode.name] working_dir = xrs.filename.get_parent_dir() bpy.data.scenes['Scene'].render.filepath = working_dir + "textures/" + metallicName + ".png" print("File path set") 
# if get_one_node_of_type(bpy.data.materials[bpy.context.active_object.active_material.name].node_tree.nodes, "BSDF_PRINCIPLED") != None: if currentColor == 0.0 or currentColor == 1.0: print("Metallic values are either 1 or 0.") else: print("Metallic values need to be either 1 or 0.") return False if len(bpy.context.selected_objects) == 1: for allObj in bpy.data.objects: allObj.hide_render = False bpy.context.scene.render.bake.use_selected_to_active = False print("There is 1 object selected.") # new_image_texture(activeMaterialName, metallicName, (0, 0, 0, 1)) # if get_one_node_of_type(bpy.data.materials[bpy.context.active_object.active_material.name].node_tree.nodes, "BSDF_PRINCIPLED") != None: if check_node_link(activeMaterialName, bsdf.name, "Metallic") == False: print("Metallic Node is False") if currentColor == 0.0: #if metallicName in bpy.data.images: #bpy.data.images[metallicName].remove #print("Duplicate image removed.") #else: #print("No duplicate images.") print("Color is 0.0 and making a new image texture now.") new_image_texture(activeMaterialName, metallicName, (0, 0, 0, 1), bpy.data.scenes['Scene'].render.resolution_x) imgTex = bpy.data.materials[activeMaterialName].node_tree.nodes[metallicName].outputs[0] link_output_to_slot_named(bpy.data.materials[activeMaterialName].node_tree.nodes[metallicName].outputs, imgTex, bsdfMatName, "Metallic") else: #if metallicName in bpy.data.images: #bpy.data.images[metallicName].remove #print("Duplicate image removed.") #else: #print("No duplicate images.") print("Color is 1.0 and making a new image texture now.") new_image_texture(activeMaterialName, metallicName, (1, 1, 1, 1), bpy.data.scenes['Scene'].render.resolution_x) imgTex = bpy.data.materials[activeMaterialName].node_tree.nodes[metallicName].outputs[0] link_output_to_slot_named(bpy.data.materials[activeMaterialName].node_tree.nodes[metallicName].outputs, imgTex, bsdfMatName, "Metallic") #bpy.data.images[metallicName].save_render(filepath=bpy.context.scene.render.filepath) return {'FINISHED'} else: print("There is a metallic connection to " + bpy.context.active_object.name +".") bpy.ops.object.bake("INVOKE_DEFAULT",type="EMIT",filepath=working_dir + "textures", save_mode='EXTERNAL') #bpy.data.images[metallicName].save_render(filepath=bpy.context.scene.render.filepath) return {'FINISHED'} else: print("There is more than 1 object selected.") for allObj in bpy.data.objects: allObj.hide_render = False bpy.context.scene.render.bake.use_selected_to_active = True for anyMat in bpy.data.materials: bsdf = get_one_node_of_type(anyMat.node_tree.nodes, "BSDF_PRINCIPLED") outputNode = get_one_node_of_type(anyMat.node_tree.nodes, "OUTPUT_MATERIAL") # if get_one_node_of_type(bpy.data.materials[bpy.context.active_object.active_material.name].node_tree.nodes, "BSDF_PRINCIPLED") != None: if anyMat.node_tree.nodes[bsdf.name].inputs[4].default_value == 1.0: print("Metallic Found For " + anyMat.name) add_emission_node(anyMat.name, 1) emTex = anyMat.node_tree.nodes["Emission"].outputs[0] link_output_to_slot_named(anyMat, emTex, outputNode, "Surface") print("Metallic Linked For " + anyMat.name) else: print("Not metallic Found For " + anyMat.name) add_emission_node(anyMat.name, 0) emTex = anyMat.node_tree.nodes["Emission"].outputs[0] link_output_to_slot_named(anyMat, emTex, outputNode, "Surface") print("Not metallic Linked For " + anyMat.name) for eachObject in bpy.data.collections['web'].all_objects: eachObject.select_set(True) for eachObject in bpy.data.collections['master'].all_objects: 
eachObject.select_set(True) new_image_texture(activeMaterialName, metallicName, (1, 1, 1, 1), bpy.data.scenes['Scene'].render.resolution_x) print("Baking metallic data now.") bpy.ops.object.bake("INVOKE_DEFAULT",type="EMIT",filepath=working_dir + "textures", save_mode='EXTERNAL') print("Metallic Bake Done.") #bpy.data.images[metallicName].save_render(filepath=bpy.context.scene.render.filepath) # temporary fix until threading is figured out return {'FINISHED'} # for anyMat in bpy.data.materials: # bsdf = get_one_node_of_type(anyMat.node_tree.nodes, "BSDF_PRINCIPLED") # outputNode = get_one_node_of_type(anyMat.node_tree.nodes, "OUTPUT_MATERIAL") # bsdfMatName = anyMat.node_tree.nodes[bsdf.name] # link_output_to_slot_named(anyMat, bsdfMatName.outputs[0], outputNode, "Surface") # try: # anyMat.node_tree.nodes["Emission"] # outputNode = get_one_node_of_type(anyMat.node_tree.nodes, "OUTPUT_MATERIAL") # bsdfMatName = anyMat.node_tree.nodes[bsdf.name] # link_output_to_slot_named(anyMat, bsdfMatName.outputs[0], outputNode, "Surface") # except: # print("There's no emission node to unlink") # #bpy.data.images[metallicName].save_render(filepath=bpy.context.scene.render.filepath) # print(metallicName + " has been saved.") # return {'FINISHED'} def bake_selected_normal(): xrs.render.set_bake_render(bpy.data.scenes['Scene'].render.resolution_x) xrs.render.disable_direct_indirect_for_bake() regNormColor = (0.389, 0.441, 0.80, 1) normalName = bpy.context.active_object.name + "_4k_normal" xrs.material.make_material() activeMaterialName = bpy.context.active_object.active_material.name nodes = bpy.data.materials[activeMaterialName].node_tree.nodes # bsdf = get_one_node_of_type(nodes, "BSDF_PRINCIPLED") # if get_one_node_of_type(bpy.data.materials[bpy.context.active_object.active_material.name].node_tree.nodes, "BSDF_PRINCIPLED") != None: # currentColor = bpy.data.materials[activeMaterialName].node_tree.nodes[bsdf.name].inputs[19].default_value working_dir = xrs.filename.get_parent_dir() bpy.data.scenes['Scene'].render.filepath = working_dir + "textures/" + normalName + ".png" print("File path set") if len(bpy.context.selected_objects) == 1: for allObj in bpy.data.objects: allObj.hide_render = False bpy.context.scene.render.bake.use_selected_to_active = False print("There is 1 object selected.") # if get_one_node_of_type(bpy.data.materials[bpy.context.active_object.active_material.name].node_tree.nodes, "BSDF_PRINCIPLED") != None: # if check_node_link(activeMaterialName, bsdf.name, "Normal") == False: # new_image_texture(activeMaterialName, normalName, regNormColor) #bpy.data.materials[activeMaterialName].node_tree.nodes["Image
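# --- Hedged usage sketch (not part of the original file) ---
# Rough illustration of how the baking helpers above appear intended to be
# called from Blender's Python console. It assumes this file is importable as
# xrs.material (the functions themselves reference xrs.material.*) and that
# the object and material names used here ("product_web", "product_web_mat")
# already exist in the .blend file; both names are placeholders.

import bpy
import xrs.material

obj = bpy.data.objects["product_web"]
bpy.context.view_layer.objects.active = obj
obj.select_set(True)

# Attach the (pre-existing) material to the object's first slot before baking.
xrs.material.assign_to_object("product_web_mat", obj.name)

# Bake AO, diffuse, roughness, metallic and normal maps for the active object.
xrs.material.bake_all(obj.name)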
<reponame>ClementJ18/ModDB import re import bs4 import sys import datetime import requests from ..utils import ( BASE_URL, concat_docs, get_date, get_page, raise_for_status, join, get_views, prepare_request, ) from .base import BaseMetaClass from ..enums import FileCategory, AddonCategory, ThumbnailType, MediaCategory from ..boxes import Mirror, Thumbnail @concat_docs class File(BaseMetaClass): """An oject representing a file on ModDB, a file is something posted by the page owner which is directly linked to the page. It is endorsed by the page owner and they should do everythign they can to make sure that it is safe. As compared to an addon that may be added by fans to the page and that are files meant to work with the page but that are not directly related to the page. E.x the file of a mod page would be the mod files used to install the mod whereas an addon could be something like a fan-made texture pack for the mod or a map. Parameters ----------- html : bs4.BeautifulSoup The html to parse. Allows for finer control. Filtering ---------- category : :class:`.FileCategory` The type of file (audio, video, demo, full version....) categoryaddon : :class:`.AddonCategory` The type of addon (map, textures, ect...) game : Union[:class:`.Game`, :class:`.Object`] An game object or an object with an id attribute which represents the game the file belongs to. timeframe : :class:`.TimeFrame` The time period this was released in (last 24hr, last week, last month) Sorting -------- * **released** - when the object was released, asc is oldest, desc is most recent * **id** - when it was added to moddb, asc is oldest, desc is most recent * **ranktoday** - order by daily ranking, asc is highest ranked, desc is lowest rank * **visitstotal** - order by most views, asc is highest views, desc is lowest views * **rating** - order by rating, asc is highest rating, desc is lowest rating * **name** - order alphabetically, asc is a-z, desc is z-a * **date** - order by upload date, asc is most recent first, desc is oldest first Attributes ----------- filename : str The name of the file hash : str The MD5 hash of the file name : str The name of the page size : int the file size in bytes today : int The number of downloads today downloads : int The total number of times this file has been downloaded category : FileCategory The category of the file author : Thumbnail A member type thumbnail of the member who uploaded the file date : datetime.datetime The date the file was uploaded button : str html code for the embed button widget : str html code for the embed widget description : str Description of the file, as written by the author preview : str URL of the preview image for the file """ def __init__(self, html: bs4.BeautifulSoup): if html.find("span", string="File Deleted", class_="heading"): raise ValueError("This file has been removed") info = html.find("div", class_="table tablemenu") file = { x.string.lower(): x.parent.span.string.strip() for x in info.find_all("h5", string=("Filename", "Size", "MD5 Hash")) } self.name = html.find("a", title="Report").parent.parent.find("span", class_="heading").string self.filename = file["filename"] super().__init__(html) self.hash = file["md5 hash"] self.size = int(re.sub(r"[(),bytes]", "", file["size"].split(" ")[1])) downloads = html.find("h5", string="Downloads").parent.a.string self.today = int(re.sub(r"[(),today]", "", downloads.split(" ")[1])) self.downloads = int(downloads.split(" ")[0].replace(",", "")) try: self.category = FileCategory( int(info.find("h5", 
string="Category").parent.a["href"].split("=")[-1]) ) except ValueError: self.category = AddonCategory( int(info.find("h5", string="Category").parent.a["href"].split("=")[-1]) ) uploader = info.find("h5", string="Uploader").parent.a self.author = Thumbnail(url=uploader["href"], name=uploader.string, type=ThumbnailType.member) self.date = get_date(info.find("h5", string="Added").parent.span.time["datetime"]) self.button = info.find("h5", string="Embed Button").parent.span.input["value"] self.widget = info.find("h5", string="Embed Widget").parent.span.input["value"] self.description = html.find("p", id="downloadsummary").string self.preview = html.find_all("img", src=True)[0]["src"] def __repr__(self): return f"<{self.__class__.__name__} name={self.name} type={self.category.name}>" def save(self, file_obj, *, mirror=None): """Save the file to an object. This functions makes two requests. If you pass a valid mirror it will make only one request. Parameters ----------- file_obj : typing.BinaryIO The file obj to save the file to. The binary data will be streamed to that object. mirror : Optional[Mirror] An optional mirror object to download the file from a specific moddb mirror """ if mirror is None: download = get_page(f"{BASE_URL}/downloads/start/{self.id}") url = download.find("a", string=f"download {self.filename}")["href"] else: url = mirror._url SESSION = sys.modules["moddb"].SESSION prepped = prepare_request(requests.Request("GET", join(url)), SESSION) with SESSION.send(prepped, stream=True) as r: raise_for_status(r) for chunk in r.iter_content(chunk_size=8192): file_obj.write(chunk) def get_mirrors(self): """Get all the mirrors from which a file can be downloaded. This can then be passed to File.save to download from a specific mirror. Returns -------- List[Mirror] A list of Mirror objects""" html = get_page(f"https://www.moddb.com/downloads/start/{self.id}/all") mirrors_div = html.find("div", class_="mirrors").find_all("div", recursive=False) mirrors = [] for mirror in mirrors_div: mirror_match = re.match(r"(.*) #([0-9]*) \((.{2}), (.{2})\)", mirror.div.p.contents[-1].strip()) stats_match = re.match( r"([0-9,]*) downloads? served, ([0-9.]*)% capacity", mirror.div.span.string, ) mirrors.append( Mirror( name=mirror_match.group(1), index=int(mirror_match.group(2)), city=mirror_match.group(3), country=mirror_match.group(4), served=int(stats_match.group(1).replace(",", "")), capacity=float(stats_match.group(2)), url=mirror.div.p.a["href"], ) ) return mirrors @concat_docs class Addon(File): """Object representing an addon. Seemingly the only difference between an addon and a file is in the semantics. A file often represents something official released by the page, e.g. the mod installation or an official guide where as addons are often fan made and might not be directly endorsed by the page owners even if it is allowed. They literally add on to the page's content without becoming part of it. There is a slight difference in their profiles but nothing beyond that. Parameters ----------- html : bs4.BeautifulSoup The html to parse. Allows for finer control. Filtering ---------- categoryaddon : :class:`.AddonCategory` The type of addon (map, textures, ect...) licence : :class:`.Licence` The licence of the addon game : Union[:class:`.Game`, :class:`.Object`] An game object or an object with an id attribute which represents the game the addon belongs to. 
timeframe : :class:`.TimeFrame` The time period this was released in (last 24hr, last week, last month) Sorting -------- * **released** - when the object was released, asc is oldest, desc is most recent * **id** - when it was added to moddb, asc is oldest, desc is most recent * **ranktoday** - order by daily ranking, asc is highest ranked, desc is lowest rank * **visitstotal** - order by most views, asc is highest views, desc is lowest views * **rating** - order by rating, asc is highest rating, desc is lowest rating * **name** - order alphabetically, asc is a-z, desc is z-a * **licence** - order based on licence * **date** - order by upload date, asc is most recent first, desc is oldest first """ pass @concat_docs class Media(BaseMetaClass): """Represents an image, audio file or video file on Parameters ----------- html : bs4.BeautifulSoup The html to parse. Allows for finer control. Filtering ----------- sitearea : :class:`.Category` The type of model the media belongs to. Category.downloads is not valid for this. Sorting -------- * **ranktoday** - order by daily ranking, asc is highest ranked, desc is lowest rank * **visitstotal** - order by most views, asc is highest views, desc is lowest views * **name** - order alphabetically, asc is a-z, desc is z-a * **id** - order by upload date, asc is most recent first, desc is oldest first Exclusive to videos and audios * **duration** - order by duration, asc is shortest to longest, desc is longest first Attributes ----------- date : datetime.datetime The date the media was uploaded name : str The name of the media author : Thumbnail Member type thumbnail of the media uploader duration : datetime.timedelta Duration of the media in seconds, 0 if it's an image size : int Size of the files in bytes views : int Total amount of views today : int Amount of views today filename : str The name of the file for the media fileurl : str The url of the file for the media category : MediaCategory Whether the media is an image, a video or an audio description : str The description of the file as given by the file uploader. """ def __init__(self, html: bs4.BeautifulSoup): try: self.name = html.find("meta", itemprop="name")["content"] except TypeError: self.name = html.find("img", id="mediaimage")["title"] super().__init__(html) medias = html.find_all("h5", string=("Date", "By", "Duration",
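# --- Hedged usage sketch (not part of the original file) ---
# Illustration of the download flow described in File.save(): optionally pick
# one of the advertised mirrors, then stream the archive into a local binary
# file object. How the File instance itself is obtained (a parsed download
# page) depends on the package's front-end helpers, so this helper only
# assumes `moddb_file` is an already-constructed File object.

def download_file(moddb_file, dest_path, prefer_mirrors=True):
    """Save a ModDB File to dest_path, optionally from its first mirror."""
    mirror = None
    if prefer_mirrors:
        mirrors = moddb_file.get_mirrors()
        if mirrors:
            mirror = mirrors[0]
    with open(dest_path, "wb") as fp:
        # save() streams the download in 8 KiB chunks into the binary file object.
        moddb_file.save(fp, mirror=mirror)
    return dest_path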
"""The tests for Z-Wave JS device actions.""" from unittest.mock import patch import pytest import voluptuous_serialize from zwave_js_server.client import Client from zwave_js_server.const import CommandClass from zwave_js_server.model.node import Node from homeassistant.components import automation from homeassistant.components.device_automation import DeviceAutomationType from homeassistant.components.zwave_js import DOMAIN, device_action from homeassistant.components.zwave_js.helpers import get_device_id from homeassistant.config_entries import ConfigEntry from homeassistant.const import STATE_UNAVAILABLE from homeassistant.core import HomeAssistant from homeassistant.exceptions import HomeAssistantError from homeassistant.helpers import config_validation as cv, device_registry from homeassistant.setup import async_setup_component from tests.common import async_get_device_automations async def test_get_actions( hass: HomeAssistant, client: Client, lock_schlage_be469: Node, integration: ConfigEntry, ) -> None: """Test we get the expected actions from a zwave_js node.""" node = lock_schlage_be469 dev_reg = device_registry.async_get(hass) device = dev_reg.async_get_device({get_device_id(client, node)}) assert device expected_actions = [ { "domain": DOMAIN, "type": "clear_lock_usercode", "device_id": device.id, "entity_id": "lock.touchscreen_deadbolt", }, { "domain": DOMAIN, "type": "set_lock_usercode", "device_id": device.id, "entity_id": "lock.touchscreen_deadbolt", }, { "domain": DOMAIN, "type": "refresh_value", "device_id": device.id, "entity_id": "lock.touchscreen_deadbolt", }, { "domain": DOMAIN, "type": "set_value", "device_id": device.id, }, { "domain": DOMAIN, "type": "ping", "device_id": device.id, }, { "domain": DOMAIN, "type": "set_config_parameter", "device_id": device.id, "parameter": 3, "bitmask": None, "subtype": f"{node.node_id}-112-0-3 (Beeper)", }, ] actions = await async_get_device_automations( hass, DeviceAutomationType.ACTION, device.id ) for action in expected_actions: assert action in actions async def test_get_actions_meter( hass: HomeAssistant, client: Client, aeon_smart_switch_6: Node, integration: ConfigEntry, ) -> None: """Test we get the expected meter actions from a zwave_js node.""" node = aeon_smart_switch_6 dev_reg = device_registry.async_get(hass) device = dev_reg.async_get_device({get_device_id(client, node)}) assert device actions = await async_get_device_automations( hass, DeviceAutomationType.ACTION, device.id ) filtered_actions = [action for action in actions if action["type"] == "reset_meter"] assert len(filtered_actions) > 0 async def test_actions( hass: HomeAssistant, client: Client, climate_radio_thermostat_ct100_plus: Node, integration: ConfigEntry, ) -> None: """Test actions.""" node = climate_radio_thermostat_ct100_plus device_id = get_device_id(client, node) dev_reg = device_registry.async_get(hass) device = dev_reg.async_get_device({device_id}) assert device assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "event", "event_type": "test_event_refresh_value", }, "action": { "domain": DOMAIN, "type": "refresh_value", "device_id": device.id, "entity_id": "climate.z_wave_thermostat", }, }, { "trigger": { "platform": "event", "event_type": "test_event_ping", }, "action": { "domain": DOMAIN, "type": "ping", "device_id": device.id, }, }, { "trigger": { "platform": "event", "event_type": "test_event_set_value", }, "action": { "domain": DOMAIN, "type": "set_value", "device_id": device.id, 
"command_class": 112, "property": 1, "value": 1, }, }, { "trigger": { "platform": "event", "event_type": "test_event_set_config_parameter", }, "action": { "domain": DOMAIN, "type": "set_config_parameter", "device_id": device.id, "parameter": 1, "bitmask": None, "subtype": "2-112-0-3 (Beeper)", "value": 1, }, }, ] }, ) with patch("zwave_js_server.model.node.Node.async_poll_value") as mock_call: hass.bus.async_fire("test_event_refresh_value") await hass.async_block_till_done() mock_call.assert_called_once() args = mock_call.call_args_list[0][0] assert len(args) == 1 assert args[0].value_id == "13-64-1-mode" with patch("zwave_js_server.model.node.Node.async_ping") as mock_call: hass.bus.async_fire("test_event_ping") await hass.async_block_till_done() mock_call.assert_called_once() args = mock_call.call_args_list[0][0] assert len(args) == 0 with patch("zwave_js_server.model.node.Node.async_set_value") as mock_call: hass.bus.async_fire("test_event_set_value") await hass.async_block_till_done() mock_call.assert_called_once() args = mock_call.call_args_list[0][0] assert len(args) == 2 assert args[0] == "13-112-0-1" assert args[1] == 1 with patch( "homeassistant.components.zwave_js.services.async_set_config_parameter" ) as mock_call: hass.bus.async_fire("test_event_set_config_parameter") await hass.async_block_till_done() mock_call.assert_called_once() args = mock_call.call_args_list[0][0] assert len(args) == 3 assert args[0].node_id == 13 assert args[1] == 1 assert args[2] == 1 async def test_lock_actions( hass: HomeAssistant, client: Client, lock_schlage_be469: Node, integration: ConfigEntry, ) -> None: """Test actions for locks.""" node = lock_schlage_be469 device_id = get_device_id(client, node) dev_reg = device_registry.async_get(hass) device = dev_reg.async_get_device({device_id}) assert device assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "event", "event_type": "test_event_clear_lock_usercode", }, "action": { "domain": DOMAIN, "type": "clear_lock_usercode", "device_id": device.id, "entity_id": "lock.touchscreen_deadbolt", "code_slot": 1, }, }, { "trigger": { "platform": "event", "event_type": "test_event_set_lock_usercode", }, "action": { "domain": DOMAIN, "type": "set_lock_usercode", "device_id": device.id, "entity_id": "lock.touchscreen_deadbolt", "code_slot": 1, "usercode": "1234", }, }, ] }, ) with patch("homeassistant.components.zwave_js.lock.clear_usercode") as mock_call: hass.bus.async_fire("test_event_clear_lock_usercode") await hass.async_block_till_done() mock_call.assert_called_once() args = mock_call.call_args_list[0][0] assert len(args) == 2 assert args[0].node_id == node.node_id assert args[1] == 1 with patch("homeassistant.components.zwave_js.lock.set_usercode") as mock_call: hass.bus.async_fire("test_event_set_lock_usercode") await hass.async_block_till_done() mock_call.assert_called_once() args = mock_call.call_args_list[0][0] assert len(args) == 3 assert args[0].node_id == node.node_id assert args[1] == 1 assert args[2] == "1234" async def test_reset_meter_action( hass: HomeAssistant, client: Client, aeon_smart_switch_6: Node, integration: ConfigEntry, ) -> None: """Test reset_meter action.""" node = aeon_smart_switch_6 device_id = get_device_id(client, node) dev_reg = device_registry.async_get(hass) device = dev_reg.async_get_device({device_id}) assert device assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "event", "event_type": 
"test_event_reset_meter", }, "action": { "domain": DOMAIN, "type": "reset_meter", "device_id": device.id, "entity_id": "sensor.smart_switch_6_electric_consumed_kwh", }, }, ] }, ) with patch( "zwave_js_server.model.endpoint.Endpoint.async_invoke_cc_api" ) as mock_call: hass.bus.async_fire("test_event_reset_meter") await hass.async_block_till_done() mock_call.assert_called_once() args = mock_call.call_args_list[0][0] assert len(args) == 2 assert args[0] == CommandClass.METER assert args[1] == "reset" async def test_get_action_capabilities( hass: HomeAssistant, client: Client, climate_radio_thermostat_ct100_plus: Node, integration: ConfigEntry, ): """Test we get the expected action capabilities.""" node = climate_radio_thermostat_ct100_plus dev_reg = device_registry.async_get(hass) device = device_registry.async_entries_for_config_entry( dev_reg, integration.entry_id )[0] # Test refresh_value capabilities = await device_action.async_get_action_capabilities( hass, { "platform": "device", "domain": DOMAIN, "device_id": device.id, "type": "refresh_value", }, ) assert capabilities and "extra_fields" in capabilities assert voluptuous_serialize.convert( capabilities["extra_fields"], custom_serializer=cv.custom_serializer ) == [{"type": "boolean", "name": "refresh_all_values", "optional": True}] # Test ping capabilities = await device_action.async_get_action_capabilities( hass, { "platform": "device", "domain": DOMAIN, "device_id": device.id, "type": "ping", }, ) assert not capabilities # Test set_value capabilities = await device_action.async_get_action_capabilities( hass, { "platform": "device", "domain": DOMAIN, "device_id": device.id, "type": "set_value", }, ) assert capabilities and "extra_fields" in capabilities cc_options = [ (133, "Association"), (89, "Association Group Information"), (128, "Battery"), (129, "Clock"), (112, "Configuration"), (90, "Device Reset Locally"), (122, "Firmware Update Meta Data"), (135, "Indicator"), (114, "Manufacturer Specific"), (96, "Multi Channel"), (142, "Multi Channel Association"), (49, "Multilevel Sensor"), (115, "Powerlevel"), (68, "Thermostat Fan Mode"), (69, "Thermostat Fan State"), (64, "Thermostat Mode"), (66, "Thermostat Operating State"), (67, "Thermostat Setpoint"), (134, "Version"), (94, "Z-Wave Plus Info"), ] assert voluptuous_serialize.convert( capabilities["extra_fields"], custom_serializer=cv.custom_serializer ) == [ { "name": "command_class", "required": True, "options": cc_options, "type": "select", }, {"name": "property", "required": True, "type": "string"}, {"name": "property_key", "optional": True, "type": "string"}, {"name": "endpoint", "optional": True, "type": "string"}, {"name": "value", "required": True, "type": "string"}, {"type": "boolean", "name": "wait_for_result", "optional": True}, ] # Test enumerated type param capabilities = await device_action.async_get_action_capabilities( hass, { "platform": "device", "domain": DOMAIN, "device_id": device.id, "type": "set_config_parameter", "parameter": 1, "bitmask": None, "subtype": f"{node.node_id}-112-0-1 (Temperature Reporting Threshold)", }, ) assert capabilities and "extra_fields" in capabilities assert voluptuous_serialize.convert( capabilities["extra_fields"], custom_serializer=cv.custom_serializer ) == [ { "name": "value", "required": True, "options": [ (0, "Disabled"), (1, "0.5° F"), (2, "1.0° F"), (3, "1.5° F"), (4, "2.0° F"), ], "type": "select", } ] # Test range type param capabilities = await device_action.async_get_action_capabilities( hass, { "platform": "device", "domain": 
DOMAIN, "device_id": device.id, "type": "set_config_parameter", "parameter": 10, "bitmask": None, "subtype": f"{node.node_id}-112-0-10 (Temperature Reporting Filter)", }, ) assert capabilities and "extra_fields" in capabilities assert voluptuous_serialize.convert( capabilities["extra_fields"], custom_serializer=cv.custom_serializer ) == [ { "name": "value", "required": True, "type": "integer", "valueMin": 0, "valueMax": 124, } ] # Test undefined type param capabilities = await device_action.async_get_action_capabilities( hass, { "platform": "device", "domain": DOMAIN, "device_id": device.id, "type": "set_config_parameter", "parameter": 2, "bitmask": None, "subtype": f"{node.node_id}-112-0-2 (HVAC Settings)", }, ) assert not capabilities async def test_get_action_capabilities_lock_triggers( hass: HomeAssistant, client: Client, lock_schlage_be469: Node, integration: ConfigEntry, ): """Test we get the expected action capabilities for lock triggers.""" dev_reg = device_registry.async_get(hass) device = device_registry.async_entries_for_config_entry( dev_reg, integration.entry_id )[0] # Test clear_lock_usercode capabilities = await device_action.async_get_action_capabilities( hass, { "platform": "device", "domain": DOMAIN, "device_id": device.id, "entity_id": "lock.touchscreen_deadbolt", "type": "clear_lock_usercode", }, ) assert capabilities and "extra_fields" in capabilities assert voluptuous_serialize.convert( capabilities["extra_fields"], custom_serializer=cv.custom_serializer ) == [{"type": "string", "name": "code_slot", "required": True}] # Test set_lock_usercode capabilities = await device_action.async_get_action_capabilities( hass, { "platform": "device", "domain": DOMAIN, "device_id": device.id, "entity_id": "lock.touchscreen_deadbolt", "type": "set_lock_usercode", }, ) assert capabilities and "extra_fields" in capabilities assert voluptuous_serialize.convert( capabilities["extra_fields"], custom_serializer=cv.custom_serializer ) == [ {"type": "string", "name": "code_slot", "required": True}, {"type": "string", "name": "usercode", "required": True}, ] async def test_get_action_capabilities_meter_triggers( hass: HomeAssistant, client: Client, aeon_smart_switch_6: Node, integration: ConfigEntry, ) -> None: """Test we get the expected action capabilities for meter triggers.""" node = aeon_smart_switch_6 dev_reg = device_registry.async_get(hass) device = dev_reg.async_get_device({get_device_id(client, node)}) assert device capabilities = await device_action.async_get_action_capabilities( hass, { "platform": "device", "domain": DOMAIN, "device_id": device.id, "entity_id": "sensor.meter", "type": "reset_meter", }, ) assert capabilities and "extra_fields" in capabilities assert voluptuous_serialize.convert( capabilities["extra_fields"], custom_serializer=cv.custom_serializer ) == [{"type": "string", "name": "value", "optional": True}] async def test_failure_scenarios( hass: HomeAssistant, client: Client, hank_binary_switch: Node, integration: ConfigEntry, ): """Test failure scenarios.""" dev_reg = device_registry.async_get(hass) device = device_registry.async_entries_for_config_entry( dev_reg, integration.entry_id )[0] with pytest.raises(HomeAssistantError): await device_action.async_call_action_from_config( hass, {"type": "failed.test", "device_id": device.id}, {}, None ) assert ( await device_action.async_get_action_capabilities( hass, {"type": "failed.test",
# -*- coding: utf-8 -*- """Chemical Engineering Design Library (ChEDL). Utilities for process modeling. Copyright (C) 2016, 2017, 2018, 2019, 2020 <NAME> <<EMAIL>> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. This module contains a database of metadata on ~70000 chemicals from the PubChem datase. It contains comprehensive feature for searching the metadata. It also includes a small database of common mixture compositions. For reporting bugs, adding feature requests, or submitting pull requests, please use the `GitHub issue tracker <https://github.com/CalebBell/chemicals/>`_. .. contents:: :local: Search Functions ---------------- .. autofunction:: chemicals.identifiers.CAS_from_any .. autofunction:: chemicals.identifiers.MW .. autofunction:: chemicals.identifiers.search_chemical .. autofunction:: chemicals.identifiers.IDs_to_CASs CAS Number Utilities -------------------- .. autofunction:: chemicals.identifiers.check_CAS .. autofunction:: chemicals.identifiers.CAS_to_int .. autofunction:: chemicals.identifiers.int_to_CAS .. autofunction:: chemicals.identifiers.sorted_CAS_key Database Objects ---------------- There is an object used to represent a chemical's metadata, an object used to represent a common mixture's composition, and an object used to hold the mixture metadata. .. autoclass:: chemicals.identifiers.ChemicalMetadata .. autoclass:: chemicals.identifiers.CommonMixtureMetadata .. autoclass:: chemicals.identifiers.ChemicalMetadataDB .. autofunction:: chemicals.identifiers.get_pubchem_db Chemical Groups --------------- It is convenient to tag some chemicals with labels like "refrigerant", or in a certain database or not. The following chemical groups are available. .. autodata:: chemicals.identifiers.cryogenics .. autodata:: chemicals.identifiers.inerts .. autofunction:: chemicals.identifiers.dippr_compounds """ from __future__ import division __all__ = ['check_CAS', 'CAS_from_any', 'MW', 'search_chemical', 'mixture_from_any', 'cryogenics', 'inerts', 'dippr_compounds', 'IDs_to_CASs', 'get_pubchem_db', 'CAS_to_int', 'sorted_CAS_key', 'int_to_CAS'] import os from io import open from chemicals.utils import mark_numba_incompatible from chemicals.utils import (PY37, source_path, os_path_join, can_load_data, to_num) from chemicals.elements import (periodic_table, homonuclear_elements, charge_from_formula, serialize_formula, simple_formula_parser) folder = os_path_join(source_path, 'Identifiers') @mark_numba_incompatible def check_CAS(CASRN): """Checks if a CAS number is valid. 
Returns False if the parser cannot parse the given string. Parameters ---------- CASRN : str A three-piece, dash-separated set of numbers Returns ------- result : bool Boolean value if CASRN was valid. If parsing fails, return False also. Notes ----- Check method is according to Chemical Abstract Society. However, no lookup to their service is performed; therefore, this function cannot detect false positives. Function also does not support additional separators, apart from '-'. CAS numbers up to the series 1 XXX XXX-XX-X are now being issued. A long can hold CAS numbers up to 2 147 483-64-7 Examples -------- >>> check_CAS('7732-18-5') True >>> check_CAS('77332-18-5') False """ try: check = CASRN[-1] # Don't store the int - it is not necessary and is slower productsum = 0 i = 1 for num in CASRN.replace('-', '')[:-1][::-1]: productsum += i*int(num) i += 1 return productsum % 10 == int(check) except: return False @mark_numba_incompatible def CAS_to_int(i): r'''Converts CAS number of a compounds from a string to an int. This is helpful when storing large amounts of CAS numbers, as their strings take up more memory than their numerical representational. All CAS numbers fit into 64 bit ints. Parameters ---------- CASRN : str CASRN [-] Returns ------- CASRN : int CASRN [-] Notes ----- Accomplishes conversion by removing dashes only, and then converting to an int. An incorrect CAS number will change without exception. Examples -------- >>> CAS_to_int('7704-34-9') 7704349 ''' return int(i.replace('-', '')) @mark_numba_incompatible def int_to_CAS(i): r'''Converts CAS number of a compounds from an int to an string. This is helpful when dealing with int CAS numbers. Parameters ---------- CASRN : int CASRN [-] Returns ------- CASRN : str CASRN [-] Notes ----- Handles CAS numbers with an unspecified number of digits. Does not work on floats. Examples -------- >>> int_to_CAS(7704349) '7704-34-9' ''' i = str(i) return i[:-3]+'-'+i[-3:-1]+'-'+i[-1] @mark_numba_incompatible def sorted_CAS_key(CASs): r'''Takes a list of CAS numbers as strings, and returns a tuple of the same CAS numbers, sorted from smallest to largest. This is very convenient for obtaining a unique hash of a set of compounds, so as to see if two groups of compounds are the same. Parameters ---------- CASs : list[str] CAS numbers as strings [-] Returns ------- CASs_sorted : tuple[str] Sorted CAS numbers from lowest (first) to highest (last) [-] Notes ----- Does not check CAS numbers for validity. Examples -------- >>> sorted_CAS_key(['7732-18-5', '64-17-5', '108-88-3', '98-00-0']) ('64-17-5', '98-00-0', '108-88-3', '7732-18-5') ''' int_CASs = [CAS_to_int(i) for i in CASs] return tuple(CAS for _, CAS in sorted(zip(int_CASs, CASs))) class ChemicalMetadata(object): """Class for storing metadata on chemicals. 
Attributes ---------- pubchemid : int Identification number on pubchem database; access their information online at https://pubchem.ncbi.nlm.nih.gov/compound/<pubchemid> [-] formula : str Formula of the compound; in the same format as :obj:`chemicals.elements.serialize_formula` generates, [-] MW : float Molecular weight of the compound as calculated with the standard atomic abundances; consistent with the element weights in :obj:`chemicals.elements.periodic_table`, [g/mol] smiles : str SMILES identification string, [-] InChI : str InChI identification string as given in pubchem (there can be multiple valid InChI strings for a compound), [-] InChI_key : str InChI key identification string (meant to be unique to a compound), [-] iupac_name : str IUPAC name as given in pubchem, [-] common_name : str Common name as given in pubchem, [-] synonyms : list[str] List of synonyms of the compound, [-] CAS : int CAS number of the compound; stored as an int for memory efficiency, [-] """ __slots__ = ('pubchemid', 'formula', 'MW', 'smiles', 'InChI', 'InChI_key', 'iupac_name', 'common_name', 'synonyms', 'CAS', '_charge') def __repr__(self): return ('<ChemicalMetadata, name=%s, formula=%s, smiles=%s, MW=%g>' %(self.common_name, self.formula, self.smiles, self.MW)) @property def charge(self): """Charge of the species as an integer. Computed as a property as most species do not have a charge and so storing it would be a waste of memory. """ try: return self._charge except AttributeError: self._charge = charge_from_formula(self.formula) return self._charge @property def CASs(self): """CAs number of the compound as a string. """ return int_to_CAS(self.CAS) def __init__(self, pubchemid, CAS, formula, MW, smiles, InChI, InChI_key, iupac_name, common_name, synonyms): self.pubchemid = pubchemid self.CAS = CAS self.formula = formula self.MW = MW self.smiles = smiles self.InChI = InChI self.InChI_key = InChI_key self.iupac_name = iupac_name self.common_name = common_name self.synonyms = synonyms class ChemicalMetadataDB(object): '''Object which holds the main database of chemical metadata. .. warning:: To allow the `chemicals` to grow and improve, the details of this class may change in the future without notice! ''' loaded_main_db = False def __init__(self, elements=True, main_db=os_path_join(folder, 'chemical identifiers pubchem large.tsv'), user_dbs=[os_path_join(folder, 'chemical identifiers pubchem small.tsv'), os_path_join(folder, 'chemical identifiers example user db.tsv'), os_path_join(folder, 'Cation db.tsv'), os_path_join(folder, 'Anion db.tsv'), os_path_join(folder, 'Inorganic db.tsv')]): '''Construct the database from its parameters, loading all of the files in `user_dbs`, the periodic table, and defering loading of `main_db` as it is very large until a search doesn't find a chemical in the smaller database. ''' self.pubchem_index = {} self.smiles_index = {} self.InChI_index = {} self.InChI_key_index = {} self.name_index = {} self.CAS_index = {} self.formula_index = {} self.main_db = main_db self.user_dbs = user_dbs self.elements = elements for db in self.user_dbs: self.load(db) self.load_elements() def load_elements(self): '''Load elements into the indexes. 
''' if not self.elements: return None for ele in periodic_table: CAS = int(ele.CAS.replace('-', '')) # Store as int for easier lookup synonyms = [ele.name.lower()] obj = ChemicalMetadata(pubchemid=ele.PubChem, CAS=CAS, formula=ele.symbol, MW=ele.MW, smiles=ele.smiles, InChI=ele.InChI, InChI_key=ele.InChI_key, iupac_name=ele.name.lower(), common_name=ele.name.lower(), synonyms=synonyms) if ele.InChI_key in self.InChI_key_index: if ele.number not in homonuclear_elements: obj_old = self.InChI_key_index[ele.InChI_key] for name in obj_old.synonyms: self.name_index[name] = obj self.InChI_key_index[ele.InChI_key] = obj self.CAS_index[CAS] = obj self.pubchem_index[ele.PubChem] = obj self.smiles_index[ele.smiles] = obj self.InChI_index[ele.InChI] = obj if ele.number in homonuclear_elements: for name in synonyms: self.name_index['monatomic ' + name] = obj else: for name in synonyms: self.name_index[name] = obj self.formula_index[obj.formula] = obj def load(self, file_name): '''Load a particular file into
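# A minimal, self-contained sketch of the CAS check-digit rule described in the
# check_CAS notes above, plus the int <-> string round trip that CAS_to_int and
# int_to_CAS perform. The helper name casrn_checksum_ok is illustrative only and
# is not part of the chemicals package.
def casrn_checksum_ok(casrn):
    """True when the last digit satisfies the CAS rule: weight the remaining
    digits 1, 2, 3, ... from the right, sum, and take mod 10."""
    digits = casrn.replace('-', '')
    body, check = digits[:-1], int(digits[-1])
    total = sum(i * int(d) for i, d in enumerate(reversed(body), start=1))
    return total % 10 == check

assert casrn_checksum_ok('7732-18-5')       # water
assert not casrn_checksum_ok('77332-18-5')

# Round trip between the compact int form and the dashed string form,
# mirroring CAS_to_int / int_to_CAS: only the dashes are removed or re-added.
as_int = int('7704-34-9'.replace('-', ''))   # 7704349
s = str(as_int)
assert s[:-3] + '-' + s[-3:-1] + '-' + s[-1] == '7704-34-9'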
h1 qc = -area_from_left x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa) cuts.append(x1 + x) if len(cuts) == npieces - 1: return cuts segment_remaining -= needed needed = size needed -= segment_remaining return areas def func_b47aad6298b542cbb925e3c9067dde17(npieces, upper, lower): li = ui = 0 areas = [] total_area = 0 x = 0 h1 = upper[0][1] - lower[0][1] W = lower[-1][0] while x < W: lnext = lower[li + 1] unext = upper[ui + 1] if lnext[0] == unext[0]: xnext = lnext[0] h2 = unext[1] - lnext[1] li += 1 ui += 1 elif lnext[0] < upper[ui + 1][0]: xnext = lnext[0] frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0]) yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1]) h2 = yupper - lnext[1] li += 1 else: xnext = unext[0] frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0]) ylower = lower[li][1] + frac * (lnext[1] - lower[li][1]) h2 = unext[1] - ylower ui += 1 da = (xnext - x) * (h1 + h2) / 2.0 total_area += da areas.append((x, xnext, h1, h2, da, total_area)) x = xnext h1 = h2 size = total_area / npieces cuts = [] needed = size for x1, x2, h1, h2, segment_area, total_area in areas: segment_remaining = segment_area area_from_left = 0 while segment_remaining >= needed: area_from_left += needed width = x2 - x1 if h1 == h2: x = area_from_left / h1 else: qa = (h2 - h1) / (2 * width) qb = h1 qc = -area_from_left x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa) cuts.append(x1 + x) if len(cuts) == npieces - 1: return cuts segment_remaining -= needed needed = size needed -= segment_remaining return h1 def func_c53da0f69daa41eb81a9a59148f29c5f(npieces, upper, lower): li = ui = 0 areas = [] total_area = 0 x = 0 h1 = upper[0][1] - lower[0][1] W = lower[-1][0] while x < W: lnext = lower[li + 1] unext = upper[ui + 1] if lnext[0] == unext[0]: xnext = lnext[0] h2 = unext[1] - lnext[1] li += 1 ui += 1 elif lnext[0] < upper[ui + 1][0]: xnext = lnext[0] frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0]) yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1]) h2 = yupper - lnext[1] li += 1 else: xnext = unext[0] frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0]) ylower = lower[li][1] + frac * (lnext[1] - lower[li][1]) h2 = unext[1] - ylower ui += 1 da = (xnext - x) * (h1 + h2) / 2.0 total_area += da areas.append((x, xnext, h1, h2, da, total_area)) x = xnext h1 = h2 size = total_area / npieces cuts = [] needed = size for x1, x2, h1, h2, segment_area, total_area in areas: segment_remaining = segment_area area_from_left = 0 while segment_remaining >= needed: area_from_left += needed width = x2 - x1 if h1 == h2: x = area_from_left / h1 else: qa = (h2 - h1) / (2 * width) qb = h1 qc = -area_from_left x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa) cuts.append(x1 + x) if len(cuts) == npieces - 1: return cuts segment_remaining -= needed needed = size needed -= segment_remaining return lnext def func_47fad57f15494356b939066e226d5a7e(npieces, upper, lower): li = ui = 0 areas = [] total_area = 0 x = 0 h1 = upper[0][1] - lower[0][1] W = lower[-1][0] while x < W: lnext = lower[li + 1] unext = upper[ui + 1] if lnext[0] == unext[0]: xnext = lnext[0] h2 = unext[1] - lnext[1] li += 1 ui += 1 elif lnext[0] < upper[ui + 1][0]: xnext = lnext[0] frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0]) yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1]) h2 = yupper - lnext[1] li += 1 else: xnext = unext[0] frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0]) ylower = lower[li][1] + frac * (lnext[1] - lower[li][1]) h2 = unext[1] - ylower ui += 1 
da = (xnext - x) * (h1 + h2) / 2.0 total_area += da areas.append((x, xnext, h1, h2, da, total_area)) x = xnext h1 = h2 size = total_area / npieces cuts = [] needed = size for x1, x2, h1, h2, segment_area, total_area in areas: segment_remaining = segment_area area_from_left = 0 while segment_remaining >= needed: area_from_left += needed width = x2 - x1 if h1 == h2: x = area_from_left / h1 else: qa = (h2 - h1) / (2 * width) qb = h1 qc = -area_from_left x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa) cuts.append(x1 + x) if len(cuts) == npieces - 1: return cuts segment_remaining -= needed needed = size needed -= segment_remaining return width def func_9fce09243c8d443d8008c7ad5757c072(npieces, upper, lower): li = ui = 0 areas = [] total_area = 0 x = 0 h1 = upper[0][1] - lower[0][1] W = lower[-1][0] while x < W: lnext = lower[li + 1] unext = upper[ui + 1] if lnext[0] == unext[0]: xnext = lnext[0] h2 = unext[1] - lnext[1] li += 1 ui += 1 elif lnext[0] < upper[ui + 1][0]: xnext = lnext[0] frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0]) yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1]) h2 = yupper - lnext[1] li += 1 else: xnext = unext[0] frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0]) ylower = lower[li][1] + frac * (lnext[1] - lower[li][1]) h2 = unext[1] - ylower ui += 1 da = (xnext - x) * (h1 + h2) / 2.0 total_area += da areas.append((x, xnext, h1, h2, da, total_area)) x = xnext h1 = h2 size = total_area / npieces cuts = [] needed = size for x1, x2, h1, h2, segment_area, total_area in areas: segment_remaining = segment_area area_from_left = 0 while segment_remaining >= needed: area_from_left += needed width = x2 - x1 if h1 == h2: x = area_from_left / h1 else: qa = (h2 - h1) / (2 * width) qb = h1 qc = -area_from_left x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa) cuts.append(x1 + x) if len(cuts) == npieces - 1: return cuts segment_remaining -= needed needed = size needed -= segment_remaining return x1 def func_ea4eb3eef93f480f8dbbe06029737c98(npieces, upper, lower): li = ui = 0 areas = [] total_area = 0 x = 0 h1 = upper[0][1] - lower[0][1] W = lower[-1][0] while x < W: lnext = lower[li + 1] unext = upper[ui + 1] if lnext[0] == unext[0]: xnext = lnext[0] h2 = unext[1] - lnext[1] li += 1 ui += 1 elif lnext[0] < upper[ui + 1][0]: xnext = lnext[0] frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0]) yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1]) h2 = yupper - lnext[1] li += 1 else: xnext = unext[0] frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0]) ylower = lower[li][1] + frac * (lnext[1] - lower[li][1]) h2 = unext[1] - ylower ui += 1 da = (xnext - x) * (h1 + h2) / 2.0 total_area += da areas.append((x, xnext, h1, h2, da, total_area)) x = xnext h1 = h2 size = total_area / npieces cuts = [] needed = size for x1, x2, h1, h2, segment_area, total_area in areas: segment_remaining = segment_area area_from_left = 0 while segment_remaining >= needed: area_from_left += needed width = x2 - x1 if h1 == h2: x = area_from_left / h1 else: qa = (h2 - h1) / (2 * width) qb =
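# The functions above repeatedly solve the same sub-problem: inside one
# trapezoidal segment of width `width` with left/right heights h1 and h2, find
# the offset x at which the area accumulated from the left equals `target`.
# Area as a function of the offset is
#     A(x) = h1*x + (h2 - h1) / (2*width) * x**2,
# so A(x) = target is the quadratic with qa = (h2 - h1) / (2*width), qb = h1,
# qc = -target used in the loops above, and the positive root is taken. A
# standalone sketch (the name cut_offset_in_trapezoid is illustrative only):
def cut_offset_in_trapezoid(width, h1, h2, target):
    if h1 == h2:                    # rectangle: area grows linearly
        return target / h1
    qa = (h2 - h1) / (2.0 * width)
    qb = h1
    qc = -target
    # positive root of qa*x**2 + qb*x + qc = 0
    return (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)

# Example: a trapezoid of width 4 with heights 1 and 3 has total area 8;
# half of that area (4) is reached at x = 2*(sqrt(5) - 1) ~= 2.472.
x = cut_offset_in_trapezoid(4.0, 1.0, 3.0, 4.0)
assert abs((1.0 * x + (3.0 - 1.0) / 8.0 * x * x) - 4.0) < 1e-9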
reverse zone."]) # zone discovery failures assert_failed_change_in_error_response(response[3], input_name=f"no.subzone.{parent_zone_name}", record_type="AAAA", record_data="fc00:db20:35b:7399::5", error_messages=[f'Zone Discovery Failed: zone for \"no.subzone.{parent_zone_name}\" does not exist in VinylDNS. ' f'If zone exists, then it must be connected to in VinylDNS.']) assert_failed_change_in_error_response(response[4], input_name="no.zone.at.all.", record_type="AAAA", record_data="fc00:db20:35b:7399::5", error_messages=["Zone Discovery Failed: zone for \"no.zone.at.all.\" does not exist in VinylDNS. " "If zone exists, then it must be connected to in VinylDNS."]) # context validations: duplicate name failure (always on the cname), conflicting recordsets, unauthorized error assert_failed_change_in_error_response(response[5], input_name=f"cname-duplicate.{parent_zone_name}", record_type="CNAME", record_data="test.com.", error_messages=[f"Record Name \"cname-duplicate.{parent_zone_name}\" Not Unique In Batch Change: " f"cannot have multiple \"CNAME\" records with the same name."]) assert_successful_change_in_error_response(response[6], input_name=f"cname-duplicate.{parent_zone_name}", record_type="AAAA", record_data="fc00:db20:35b:7399::5") assert_failed_change_in_error_response(response[7], input_name=existing_aaaa_fqdn, record_type="AAAA", record_data="fc00:db20:35b:7399::5", error_messages=[f"Record \"{existing_aaaa_fqdn}\" Already Exists: cannot add an existing record; " f"to update it, issue a DeleteRecordSet then an Add."]) assert_failed_change_in_error_response(response[8], input_name=existing_cname_fqdn, record_type="AAAA", record_data="fc00:db20:35b:7399::5", error_messages=[f"CNAME Conflict: CNAME record names must be unique. Existing record with name \"{existing_cname_fqdn}\" " f"and type \"CNAME\" conflicts with this record."]) assert_failed_change_in_error_response(response[9], input_name=f"user-add-unauthorized.{dummy_zone_name}", record_type="AAAA", record_data="fc00:db20:35b:7399::5", error_messages=[f"User \"ok\" is not authorized. 
Contact zone owner group: {dummy_group_name} at <EMAIL> to make DNS changes."]) finally: clear_recordset_list(to_delete, client) def test_aaaa_recordtype_update_delete_checks(shared_zone_test_context): """ Test all update and delete validations performed on AAAA records submitted in batch changes """ ok_client = shared_zone_test_context.ok_vinyldns_client dummy_client = shared_zone_test_context.dummy_vinyldns_client ok_zone = shared_zone_test_context.ok_zone dummy_zone = shared_zone_test_context.dummy_zone ok_zone_name = shared_zone_test_context.ok_zone["name"] dummy_zone_name = shared_zone_test_context.dummy_zone["name"] dummy_group_name = shared_zone_test_context.dummy_group["name"] rs_delete_name = generate_record_name() rs_delete_fqdn = rs_delete_name + f".{ok_zone_name}" rs_delete_ok = create_recordset(ok_zone, rs_delete_name, "AAAA", [{"address": "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:5:6:7:8"}], 200) rs_update_name = generate_record_name() rs_update_fqdn = rs_update_name + f".{ok_zone_name}" rs_update_ok = create_recordset(ok_zone, rs_update_name, "AAAA", [{"address": "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b"}], 200) rs_delete_dummy_name = generate_record_name() rs_delete_dummy_fqdn = rs_delete_dummy_name + f".{dummy_zone_name}" rs_delete_dummy = create_recordset(dummy_zone, rs_delete_dummy_name, "AAAA", [{"address": "fc00:db20:35b:7399::5"}], 200) rs_update_dummy_name = generate_record_name() rs_update_dummy_fqdn = rs_update_dummy_name + f".{dummy_zone_name}" rs_update_dummy = create_recordset(dummy_zone, rs_update_dummy_name, "AAAA", [{"address": "fc00:e968:6179::de52:7100"}], 200) batch_change_input = { "comments": "this is optional", "changes": [ # valid changes get_change_A_AAAA_json(rs_delete_fqdn, record_type="AAAA", change_type="DeleteRecordSet", address="fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:5:6:7:8"), get_change_A_AAAA_json(rs_update_fqdn, record_type="AAAA", ttl=300, address="fc00:e968:6179::de52:7100"), get_change_A_AAAA_json(rs_update_fqdn, record_type="AAAA", change_type="DeleteRecordSet"), # input validations failures get_change_A_AAAA_json(f"invalid-name$.{ok_zone_name}", record_type="AAAA", change_type="DeleteRecordSet"), get_change_A_AAAA_json("reverse.zone.in-addr.arpa.", record_type="AAAA", change_type="DeleteRecordSet"), get_change_A_AAAA_json(f"bad-ttl-and-invalid-name$-update.{ok_zone_name}", record_type="AAAA", change_type="DeleteRecordSet"), get_change_A_AAAA_json(f"bad-ttl-and-invalid-name$-update.{ok_zone_name}", ttl=29, record_type="AAAA", address="fc00:e968:6179::de52:7100"), # zone discovery failure get_change_A_AAAA_json("no.zone.at.all.", record_type="AAAA", change_type="DeleteRecordSet"), # context validation failures get_change_A_AAAA_json(f"delete-nonexistent.{ok_zone_name}", record_type="AAAA", change_type="DeleteRecordSet"), get_change_A_AAAA_json(f"update-nonexistent.{ok_zone_name}", record_type="AAAA", change_type="DeleteRecordSet"), get_change_A_AAAA_json(f"update-nonexistent.{ok_zone_name}", record_type="AAAA", address="fc00:db20:35b:7399::5"), get_change_A_AAAA_json(rs_delete_dummy_fqdn, record_type="AAAA", change_type="DeleteRecordSet"), get_change_A_AAAA_json(rs_update_dummy_fqdn, record_type="AAAA", address="fc00:db20:35b:7399::5"), get_change_A_AAAA_json(rs_update_dummy_fqdn, record_type="AAAA", change_type="DeleteRecordSet") ] } to_create = [rs_delete_ok, rs_update_ok, rs_delete_dummy, rs_update_dummy] to_delete = [] try: for rs in to_create: if rs["zoneId"] == dummy_zone["id"]: create_client = dummy_client else: create_client = 
ok_client create_rs = create_client.create_recordset(rs, status=202) to_delete.append(create_client.wait_until_recordset_change_status(create_rs, "Complete")) # Confirm that record set doesn't already exist ok_client.get_recordset(ok_zone["id"], "delete-nonexistent", status=404) response = ok_client.create_batch_change(batch_change_input, status=400) # successful changes assert_successful_change_in_error_response(response[0], input_name=rs_delete_fqdn, record_type="AAAA", record_data=None, change_type="DeleteRecordSet") assert_successful_change_in_error_response(response[1], ttl=300, input_name=rs_update_fqdn, record_type="AAAA", record_data="fc00:e968:6179::de52:7100") assert_successful_change_in_error_response(response[2], input_name=rs_update_fqdn, record_type="AAAA", record_data=None, change_type="DeleteRecordSet") # input validations failures: invalid input name, reverse zone error, invalid ttl assert_failed_change_in_error_response(response[3], input_name=f"invalid-name$.{ok_zone_name}", record_type="AAAA", record_data=None, change_type="DeleteRecordSet", error_messages=[f'Invalid domain name: "invalid-name$.{ok_zone_name}", ' f'valid domain names must be letters, numbers, underscores, and hyphens, joined by dots, and terminated with a dot.']) assert_failed_change_in_error_response(response[4], input_name="reverse.zone.in-addr.arpa.", record_type="AAAA", record_data=None, change_type="DeleteRecordSet", error_messages=["Invalid Record Type In Reverse Zone: record with name \"reverse.zone.in-addr.arpa.\" and " "type \"AAAA\" is not allowed in a reverse zone."]) assert_failed_change_in_error_response(response[5], input_name=f"bad-ttl-and-invalid-name$-update.{ok_zone_name}", record_type="AAAA", record_data=None, change_type="DeleteRecordSet", error_messages=[f'Invalid domain name: "bad-ttl-and-invalid-name$-update.{ok_zone_name}", ' f'valid domain names must be letters, numbers, underscores, and hyphens, joined by dots, and terminated with a dot.']) assert_failed_change_in_error_response(response[6], input_name=f"bad-ttl-and-invalid-name$-update.{ok_zone_name}", ttl=29, record_type="AAAA", record_data="fc00:e968:6179::de52:7100", error_messages=['Invalid TTL: "29", must be a number between 30 and 2147483647.', f'Invalid domain name: "bad-ttl-and-invalid-name$-update.{ok_zone_name}", ' f'valid domain names must be letters, numbers, underscores, and hyphens, joined by dots, and terminated with a dot.']) # zone discovery failure assert_failed_change_in_error_response(response[7], input_name="no.zone.at.all.", record_type="AAAA", record_data=None, change_type="DeleteRecordSet", error_messages=["Zone Discovery Failed: zone for \"no.zone.at.all.\" does not exist in VinylDNS. 
" "If zone exists, then it must be connected to in VinylDNS."]) # context validation failures: record does not exist, not authorized assert_failed_change_in_error_response(response[8], input_name=f"delete-nonexistent.{ok_zone_name}", record_type="AAAA", record_data=None, change_type="DeleteRecordSet", error_messages=[f"Record \"delete-nonexistent.{ok_zone_name}\" Does Not Exist: cannot delete a record that does not exist."]) assert_failed_change_in_error_response(response[9], input_name=f"update-nonexistent.{ok_zone_name}", record_type="AAAA", record_data=None, change_type="DeleteRecordSet", error_messages=[f"Record \"update-nonexistent.{ok_zone_name}\" Does Not Exist: cannot delete a record that does not exist."]) assert_successful_change_in_error_response(response[10], input_name=f"update-nonexistent.{ok_zone_name}", record_type="AAAA", record_data="fc00:db20:35b:7399::5") assert_failed_change_in_error_response(response[11], input_name=rs_delete_dummy_fqdn, record_type="AAAA", record_data=None, change_type="DeleteRecordSet", error_messages=[f"User \"ok\" is not authorized. Contact zone owner group: {dummy_group_name} at <EMAIL> to make DNS changes."]) assert_failed_change_in_error_response(response[12], input_name=rs_update_dummy_fqdn, record_type="AAAA", record_data="fc00:db20:35b:7399::5", error_messages=[f"User \"ok\" is not authorized. Contact zone owner group: {dummy_group_name} at <EMAIL> to make DNS changes."]) assert_failed_change_in_error_response(response[13], input_name=rs_update_dummy_fqdn, record_type="AAAA", record_data=None, change_type="DeleteRecordSet", error_messages=[f"User \"ok\" is not authorized. Contact zone owner group: {dummy_group_name} at <EMAIL> to make DNS changes."]) finally: # Clean up updates dummy_deletes = [rs for rs in to_delete if rs["zone"]["id"] == dummy_zone["id"]] ok_deletes = [rs for rs in to_delete if rs["zone"]["id"] != dummy_zone["id"]] clear_recordset_list(dummy_deletes, dummy_client) clear_recordset_list(ok_deletes, ok_client) def test_cname_recordtype_add_checks(shared_zone_test_context): """ Test all add validations performed on CNAME records submitted in batch changes """ client = shared_zone_test_context.ok_vinyldns_client ok_zone = shared_zone_test_context.ok_zone ok_zone_name = shared_zone_test_context.ok_zone["name"] dummy_zone_name = shared_zone_test_context.dummy_zone["name"] dummy_group_name = shared_zone_test_context.dummy_group["name"] ip4_prefix = shared_zone_test_context.ip4_classless_prefix ip4_zone_name = shared_zone_test_context.classless_base_zone["name"] ip4_reverse_zone_name = shared_zone_test_context.ip4_reverse_zone["name"] parent_zone_name = shared_zone_test_context.parent_zone["name"] existing_forward_name = generate_record_name() existing_forward_fqdn = existing_forward_name + "." + shared_zone_test_context.parent_zone["name"] existing_forward = create_recordset(shared_zone_test_context.parent_zone, existing_forward_name, "A", [{"address": "1.2.3.4"}], 100) existing_reverse_fqdn = "0." + shared_zone_test_context.classless_base_zone["name"] existing_reverse = create_recordset(shared_zone_test_context.classless_base_zone, "0", "PTR", [{"ptrdname": "test.com. "}], 100) existing_cname_name = generate_record_name() existing_cname_fqdn = existing_cname_name + "." + shared_zone_test_context.parent_zone["name"] existing_cname = create_recordset(shared_zone_test_context.parent_zone, existing_cname_name, "CNAME", [{"cname": "cname.data. 
"}], 100) rs_a_to_cname_ok_name = generate_record_name() rs_a_to_cname_ok_fqdn = rs_a_to_cname_ok_name + f".{ok_zone_name}" rs_a_to_cname_ok = create_recordset(ok_zone, rs_a_to_cname_ok_name, "A", [{"address": "1.1.1.1"}]) rs_cname_to_A_ok_name = generate_record_name() rs_cname_to_A_ok_fqdn = rs_cname_to_A_ok_name + f".{ok_zone_name}" rs_cname_to_A_ok = create_recordset(ok_zone, rs_cname_to_A_ok_name, "CNAME", [{"cname": "test.com."}]) forward_fqdn = generate_record_name(parent_zone_name) reverse_fqdn = generate_record_name(ip4_reverse_zone_name) batch_change_input = { "changes": [ # valid change get_change_CNAME_json(forward_fqdn), get_change_CNAME_json(reverse_fqdn), # valid changes - delete and add of same record name but different type get_change_A_AAAA_json(rs_a_to_cname_ok_fqdn, change_type="DeleteRecordSet"), get_change_CNAME_json(rs_a_to_cname_ok_fqdn), get_change_A_AAAA_json(rs_cname_to_A_ok_fqdn), get_change_CNAME_json(rs_cname_to_A_ok_fqdn, change_type="DeleteRecordSet"), # input validations failures get_change_CNAME_json(f"bad-ttl-and-invalid-name$.{parent_zone_name}", ttl=29, cname="also$bad.name"), # zone discovery failure get_change_CNAME_json("no.zone.com."), # cant be apex get_change_CNAME_json(parent_zone_name), # context validation failures get_change_PTR_json(f"{ip4_prefix}.15"), get_change_CNAME_json(f"15.{ip4_zone_name}", cname="duplicate.other.type.within.batch."), get_change_CNAME_json(f"cname-duplicate.{parent_zone_name}"), get_change_CNAME_json(f"cname-duplicate.{parent_zone_name}", cname="duplicate.cname.type.within.batch."), get_change_CNAME_json(existing_forward_fqdn), get_change_CNAME_json(existing_cname_fqdn), get_change_CNAME_json(f"0.{ip4_zone_name}", cname="duplicate.in.db."), get_change_CNAME_json(f"user-add-unauthorized.{dummy_zone_name}") ] } to_create = [existing_forward, existing_reverse, existing_cname, rs_a_to_cname_ok, rs_cname_to_A_ok] to_delete = [] try: for create_json in to_create: create_result = client.create_recordset(create_json, status=202) to_delete.append(client.wait_until_recordset_change_status(create_result, "Complete")) response = client.create_batch_change(batch_change_input, status=400) # successful changes assert_successful_change_in_error_response(response[0], input_name=forward_fqdn, record_type="CNAME", record_data="test.com.") assert_successful_change_in_error_response(response[1], input_name=reverse_fqdn, record_type="CNAME", record_data="test.com.") # successful changes - delete and add of same record name but different type assert_successful_change_in_error_response(response[2], input_name=rs_a_to_cname_ok_fqdn, change_type="DeleteRecordSet") assert_successful_change_in_error_response(response[3], input_name=rs_a_to_cname_ok_fqdn, record_type="CNAME", record_data="test.com.") assert_successful_change_in_error_response(response[4], input_name=rs_cname_to_A_ok_fqdn) assert_successful_change_in_error_response(response[5], input_name=rs_cname_to_A_ok_fqdn, record_type="CNAME", change_type="DeleteRecordSet") # ttl, domain name, data assert_failed_change_in_error_response(response[6], input_name=f"bad-ttl-and-invalid-name$.{parent_zone_name}", ttl=29, record_type="CNAME", record_data="also$bad.name.", error_messages=['Invalid TTL: "29", must be a number between 30 and 2147483647.', f'Invalid domain name: "bad-ttl-and-invalid-name$.{parent_zone_name}", ' "valid domain names must be letters, numbers, underscores, and hyphens, joined by dots, and terminated with a dot.", 'Invalid domain name: "also$bad.name.", valid domain names 
must be letters, numbers, underscores, and hyphens, ' "joined by dots, and terminated with a dot."]) # zone discovery failure assert_failed_change_in_error_response(response[7], input_name="no.zone.com.", record_type="CNAME", record_data="test.com.", error_messages=["Zone Discovery Failed: zone for \"no.zone.com.\" does not exist in VinylDNS. " "If zone exists, then it must be connected to in VinylDNS."]) # CNAME cant be apex assert_failed_change_in_error_response(response[8], input_name=parent_zone_name, record_type="CNAME", record_data="test.com.", error_messages=[f"CNAME cannot be the same name as zone \"{parent_zone_name}\"."]) # context validations: duplicates in batch assert_successful_change_in_error_response(response[9], input_name=f"{ip4_prefix}.15", record_type="PTR", record_data="test.com.") assert_failed_change_in_error_response(response[10], input_name=f"15.{ip4_zone_name}", record_type="CNAME", record_data="duplicate.other.type.within.batch.", error_messages=[f"Record Name \"15.{ip4_zone_name}\" Not Unique In Batch Change: " f"cannot have multiple \"CNAME\" records with the same name."]) assert_failed_change_in_error_response(response[11], input_name=f"cname-duplicate.{parent_zone_name}", record_type="CNAME", record_data="test.com.", error_messages=[f"Record Name \"cname-duplicate.{parent_zone_name}\" Not Unique In Batch Change: " f"cannot have multiple \"CNAME\" records with the same name."]) assert_failed_change_in_error_response(response[12], input_name=f"cname-duplicate.{parent_zone_name}", record_type="CNAME", record_data="duplicate.cname.type.within.batch.", error_messages=[f"Record Name \"cname-duplicate.{parent_zone_name}\" Not Unique In Batch Change: " f"cannot have multiple \"CNAME\" records with the same name."]) # context validations: existing recordsets pre-request, unauthorized, failure on duplicate add assert_failed_change_in_error_response(response[13], input_name=existing_forward_fqdn, record_type="CNAME", record_data="test.com.", error_messages=[f"CNAME Conflict: CNAME record names must be unique. " f"Existing record with name \"{existing_forward_fqdn}\" and type \"A\" conflicts with this record."]) assert_failed_change_in_error_response(response[14], input_name=existing_cname_fqdn, record_type="CNAME", record_data="test.com.", error_messages=[f"Record \"{existing_cname_fqdn}\" Already Exists: cannot add an existing record; to update it, " f"issue a DeleteRecordSet then an Add.", f"CNAME Conflict: CNAME record names must be unique. " f"Existing record with name \"{existing_cname_fqdn}\" and type \"CNAME\" conflicts with this record."]) assert_failed_change_in_error_response(response[15], input_name=existing_reverse_fqdn, record_type="CNAME", record_data="duplicate.in.db.", error_messages=["CNAME Conflict: CNAME record names must be unique. " f"Existing record with name \"{existing_reverse_fqdn}\" and type \"PTR\" conflicts with this record."]) assert_failed_change_in_error_response(response[16], input_name=f"user-add-unauthorized.{dummy_zone_name}", record_type="CNAME", record_data="test.com.", error_messages=[f"User \"ok\" is not authorized. Contact zone
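# The tests above assemble batch_change_input from helpers such as
# get_change_A_AAAA_json and get_change_CNAME_json, whose output is not shown
# in this excerpt. The sketch below is an assumption of what a single "changes"
# entry for the VinylDNS batch-change endpoint might look like; the field names
# (changeType, inputName, type, ttl, record) are illustrative, not confirmed
# by this file.
def sketch_change_cname_json(input_name, ttl=200, cname="test.com.",
                             change_type="Add"):
    change = {
        "changeType": change_type,
        "inputName": input_name,
        "type": "CNAME",
    }
    if change_type == "Add":
        change["ttl"] = ttl
        change["record"] = {"cname": cname}
    return change

batch_change_input_sketch = {
    "comments": "this is optional",
    "changes": [
        sketch_change_cname_json("forward.parent.com."),
        sketch_change_cname_json("old.parent.com.", change_type="DeleteRecordSet"),
    ],
}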
from __future__ import absolute_import from __future__ import unicode_literals from corehq.apps.tzmigration.timezonemigration import is_datetime_string, FormJsonDiff, json_diff PARTIAL_DIFFS = { 'XFormInstance*': [ {'path': ('_rev',)}, # couch only {'path': ('migrating_blobs_from_couch',)}, # couch only {'path': ('#export_tag',)}, # couch only {'path': ('computed_',)}, # couch only {'path': ('state',)}, # SQL only {'path': ('computed_modified_on_',)}, # couch only {'path': ('deprecated_form_id',), 'old_value': Ellipsis, 'new_value': None}, # SQL always has this {'path': ('path',)}, # couch only {'path': ('user_id',)}, # couch only {'path': ('external_blobs',)}, # couch only {'diff_type': 'type', 'path': ('openrosa_headers', 'HTTP_X_OPENROSA_VERSION')}, {'path': ('problem',), 'old_value': Ellipsis, 'new_value': None}, {'path': ('problem',), 'old_value': '', 'new_value': None}, {'path': ('orig_id',), 'old_value': Ellipsis, 'new_value': None}, {'path': ('edited_on',), 'old_value': Ellipsis, 'new_value': None}, {'path': ('repeats',), 'new_value': Ellipsis}, # report records save in form {'path': ('form_migrated_from_undefined_xmlns',), 'new_value': Ellipsis}, {'diff_type': 'missing', 'old_value': None, 'new_value': Ellipsis}, ], 'XFormInstance': [], 'XFormInstance-Deleted': [], 'HQSubmission': [], 'XFormArchived': [], 'XFormError': [], 'XFormDuplicate': [], 'XFormDeprecated': [], 'CommCareCase*': [ {'path': ('_rev',)}, # couch only {'path': ('initial_processing_complete',)}, # couch only {'path': ('actions', '[*]')}, # ignore case actions {'path': ('id',)}, # SQL only {'path': ('@xmlns',)}, # legacy {'path': ('_attachments',)}, # couch only {'path': ('external_blobs',)}, # couch only {'path': ('#export_tag',)}, # couch only {'path': ('computed_',)}, # couch only {'path': ('version',)}, # couch only {'path': ('deleted',)}, # SQL only {'path': ('export_tag',)}, # couch only {'path': ('computed_modified_on_',)}, # couch only {'path': ('case_id',)}, # legacy {'path': ('@case_id',)}, # legacy {'path': ('case_json',)}, # SQL only {'path': ('modified_by',)}, # SQL only # legacy bug left cases with no owner_id {'diff_type': 'diff', 'path': ('owner_id',), 'old_value': ''}, {'diff_type': 'type', 'path': ('owner_id',), 'old_value': None}, {'diff_type': 'type', 'path': ('user_id',), 'old_value': None}, {'diff_type': 'type', 'path': ('opened_on',), 'old_value': None}, {'diff_type': 'type', 'path': ('opened_by',), 'old_value': Ellipsis}, # form has case block with no actions {'diff_type': 'set_mismatch', 'path': ('xform_ids', '[*]'), 'old_value': ''}, {'diff_type': 'missing', 'path': ('case_attachments',), 'old_value': Ellipsis, 'new_value': {}}, {'diff_type': 'missing', 'old_value': None, 'new_value': Ellipsis}, ], 'CommCareCase': [ # couch case was deleted and then restored - SQL case won't have deletion properties {'diff_type': 'missing', 'path': ('-deletion_id',), 'new_value': Ellipsis}, {'diff_type': 'missing', 'path': ('-deletion_date',), 'new_value': Ellipsis}, ], 'CommCareCase-Deleted': [ {'diff_type': 'missing', 'path': ('-deletion_id',), 'old_value': Ellipsis, 'new_value': None}, { 'diff_type': 'complex', 'path': ('-deletion_id', 'deletion_id'), 'old_value': Ellipsis, 'new_value': None }, {'diff_type': 'missing', 'path': ('-deletion_date',), 'old_value': Ellipsis, 'new_value': None}, ], 'CommCareCaseIndex': [ # SQL JSON has case_id field in indices which couch JSON doesn't {'path': ('indices', '[*]', 'case_id')}, # SQL indices don't have doc_type { 'diff_type': 'missing', 'path': ('indices', 
'[*]', 'doc_type'), 'old_value': 'CommCareCaseIndex', 'new_value': Ellipsis }, # defaulted on SQL { 'diff_type': 'missing', 'path': ('indices', '[*]', 'relationship'), 'old_value': Ellipsis, 'new_value': 'child' }, ], 'LedgerValue': [ {'path': ('_id',)}, # couch only ], 'case_attachment': [ {'path': ('doc_type',)}, # couch only {'path': ('attachment_properties',)}, # couch only {'path': ('attachment_from',)}, # couch only {'path': ('attachment_src',)}, # couch only {'path': ('content_type',)}, # couch only {'path': ('server_mime',)}, # couch only {'path': ('attachment_name',)}, # couch only {'path': ('server_md5',)}, # couch only ] } FORM_IGNORED_DIFFS = ( FormJsonDiff( diff_type='missing', path=('history', '[*]', 'doc_type'), old_value='XFormOperation', new_value=Ellipsis ), FormJsonDiff( diff_type='diff', path=('doc_type',), old_value='HQSubmission', new_value='XFormInstance' ), FormJsonDiff(diff_type='missing', path=('deleted_on',), old_value=Ellipsis, new_value=None), FormJsonDiff(diff_type='missing', path=('location_',), old_value=[], new_value=Ellipsis), FormJsonDiff(diff_type='missing', path=('form', 'case', '#text'), old_value='', new_value=Ellipsis), FormJsonDiff(diff_type='type', path=('xmlns',), old_value=None, new_value=''), FormJsonDiff(diff_type='type', path=('initial_processing_complete',), old_value=None, new_value=True), FormJsonDiff(diff_type='missing', path=('backend_id',), old_value=Ellipsis, new_value='sql'), ) CASE_IGNORED_DIFFS = ( FormJsonDiff(diff_type='type', path=('name',), old_value='', new_value=None), FormJsonDiff(diff_type='type', path=('closed_by',), old_value='', new_value=None), FormJsonDiff(diff_type='missing', path=('location_id',), old_value=Ellipsis, new_value=None), FormJsonDiff(diff_type='missing', path=('referrals',), old_value=[], new_value=Ellipsis), FormJsonDiff(diff_type='missing', path=('location_',), old_value=[], new_value=Ellipsis), FormJsonDiff(diff_type='type', path=('type',), old_value=None, new_value=''), # this happens for cases where the creation form has been archived but the case still has other forms FormJsonDiff(diff_type='type', path=('owner_id',), old_value=None, new_value=''), FormJsonDiff(diff_type='missing', path=('closed_by',), old_value=Ellipsis, new_value=None), FormJsonDiff(diff_type='type', path=('external_id',), old_value='', new_value=None), FormJsonDiff(diff_type='missing', path=('deleted_on',), old_value=Ellipsis, new_value=None), FormJsonDiff(diff_type='missing', path=('backend_id',), old_value=Ellipsis, new_value='sql'), ) RENAMED_FIELDS = { 'XFormInstance': [('uid', 'instanceID')], 'XFormDeprecated': [('deprecated_date', 'edited_on')], 'XFormInstance-Deleted': [('-deletion_id', 'deletion_id'), ('-deletion_date', 'deleted_on')], 'CommCareCase': [('@user_id', 'user_id'), ('@date_modified', 'modified_on')], 'CommCareCase-Deleted': [('-deletion_id', 'deletion_id'), ('-deletion_date', 'deleted_on')], 'case_attachment': [('attachment_size', 'content_length'), ('identifier', 'name')], } def filter_form_diffs(couch_form, sql_form, diffs): doc_type = couch_form['doc_type'] filtered = _filter_exact_matches(diffs, FORM_IGNORED_DIFFS) partial_diffs = PARTIAL_DIFFS[doc_type] + PARTIAL_DIFFS['XFormInstance*'] filtered = _filter_partial_matches(filtered, partial_diffs) filtered = _filter_text_xmlns(filtered) filtered = _filter_date_diffs(filtered) filtered = _filter_renamed_fields(filtered, couch_form, sql_form) return filtered def _filter_text_xmlns(diffs): return [ diff for diff in diffs if not (diff.path[-1] in ('#text', 
'@xmlns') and diff.old_value in ('', Ellipsis)) ] def filter_case_diffs(couch_case, sql_case, diffs, forms_that_touch_cases_without_actions=None): doc_type = couch_case['doc_type'] filtered_diffs = _filter_exact_matches(diffs, CASE_IGNORED_DIFFS) partial_filters = PARTIAL_DIFFS[doc_type] + PARTIAL_DIFFS['CommCareCase*'] + PARTIAL_DIFFS['CommCareCaseIndex'] filtered_diffs = _filter_partial_matches(filtered_diffs, partial_filters) filtered_diffs = _filter_date_diffs(filtered_diffs) filtered_diffs = _filter_user_case_diffs(couch_case, sql_case, filtered_diffs) filtered_diffs = _filter_xform_id_diffs(couch_case, sql_case, filtered_diffs) filtered_diffs = _filter_case_attachment_diffs(couch_case, sql_case, filtered_diffs) filtered_diffs = _filter_case_index_diffs(couch_case, sql_case, filtered_diffs) filtered_diffs = _filter_renamed_fields(filtered_diffs, couch_case, sql_case) filtered_diffs = _filter_forms_touch_case(filtered_diffs, forms_that_touch_cases_without_actions) return filtered_diffs def _filter_forms_touch_case(diffs, forms_that_touch_cases_without_actions): """Legacy bug in case processing would not add the form ID to the list of xform_ids for the case if the case block had no actions""" if not forms_that_touch_cases_without_actions: return diffs form_id_diffs = [ diff for diff in diffs if diff.diff_type == 'set_mismatch' and diff.path[0] == ('xform_ids') ] if not len(form_id_diffs): return diffs for diff in form_id_diffs: diffs.remove(diff) form_ids = diff.new_value.split(',') diff_ids = [form_id for form_id in form_ids if form_id not in forms_that_touch_cases_without_actions] if diff_ids: diff_dict = diff._asdict() diff_dict['new_value'] = ','.join(diff_ids) diffs.append(FormJsonDiff(**diff_dict)) return diffs def filter_ledger_diffs(diffs): return _filter_partial_matches(diffs, PARTIAL_DIFFS['LedgerValue']) def _filter_exact_matches(diffs, diffs_to_ignore): filtered = [] for diff in diffs: try: if diff not in diffs_to_ignore: filtered.append(diff) except TypeError: # not all diffs support hashing, do slow comparison diff_dict = diff._asdict() if not any(diff_dict == ignore._asdict() for ignore in diffs_to_ignore): filtered.append(diff) return filtered def _filter_partial_matches(diffs, partial_diffs_to_exclude): """Filter out diffs that match a subset of attributes :type partial_diffs_to_exclude: dict([(attr, value)...]) """ def _partial_match(diff): for partial in partial_diffs_to_exclude: if all(getattr(diff, attr) == val for attr, val in partial.items()): return True return False return [ diff for diff in diffs if not _partial_match(diff) ] def _filter_renamed_fields(diffs, couch_doc, sql_doc, doc_type_override=None): doc_type = doc_type_override or couch_doc['doc_type'] if doc_type in RENAMED_FIELDS: renames = RENAMED_FIELDS[doc_type] for rename in renames: diffs = _check_renamed_fields(diffs, couch_doc, sql_doc, *rename) return diffs def _check_renamed_fields(filtered_diffs, couch_doc, sql_doc, couch_field_name, sql_field_name): from corehq.apps.tzmigration.timezonemigration import FormJsonDiff remaining_diffs = [ diff for diff in filtered_diffs if diff.path[0] != sql_field_name and diff.path[0] != couch_field_name ] if len(remaining_diffs) != len(filtered_diffs): sql_field = sql_doc.get(sql_field_name, Ellipsis) couch_field = couch_doc.get(couch_field_name, Ellipsis) if sql_field != couch_field \ and not _both_dates(couch_field, sql_field) \ and not (couch_field == Ellipsis and sql_field == ''): remaining_diffs.append(FormJsonDiff( diff_type='complex', 
path=(couch_field_name, sql_field_name), old_value=couch_field, new_value=sql_field )) return remaining_diffs def _both_dates(old, new): return is_datetime_string(old) and is_datetime_string(new) def _filter_date_diffs(diffs): return [ diff for diff in diffs if diff.diff_type != 'diff' or not _both_dates(diff.old_value, diff.new_value) ] def _filter_user_case_diffs(couch_case, sql_case, diffs): """SQL cases store the hq_user_id property in ``external_id`` for easier querying""" if 'hq_user_id' not in couch_case: return diffs filtered_diffs = [ diff for diff in diffs if diff.path[0] not in ('external_id', 'hq_user_id') ] hq_user_id_couch = couch_case['hq_user_id'] hq_user_id_sql = sql_case.get('external_id', Ellipsis) if hq_user_id_sql != hq_user_id_couch: filtered_diffs.append(FormJsonDiff( diff_type='complex', path=('hq_user_id', 'external_id'), old_value=hq_user_id_couch, new_value=hq_user_id_sql )) return filtered_diffs def _filter_xform_id_diffs(couch_case, sql_case, diffs): """Some couch docs have the xform ID's out of order so assume that if both docs contain the same set of xform IDs then they are the same""" remaining_diffs = [ diff for diff in diffs if diff.path != ('xform_ids', '[*]') ] if len(remaining_diffs) == len(diffs): return diffs ids_in_couch = set(couch_case['xform_ids']) ids_in_sql = set(sql_case['xform_ids']) if ids_in_couch ^ ids_in_sql: couch_only = ','.join(list(ids_in_couch - ids_in_sql)) sql_only = ','.join(list(ids_in_sql - ids_in_couch)) remaining_diffs.append( FormJsonDiff(diff_type='set_mismatch', path=('xform_ids', '[*]'), old_value=couch_only, new_value=sql_only) ) else: remaining_diffs.append( FormJsonDiff(diff_type='list_order', path=('xform_ids', '[*]'), old_value=None, new_value=None) ) return remaining_diffs def _filter_case_attachment_diffs(couch_case, sql_case, diffs): """Attachment JSON format is different between Couch and SQL""" remaining_diffs = [diff for diff in diffs if diff.path[0] != 'case_attachments'] if len(remaining_diffs) != len(diffs): couch_attachments = couch_case.get('case_attachments', {}) sql_attachments = sql_case.get('case_attachments', {}) for name, couch_att in couch_attachments.items(): sql_att = sql_attachments.get(name, Ellipsis) if sql_att == Ellipsis: remaining_diffs.append(FormJsonDiff( diff_type='missing', path=('case_attachments', name), old_value=couch_att, new_value=sql_att )) else: att_diffs = json_diff(couch_att, sql_att) filtered = _filter_partial_matches(att_diffs, PARTIAL_DIFFS['case_attachment']) filtered = _filter_renamed_fields(filtered, couch_att, sql_att, 'case_attachment') for diff in filtered: diff_dict = diff._asdict() # convert the path back to what it should be diff_dict['path'] = tuple(['case_attachments', name] + list(diff.path)) remaining_diffs.append(FormJsonDiff(**diff_dict)) return remaining_diffs def _filter_case_index_diffs(couch_case, sql_case, diffs): """Indices may be in different order - re-sort and compare again. """ if 'indices' not in couch_case: return diffs remaining_diffs = [diff for diff in diffs if diff.path[0] != 'indices'] if len(remaining_diffs) == len(diffs): return diffs couch_indices = couch_case['indices'] sql_indices = sql_case['indices'] if len(couch_indices)
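# _filter_partial_matches above drops any diff whose attributes equal every
# key/value pair of at least one entry in the partial-filter list. A tiny
# self-contained illustration using a namedtuple with the same four fields
# that FormJsonDiff is constructed with in this module; the Diff stand-in is
# only for the example, the real class lives in
# corehq.apps.tzmigration.timezonemigration.
from collections import namedtuple

Diff = namedtuple('Diff', 'diff_type path old_value new_value')

diffs = [
    Diff('missing', ('_rev',), 'abc', Ellipsis),   # couch-only field
    Diff('diff', ('owner_id',), '', 'user-1'),     # legacy empty owner_id
    Diff('diff', ('name',), 'old', 'new'),         # a genuine difference
]
ignore = [
    {'path': ('_rev',)},                                        # match on path only
    {'diff_type': 'diff', 'path': ('owner_id',), 'old_value': ''},
]

def partial_match(diff, partials):
    return any(all(getattr(diff, k) == v for k, v in p.items()) for p in partials)

kept = [d for d in diffs if not partial_match(d, ignore)]
assert kept == [Diff('diff', ('name',), 'old', 'new')]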
<reponame>mdkrol/dask-geomodeling """ Module containing raster blocks that combine rasters. """ import itertools from datetime import timedelta as Timedelta import numpy as np from dask_geomodeling.utils import get_dtype_max, get_index, GeoTransform from .base import RasterBlock __all__ = ["Group"] def filter_none(lst): return [x for x in lst if x is not None] class BaseCombine(RasterBlock): """ Base block that combines rasters into a larger one. The ancestor stores are kept in ``self.args``. Attributes are greedy: ``period`` is the union of the ancestor periods, and ``extent`` the union of the ancestor extents. The ``timedelta`` is propagated only if the ancestor stores have equal ``timedelta`` and if they are aligned. Rasters without data are ignored. """ def __init__(self, *args): for arg in args: if not isinstance(arg, RasterBlock): raise TypeError("'{}' object is not allowed".format(type(arg))) super(BaseCombine, self).__init__(*args) @staticmethod def get_aligned_timedelta(sources): """ Checks provided sources and returns the timedelta when all sources are aligned. Stores without data are ignored. """ timedeltas = [] periods = [] for arg in sources: timedelta, period = arg.timedelta, arg.period if period is not None and timedelta is not None: timedeltas.append(timedelta) periods.append(period) if len(timedeltas) == 0: return None elif len(timedeltas) == 1: return timedeltas[0] # multiple timedeltas: return None if not equal if not timedeltas[1:] == timedeltas[:-1]: return None else: # the periods must be spaced an integer times timedelta apart timedelta_sec = timedeltas[0].total_seconds() first, _ = periods[0] for a, _ in periods[1:]: if (first - a).total_seconds() % timedelta_sec != 0: return None return timedeltas[0] @property def timedelta(self): """ The period between timesteps in case of equidistant time. """ return self.get_aligned_timedelta(self.args) @property def period(self): """ Return the combined period datetime tuple. """ periods = filter_none([x.period for x in self.args]) if len(periods) == 0: return None elif len(periods) == 1: return periods[0] # multiple periods: return the joined period return min([p[0] for p in periods]), max([p[1] for p in periods]) @property def extent(self): """ Boundingbox of combined contents in WGS84 projection. """ extents = filter_none([x.extent for x in self.args]) if len(extents) == 0: return None elif len(extents) == 1: return extents[0] # multiple extents: return the joined box x1 = min([e[0] for e in extents]) y1 = min([e[1] for e in extents]) x2 = max([e[2] for e in extents]) y2 = max([e[3] for e in extents]) return x1, y1, x2, y2 @property def dtype(self): return np.result_type(*self.args) @property def fillvalue(self): return get_dtype_max(self.dtype) @property def geometry(self): """Combined geometry in the projection of the first store geometry. 
""" geometries = filter_none([x.geometry for x in self.args]) if len(geometries) == 0: return elif len(geometries) == 1: return geometries[0] result = geometries[0] sr = result.GetSpatialReference() for geometry in geometries[1:]: if not geometry.GetSpatialReference().IsSame(sr): geometry = geometry.Clone() geometry.TransformTo(sr) result = result.Union(geometry) return result @property def projection(self): """Projection of the data if they match, else None""" projection = self.args[0].projection if projection is None: return for arg in self.args[1:]: if projection != arg.projection: return return projection @property def geo_transform(self): geo_transform = self.args[0].geo_transform if geo_transform is None: return geo_transform = GeoTransform(geo_transform) for arg in self.args[1:]: other = arg.geo_transform if other is None or not geo_transform.aligns_with(other): return return geo_transform class Group(BaseCombine): """ Combine multiple rasters into a single one. :param args: multiple RasterBlocks to be combined. :type args: list of RasterBlock Values at equal timesteps in the contributing rasters are pasted left to right. Therefore values from rasters that are more 'to the left' are shadowed by values from rasters more 'to the right'. However, 'no data' values are transparent and do not shadow underlying data values. In the case of aligned equidistant time characteristics, the procedure will use slicing in the processing of the result for optimum performance. """ def get_stores(self, start, stop): """ Return all relevant stores for given start and stop. """ # check stores and select those stores that contain data stores = [s for s in self.args if s.period is not None] # there could be no store with data at all if not stores: return stores # convenience starts, stops = zip(*(s.period for s in stores)) # pick latest store(s) in time if start is None: last = max(stops) return [s for b, s in zip(stops, stores) if b == last] if stop is None: # return any stores that contain start zipped = zip(starts, stops, stores) result = [s for a, b, s in zipped if a <= start <= b] if result: return result # no store contained start, return closest stores closest = min(starts + stops, key=lambda d: abs(d - start)) zipped = zip(stops + starts, stores + stores) return [s for d, s in zipped if d == closest] # start and stop given, return all relevant stores zipped = zip(starts, stops, stores) return [s for a, b, s in zipped if not (stop < a or start > b)] def get_sources_and_requests(self, **request): start = request.get("start", None) stop = request.get("stop", None) mode = request["mode"] sources = self.get_stores(start, stop) # just pass on the source and request if we only have one (or none) if len(sources) <= 1: requests = [(s, request) for s in sources] return [(dict(combine_mode="simple"), None)] + requests # plan for merging timedelta = self.get_aligned_timedelta(sources) mixed_time = timedelta is None or start is None or stop is None if mixed_time: # merge by time requests = [] time_requests = [] for source in sources: # add the stores and requests: requests.append((source, request)) # in case we need the time information, add time requests if mode != "time": time_request = dict(mode="time", start=start, stop=stop) time_requests.append((source, time_request)) process_kwargs = dict( combine_mode="by_time", mode=mode, start=start, stop=stop ) # note that time_requests is empty if mode is 'time' requests = requests + time_requests else: # merge by bands td_sec = timedelta.total_seconds() period = 
self.period origin = sources[0].period[0] # any will do; they are aligned if start <= period[0]: start = period[0] else: # ceil start to the closest integer timedelta start_delta = (origin - start).total_seconds() % td_sec start += Timedelta(seconds=start_delta) if stop >= period[1]: stop = period[1] else: # floor stop to the closest integer timedelta stop_delta = (stop - origin).total_seconds() % td_sec stop -= Timedelta(seconds=stop_delta) if mode == "time": return [ ( dict( combine_mode="by_bands", mode=mode, start=start, stop=stop, timedelta=timedelta, ), None, ) ] requests = [] bands = [] for source in sources: # compute 'bands': the index ranges into the result array this_start = max(start, source.period[0]) this_stop = min(stop, source.period[1]) first_i = int((this_start - start).total_seconds() // td_sec) last_i = int((this_stop - start).total_seconds() // td_sec) bands.append((first_i, last_i + 1)) this_request = request.copy() this_request.update(start=this_start, stop=this_stop) requests.append((source, this_request)) process_kwargs = dict(combine_mode="by_bands", mode=mode, bands=bands) # in case of a 'vals' request, keep track of the dtype if mode == "vals": process_kwargs["dtype"] = self.dtype return [(process_kwargs, None)] + requests @staticmethod def _unique_times(multi): times = filter_none([data.get("time", None) for data in multi]) return sorted(set(itertools.chain(*times))) @staticmethod def _nearest_index(time, start): if start is None: # last band return len(time) - 1 else: # nearest band return min(enumerate(time), key=lambda d: abs(d[1] - start))[0] @staticmethod def _merge_vals_by_time(multi, times, kwargs): """ Merge chunks using indices. """ # determine the unique times and assign result bands sorted_times = Group._unique_times(times) bands = dict((y, x) for x, y in enumerate(sorted_times)) fillvalue = get_dtype_max(kwargs["dtype"]) # initialize values array shape = (len(sorted_times),) + multi[0]["values"].shape[1:] values = np.full(shape, fillvalue, dtype=kwargs["dtype"]) # populate values array for data, time in zip(multi, times): # source_index is the index into the source array for source_index, datetime in enumerate(time["time"]): source_band = data["values"][source_index] # determine data index index = get_index( values=source_band, no_data_value=data["no_data_value"] ) # find corresponding target band by source datetime target_band = values[bands[datetime]] # paste source into target provided there is data target_band[index] = source_band[index] # check if single band result required start, stop = kwargs["start"], kwargs["stop"] if stop is None and len(sorted_times) > 1: index = Group._nearest_index(sorted_times, start) values = values[index : index + 1] return {"values": values, "no_data_value": fillvalue} @staticmethod def _merge_meta_by_time(multi, times, kwargs): # determine the unique times and assign result bands sorted_times
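# When all sources share an aligned, equidistant timedelta, the "by_bands"
# branch above maps each source's clipped period onto integer band indices in
# the merged result: band i covers start + i * timedelta. A minimal standalone
# sketch of that index computation (the helper name band_range and the example
# datetimes are illustrative only):
from datetime import datetime, timedelta

def band_range(request_start, source_start, source_stop, step):
    td_sec = step.total_seconds()
    first = int((source_start - request_start).total_seconds() // td_sec)
    last = int((source_stop - request_start).total_seconds() // td_sec)
    return first, last + 1      # half-open index range into the result bands

start = datetime(2020, 1, 1, 0)
step = timedelta(hours=1)
# a source covering 03:00 .. 05:00 fills result bands 3, 4 and 5
assert band_range(start, datetime(2020, 1, 1, 3), datetime(2020, 1, 1, 5), step) == (3, 6)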
import struct from .base import STATUSCODES from .state import State from .utils import parse_number from .base import connection_timeout class BaseCommand: """ Represents one command. A command needs a prefix and `command` (meaning the second byte to identify the command). The `command_dict` is the configuration object from the json describing hwo the command behaves. `main` is for printing responses. The interesting function is `get_command_bytes`, which, given some verbose arguments, creates the actual bytes to send to the server. `get_timeout` specifies the timeout needed for this command. Subclasses can overwrite this, if they know, that the command may take some time. `format_for_help` returnes a printable representation of the command giving a help text and explainations for the arguments. """ def __init__(self, prefix, command, command_dict, main): self.main = main # Parse the prefix and command to numbers. prefix = parse_number(prefix) command = parse_number(command) # Make them the first two bytes. self.command_bytes = self.value_to_bytes(prefix, 'u8') self.command_bytes += self.value_to_bytes(command, 'u8') self.command_name = command_dict['command'] # get the arguments for the command. self.args = command_dict.get('args', []) def parse_args(self, args): """ Parses the args given and return the corresponding bytes. The arguments are splitted via `.split()`, meaning, that they are divided by ay whitespace (refer to the python-docs for more information). Every argument is tried to match to the argument description in `self.args`. If the argumetn fits the description and could be parsed, the resulting bytes for the command to send is build. All in all, all bytes from all arguments are concatinated and returned. """ args = [a.strip() for a in args.split()] # Check, if the right mount of arguments are given. if len(args) != len(self.args): raise ValueError('Expected {} arguments, {} given.\n{}'.format( len(self.args), len(args), self.format_for_help())) # Build up the arugment bytes argument_bytes = b'' # iterate over all args. `i` is the index, `arg` the decription and `arg_input` the string from the user. for i, (arg, arg_input) in enumerate(zip(self.args, args)): # Some information about the arugment. arg_value_limit, arg_type = self.get_value_limit_and_type(arg) # check for different argument types: range, in, ... if arg_value_limit == 'range': # We must have a number. Parse it and check, if it lays in the specified range. value = parse_number(arg_input) if value < arg['range']['from'] or value > arg['range']['to']: raise ValueError('Argument {} (value {}) is out of range.\n{}'.format( i+1, value, self.format_for_help())) elif arg_value_limit == 'in': # The argument must be one of `arg['in']` if arg_input not in arg['in']: raise ValueError('Argument {} (value {}) is not in the given set.\n{}'.format( i+1, arg_input, self.format_for_help())) value = arg['in'][arg_input] elif arg_value_limit == 'number': # A simple number. Parse the value. value = parse_number(arg_input) # `value` is a number. Check for overflows, by the argument type. signed, bits = self.get_signed_and_bits(arg_type) if ((signed and (value < -pow(2, bits-1) or value > pow(2, bits-1)+1)) or not signed and (value < 0 or value > pow(2, bits)-1)): raise ValueError('Argument {} (value {}) is too large (or small) for the range of {}.\n{}'.format( i+1, value, arg_type, self.format_for_help())) # Convert the given munber to bytes respecting the argument type. 
argument_bytes += self.value_to_bytes(value, arg_type) return argument_bytes def format_for_help(self): """ Returns a printable representation of this command """ if len(self.args) == 0: return 'Usage: {}'.format(self.command_name) # Build up strings for each argument. args_formatted = [] for arg in self.args: arg_value_limit_str = '' # For some limits of the argument arg_value_limit, arg_type = self.get_value_limit_and_type(arg) # Range: we have limits. Format them. if arg_value_limit == 'range': arg_value_limit_str = 'Range from {} to {} (all incl.)'.format( arg['range']['from'], arg['range']['to']) # In: The "limits" are all values, the argument can be. elif arg_value_limit == 'in': arg_value_limit_str = 'Value in {' + ', '.join(arg['in'].keys()) + '}' # For a number, we need to observe the argument type. Either the number is signed or not, # and giving the amount of bytes. elif arg_value_limit == 'number': signed, bits = self.get_signed_and_bits(arg_type) if signed: arg_value_limit_str = 'signed number with size of {} byte'.format(int(bits/8)) else: arg_value_limit_str = 'unsigned number with size of {} byte'.format(int(bits/8)) # Put everything together and add ahelp text. args_formatted.append(' <{}: {}>'.format(arg.get('help', 'not documented'), arg_value_limit_str)) return 'Usage: {}\n{}'.format(self.command_name, '\n'.join(args_formatted)) def get_signed_and_bits(self, type): """ Returns the amout of bits and the signess of the argument type """ signed = (type[0] == 's') bits = int(type[1:]) return (signed, bits) def get_value_limit_and_type(self, arg): """ Returns a tuple of the type (range, in, number, ...) and size in bits: """ limits = ['range', 'in'] keys = arg.keys() for limit in limits: if limit in keys: return (limit, arg.get('type', 'u8')) return ('number', arg.get('type', 'u8')) def value_to_bytes(self, val, type): """ Converts the given `val` (type number) to bytes. The type is respected. """ # https://docs.python.org/3/library/struct.html mapping = { 's8': 'b', 'u8': 'B', 's16': 'h', 'u16': 'H', 's32': 'i', 'u32': 'I', 's64': 'q', 'u64': 'Q' } if type not in mapping: raise NotImplementedError('The type {} is not known.'.format(type)) return struct.pack('<' + mapping[type], val) def get_command_bytes(self, args): """ Returnes the byte needed to send to the server given `args` """ return self.command_bytes + self.parse_args(args) def get_timeout(self): """ Returnes the timeout. Default: connection_timeout specified in the settings.py """ return connection_timeout class BashCommand(BaseCommand): """ A special type of command, that does not interact with the server. Let every non-server command inherit from this base class. """ def __init__(self, command, main): # Some dummy values. No arguments. Ties does not mean, that this command can take arguments, # but they are not validated with `parse_args`. super().__init__(0, 0, {'command': command}, main) def get_command_bytes(self, *args, **kwargs): raise NotImplementedError('This is not allowed for a dummy command.') def handle(self, args): raise NotImplementedError('A child class should implement handle!') class QuitCommand(BashCommand): """ Dummy. Does nothing. """ def handle(self, args): pass class ListCommand(BashCommand): """ Lists all available commands. """ def handle(self, args): command_names = [] for command in self.main.command_manager.get_all_commands(): command_names.append(' ' + command.command_name) self.main.ui.print('All available commands. 
Type help <command> for more info.\n{}'.format( '\n'.join(command_names))) class HelpCommand(BashCommand): """ Prints help. If no argument is given, the command will print general help instructions. If some arguments are specified, the command will print help for the specified command, if found. """ def handle(self, args): if len(args) == 0: self.main.ui.print( 'Help\n- Type q or quit to exit\n' + '- type list to list all available commands\n' + '- use help <command> to see the command structure') else: command = self.main.command_manager.get_command(args) if command is None: self.main.ui.print('Help: Command "{}" not found.'.format(args)) else: self.main.ui.print(command.format_for_help()) class RemoteCommand(BaseCommand): """ Handles all commands, that have to deal with the server. This is the class, a command will be instantiated with, if there is no special command class. The response statuscode will be checked, and any error will be printed to the ui. """ def print_error(self, status): """ Prints an error message to the ui with the given status code. """ self.main.ui.print('Error: {}'.format(STATUSCODES.get( status, 'Unknown status code {}'.format(status)))) def handle_response(self, response): # get statuscode status = response[0] if status != 0: self.print_error(status) else: self.main.ui.print('OK') class MeasurementCreateCommand(RemoteCommand): """ Retrievs the create measurement id. """ def handle_response(self, response): # get status: status, = struct.unpack('<B', response[0:1]) if status != 0: self.print_error(status) else: id, = struct.unpack('<B', response[1:2]) self.main.ui.print('Created measurement with id {}.'.format(id)) class AdcUpdateStateCommand(RemoteCommand): """ Retrievs the current status and prints it to the ui. """ def handle_response(self, response): # get status: status, = struct.unpack('<B', response[0:1]) if status != 0: self.print_error(status) else: state = State(response[1:]) self.main.update_state(state) self.main.ui.print(str(state)) class Base4BytesInReturnCommand(RemoteCommand): """ This base class accepts next to the status byte 4 additional bytes. `handle_data` is called, so a inheriting class can take care about the interpretation of the 4 bytes. """ def handle_response(self, response): # get status: status, = struct.unpack('<B', response[0:1]) if status != 0: self.print_error(status) return if len(response) < 5: self.ui.print("The server didn't returned enough data") return self.handle_data(response[1:5]) def handle_data(self, data): raise NotImplementedError('A child needs to implement this function') class MeasurementOneshotCommand(Base4BytesInReturnCommand): """ Does a oneshot measurement. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.timeout = connection_timeout def get_command_bytes(self, args): """ Override to get the measurement id. Then use it to get the
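# get_command_bytes above concatenates the packed prefix/command header with
# one struct.pack('<' + code) result per parsed argument. A short standalone
# sketch of that little-endian wire format for a u8 prefix, u8 command and a
# single u16 argument (the numeric values are made up for the example):
import struct

def pack_value(value, type_code):
    mapping = {'s8': 'b', 'u8': 'B', 's16': 'h', 'u16': 'H',
               's32': 'i', 'u32': 'I', 's64': 'q', 'u64': 'Q'}
    return struct.pack('<' + mapping[type_code], value)

command_bytes = pack_value(0x01, 'u8') + pack_value(0x10, 'u8')   # header
command_bytes += pack_value(500, 'u16')                            # one argument
assert command_bytes == b'\x01\x10\xf4\x01'                        # little-endian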
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities from . import outputs from ._inputs import * __all__ = ['ActionArgs', 'Action'] @pulumi.input_type class ActionArgs: def __init__(__self__, *, code: pulumi.Input[str], supported_triggers: pulumi.Input['ActionSupportedTriggersArgs'], dependencies: Optional[pulumi.Input[Sequence[pulumi.Input['ActionDependencyArgs']]]] = None, deploy: Optional[pulumi.Input[bool]] = None, name: Optional[pulumi.Input[str]] = None, runtime: Optional[pulumi.Input[str]] = None, secrets: Optional[pulumi.Input[Sequence[pulumi.Input['ActionSecretArgs']]]] = None): """ The set of arguments for constructing a Action resource. :param pulumi.Input[str] code: The source code of the action. :param pulumi.Input['ActionSupportedTriggersArgs'] supported_triggers: List of triggers that this action supports. At this time, an action can only target a single trigger at a time :param pulumi.Input[Sequence[pulumi.Input['ActionDependencyArgs']]] dependencies: List of third party npm modules, and their versions, that this action depends on :param pulumi.Input[bool] deploy: Deploying an action will create a new immutable version of the action. If the action is currently bound to a trigger, then the system will begin executing the newly deployed version of the action immediately. Default is `false`. :param pulumi.Input[str] name: Secret name. :param pulumi.Input[str] runtime: The Node runtime. For example `node16`, defaults to `node12` :param pulumi.Input[Sequence[pulumi.Input['ActionSecretArgs']]] secrets: List of secrets that are included in an action or a version of an action """ pulumi.set(__self__, "code", code) pulumi.set(__self__, "supported_triggers", supported_triggers) if dependencies is not None: pulumi.set(__self__, "dependencies", dependencies) if deploy is not None: pulumi.set(__self__, "deploy", deploy) if name is not None: pulumi.set(__self__, "name", name) if runtime is not None: pulumi.set(__self__, "runtime", runtime) if secrets is not None: pulumi.set(__self__, "secrets", secrets) @property @pulumi.getter def code(self) -> pulumi.Input[str]: """ The source code of the action. """ return pulumi.get(self, "code") @code.setter def code(self, value: pulumi.Input[str]): pulumi.set(self, "code", value) @property @pulumi.getter(name="supportedTriggers") def supported_triggers(self) -> pulumi.Input['ActionSupportedTriggersArgs']: """ List of triggers that this action supports. At this time, an action can only target a single trigger at a time """ return pulumi.get(self, "supported_triggers") @supported_triggers.setter def supported_triggers(self, value: pulumi.Input['ActionSupportedTriggersArgs']): pulumi.set(self, "supported_triggers", value) @property @pulumi.getter def dependencies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ActionDependencyArgs']]]]: """ List of third party npm modules, and their versions, that this action depends on """ return pulumi.get(self, "dependencies") @dependencies.setter def dependencies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ActionDependencyArgs']]]]): pulumi.set(self, "dependencies", value) @property @pulumi.getter def deploy(self) -> Optional[pulumi.Input[bool]]: """ Deploying an action will create a new immutable version of the action. 
If the action is currently bound to a trigger, then the system will begin executing the newly deployed version of the action immediately. Default is `false`. """ return pulumi.get(self, "deploy") @deploy.setter def deploy(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "deploy", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Secret name. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def runtime(self) -> Optional[pulumi.Input[str]]: """ The Node runtime. For example `node16`, defaults to `node12` """ return pulumi.get(self, "runtime") @runtime.setter def runtime(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "runtime", value) @property @pulumi.getter def secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ActionSecretArgs']]]]: """ List of secrets that are included in an action or a version of an action """ return pulumi.get(self, "secrets") @secrets.setter def secrets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ActionSecretArgs']]]]): pulumi.set(self, "secrets", value) @pulumi.input_type class _ActionState: def __init__(__self__, *, code: Optional[pulumi.Input[str]] = None, dependencies: Optional[pulumi.Input[Sequence[pulumi.Input['ActionDependencyArgs']]]] = None, deploy: Optional[pulumi.Input[bool]] = None, name: Optional[pulumi.Input[str]] = None, runtime: Optional[pulumi.Input[str]] = None, secrets: Optional[pulumi.Input[Sequence[pulumi.Input['ActionSecretArgs']]]] = None, supported_triggers: Optional[pulumi.Input['ActionSupportedTriggersArgs']] = None, version_id: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering Action resources. :param pulumi.Input[str] code: The source code of the action. :param pulumi.Input[Sequence[pulumi.Input['ActionDependencyArgs']]] dependencies: List of third party npm modules, and their versions, that this action depends on :param pulumi.Input[bool] deploy: Deploying an action will create a new immutable version of the action. If the action is currently bound to a trigger, then the system will begin executing the newly deployed version of the action immediately. Default is `false`. :param pulumi.Input[str] name: Secret name. :param pulumi.Input[str] runtime: The Node runtime. For example `node16`, defaults to `node12` :param pulumi.Input[Sequence[pulumi.Input['ActionSecretArgs']]] secrets: List of secrets that are included in an action or a version of an action :param pulumi.Input['ActionSupportedTriggersArgs'] supported_triggers: List of triggers that this action supports. At this time, an action can only target a single trigger at a time :param pulumi.Input[str] version_id: Version ID of the action. This value is available if `deploy` is set to true """ if code is not None: pulumi.set(__self__, "code", code) if dependencies is not None: pulumi.set(__self__, "dependencies", dependencies) if deploy is not None: pulumi.set(__self__, "deploy", deploy) if name is not None: pulumi.set(__self__, "name", name) if runtime is not None: pulumi.set(__self__, "runtime", runtime) if secrets is not None: pulumi.set(__self__, "secrets", secrets) if supported_triggers is not None: pulumi.set(__self__, "supported_triggers", supported_triggers) if version_id is not None: pulumi.set(__self__, "version_id", version_id) @property @pulumi.getter def code(self) -> Optional[pulumi.Input[str]]: """ The source code of the action. 
""" return pulumi.get(self, "code") @code.setter def code(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "code", value) @property @pulumi.getter def dependencies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ActionDependencyArgs']]]]: """ List of third party npm modules, and their versions, that this action depends on """ return pulumi.get(self, "dependencies") @dependencies.setter def dependencies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ActionDependencyArgs']]]]): pulumi.set(self, "dependencies", value) @property @pulumi.getter def deploy(self) -> Optional[pulumi.Input[bool]]: """ Deploying an action will create a new immutable version of the action. If the action is currently bound to a trigger, then the system will begin executing the newly deployed version of the action immediately. Default is `false`. """ return pulumi.get(self, "deploy") @deploy.setter def deploy(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "deploy", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Secret name. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def runtime(self) -> Optional[pulumi.Input[str]]: """ The Node runtime. For example `node16`, defaults to `node12` """ return pulumi.get(self, "runtime") @runtime.setter def runtime(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "runtime", value) @property @pulumi.getter def secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ActionSecretArgs']]]]: """ List of secrets that are included in an action or a version of an action """ return pulumi.get(self, "secrets") @secrets.setter def secrets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ActionSecretArgs']]]]): pulumi.set(self, "secrets", value) @property @pulumi.getter(name="supportedTriggers") def supported_triggers(self) -> Optional[pulumi.Input['ActionSupportedTriggersArgs']]: """ List of triggers that this action supports. At this time, an action can only target a single trigger at a time """ return pulumi.get(self, "supported_triggers") @supported_triggers.setter def supported_triggers(self, value: Optional[pulumi.Input['ActionSupportedTriggersArgs']]): pulumi.set(self, "supported_triggers", value) @property @pulumi.getter(name="versionId") def version_id(self) -> Optional[pulumi.Input[str]]: """ Version ID of the action. This value is available if `deploy` is set to true """ return pulumi.get(self, "version_id") @version_id.setter def version_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "version_id", value) class Action(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, code: Optional[pulumi.Input[str]] = None, dependencies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActionDependencyArgs']]]]] = None, deploy: Optional[pulumi.Input[bool]] = None, name: Optional[pulumi.Input[str]] = None, runtime: Optional[pulumi.Input[str]] = None, secrets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActionSecretArgs']]]]] = None, supported_triggers: Optional[pulumi.Input[pulumi.InputType['ActionSupportedTriggersArgs']]] = None, __props__=None): """ Actions are secure, tenant-specific, versioned functions written in Node.js that execute at certain points during the Auth0 runtime. Actions are used to customize and extend Auth0's capabilities with custom logic. 
## Example Usage ```python import pulumi import pulumi_auth0 as auth0 my_action = auth0.Action("myAction", code=\"\"\"/** * Handler that will be called during the execution of a PostLogin flow. * * @param {Event} event - Details about the user and the context in which they are logging in. * @param {PostLoginAPI} api - Interface whose methods can be used to change the behavior of the login. */ exports.onExecutePostLogin = async (event, api) => { console.log(event) }; \"\"\", deploy=True, supported_triggers=auth0.ActionSupportedTriggersArgs( id="post-login", version="v2", )) ``` ## Import An action can be imported using the action's ID, e.g. ```sh $ pulumi import auth0:index/action:Action example ... ``` ~> For security reasons importing `secrets` is not allowed. Therefore it is advised to import the action without secrets and adding them back after the action has been imported. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] code: The source code of the action. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActionDependencyArgs']]]] dependencies: List of third party npm modules, and their versions, that this action depends on :param pulumi.Input[bool] deploy: Deploying an action
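# --- A minimal usage sketch building on the ActionArgs fields defined above
# (dependencies, secrets, runtime, deploy, supported_triggers). The field names of
# ActionDependencyArgs (name/version) and ActionSecretArgs (name/value) are assumed
# from the provider schema; the resource name and secret values are placeholders.
import pulumi
import pulumi_auth0 as auth0

login_alert = auth0.Action(
    "loginAlert",
    code="exports.onExecutePostLogin = async (event, api) => {};",
    runtime="node16",
    deploy=True,  # creates a new immutable version and starts executing it
    dependencies=[auth0.ActionDependencyArgs(name="lodash", version="4.17.21")],
    secrets=[auth0.ActionSecretArgs(name="MY_API_KEY", value="example-value")],
    supported_triggers=auth0.ActionSupportedTriggersArgs(id="post-login", version="v2"),
)

# version_id is only populated when deploy is True (see the property above).
pulumi.export("loginAlertVersion", login_alert.version_id)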
<filename>hddcoin/cmds/wallet_funcs.py<gh_stars>1-10 import asyncio import sys import time from datetime import datetime from decimal import Decimal from typing import Callable, List, Optional, Tuple, Dict import aiohttp from hddcoin.cmds.units import units from hddcoin.rpc.wallet_rpc_client import WalletRpcClient from hddcoin.server.start_wallet import SERVICE_NAME from hddcoin.util.bech32m import encode_puzzle_hash from hddcoin.util.byte_types import hexstr_to_bytes from hddcoin.util.config import load_config from hddcoin.util.default_root import DEFAULT_ROOT_PATH from hddcoin.util.ints import uint16, uint64 from hddcoin.wallet.transaction_record import TransactionRecord from hddcoin.wallet.util.wallet_types import WalletType def print_transaction(tx: TransactionRecord, verbose: bool, name) -> None: if verbose: print(tx) else: hddcoin_amount = Decimal(int(tx.amount)) / units["hddcoin"] to_address = encode_puzzle_hash(tx.to_puzzle_hash, name) print(f"Transaction {tx.name}") print(f"Status: {'Confirmed' if tx.confirmed else ('In mempool' if tx.is_in_mempool() else 'Pending')}") print(f"Amount {'sent' if tx.sent else 'received'}: {hddcoin_amount} {name}") print(f"To address: {to_address}") print("Created at:", datetime.fromtimestamp(tx.created_at_time).strftime("%Y-%m-%d %H:%M:%S")) print("") async def get_transaction(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: wallet_id = args["id"] transaction_id = hexstr_to_bytes(args["tx_id"]) config = load_config(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME) name = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"] tx: TransactionRecord = await wallet_client.get_transaction(wallet_id, transaction_id=transaction_id) print_transaction(tx, verbose=(args["verbose"] > 0), name=name) async def get_transactions(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: wallet_id = args["id"] paginate = args["paginate"] if paginate is None: paginate = sys.stdout.isatty() txs: List[TransactionRecord] = await wallet_client.get_transactions(wallet_id) config = load_config(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME) name = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"] if len(txs) == 0: print("There are no transactions to this address") offset = args["offset"] num_per_screen = 5 if paginate else len(txs) for i in range(offset, len(txs), num_per_screen): for j in range(0, num_per_screen): if i + j >= len(txs): break print_transaction(txs[i + j], verbose=(args["verbose"] > 0), name=name) if i + num_per_screen >= len(txs): return None print("Press q to quit, or c to continue") while True: entered_key = sys.stdin.read(1) if entered_key == "q": return None elif entered_key == "c": break def check_unusual_transaction(amount: Decimal, fee: Decimal): return fee >= amount async def send(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: wallet_id = args["id"] amount = Decimal(args["amount"]) fee = Decimal(args["fee"]) address = args["address"] override = args["override"] if not override and check_unusual_transaction(amount, fee): print( f"A transaction of amount {amount} and fee {fee} is unusual.\n" f"Pass in --override if you are sure you mean to do this." 
) return print("Submitting transaction...") final_amount = uint64(int(amount * units["hddcoin"])) final_fee = uint64(int(fee * units["hddcoin"])) res = await wallet_client.send_transaction(wallet_id, final_amount, address, final_fee) tx_id = res.name start = time.time() while time.time() - start < 10: await asyncio.sleep(0.1) tx = await wallet_client.get_transaction(wallet_id, tx_id) if len(tx.sent_to) > 0: print(f"Transaction submitted to nodes: {tx.sent_to}") print(f"Do hddcoin wallet get_transaction -f {fingerprint} -tx 0x{tx_id} to get status") return None print("Transaction not yet submitted to nodes") print(f"Do 'hddcoin wallet get_transaction -f {fingerprint} -tx 0x{tx_id}' to get status") async def get_address(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: wallet_id = args["id"] res = await wallet_client.get_next_address(wallet_id, False) print(res) async def delete_unconfirmed_transactions(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: wallet_id = args["id"] await wallet_client.delete_unconfirmed_transactions(wallet_id) print(f"Successfully deleted all unconfirmed transactions for wallet id {wallet_id} on key {fingerprint}") def wallet_coin_unit(typ: WalletType, address_prefix: str) -> Tuple[str, int]: if typ == WalletType.COLOURED_COIN: return "", units["colouredcoin"] if typ in [WalletType.STANDARD_WALLET, WalletType.POOLING_WALLET, WalletType.MULTI_SIG, WalletType.RATE_LIMITED]: return address_prefix, units["hddcoin"] return "", units["byte"] def print_balance(amount: int, scale: int, address_prefix: str) -> str: ret = f"{amount/scale} {address_prefix} " if scale > 1: ret += f"({amount} byte)" return ret async def print_balances(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: summaries_response = await wallet_client.get_wallets() config = load_config(DEFAULT_ROOT_PATH, "config.yaml") address_prefix = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"] # lazy load HODL stuff here for cleaner diff import hddcoin.hodl.exc from hddcoin.hodl.hodlrpc import HodlRpcClient hodlRpcClient = HodlRpcClient(fingerprint) try: rpcRet = await hodlRpcClient.get("getTotalHodlForWallet") hodl_balance_bytes = rpcRet["committed_bytes"] hodl_balance_hdd = Decimal(hodl_balance_bytes) / int(1e12) # emulating upstream repr for now hodl_balance_str = f"{hodl_balance_hdd} hdd ({hodl_balance_bytes} byte)" except hddcoin.hodl.exc.HodlConnectionError: hodl_balance_str = "< UNABLE TO CONNECT TO HODL SERVER >" except Exception as e: hodl_balance_str = f"ERROR: {e!r}" finally: hodlRpcClient.close() await hodlRpcClient.await_closed() print(f"Wallet height: {await wallet_client.get_height_info()}") print(f"Sync status: {'Synced' if (await wallet_client.get_synced()) else 'Not synced'}") print(f"Balances, fingerprint: {fingerprint}") print(f"HODL deposits: {hodl_balance_str}") for summary in summaries_response: wallet_id = summary["id"] balances = await wallet_client.get_wallet_balance(wallet_id) typ = WalletType(int(summary["type"])) address_prefix, scale = wallet_coin_unit(typ, address_prefix) print(f"Wallet ID {wallet_id} type {typ.name} {summary['name']}") print(f" -Total Balance: {print_balance(balances['confirmed_wallet_balance'], scale, address_prefix)}") print( f" -Pending Total Balance: {print_balance(balances['unconfirmed_wallet_balance'], scale, address_prefix)}" ) print(f" -Spendable: {print_balance(balances['spendable_balance'], scale, address_prefix)}") print(f" -Max Send Amount: 
{print_balance(balances['max_send_amount'], scale, address_prefix)}") async def get_wallet(wallet_client: WalletRpcClient, fingerprint: int = None) -> Optional[Tuple[WalletRpcClient, int]]: if fingerprint is not None: fingerprints = [fingerprint] else: fingerprints = await wallet_client.get_public_keys() if len(fingerprints) == 0: print("No keys loaded. Run 'hddcoin keys generate' or import a key") return None if len(fingerprints) == 1: fingerprint = fingerprints[0] if fingerprint is not None: log_in_response = await wallet_client.log_in(fingerprint) else: print("Choose wallet key:") for i, fp in enumerate(fingerprints): print(f"{i+1}) {fp}") val = None while val is None: val = input("Enter a number to pick or q to quit: ") if val == "q": return None if not val.isdigit(): val = None else: index = int(val) - 1 if index >= len(fingerprints): print("Invalid value") val = None continue else: fingerprint = fingerprints[index] assert fingerprint is not None log_in_response = await wallet_client.log_in(fingerprint) if log_in_response["success"] is False: if log_in_response["error"] == "not_initialized": use_cloud = True if "backup_path" in log_in_response: path = log_in_response["backup_path"] print(f"Backup file from backup.chia.net downloaded and written to: {path}") val = input("Do you want to use this file to restore from backup? (Y/N) ") if val.lower() == "y": log_in_response = await wallet_client.log_in_and_restore(fingerprint, path) else: use_cloud = False if "backup_path" not in log_in_response or use_cloud is False: if use_cloud is True: val = input( "No online backup file found,\n Press S to skip restore from backup" "\n Press F to use your own backup file: " ) else: val = input( "Cloud backup declined,\n Press S to skip restore from backup" "\n Press F to use your own backup file: " ) if val.lower() == "s": log_in_response = await wallet_client.log_in_and_skip(fingerprint) elif val.lower() == "f": val = input("Please provide the full path to your backup file: ") log_in_response = await wallet_client.log_in_and_restore(fingerprint, val) if "success" not in log_in_response or log_in_response["success"] is False: if "error" in log_in_response: error = log_in_response["error"] print(f"Error: {log_in_response[error]}") return None return wallet_client, fingerprint async def defrag(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: """Defragment the wallet, reducing the number of coins in it. This increases the maximum amount that can be sent in a single transaction. """ # This is currently an extremely simple algorithm. We just send the maximum possible amount to # ourselves, using the built in wallet restrictions (which are based on "reasonable" cost limits # per block). # # Successive calls to this will always result in a single coin in the wallet. 
from hddcoin.hodl.util import getNthWalletAddr, getPkSkFromFingerprint, loadConfig wallet_id = args["id"] fee_hdd = Decimal(args["fee"]) fee_bytes = uint64(int(fee_hdd * units["hddcoin"])) target_address = args["address"] override = args["override"] no_confirm = args["no_confirm"] if fee_hdd >= 1 and (override == False): print(f"fee of {fee_hdd} HDD seems too large (use --override to force)") return elif target_address and len(target_address) != 62: print("Address is invalid") return config = loadConfig() sk = getPkSkFromFingerprint(fingerprint)[1] if not target_address: target_address = getNthWalletAddr(config, sk, 0) else: check_count = 100 for i in range(check_count): if target_address == getNthWalletAddr(config, sk, i): break # address is confirmed as one of ours else: print("WARNING!!!\nWARNING!!!\nWARNING!!! ", end = "") print(f"The given address is not one of the first {check_count} wallet addresses!") print("WARNING!!!\nWARNING!!!") inp = input(f"Is {target_address} where you want to defrag to? [y/N] ") if not inp or inp[0].lower() == "n": print("Aborting defrag!") return # Figure out the maximum value the wallet can send at the moment balances = await wallet_client.get_wallet_balance(wallet_id) max_send_bytes = balances["max_send_amount"] spendable_bytes = balances["spendable_balance"] max_send_hdd = Decimal(max_send_bytes) / units["hddcoin"] spendable_hdd = Decimal(spendable_bytes) / units["hddcoin"] print(f"Total of spendable coins in wallet (right now): {spendable_hdd} HDD") print(f"Maximum value you can send right now (pre-defrag): {max_send_hdd} HDD") if not no_confirm: if max_send_bytes == spendable_bytes: inp = input("Your wallet is not currently limited by fragmentation! Continue? [y/N] ") else: inp = input("Do you wish to defrag and consolidate some coins? [y/N] ") if not inp or inp[0].lower() == "n": print("Aborting defrag!") return # Now do one round of defrag! defrag_coin_size_bytes = max_send_bytes - fee_bytes res = await wallet_client.send_transaction(wallet_id, defrag_coin_size_bytes, target_address, fee_bytes) tx_id = res.name start = time.time() while time.time() - start < 10:
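# --- Standalone sketch of the unit handling used by send()/print_balances() above:
# wallet RPC amounts are integer "bytes", while the CLI accepts decimal HDD. The
# scale factor is assumed to be 10**12, matching the int(1e12) conversion used for
# the HODL balance; the real code reads it from units["hddcoin"].
from decimal import Decimal

HDD_SCALE = 10 ** 12  # assumed value of units["hddcoin"]

def hdd_to_bytes(amount_hdd: str) -> int:
    # Parse user input with Decimal to avoid float rounding, then scale to bytes.
    return int(Decimal(amount_hdd) * HDD_SCALE)

def bytes_to_hdd(amount_bytes: int) -> Decimal:
    return Decimal(amount_bytes) / HDD_SCALE

def check_unusual(amount_hdd: str, fee_hdd: str) -> bool:
    # Mirrors check_unusual_transaction(): a fee at least as large as the amount
    # is treated as suspicious and requires --override.
    return Decimal(fee_hdd) >= Decimal(amount_hdd)

if __name__ == '__main__':
    assert hdd_to_bytes("1.5") == 1_500_000_000_000
    assert bytes_to_hdd(250_000_000_000) == Decimal("0.25")
    assert check_unusual("0.1", "0.5") is True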
"""AWS Higher Level Abstractions This module contains higher-level AWS abstractions to make working with AWS instances and collections of instances easier and less error-prone. :class:`EC2Instance` is responsible for maintaining information about itself and updating its state when asked to. The executer passed in must be capable of running functions that may block, ie a Greenlet or ThreadPool executor. :class:`EC2Collection` is a group of instances for a given allocation request. Collections should be passed back to the Pool when their use is no longer required. An EC2 Pool is responsible for allocating and dispersing :class:`EC2Instance's <EC2Instance>` and terminating idle instances. The :class:`EC2Pool` is responsible for tracking EC2 instances across regions, allocating them for use by the broker, and terminating excessively idle instances. It also can rebuild maps of existing instances by querying AWS for appropriate instance types. """ import concurrent.futures import time from collections import defaultdict from datetime import datetime, timedelta from typing import Dict, Optional from attr import attrib, attrs from boto.ec2 import connect_to_region from boto.ec2.instance import Instance # noqa from tornado import gen from tornado.concurrent import Future from tornado.platform.asyncio import to_tornado_future import tornado.ioloop from loadsbroker.exceptions import LoadsException from loadsbroker import logger _POPULATED = False AWS_REGIONS = ( # "ap-northeast-1", "ap-southeast-1", "ap-southeast-2", # speeding up "eu-west-1", # "sa-east-1", # this one times out "us-east-1", "us-west-1", "us-west-2" ) # Initial blank list of AMI ID's that will map a region to a dict keyed by # virtualization type of the appropriate AMI to use AWS_AMI_IDS = {k: {} for k in AWS_REGIONS} # How long after est. run times to trigger the reaper REAPER_DELTA = timedelta(hours=5) # Force the reaper for run times less than REAPER_FORCE = timedelta(hours=24) REAPER_STATE = 'ThirdState' def populate_ami_ids(aws_access_key_id=None, aws_secret_access_key=None, port=None, owner_id="595879546273", use_filters=True): """Populate all the AMI ID's with the latest CoreOS stable info. This is a longer blocking operation and should be done on startup. """ global _POPULATED # see https://github.com/boto/boto/issues/2617 if port is not None: is_secure = port == 443 else: is_secure = True # Spin up a temp thread pool to make this faster errors = [] def get_amis(region): logger.debug("Working in %s" % region) try: conn = connect_to_region( region, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, port=port, is_secure=is_secure) filters = {} if owner_id is not None and use_filters: filters["owner-id"] = owner_id images = conn.get_all_images(filters=filters) # The last two highest sorted are the pvm and hvm instance id's # what is this 899.4 ??? XXX # images = sorted([x for x in images if "899.4" in x.name], # key=lambda x: x.name)[-2:] images = sorted(images, key=lambda x: x.name)[-2:] AWS_AMI_IDS[region] = {x.virtualization_type: x for x in images} logger.debug("%s populated" % region) except Exception as exc: logger.exception('Could not get all images in %s' % region) errors.append(exc) with concurrent.futures.ThreadPoolExecutor(len(AWS_REGIONS)) as pool: # Execute all regions in parallel. 
pool.map(get_amis, AWS_REGIONS) if len(errors) > 0: raise errors[0] _POPULATED = True def get_ami(region, instance_type): """Returns the appropriate AMI to use for a given region + instance type HVM is always used except for instance types which cannot use it. Based on matrix here: http://aws.amazon.com/amazon-linux-ami/instance-type-matrix/ .. note:: :func:`populate_ami_ids` must be called first to populate the available AMI's. """ if not _POPULATED: raise KeyError('populate_ami_ids must be called first') instances = AWS_AMI_IDS[region] inst_type = "hvm" if instance_type[:2] in ["m1", "m2", "c1", "t1"]: inst_type = "paravirtual" if inst_type not in instances: msg = "Could not find instance type %r in %s for region %s" raise KeyError(msg % (inst_type, list(instances.keys()), region)) return instances[inst_type].id def available_instance(instance): """Returns True if an instance is usable for allocation. Instances are only usable if they're running, or have been "pending" for less than 2 minutes. Instances pending more than 2 minutes are likely perpetually stalled and will be reaped. :type instance: :class:`instance.Instance` :returns: Whether the instance should be used for allocation. :rtype: bool """ if instance.state == "running": return True if instance.state == "pending": oldest = datetime.today() - timedelta(minutes=2) try: launched = datetime.strptime(instance.launch_time, '%Y-%m-%dT%H:%M:%S.%fZ') except ValueError: # Trigger by moto tests as they don't include a timezone launched = datetime.strptime(instance.launch_time, '%Y-%m-%dT%H:%M:%S') if oldest < launched: return True return False class ExtensionState: """A bare class that extensions can attach things to that will be retained on the instance.""" @attrs class EC2Instance: """EC2Instance that holds the underlying EC2.Instance object and configurable plugin state.""" instance = attrib() # type: Instance state = attrib() # type: ExtensionState class EC2Collection: """Create a collection to manage a set of instances. :type instances: list of :class:`instance.Instance` """ def __init__(self, run_id, uuid, conn, instances, io_loop=None): self.run_id = run_id self.uuid = uuid self.started = False self.finished = False self.conn = conn self.local_dns = False self._env_data = None self._command_args = None self._executer = concurrent.futures.ThreadPoolExecutor(len(instances)) self._loop = io_loop or tornado.ioloop.IOLoop.instance() self.instances = [] for inst in instances: self.instances.append(EC2Instance(inst, ExtensionState())) def debug(self, msg): logger.debug('[uuid:%s] %s' % (self.uuid, msg)) async def wait(self, seconds): """Waits for ``seconds`` before resuming.""" await gen.Task(self._loop.add_timeout, time.time() + seconds) def execute(self, func, *args, **kwargs): """Execute a blocking function, return a future that will be called in the io loop. The blocking function will receive the underlying boto EC2 instance object first, with the other args trailing. 
""" fut = Future() def set_fut(future): exc = future.exception() if exc: fut.set_exception(exc) else: fut.set_result(future.result()) def _throwback(fut): self._loop.add_callback(set_fut, fut) exc_fut = self._executer.submit(func, *args, **kwargs) exc_fut.add_done_callback(_throwback) return fut async def map(self, func, delay=0, *args, **kwargs): """Execute a blocking func with args/kwargs across all instances.""" futures = [] for x in self.instances: fut = self.execute(func, x, *args, **kwargs) futures.append(fut) if delay: await self.wait(delay) results = await gen.multi(futures) return results def pending_instances(self): return [i for i in self.instances if i.instance.state == "pending"] def dead_instances(self): return [i for i in self.instances if i.instance.state not in ["pending", "running"] or getattr(i.state, "nonresponsive", False)] def running_instances(self): return [i for i in self.instances if i.instance.state == "running"] async def remove_dead_instances(self): """Removes all dead instances per :meth:`dead_instances`.""" dead = self.dead_instances() if dead: self.debug("Pruning %d non-responsive instances." % len(dead)) await self.remove_instances(dead) async def wait_for_running(self, interval=5, timeout=600): """Wait for all the instances to be running. Instances unable to load will be removed.""" def update_state(inst): try: inst.instance.update() except Exception: # Updating state can fail, it happens self.debug('Failed to update instance state: %s' % inst.instance.id) return inst.instance.state end_time = time.time() + 600 pending = self.pending_instances() while time.time() < end_time and pending: self.debug('%d pending instances.' % len(pending)) # Update the state of all the pending instances await gen.multi( [self.execute(update_state, inst) for inst in pending]) pending = self.pending_instances() # Wait if there's pending to check again if pending: await self.wait(interval) # Remove everything that isn't running by now dead = self.dead_instances() + self.pending_instances() # Don't wait for the future that kills them self.debug("Removing %d dead instances that wouldn't run" % len(dead)) gen.convert_yielded(self.remove_instances(dead)) return True async def remove_instances(self, ec2_instances): """Remove an instance entirely.""" if not ec2_instances: return instances = [i.instance for i in ec2_instances] for inst in ec2_instances: self.instances.remove(inst) instance_ids = [x.id for x in instances] try: # Remove the tags await self.execute(self.conn.create_tags, instance_ids, {"RunId": "", "Uuid": ""}) except Exception: logger.debug("Error detagging instances, continuing.", exc_info=True) try: logger.debug("Terminating instances %s" % str(instance_ids)) # Nuke them await self.execute(self.conn.terminate_instances, instance_ids) except Exception: logger.debug("Error terminating instances.", exc_info=True) class EC2Pool: """Initialize a pool for instance allocation and recycling. All instances allocated using this pool will be tagged as follows: Name loads-BROKER_ID Broker BROKER_ID Instances in use by a run are tagged with the additional tags: RunId RUN_ID Uuid STEP_ID .. warning:: This instance is **NOT SAFE FOR CONCURRENT USE BY THREADS**. 
""" def __init__(self, broker_id, access_key=None, secret_key=None, key_pair="loads", security="loads", max_idle=600, user_data=None, io_loop=None, port=None, owner_id="595879546273", use_filters=True): self.owner_id = owner_id self.use_filters = use_filters self.broker_id = broker_id self.access_key = access_key self.secret_key = secret_key self.max_idle = max_idle self.key_pair = key_pair self.security = security self.user_data = user_data self._instances = defaultdict(list) self._tag_filters = {"tag:Name": "loads-%s*" % self.broker_id, "tag:Project": "loads"} self._conns = {} self._recovered = {} self._executor = concurrent.futures.ThreadPoolExecutor(15) self._loop = io_loop or tornado.ioloop.IOLoop.instance() self.port = port # see https://github.com/boto/boto/issues/2617 if port is not None: self.is_secure = port == 443 else: self.is_secure = True # Asynchronously initialize ourself when the pool runs self._loop.add_future( gen.convert_yielded(self.initialize()), self._initialized ) self.ready = Future() def shutdown(self): """Make sure we shutdown the executor. """ self._executor.shutdown() def _run_in_executor(self, func, *args, **kwargs): return to_tornado_future(self._executor.submit(func, *args, **kwargs))
{"inbytes": 0, "outbytes": 0}, 17: {"inbytes": 8, "outbytes": 0, "buffers": [9]}, }, 'nn::sf::hipc::detail::IHipcManager': { 0: {"inbytes": 0, "outbytes": 4}, 1: {"inbytes": 4, "outbytes": 0, "outhandles": [2]}, 2: {"inbytes": 0, "outbytes": 0, "outhandles": [2]}, 3: {"inbytes": 0, "outbytes": 2}, 4: {"inbytes": 4, "outbytes": 0, "outhandles": [2]}, }, 'nn::wlan::detail::ISocketManager': { 0: {"inbytes": 0, "outbytes": 0, "buffers": [5]}, 1: {"inbytes": 4, "outbytes": 0}, 2: {"inbytes": 4, "outbytes": 4, "buffers": [9]}, 3: {"inbytes": 4, "outbytes": 0}, 4: {"inbytes": 8, "outbytes": 0}, 5: {"inbytes": 2, "outbytes": 4}, 6: {"inbytes": 0, "outbytes": 6}, 7: {"inbytes": 1, "outbytes": 0}, 8: {"inbytes": 0, "outbytes": 8}, 9: {"inbytes": 4, "outbytes": 0, "inhandles": [1, 1, 1, 1, 1]}, 10: {"inbytes": 0, "outbytes": 0}, 11: {"inbytes": 0, "outbytes": 0}, }, 'nn::wlan::detail::ILocalManager': { 0: {"inbytes": 0, "outbytes": 0}, 1: {"inbytes": 0, "outbytes": 0}, 2: {"inbytes": 0, "outbytes": 0}, 3: {"inbytes": 0, "outbytes": 0}, 4: {"inbytes": 0, "outbytes": 0}, 5: {"inbytes": 0, "outbytes": 0}, 6: {"inbytes": 0, "outbytes": 6}, 7: {"inbytes": 0x80, "outbytes": 0}, 8: {"inbytes": 0, "outbytes": 0}, 9: {"inbytes": 0, "outbytes": 0, "buffers": [21]}, 10: {"inbytes": 0, "outbytes": 0}, 11: {"inbytes": 0x80, "outbytes": 0}, 12: {"inbytes": 0, "outbytes": 0}, 13: {"inbytes": 0x80, "outbytes": 0}, 14: {"inbytes": 0, "outbytes": 0}, 15: {"inbytes": 0x10, "outbytes": 0}, 16: {"inbytes": 4, "outbytes": 0}, 17: {"inbytes": 4, "outbytes": 0, "outhandles": [1]}, 18: {"inbytes": 0, "outbytes": 0x3C}, 19: {"inbytes": 0, "outbytes": 0, "buffers": [22]}, 20: {"inbytes": 0, "outbytes": 0, "outhandles": [1]}, 21: {"inbytes": 0, "outbytes": 0, "buffers": [22]}, 22: {"inbytes": 0, "outbytes": 4}, 23: {"inbytes": 0, "outbytes": 0x50}, 24: {"inbytes": 4, "outbytes": 4, "buffers": [5]}, 25: {"inbytes": 4, "outbytes": 0}, 26: {"inbytes": 0, "outbytes": 0, "buffers": [5]}, 27: {"inbytes": 4, "outbytes": 0}, 28: {"inbytes": 4, "outbytes": 4, "buffers": [9]}, 29: {"inbytes": 4, "outbytes": 0}, 30: {"inbytes": 8, "outbytes": 0}, 31: {"inbytes": 2, "outbytes": 4}, 32: {"inbytes": 4, "outbytes": 0, "buffers": [25]}, 33: {"inbytes": 4, "outbytes": 0, "buffers": [25]}, 34: {"inbytes": 0, "outbytes": 0, "buffers": [25, 6]}, 35: {"inbytes": 0x10, "outbytes": 0, "buffers": [5]}, 36: {"inbytes": 0, "outbytes": 0, "buffers": [5]}, 37: {"inbytes": 0, "outbytes": 0}, 38: {"inbytes": 4, "outbytes": 4, "buffers": [9]}, 39: {"inbytes": 4, "outbytes": 0}, 40: {"inbytes": 8, "outbytes": 0}, 41: {"inbytes": 4, "outbytes": 4}, 42: {"inbytes": 4, "outbytes": 0}, 43: {"inbytes": 0, "outbytes": 4}, 44: {"inbytes": 4, "outbytes": 0}, }, 'nn::wlan::detail::ISocketGetFrame': { 0: {"inbytes": 4, "outbytes": 4, "buffers": [6]}, }, 'nn::wlan::detail::ILocalGetActionFrame': { 0: {"inbytes": 4, "outbytes": 0xC, "buffers": [6]}, }, }, 'ldn': { 'nn::sf::hipc::detail::IHipcManager': { 0: {"inbytes": 0, "outbytes": 4}, 1: {"inbytes": 4, "outbytes": 0, "outhandles": [2]}, 2: {"inbytes": 0, "outbytes": 0, "outhandles": [2]}, 3: {"inbytes": 0, "outbytes": 2}, 4: {"inbytes": 4, "outbytes": 0, "outhandles": [2]}, }, 'nn::ldn::detail::IUserLocalCommunicationService': { 0: {"inbytes": 0, "outbytes": 4}, 1: {"inbytes": 0, "outbytes": 0, "buffers": [26]}, 2: {"inbytes": 0, "outbytes": 8}, 3: {"inbytes": 0, "outbytes": 2}, 4: {"inbytes": 0, "outbytes": 0x20}, 5: {"inbytes": 0, "outbytes": 0x20}, 100: {"inbytes": 0, "outbytes": 0, "outhandles": [1]}, 101: 
{"inbytes": 0, "outbytes": 0, "buffers": [26, 10]}, 102: {"inbytes": 0x68, "outbytes": 2, "buffers": [34]}, 103: {"inbytes": 0x68, "outbytes": 2, "buffers": [34]}, 200: {"inbytes": 0, "outbytes": 0}, 201: {"inbytes": 0, "outbytes": 0}, 202: {"inbytes": 0x98, "outbytes": 0}, 203: {"inbytes": 0xB8, "outbytes": 0, "buffers": [9]}, 204: {"inbytes": 0, "outbytes": 0}, 205: {"inbytes": 4, "outbytes": 0}, 206: {"inbytes": 0, "outbytes": 0, "buffers": [33]}, 207: {"inbytes": 1, "outbytes": 0}, 208: {"inbytes": 6, "outbytes": 0}, 209: {"inbytes": 0, "outbytes": 0}, 300: {"inbytes": 0, "outbytes": 0}, 301: {"inbytes": 0, "outbytes": 0}, 302: {"inbytes": 0x7C, "outbytes": 0, "buffers": [25]}, 303: {"inbytes": 0xC0, "outbytes": 0}, 304: {"inbytes": 0, "outbytes": 0}, 400: {"inbytes": 8, "outbytes": 0, "pid": True}, 401: {"inbytes": 0, "outbytes": 0}, }, 'nn::ldn::detail::IUserServiceCreator': { 0: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::ldn::detail::IUserLocalCommunicationService']}, }, 'nn::ldn::detail::ISystemServiceCreator': { 0: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::ldn::detail::ISystemLocalCommunicationService']}, }, 'nn::ldn::detail::IMonitorService': { 0: {"inbytes": 0, "outbytes": 4}, 1: {"inbytes": 0, "outbytes": 0, "buffers": [26]}, 2: {"inbytes": 0, "outbytes": 8}, 3: {"inbytes": 0, "outbytes": 2}, 4: {"inbytes": 0, "outbytes": 0x20}, 5: {"inbytes": 0, "outbytes": 0x20}, 100: {"inbytes": 0, "outbytes": 0}, 101: {"inbytes": 0, "outbytes": 0}, }, 'nn::ldn::detail::ISystemLocalCommunicationService': { 0: {"inbytes": 0, "outbytes": 4}, 1: {"inbytes": 0, "outbytes": 0, "buffers": [26]}, 2: {"inbytes": 0, "outbytes": 8}, 3: {"inbytes": 0, "outbytes": 2}, 4: {"inbytes": 0, "outbytes": 0x20}, 5: {"inbytes": 0, "outbytes": 0x20}, 100: {"inbytes": 0, "outbytes": 0, "outhandles": [1]}, 101: {"inbytes": 0, "outbytes": 0, "buffers": [26, 10]}, 102: {"inbytes": 0x68, "outbytes": 2, "buffers": [34]}, 103: {"inbytes": 0x68, "outbytes": 2, "buffers": [34]}, 200: {"inbytes": 0, "outbytes": 0}, 201: {"inbytes": 0, "outbytes": 0}, 202: {"inbytes": 0x98, "outbytes": 0}, 203: {"inbytes": 0xB8, "outbytes": 0, "buffers": [9]}, 204: {"inbytes": 0, "outbytes": 0}, 205: {"inbytes": 4, "outbytes": 0}, 206: {"inbytes": 0, "outbytes": 0, "buffers": [33]}, 207: {"inbytes": 1, "outbytes": 0}, 208: {"inbytes": 6, "outbytes": 0}, 209: {"inbytes": 0, "outbytes": 0}, 300: {"inbytes": 0, "outbytes": 0}, 301: {"inbytes": 0, "outbytes": 0}, 302: {"inbytes": 0x7C, "outbytes": 0, "buffers": [25]}, 303: {"inbytes": 0xC0, "outbytes": 0}, 304: {"inbytes": 0, "outbytes": 0}, 400: {"inbytes": 8, "outbytes": 0, "pid": True}, 401: {"inbytes": 0, "outbytes": 0}, }, 'nn::ldn::detail::IMonitorServiceCreator': { 0: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::ldn::detail::IMonitorService']}, }, }, 'nvservices': { 'nv::gemcoredump::INvGemCoreDump': { 0: {"inbytes": 0, "outbytes": 4}, 1: {"inbytes": 0, "outbytes": 0x10}, 2: {"inbytes": 0, "outbytes": 4, "buffers": [34]}, }, 'nn::sf::hipc::detail::IHipcManager': { 0: {"inbytes": 0, "outbytes": 4}, 1: {"inbytes": 4, "outbytes": 0, "outhandles": [2]}, 2: {"inbytes": 0, "outbytes": 0, "outhandles": [2]}, 3: {"inbytes": 0, "outbytes": 2}, 4: {"inbytes": 4, "outbytes": 0, "outhandles": [2]}, }, 'nv::gemcontrol::INvGemControl': { 0: {"inbytes": 0, "outbytes": 4}, 1: {"inbytes": 0, "outbytes": 4, "outhandles": [1]}, 2: {"inbytes": 1, "outbytes": 4}, 3: {"inbytes": 0x10, "outbytes": 4}, 4: {"inbytes": 0x10, "outbytes": 4}, 5: {"inbytes": 0, "outbytes": 0x10}, 6: 
{"inbytes": 0, "outbytes": 4}, }, 'nns::nvdrv::INvDrvDebugFSServices': { 0: {"inbytes": 0, "outbytes": 4, "inhandles": [1]}, 1: {"inbytes": 4, "outbytes": 0}, 2: {"inbytes": 4, "outbytes": 4, "buffers": [6]}, 3: {"inbytes": 4, "outbytes": 4, "buffers": [5, 6]}, 4: {"inbytes": 4, "outbytes": 4, "buffers": [5, 5]}, }, 'nns::nvdrv::INvDrvServices': { 0: {"inbytes": 0, "outbytes": 8, "buffers": [5]}, 1: {"inbytes": 8, "outbytes": 4, "buffers": [33, 34]}, 2: {"inbytes": 4, "outbytes": 4}, 3: {"inbytes": 4, "outbytes": 4, "inhandles": [1, 1]}, 4: {"inbytes": 8, "outbytes": 4, "outhandles": [1]}, 5: {"inbytes": 8, "outbytes": 4, "inhandles": [1]}, 6: {"inbytes": 0, "outbytes": 0x24}, 7: {"inbytes": 8, "outbytes": 4}, 8: {"inbytes": 8, "outbytes": 4, "pid": True}, 9: {"inbytes": 0, "outbytes": 0}, }, }, 'pcv': { 'nn::sf::hipc::detail::IHipcManager': { 0: {"inbytes": 0, "outbytes": 4}, 1: {"inbytes": 4, "outbytes": 0, "outhandles": [2]}, 2: {"inbytes": 0, "outbytes": 0, "outhandles": [2]}, 3: {"inbytes": 0, "outbytes": 2}, 4: {"inbytes": 4, "outbytes": 0, "outhandles": [2]}, }, 'nn::timesrv::detail::service::IStaticService': { 0: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::timesrv::detail::service::ISystemClock']}, 1: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::timesrv::detail::service::ISystemClock']}, 2: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::timesrv::detail::service::ISteadyClock']}, 3: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::timesrv::detail::service::ITimeZoneService']}, 4: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::timesrv::detail::service::ISystemClock']}, 100: {"inbytes": 0, "outbytes": 1}, 101: {"inbytes": 1, "outbytes": 0}, }, 'nn::bpc::IPowerButtonManager': { 0: {"inbytes": 0, "outbytes": 4}, 1: {"inbytes": 4, "outbytes": 0, "outhandles": [1]}, }, 'nn::pcv::detail::IPcvService': { 0: {"inbytes": 8, "outbytes": 0}, 1: {"inbytes": 8, "outbytes": 0}, 2: {"inbytes": 8, "outbytes": 0}, 3: {"inbytes": 4, "outbytes": 4}, 4: {"inbytes": 4, "outbytes": 0xC}, 5: {"inbytes": 8, "outbytes": 8, "buffers": [10]}, 6: {"inbytes": 8, "outbytes": 0}, 7: {"inbytes": 8, "outbytes": 0}, 8: {"inbytes": 8, "outbytes": 0}, 9: {"inbytes": 4, "outbytes": 1}, 10: {"inbytes": 4, "outbytes": 0xC}, 11: {"inbytes": 8, "outbytes": 0}, 12: {"inbytes": 4, "outbytes": 4}, 13: {"inbytes": 4, "outbytes": 4, "buffers": [10]}, 14: {"inbytes": 4, "outbytes": 0}, 15: {"inbytes": 0, "outbytes": 0}, 16: {"inbytes": 0, "outbytes": 1}, 17: {"inbytes": 0, "outbytes": 0}, }, 'nn::bpc::IRtcManager': { 0: {"inbytes": 0, "outbytes": 8}, 1: {"inbytes": 8, "outbytes": 0}, 2: {"inbytes": 0, "outbytes": 1}, }, 'nn::timesrv::detail::service::ISteadyClock': { 0: {"inbytes": 0, "outbytes": 0x18}, 2: {"inbytes": 0, "outbytes": 8}, 3: {"inbytes": 8, "outbytes": 0}, }, 'nn::bpc::IBoardPowerControlManager': { 0: {"inbytes": 0, "outbytes": 0}, 1: {"inbytes": 0, "outbytes": 0}, 2: {"inbytes": 0, "outbytes": 4}, 3: {"inbytes": 0, "outbytes": 4}, 4: {"inbytes": 0, "outbytes": 1}, 5: {"inbytes":
<filename>lib/googlecloudsdk/command_lib/util/concepts/info_holders.py # -*- coding: utf-8 -*- # # Copyright 2018 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Classes for runtime handling of concept arguments.""" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals import abc from googlecloudsdk.calliope import arg_parsers from googlecloudsdk.calliope import base from googlecloudsdk.calliope.concepts import util from googlecloudsdk.command_lib.util.concepts import completers from googlecloudsdk.core.util import text import six from six.moves import filter # pylint: disable=redefined-builtin ANCHOR_HELP = ('ID of the {resource} or fully qualified identifier for the ' '{resource}.') PLURAL_ANCHOR_HELP = ('IDs of the {resource} or fully qualified identifiers ' 'for the {resource}.') class ConceptInfo(six.with_metaclass(abc.ABCMeta, object)): """Holds information for a concept argument. The ConceptInfo object is responsible for holding information about the dependencies of a concept, and building a Deps object when it is time for lazy parsing of the concept. Attributes: concept_spec: The concept spec underlying the concept handler. attribute_to_args_map: A map of attributes to the names of their associated flags. fallthroughs_map: A map of attributes to non-argument fallthroughs. """ @property def concept_spec(self): """The concept spec associated with this info class.""" raise NotImplementedError @property def fallthroughs_map(self): """A map of attribute names to non-primary fallthroughs.""" raise NotImplementedError @abc.abstractmethod def GetHints(self, attribute_name): """Get a list of string hints for how to specify a concept's attribute. Args: attribute_name: str, the name of the attribute to get hints for. Returns: [str], a list of string hints. """ def GetGroupHelp(self): """Get the group help for the group defined by the presentation spec. Must be overridden in subclasses. Returns: (str) the help text. """ raise NotImplementedError def GetAttributeArgs(self): """Generate args to add to the argument group. Must be overridden in subclasses. Yields: (calliope.base.Argument), all arguments corresponding to concept attributes. """ raise NotImplementedError def AddToParser(self, parser): """Adds all attribute args for the concept to argparse. Must be overridden in subclasses. Args: parser: the parser for the Calliope command. """ raise NotImplementedError @abc.abstractmethod def Parse(self, parsed_args=None): """Lazy parsing function to parse concept. Args: parsed_args: the argparse namespace from the runtime handler. Returns: the parsed concept. """ def ClearCache(self): """Clear cache if it exists. Override where needed.""" pass class ResourceInfo(ConceptInfo): """Holds information for a resource argument.""" def __init__(self, presentation_name, concept_spec, group_help, attribute_to_args_map, fallthroughs_map, required=False, plural=False, group=None): """Initializes the ResourceInfo. 
Args: presentation_name: str, the name of the anchor argument of the presentation spec. concept_spec: googlecloudsdk.calliope.concepts.ConceptSpec, The underlying concept spec. group_help: str, the group help for the argument group. attribute_to_args_map: {str: str}, A map of attribute names to the names of their associated flags. fallthroughs_map: {str: [deps_lib.Fallthrough]} A map of attribute names to non-argument fallthroughs. required: bool, False if resource parsing is allowed to return no resource, otherwise True. plural: bool, True if multiple resources can be parsed, False otherwise. group: an argparse argument group parser to which the resource arg group should be added, if any. """ self.presentation_name = presentation_name self._concept_spec = concept_spec self._fallthroughs_map = fallthroughs_map self.attribute_to_args_map = attribute_to_args_map self.plural = plural self.group_help = group_help self.allow_empty = not required self.group = group self._result = None self._result_computed = False self.sentinel = 0 @property def concept_spec(self): return self._concept_spec @property def resource_spec(self): return self.concept_spec @property def fallthroughs_map(self): return self._fallthroughs_map @property def title(self): """The title of the arg group for the spec, in all caps with spaces.""" name = self.concept_spec.name name = name[0].upper() + name[1:] return name.replace('_', ' ').replace('-', ' ') def _IsAnchor(self, attribute): return self.concept_spec.IsAnchor(attribute) def BuildFullFallthroughsMap(self): return self.concept_spec.BuildFullFallthroughsMap( self.attribute_to_args_map, self.fallthroughs_map, plural=self.plural) def GetHints(self, attribute_name): """Gets a list of string hints for how to set an attribute. Given the attribute name, gets a list of hints corresponding to the attribute's fallthroughs. Args: attribute_name: str, the name of the attribute. Returns: A list of hints for its fallthroughs, including its primary arg if any. """ fallthroughs = self.BuildFullFallthroughsMap().get(attribute_name, []) hints = [] for f in fallthroughs: if f.hint not in hints: hints.append(f.hint) return hints def GetGroupHelp(self): """Build group help for the argument group.""" if len(list(filter(bool, list(self.attribute_to_args_map.values())))) == 1: generic_help = 'This represents a Cloud resource.' else: generic_help = ('The arguments in this group can be used to specify the ' 'attributes of this resource.') description = ['{} resource - {} {}'.format( self.title, self.group_help, generic_help)] skip_flags = [ attribute.name for attribute in self.resource_spec.attributes if not self.attribute_to_args_map.get(attribute.name)] if skip_flags: description.append('(NOTE) Some attributes are not given arguments in ' 'this group but can be set in other ways.') for attr_name in skip_flags: hints = self.GetHints(attr_name) if not hints: # This may be an error, but existence of fallthroughs should not be # enforced here. continue hint = 'To set the [{}] attribute: {}.'.format( attr_name, '; '.join(hints)) description.append(hint) return ' '.join(description) @property def args_required(self): """True if the resource is required and any arguments have no fallthroughs. If fallthroughs can ever be configured in the ResourceInfo object, a more robust solution will be needed, e.g. a GetFallthroughsForAttribute method. Returns: bool, whether the argument group should be required. 
""" if self.allow_empty: return False anchor = self.resource_spec.anchor if (self.attribute_to_args_map.get(anchor.name, None) and not self.fallthroughs_map.get(anchor.name, [])): return True return False def _GetHelpTextForAttribute(self, attribute): """Helper to get the help text for the attribute arg.""" if self._IsAnchor(attribute): help_text = ANCHOR_HELP if not self.plural else PLURAL_ANCHOR_HELP else: help_text = attribute.help_text expansion_name = text.Pluralize( 2 if self.plural else 1, self.resource_spec.name, plural=getattr(self.resource_spec, 'plural_name', None)) return help_text.format(resource=expansion_name) def _IsRequiredArg(self, attribute): return (self._IsAnchor(attribute) and not self.fallthroughs_map.get(attribute.name, [])) def _IsPluralArg(self, attribute): return self._IsAnchor(attribute) and self.plural def _KwargsForAttribute(self, name, attribute): """Constructs the kwargs for adding an attribute to argparse.""" # Argument is modal if it's the anchor, unless there are fallthroughs. # If fallthroughs can ever be configured in the ResourceInfo object, # a more robust solution will be needed, e.g. a GetFallthroughsForAttribute # method. required = self._IsRequiredArg(attribute) final_help_text = self._GetHelpTextForAttribute(attribute) plural = self._IsPluralArg(attribute) if attribute.completer: completer = attribute.completer elif not self.resource_spec.disable_auto_completers: completer = completers.CompleterForAttribute( self.resource_spec, attribute.name) else: completer = None kwargs_dict = { 'help': final_help_text, 'type': attribute.value_type, 'completer': completer} if util.IsPositional(name): if plural and required: kwargs_dict.update({'nargs': '+'}) # The following should not usually happen because anchor args are # required. elif plural and not required: kwargs_dict.update({'nargs': '*'}) elif not required: kwargs_dict.update({'nargs': '?'}) else: kwargs_dict.update({'metavar': util.MetavarFormat(name)}) if required: kwargs_dict.update({'required': True}) if plural: kwargs_dict.update({'type': arg_parsers.ArgList()}) return kwargs_dict def _GetAttributeArg(self, attribute): """Creates argument for a specific attribute.""" name = self.attribute_to_args_map.get(attribute.name, None) # Return None for any false value. if not name: return None return base.Argument( name, **self._KwargsForAttribute(name, attribute)) def GetAttributeArgs(self): """Generate args to add to the argument group.""" args = [] for attribute in self.resource_spec.attributes: arg = self._GetAttributeArg(attribute) if arg: args.append(arg) return args def AddToParser(self, parser): """Adds all attributes of the concept to argparse. Creates a group to hold all the attributes and adds an argument for each attribute. If the presentation spec is required, then the anchor attribute argument will be required. Args: parser: the parser for the Calliope command. """ args = self.GetAttributeArgs() if not args: # Don't create the group if there are not going to be any args generated. return # If this spec is supposed to be added to a subgroup, that overrides the # provided parser. 
parser = self.group or parser resource_group = parser.add_group( help=self.GetGroupHelp(), required=self.args_required) for arg in args: arg.AddToParser(resource_group) def GetExampleArgList(self): """Returns a list of command line example arg strings for the concept.""" args = self.GetAttributeArgs() examples = [] for arg in args: if arg.name.startswith('--'): example = '{}=my-{}'.format(arg.name, arg.name[2:]) else: example = 'my-{}'.format(arg.name.lower()) examples.append(example) return examples def Parse(self, parsed_args=None): """Lazy, cached parsing function for resource. Args: parsed_args: the parsed Namespace. Returns: the initialized resource or a list of initialized resources if the resource argument was
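# --- Simplified, standalone mimic of the _KwargsForAttribute()/AddToParser() flow
# above: a resource's attributes become one argument group, the anchor attribute is
# a required positional, and the remaining attributes become optional flags. This
# uses stdlib argparse instead of calliope and is only a sketch of the idea, not
# the actual ResourceInfo behaviour (no fallthroughs, completers, or plural args).
import argparse

def add_resource_group(parser, resource_name, anchor, other_attrs):
    group = parser.add_argument_group(f"{resource_name.upper()} resource")
    # Anchor attribute: positional, mirrors the ANCHOR_HELP wording.
    group.add_argument(
        anchor,
        metavar=anchor.upper(),
        help=f"ID of the {resource_name} or fully qualified identifier "
             f"for the {resource_name}.")
    for attr in other_attrs:
        group.add_argument(f"--{attr}", help=f"{attr} of the {resource_name}.")
    return group

if __name__ == '__main__':
    p = argparse.ArgumentParser()
    add_resource_group(p, "book", "book", ["shelf", "project"])
    args = p.parse_args(["my-book", "--shelf", "my-shelf"])
    print(args.book, args.shelf, args.project)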
<gh_stars>100-1000 import tensorflow.compat.v1 as tf tf.disable_v2_behavior() import tensorflow as tf2 import pickle, time, os import numpy as np class BasicModel: def __init__(self): self.tf_session = None self.deserializing_var_placeholder = {} self.deserializing_assign_op = [] def serializing_with_session(self, logger=None, elo_score=-1): if logger is not None: logger.info("start model serializing") with self.tf_session.as_default(): with self.tf_session.graph.as_default(): a_vars = tf.trainable_variables() a_values = self.tf_session.run(a_vars) a_var_values = {} for index, vars in enumerate(a_vars): a_var_values[vars.name] = a_values[index] a_var_values['model_time'] = int(time.time()) a_var_values['elo'] = elo_score raw_model = pickle.dumps(a_var_values) p = raw_model # zlib.compress(raw_model, level=4) # if logger is not None: # logger.info("before compress, model size %d" % sys.getsizeof(raw_model)) # logger.info("after compress, model size %d" % sys.getsizeof(p)) return p, a_var_values['model_time'] # 序列化模型 def serializing(self, update_times=0, logger=None, elo_score=-1): if logger is not None: logger.info("start model serializing") a_vars = tf.trainable_variables() a_values = self.tf_session.run(a_vars) a_var_values = {} for index, vars in enumerate(a_vars): a_var_values[vars.name] = a_values[index] a_var_values['model_time'] = int(time.time()) a_var_values['update_times'] = update_times a_var_values['elo'] = elo_score raw_model = pickle.dumps(a_var_values) p = raw_model # zlib.compress(raw_model, level=4) # if logger is not None: # logger.info("before compress, model size %d" % sys.getsizeof(raw_model)) # logger.info("after compress, model size %d" % sys.getsizeof(p)) return p, a_var_values['model_time'] # 反序列化模型 def deserializing(self, AC_total): model_time = None update_times = None if type(AC_total) is not dict: AC_total = pickle.loads(AC_total) if "update_times" in AC_total: update_times = AC_total['update_times'] AC_total.pop('update_times') if 'model_time' in AC_total: model_time = AC_total['model_time'] AC_total.pop('model_time') if 'elo' in AC_total: AC_total.pop('elo') if 'global_step:0' in AC_total: AC_total.pop('global_step:0') feed_dict = {} with self.tf_session.as_default(): with self.tf_session.graph.as_default(): for var_name, value in AC_total.items(): if var_name not in self.deserializing_var_placeholder: var = self.tf_session.graph.get_tensor_by_name(var_name) value = np.array(value) assign_placeholder = tf.placeholder(var.dtype, shape=value.shape) self.deserializing_var_placeholder[var_name] = assign_placeholder self.deserializing_assign_op.append(tf.assign(var, assign_placeholder)) feed_dict[self.deserializing_var_placeholder[var_name]] = value self.tf_session.run(self.deserializing_assign_op, feed_dict=feed_dict) return model_time, update_times # 保存 ckp 模型 def save_ckp_model(self, checkpoint_path): saver = tf.train.Saver() model_file = os.path.join(checkpoint_path, 'model.ckpt') saver.save(self.tf_session, model_file, global_step=tf.train.get_global_step()) def restore_ckp_model(self, checkpoint_path): with self.tf_session.as_default(): with self.tf_session.graph.as_default(): latest_ckp = tf.train.latest_checkpoint(checkpoint_path) saver = tf.train.Saver() saver.restore(self.tf_session, latest_ckp) print('load ' + latest_ckp) # 加载 checkpoint 模型到 dict 字典中,返回 dict def load_ckp_to_dict(self, checkpoint_path): value_dict = {} latest_ckp = tf.train.latest_checkpoint(checkpoint_path) reader = tf.train.NewCheckpointReader(latest_ckp) for name in 
reader.get_variable_to_shape_map(): value_dict[name + ':0'] = reader.get_tensor(name) return value_dict def weights_to_dict(self): a_vars = tf.trainable_variables() a_values = self.tf_session.run(a_vars) a_var_values = {} for index, vars in enumerate(a_vars): a_var_values[vars.name] = a_values[index] return a_var_values def fc_layer(self, inputs, num, scope, ln=True, activation=None, kernel_initializer=tf2.initializers.GlorotUniform()): with tf.variable_scope(scope): fc_raw = tf.layers.dense( inputs=inputs, units=num, activation=None, kernel_initializer=kernel_initializer, bias_initializer=tf.constant_initializer(0.)) if ln == True: layer_norm_out = self.layer_norm(fc_raw, num, scope + "_ln") return activation(layer_norm_out) else: if activation == None: return fc_raw else: return activation(fc_raw) def cnn_layer(self, image, scope): with tf.variable_scope('cnn_' + scope): c1 = self.conv(image, out_channel=8, kernel=3, scope='c1') c2 = self.conv(c1, out_channel=16, kernel=3, scope='c2') c2_shape = np.prod([v.value for v in c2.get_shape()[1:]]) reshape_c2 = tf.reshape(c2, [-1, c2_shape]) c2_fc = self.fc_layer(inputs=reshape_c2, num=32, scope=scope + '_fc', ln=True, activation=tf.nn.relu) return c2_fc def conv(self, image, out_channel, kernel, scope=''): with tf.variable_scope('conv_' + scope): channel_shape = image.get_shape()[3].value w = tf.get_variable('w', [kernel, kernel, channel_shape, out_channel], initializer=tf.constant_initializer(1.0)) b = tf.get_variable('b', out_channel, initializer=tf.constant_initializer(0.0)) conv = tf.nn.conv2d(image, w, strides=[1, 2, 2, 1], padding='SAME') + b pool = tf.nn.max_pool(tf.nn.relu(conv), [1, 2, 2, 1], [1, 1, 1, 1], padding='SAME') return pool def layer_norm(self, inputs, shape, scope): with tf.variable_scope(scope): mean, variance = tf.nn.moments(inputs, [1], keep_dims=True) normalised_input = (inputs - mean) / tf.sqrt(variance + 1e-10) # init to make zero mean and unit variance gains = tf.get_variable("norm_gain", shape, initializer=tf.constant_initializer(1.)) biases = tf.get_variable("norm_bias", shape, initializer=tf.constant_initializer(0.)) return normalised_input * gains + biases # only can used at one unit per UNIT_NUM scene def get_type_fc_and_mask(self, input, unit_categoy_batch, unit_attack_mask, type_constant, scope, layer_size): feature_bool_mask = tf.reshape(tf.math.equal(unit_categoy_batch, tf.constant(type_constant)), [-1]) feature_raw = tf.boolean_mask(input, feature_bool_mask) mask = tf.boolean_mask(unit_attack_mask, feature_bool_mask) mask = tf.cast(mask, dtype=tf.int32) feature_raw = self.fc_layer(feature_raw, layer_size, scope, ln=True, activation=tf.nn.relu) return feature_raw, mask def softmax_over_valid_position(self, input_tensor, valid_mask): self.all_exp_values = tf.exp(input_tensor) self.valid_exp_values = tf.multiply(self.all_exp_values, valid_mask) # if no valid action, denominator is zero self.valid_denominator = tf.reduce_sum(self.valid_exp_values, axis=-1, keep_dims=True) return self.valid_exp_values / self.valid_denominator def parameter_head_op(self, input_tensor, scope, fc_size): with tf.variable_scope(scope): feature_head_out = self.fc_layer(input_tensor, fc_size, scope, ln=False, activation=None) return tf.nn.softmax(feature_head_out, axis=-1) def embedding_op(self, input_tensor, scope, type_embedding_name, type_total_count, type_embedding_size): with tf.variable_scope(scope): input_tensor = tf.reshape(input_tensor, [-1]) self.feature_type_embedding = tf.get_variable(type_embedding_name, [type_total_count, 
type_embedding_size]) self.embedded_type = tf.nn.embedding_lookup(self.feature_type_embedding, input_tensor) return self.embedded_type # mask[true, false,....] def add_softmax_mask(self, input_tensor, mask_tensor): int_mask = tf.cast(mask_tensor, dtype=tf.int32) self.scaled_input_tensor = input_tensor + tf.cast((1 - int_mask) * -10000000, dtype=tf.float32) return self.scaled_input_tensor # action entropy def action_entropy(self, p): return -1 * tf.reduce_sum(tf.multiply(p, self.clip_log(p)), -1, keep_dims=True) def attention_with_fc(self, query, key_raw, layer_size, scope, feature_unit_categoy_batch, attention_unit_type, layersize_1, laysize_2): with tf.variable_scope(scope): feature_bool_mask = tf.reshape(tf.math.equal(feature_unit_categoy_batch, tf.constant(attention_unit_type)), [-1]) feature_filtered_unit = tf.where(feature_bool_mask, key_raw, tf.zeros_like(key_raw)) feature_fitered_fc1_out = self.fc_layer( feature_filtered_unit, layersize_1, scope + "_fc1", ln=True, activation=tf.nn.relu) feature_fitered_fc2_out = self.fc_layer( feature_fitered_fc1_out, laysize_2, scope + "_fc2", ln=True, activation=tf.nn.relu) attention_1, _ = self.attention_op(query, feature_fitered_fc2_out, layer_size, scope + "_attention_1", feature_unit_categoy_batch, attention_unit_type) attention_2, _ = self.attention_op(query, feature_fitered_fc2_out, layer_size, scope + "_attention_2", feature_unit_categoy_batch, attention_unit_type) attention_3, _ = self.attention_op(query, feature_fitered_fc2_out, layer_size, scope + "_attention_3", feature_unit_categoy_batch, attention_unit_type) attention_concat = tf.concat([attention_1, attention_2, attention_3], axis=-1) return attention_concat, feature_fitered_fc2_out def get_entropy_and_summary(self, train_selected_action_type_id, action_id, raw_input): attack_action_mask = tf.math.equal(train_selected_action_type_id, tf.constant(action_id, dtype=tf.int32)) masked_out = tf.where(attack_action_mask, raw_input, tf.zeros_like(raw_input)) input_entropy = -1 * tf.reduce_sum(tf.multiply(masked_out, self.clip_log(masked_out)), -1, keepdims=True) attack_action_mask.set_shape([None]) boolen_masked_out = tf.boolean_mask(raw_input, attack_action_mask) entropy_summary = tf.reduce_mean(-1 * tf.reduce_sum(tf.multiply(boolen_masked_out, self.clip_log(boolen_masked_out)), -1)) valid_mask = tf.cast(tf.equal(boolen_masked_out, tf.constant(0, dtype=tf.float32)), dtype=tf.int32) entropy_gradient = tf.reduce_mean( tf.abs(tf.reduce_sum(tf.cast((valid_mask - 1), dtype=tf.float32) * (self.clip_log(boolen_masked_out) + 1), -1))) return input_entropy, entropy_summary, entropy_gradient # query shape:(batch, d), key shape(batch, key_num, d), mask shape (batch, key_num) def attention_op(self, query, key_raw, layer_size, scope, feature_unit_categoy_batch, attention_unit_type): with tf.variable_scope(scope): self.feature_bool_mask = tf.reshape( tf.math.equal(feature_unit_categoy_batch, tf.constant(attention_unit_type)), [-1]) self.feature_filtered_unit = tf.where(self.feature_bool_mask, key_raw, tf.zeros_like(key_raw)) self.reshaped_key = self.feature_filtered_unit self.query_encoded = self.fc_layer(query, layer_size, scope + "_query_layer", ln=False, activation=tf.nn.relu) # reshape to [batch, 1, d] self.query_encoded = tf.reshape(self.query_encoded, [-1, 1, layer_size]) self.key_encoded = self.fc_layer( self.reshaped_key, layer_size, scope + "_key_layer", ln=False, activation=tf.nn.relu) # reshape to [batch, max_unit_count, layer_size] self.key_encoded = tf.reshape(self.key_encoded, [-1, 
self.max_unit_count, layer_size]) self.value_encoded = self.fc_layer( self.reshaped_key, layer_size, scope + "_value_layer", ln=False, activation=tf.nn.relu) self.value_encoded = tf.reshape(self.value_encoded, [-1, self.max_unit_count, layer_size]) # scaled dot-product attention layer_weight = tf.constant(layer_size, dtype=tf.float32) self.query_key_dot_elmentwise = tf.multiply(self.query_encoded, self.key_encoded) self.query_key_dot = tf.reduce_sum( self.query_key_dot_elmentwise, axis=-1, keepdims=False) / tf.math.sqrt(layer_weight) self.feature_bool_mask = tf.reshape(self.feature_bool_mask, [-1, self.max_unit_count]) self.query_key_dot_scaled = self.add_softmax_mask(self.query_key_dot, self.feature_bool_mask) self.query_key_score = tf.nn.softmax(self.query_key_dot_scaled, axis=-1) self.query_key_score = tf.reshape(self.query_key_score, [-1, self.max_unit_count, 1]) self.weighted_value_element = tf.multiply(self.query_key_score, self.value_encoded) # reduce sum over different key, result shape: [batch, layer_size] self.attention_value = tf.reduce_sum(self.weighted_value_element, axis=1, keepdims=False) return self.attention_value, self.query_key_score def attetntion_score(self, query, key_raw, layer_size, scope, units_mask, unit_num): with tf.variable_scope(scope): self.feature_bool_mask = units_mask self.feature_filtered_unit = tf.where(self.feature_bool_mask, key_raw, tf.zeros_like(key_raw)) self.reshaped_key = self.feature_filtered_unit if self.use_dropout: query = tf.nn.dropout(query, self.dropout_keep_prob) self.query_encoded = self.fc_layer(query, layer_size, scope + "_query_layer", ln=False, activation=tf.nn.relu) # reshape to [batch, 1, d] self.query_encoded = tf.reshape(self.query_encoded, [-1, 1, layer_size]) if self.use_dropout: self.reshaped_key = tf.nn.dropout(self.reshaped_key, self.dropout_keep_prob) self.key_encoded = self.fc_layer( self.reshaped_key, layer_size, scope + "_key_layer", ln=False, activation=tf.nn.relu) # reshpa to [batch, key_num, d] self.key_encoded = tf.reshape(self.key_encoded, [-1, unit_num, layer_size]) # scaled dot-product attention layer_weight = tf.constant(layer_size, dtype=tf.float32) self.query_key_dot_elmentwise = tf.multiply(self.query_encoded, self.key_encoded) self.query_key_dot = tf.reduce_sum( self.query_key_dot_elmentwise, axis=-1, keepdims=False) / tf.math.sqrt(layer_weight) self.feature_bool_mask = tf.reshape(self.feature_bool_mask, [-1, unit_num]) self.query_key_dot_scaled = self.add_softmax_mask(self.query_key_dot, self.feature_bool_mask) self.query_key_score = tf.nn.softmax(self.query_key_dot_scaled, axis=-1) return self.query_key_score def mask_fc_max_pool_op(self, input_tensor, feature_unit_categoy_batch, scope, constant_value, fc_lay1_size, fc_lay2_size): with tf.variable_scope(scope): self.feature_bool_mask = tf.reshape(tf.math.equal(feature_unit_categoy_batch, tf.constant(constant_value)), [-1]) self.feature_filtered_unit = tf.where(self.feature_bool_mask, input_tensor, tf.zeros_like(input_tensor)) self.feature_fitered_fc1_out = self.fc_layer( self.feature_filtered_unit, fc_lay1_size, scope + "_fc1", ln=True, activation=tf.nn.relu) feature_fitered_fc2_out = self.fc_layer( self.feature_fitered_fc1_out, fc_lay2_size, scope + "_fc2", ln=True, activation=tf.nn.relu) feature_unit_all_feature_trans = tf.reshape(feature_fitered_fc2_out, [-1, self.max_unit_count, fc_lay2_size]) feature_max_indices = tf.argmax(feature_unit_all_feature_trans, axis=1) indices = tf.cast(feature_max_indices, dtype=tf.int32) indices_shape = 
tf.shape(feature_max_indices) R, _ = tf.meshgrid(tf.range(indices_shape[0]), tf.range(indices_shape[1]), indexing='ij') coords = tf.stack([R, indices], axis=2) # shape [batch, fc_lay2_size, fc_lay2_size] feature_unit_all_feature_trans_selected = tf.gather_nd(feature_unit_all_feature_trans, coords) feature_unit_all_feature_trans_selected = tf.reshape(feature_unit_all_feature_trans_selected, [-1, fc_lay2_size * fc_lay2_size]) return feature_unit_all_feature_trans_selected, feature_fitered_fc2_out # input tensor shape [batch_size, unit_size, XX_size, feature] def max_pool_structure(self, input_tensor, scope, type_embedding_name, type_total_count, type_embedding_size, type_max_shape, type_origin_feature_count, fc_lay1_size, fc_lay2_size): # no reuse for different max_pool_structure with tf.variable_scope(scope): # reshape to 1D tensor self.feature_type = input_tensor[:, :, :, 0] self.feature_other = input_tensor[:, :, :, 1:] # embedding self.feature_embedded_type = self.embedding_op(self.feature_type, type_embedding_name + "_embedding", type_embedding_name, type_total_count + 1, type_embedding_size) self.feature_embedded_type = tf.reshape(self.feature_embedded_type, [-1, self.max_unit_count, type_max_shape, type_embedding_size]) # concat along -1 axis self.feature_other = tf.cast(self.feature_other, tf.float32) self.feature_concat = tf.concat([self.feature_embedded_type, self.feature_other], -1) self.feature_fc1_input = tf.reshape(self.feature_concat, [-1, type_origin_feature_count - 1 + type_embedding_size]) self.feature_fc1_output = tf.layers.dense( inputs=self.feature_fc1_input, units=fc_lay1_size, activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.constant_initializer(0.)) # input shape must be 3-dimension self.feature_input_max_pool = tf.reshape(self.feature_fc1_output, [-1, type_max_shape, fc_lay1_size]) #
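# Minimal round-trip sketch for the pickle-based weight transfer above.  Illustrative
# only: ToyModel is a hypothetical subclass that owns a one-variable graph and a
# session, which is all serializing_with_session()/deserializing() need.  It relies on
# the tensorflow.compat.v1 import (as tf) at the top of this file.
class ToyModel(BasicModel):
    def __init__(self):
        super(ToyModel, self).__init__()
        graph = tf.Graph()
        with graph.as_default():
            tf.get_variable("w", shape=[2, 2], initializer=tf.zeros_initializer())
            init_op = tf.global_variables_initializer()
        self.tf_session = tf.Session(graph=graph)
        self.tf_session.run(init_op)

sender, receiver = ToyModel(), ToyModel()
blob, model_time = sender.serializing_with_session()  # pickled {var_name: ndarray, 'model_time': ..., 'elo': ...}
receiver.deserializing(blob)                          # assigns sender's weights into receiver's graph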
""" Module: softacq.py Description: Interface for the software device acquisition emulation Copyright (c) 2010 JDSU. All rights reserved. $Log: softacq.py $ Revision 83 2013/03/28 00:55:13 +0800 phi58351 /MassTransit/MT1.20.100_Tip/NickReviewed/PendingReview/instrument_list Fixed issue with thread never stopping for time sync on a VM. This is test code and can be enabled later. Revision 82 2013/03/13 14:15:09 -0600 bau58327 /MassTransit/MT1.20.100_Tip/MBauers/TimeSync Misc. changes Revision 81 2013/03/12 15:53:44 -0600 bau58327 /MassTransit/MT1.20.100_Tip/MBauers/MergeWithJacques Misc. bug fixes Revision 80 2013/03/11 15:52:35 -0600 bau58327 /MassTransit/MT1.20.100_Tip/MBauers/TimeSync Working check in- Config should now work as expected Misc. GUI changes Revision 79 2013/03/08 16:16:41 -0700 bau58327 /MassTransit/MT1.20.100_Tip/MBauers/TimeSync Working checkin Revision 78 2013/03/07 12:56:23 -0700 hen55467 /MassTransit/MT1.20.100_Tip/Thomas/Integration Content copied from revision 77 Revision 76 2013/02/28 16:22:12 -0700 bau58327 /MassTransit/MT1.20.100_Tip/MBauers/TimeSync Working check-in Revision 75 2013/02/27 16:25:46 -0700 bau58327 /MassTransit/MT1.20.100_Tip/MBauers/TimeSync Work in progress checkin Revision 73 2012/09/17 17:05:36 -0600 hen55467 /MassTransit/MT1.10.100_Tip_SP1/Integration/SUPS/Python Glue Support hw SUPS config parameters. Revision 72 2012/07/27 14:49:17 -0600 phi58351 /MassTransit/MT1.10.100_Tip/Integration/XMLAPI VM would only capture one block of data. Fixed time records for software acquisition. Revision 71 2012/05/15 08:33:59 -0600 hen55467 deleted(id=31076) Add new boolean m_bSpanBuffer flag to block info structure Revision 70 2012/05/14 15:30:32 -0600 hen55467 /MassTransit/MT1.10.100_Tip/Integration/Thomas Fix a couple syntax errors Revision 69 2012/05/14 15:10:30 -0600 hen55467 /MassTransit/MT1.10.100_Tip/Integration/Thomas due to new queue and callback logic, data and statistics streams have been forced to be done separately Revision 68 2012/05/10 15:23:31 -0600 hen55467 /MassTransit/MT1.10.100_Tip/Integration/Thomas Bug fixes for software acquisition mode + discard data mode Revision 67 2012/05/08 16:29:12 -0600 hen55467 /MassTransit/MT1.10.100_Tip/Integration/Thomas changes for callback mode of the driver Revision 66 2012/03/12 19:31:10 -0600 hen55467 /MassTransit/MT1.00.100_Tip/Integration/Thomas Fix timestamp records. Revision 65 2012/03/12 13:02:26 -0600 hen55467 /MassTransit/MT1.00.100_Tip/Integration/Thomas Fix software aq generated timestamp records to fill in the payloads too. Revision 64 2012/03/01 17:09:02 -0700 hen55467 /MassTransit/MT1.00.100_Tip/Integration/Thomas Send timestamps after data is complete. Revision 62 2012/02/21 12:19:48 -0700 isa49327 /MassTransit/MT1.00.100_Tip/Integration/William/William-dev Change the serial number to start with MY Revision 61 2012/02/08 16:43:24 -0500 isa49327 /MassTransit/MT1.00.100_Tip/Integration/William/William-dev Testing Fixes Revision 60 2012/02/08 14:23:13 -0500 isa49327 /MassTransit/MT1.00.100_Tip/Integration/William/William-dev Proper initialization of card 2s appliance name and serial number Revision 59 2012/02/01 10:39:49 -0500 isa49327 /MassTransit/MT1.00.100_Tip/Integration/William/William-dev Revision 58 2011/12/12 10:27:22 -0500 blo55201 /MassTransit/MT1.00.100_Tip/Integration/Scott Fixed problem with SART not working when connected to PI replaying a PCAP file. Revision 57 2011/12/07 17:52:48 -0700 hen55467 /MassTransit/MT1.00.100_Tip/Integration/Thomas pcaps have fcs by default. 
Revision 56 2011/12/07 12:13:24 -0700 hen55467 /MassTransit/MT1.00.100_Tip/Integration/Thomas Fix syntax error Revision 55 2011/12/07 11:30:39 -0700 hen55467 /MassTransit/MT1.00.100_Tip/Integration/Thomas Automatic merge of parallel changes into revision [53] Revision 54 2011/12/05 18:31:07 -0700 hen55467 /MassTransit/MT1.00.100_Tip/Integration/ForcedMode print out entire active value to the file Revision 53 2011/12/05 16:01:15 -0700 hen55467 /MassTransit/MT1.00.100_Tip/Integration/Thomas Create a switch to correctly handle pcaps without fcs appended to the packets. Revision 52 2011/12/02 15:16:47 -0700 hen55467 /MassTransit/MT1.00.100_Tip/Integration/Thomas Fix lengths in reading from pcaps. Revision 51 2011/12/01 15:57:58 -0700 hen55467 /MassTransit/MT1.00.100_Tip/Integration/Thomas No longer add in a 4 byte crc to the packets. Revision 50 2011/10/17 17:02:16 -0600 hen55467 /MassTransit/MT1.00.100_Tip/Integration/IPReassembly Automatic merge of parallel changes into revision [48] Revision 49 2011/10/14 10:58:57 -0600 hen55467 /MassTransit/MT1.00.100_Tip/Integration/Thomas Make software acquisition create timestamps on virtual machines. Revision 48 2011/10/06 11:14:40 -0600 hen55467 /MassTransit/MT1.00.100_Tip/Integration/IPReassembly Automatic merge of parallel changes into revision [44] Revision 47 2011/10/05 19:46:25 -0600 blo55201 /MassTransit/MT1.00.100_Tip/Integration/Scott Automatic merge of parallel changes into revision [45] Revision 46 2011/10/05 12:54:18 -0600 hen55467 /MassTransit/MT1.00.100_Tip/Integration/Thomas don't busy wait during spawned threads, and insert initial delay into software acquisision Revision 45 2011/10/04 22:39:12 -0600 blo55201 /MassTransit/MT1.00.100_Tip/Integration/Scott Changed code to assume .pcap files do not have an FCS and append 4 byte FCS onto .PCAP frames when they are read in. Revision 44 2011/10/03 17:41:21 -0600 hen55467 /MassTransit/MT1.00.100_Tip/Integration/IPReassembly/options.ip_reassembly Needed changes to propagate GUI setting for ip_reassembly. Revision 43 2011/09/30 16:48:52 -0600 hen55467 /MassTransit/MT1.00.100_Tip/Integration/Thomas Keep timestamp calculations the same to avoid unecessary breaking. Revision 42 2011/09/30 14:18:37 -0600 hen55467 /MassTransit/MT1.00.100_Tip/Integration/Thomas Make software acquisition substitute modern timestamps Revision 41 2011/09/29 11:00:32 -0600 hen55467 /MassTransit/MT1.00.100_Tip/Integration/Thomas Make acquisition work if one of the cards has no ports selected Revision 40 2011/09/20 11:07:05 -0600 hen55467 /MassTransit/MT1.00.100_Tip/Integration/Thomas Fix syntax error: no card_id1 Revision 39 2011/09/20 10:55:16 -0600 hen55467 /MassTransit/MT1.00.100_Tip/Integration/Thomas Add more emulation of settings. Revision 38 2011/09/01 17:12:34 -0600 hen55467 /MassTransit/MT1.00.100_Tip/Integration/Thomas Add some additional watchdog kicks Revision 37 2011/08/30 20:01:35 -0600 hen55467 /MassTransit/MT1.00.100_Tip/Integration/Thomas Make mode change more synchronized with the GUI, and make settings sticky, only using defaults when no previous setting exists. 
Revision 36 2011/07/20 15:59:18 -0600 fel55472 /MassTransit/MT1.00.100_Tip/johnStaging/johnDev added get_card_mode stub Revision 35 2011/06/29 12:30:25 -0600 fel55472 /MassTransit/MT1.00.100_Tip/johnStaging/johnDev fixes for port enable Revision 34 2011/05/04 16:08:52 -0600 fel55472 deleted(id=24412) time sync Revision 33 2011/05/04 09:38:59 -0600 fel55472 /MassTransit/MT1.00.100_Tip/john/hwTest hw_test Revision 32 2011/04/22 16:04:28 -0600 fel55472 /MassTransit/MT1.00.100_Tip/Thomas/john fake store rate handling Revision 31 2011/04/18 16:09:43 -0600 fel55472 /MassTransit/MT1.00.100_Tip/Merge with ModeChange/john added jabber size handling Revision 30 2011/04/15 14:52:51 -0600 fel55472 /MassTransit/MT1.00.100_Tip/Merge with ModeChange/temp2/john_mode_change Automatic merge of parallel changes into revision [28] Revision 29 2011/04/15 14:45:24 -0600 hen55467 /MassTransit/MT1.00.100_Tip/Merge with ModeChange Fix timestamps in software acquisition. Revision 28 2011/04/15 13:55:28 -0600 fel55472 /MassTransit/MT1.00.100_Tip/Merge with ModeChange/john_mode_change Automatic merge of parallel changes into revision [27] Revision 27 2011/04/15 13:44:41 -0600 fel55472 /MassTransit/MT1.00.100_Tip/Merge with ModeChange/john_mode_change added handling for user plane stats, jabber size and time sync added calls down to the hw for store size, et al Revision 26 2011/04/14 14:06:15 -0600 hen55467 /MassTransit/MT1.00.100_Tip/Merge with ModeChange Automatic merge of parallel changes into revision [25] Revision 25 2011/04/14 13:46:45 -0600 hen55467 /MassTransit/MT1.00.100_Tip/Merge with ModeChange Changes to support dual card mode. Revision 24 2011/04/08 11:37:35 -0600 fel55472 /MassTransit/MT1.00.100_Tip/DemoIntegration/john added stubs for configuration functions Revision 23 2011/03/21 14:49:38 -0600 hen55467 /MassTransit/MT1.00.100_Tip/DemoIntegration/Thomas Fix bug where the timestamp does not force the end of a block. Revision 22 2011/03/21 12:14:18 -0600 hen55467 /MassTransit/MT1.00.100_Tip/DemoIntegration/Thomas Tweak recd file header logic and log messages. Revision 21 2011/03/17 14:50:38 -0600 hen55467 /MassTransit/MT1.00.100_Tip/DemoIntegration/Thomas Make running with hardware acquisition smoother. Revision 20 2011/03/17 12:16:34 -0600 hen55467 /MassTransit/MT1.00.100_Tip/DemoIntegration/Thomas Get hw acquisition working again. Revision 19 2011/03/11 16:13:10 -0700 blo55201 /MassTransit/MT1.00.100_Tip/DemoIntegration/Scott hooked up softacq_filename from defaults so it now uses that filename. Revision 18 2011/03/09 13:30:06 -0700 blo55201 /MassTransit/MT1.00.100_Tip/DemoIntegration/Scott Automatic merge of parallel changes into revision [15] Revision 17 2011/03/08 15:19:10 -0700 hen55467 /MassTransit/MT1.00.100_Tip/DemoIntegration/Thomas Set the correct header timestamps. Revision 16 2011/03/08 11:04:47 -0700 hen55467 /MassTransit/MT1.00.100_Tip/DemoIntegration/Thomas Niceify logging output so that after the first 100 blocks and packets, logging will be logrithmically suppressed Revision 15 2011/03/07 15:43:26 -0700 blo55201 /MassTransit/MT1.00.100_Tip/DemoIntegration/Scott Automatic merge of parallel changes into revision [14] Revision 14 2011/03/07 15:12:53 -0700 blo55201 /MassTransit/MT1.00.100_Tip/DemoIntegration/Scott Added end of .rec file detection Revision 13 2011/03/07 14:44:03 -0700 hen55467 /MassTransit/MT1.00.100_Tip/DemoIntegration/Thomas Made the software acquisition module monotonically increment timestamps. 
Revision 12 2011/03/07 10:46:58 -0700 hen55467 /MassTransit/MT1.00.100_Tip/DemoIntegration/Thomas Add repeat_soft_acq_file default to defaults.py Revision 11 2011/03/04 14:23:41 -0700 hen55467 /MassTransit/MT1.00.100_Tip/DemoIntegration/Thomas change print statements to log statements. Revision 10 2011/03/04 13:28:52 -0700 hen55467 /MassTransit/MT1.00.100_Tip/DemoIntegration/Thomas Automatic merge of parallel changes into revision [9] Revision 9 2011/03/04 11:28:05 -0700 hen55467 /MassTransit/MT1.00.100_Tip/DemoIntegration/Thomas Refactored options and default options throughout the code. Control plane follower is now off by default. Revision 8 2011/03/03 14:53:57 -0700 blo55201 /MassTransit/MT1.00.100_Tip/DemoIntegration/Scott fixed issue in rec file repaly. Revision 7 2011/03/03 10:25:08 -0700 blo55201 /MassTransit/MT1.00.100_Tip/DemoIntegration/Scott Automatic merge of parallel changes into revision [5] Revision 6 2011/03/02 17:01:14 -0700 hen55467 /MassTransit/MT1.00.100_Tip/DemoIntegration/Thomas Refactored to read the recdopt.h file and construct the python interface structures dynamically. Revision 5 2011/02/22 14:18:21 -0700 blo55201 /MassTransit/MT1.00.100_Tip/DemoIntegration/Scott Changed default back to basic.pcap Revision 4 2011/02/22 14:16:20 -0700 blo55201 /MassTransit/MT1.00.100_Tip/DemoIntegration/Scott Fixed repeat logic Revision 3 2011/02/22 13:44:08 -0700 blo55201 /MassTransit/MT1.00.100_Tip/DemoIntegration/Scott Implemented .rec file reading Revision 2 2010/12/13 15:44:10 -0700 hen55467 /MassTransit/MT1.00.100_Tip/DemoIntegration/Thomas /softacq.py Revision 1 2010/11/12 16:50:15 -0700 hen55467 /Rocket/6.70.100_Tip/COS/Mass Transit Needed for the software acquisition module. """ import os import traceback import thread from genmocktraf import read_binary import ctypes import RtsdSupport import time import logging logger = logging.getLogger('recd') from block_info import BLOCK_INFO, BLOCK_INFO_PTR, work_q_empty_proto, work_q_front_proto from block_info import work_q_pop_front_proto, free_q_push_back_proto, QTYPE_DATA, QTYPE_STATISTICS from recdopt_context import * from buffer_access import * from timesyncdbupdater import * import math create_timestamps = False pcap_fcs = True; class card_sim_type(object): def __init__(self): self.blocks = None self.block_num = 0 self.packet_count = 0 self.last_packet_count_log = 0 self.index_mod2 = 0 self.dc_mode = 0 self.ready = False self.null_pointer = 0 self.work_q_data_arrival_f = None self.front_pointer = None self.working_pointer = None self.temp_block_info = BLOCK_INFO() self.buffer_size = 1024*1024 self.block_info_ptr = ctypes.addressof(self.temp_block_info) logger.info('For comparison, contents of self.block_info_ptr: %s' % (self.block_info_ptr,)) self.context_string = ctypes.create_string_buffer(self.buffer_size+1) ptr = ctypes.pointer(self.context_string) ptr_type = ctypes.POINTER(ctypes.c_uint8) self.temp_block_info.m_pData = ctypes.cast(ptr, ptr_type) for char_pos in range(0, self.buffer_size): self.context_string[char_pos] = '\0' self.context_string[self.buffer_size] = '\0' self.temp_block_info.m_nByteLength = 0 self.temp_block_info.m_nFrameNumber = 0 self.temp_block_info.m_nFrameCount = 0 self.temp_block_info.m_bBufferSpan = False def work_q_data_arrival(self, card, qtype): if not self.ready: return if self.work_q_data_arrival_f: self.work_q_data_arrival_f(card, qtype) def work_q_empty(self, card, qtype): if qtype != QTYPE_DATA: return 1 if not self.ready: return 1 if card == 2: return 1 if card == 1: if 
self.working_pointer: return 1 if not self.front_pointer: try: next_block = self.blocks.next() self.block_num += 1 except StopIteration: logger.info("next_block StopIteration in _py_disk_read after %s blocks" % block_num) self.ready = False return 1 if next_block.good: count = 0 self.packet_count += 1 self.temp_block_info.m_nByteLength = next_block.length self.temp_block_info.m_nFrameNumber = next_block.frame_num self.temp_block_info.m_nFrameCount = next_block.frame_count if (math.floor(math.log10(self.packet_count)) > self.last_packet_count_log) or (self.packet_count < 10): self.last_packet_count_log = math.floor(math.log10(self.packet_count)) logger.debug("buffer #%s byte_length from
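# Stand-alone sketch (illustrative names only) of the ctypes plumbing card_sim_type
# uses above: back the emulated block payload with a zero-filled string buffer and
# hand the driver a uint8 pointer into the same memory, as BLOCK_INFO.m_pData expects.
import ctypes  # already imported by this module; repeated so the snippet stands alone

BUF_SIZE = 1024 * 1024
payload = ctypes.create_string_buffer(BUF_SIZE + 1)                 # zero-filled, writable
payload_u8 = ctypes.cast(ctypes.pointer(payload),
                         ctypes.POINTER(ctypes.c_uint8))            # same memory, uint8 view

payload_u8[0] = 0xAB                                                 # write through the pointer...
assert ctypes.string_at(payload, 1) == b'\xab'                       # ...and see it in the buffer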
<reponame>HyeonseoJUNG/DB2_TermPJT # -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'recommand_page2.ui' # # Created by: PyQt5 UI code generator 5.15.6 # # WARNING: Any manual changes made to this file will be lost when pyuic5 is # run again. Do not edit this file unless you know what you are doing. from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName("MainWindow") MainWindow.resize(1101, 915) MainWindow.setStyleSheet("background-color: rgb(0, 0, 0);") self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setAutoFillBackground(False) self.centralwidget.setObjectName("centralwidget") self.label = QtWidgets.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(10, 10, 31, 41)) self.label.setStyleSheet("background-color: rgb(255, 255, 255);") self.label.setText("") self.label.setObjectName("label") self.label_2 = QtWidgets.QLabel(self.centralwidget) self.label_2.setGeometry(QtCore.QRect(60, 10, 31, 41)) self.label_2.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_2.setText("") self.label_2.setObjectName("label_2") self.label_3 = QtWidgets.QLabel(self.centralwidget) self.label_3.setGeometry(QtCore.QRect(110, 10, 31, 41)) self.label_3.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_3.setText("") self.label_3.setObjectName("label_3") self.label_4 = QtWidgets.QLabel(self.centralwidget) self.label_4.setGeometry(QtCore.QRect(160, 10, 31, 41)) self.label_4.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_4.setText("") self.label_4.setObjectName("label_4") self.label_5 = QtWidgets.QLabel(self.centralwidget) self.label_5.setGeometry(QtCore.QRect(210, 10, 31, 41)) self.label_5.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_5.setText("") self.label_5.setObjectName("label_5") self.label_6 = QtWidgets.QLabel(self.centralwidget) self.label_6.setGeometry(QtCore.QRect(260, 10, 31, 41)) self.label_6.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_6.setText("") self.label_6.setObjectName("label_6") self.label_7 = QtWidgets.QLabel(self.centralwidget) self.label_7.setGeometry(QtCore.QRect(310, 10, 31, 41)) self.label_7.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_7.setText("") self.label_7.setObjectName("label_7") self.label_8 = QtWidgets.QLabel(self.centralwidget) self.label_8.setGeometry(QtCore.QRect(360, 10, 31, 41)) self.label_8.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_8.setText("") self.label_8.setObjectName("label_8") self.label_9 = QtWidgets.QLabel(self.centralwidget) self.label_9.setGeometry(QtCore.QRect(410, 10, 31, 41)) self.label_9.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_9.setText("") self.label_9.setObjectName("label_9") self.label_10 = QtWidgets.QLabel(self.centralwidget) self.label_10.setGeometry(QtCore.QRect(460, 10, 31, 41)) self.label_10.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_10.setText("") self.label_10.setObjectName("label_10") self.label_11 = QtWidgets.QLabel(self.centralwidget) self.label_11.setGeometry(QtCore.QRect(510, 10, 31, 41)) self.label_11.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_11.setText("") self.label_11.setObjectName("label_11") self.label_14 = QtWidgets.QLabel(self.centralwidget) self.label_14.setGeometry(QtCore.QRect(110, 860, 31, 41)) self.label_14.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_14.setText("") 
self.label_14.setObjectName("label_14") self.label_15 = QtWidgets.QLabel(self.centralwidget) self.label_15.setGeometry(QtCore.QRect(260, 860, 31, 41)) self.label_15.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_15.setText("") self.label_15.setObjectName("label_15") self.label_16 = QtWidgets.QLabel(self.centralwidget) self.label_16.setGeometry(QtCore.QRect(160, 860, 31, 41)) self.label_16.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_16.setText("") self.label_16.setObjectName("label_16") self.label_17 = QtWidgets.QLabel(self.centralwidget) self.label_17.setGeometry(QtCore.QRect(510, 860, 31, 41)) self.label_17.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_17.setText("") self.label_17.setObjectName("label_17") self.label_18 = QtWidgets.QLabel(self.centralwidget) self.label_18.setGeometry(QtCore.QRect(10, 860, 31, 41)) self.label_18.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_18.setText("") self.label_18.setObjectName("label_18") self.label_19 = QtWidgets.QLabel(self.centralwidget) self.label_19.setGeometry(QtCore.QRect(410, 860, 31, 41)) self.label_19.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_19.setText("") self.label_19.setObjectName("label_19") self.label_20 = QtWidgets.QLabel(self.centralwidget) self.label_20.setGeometry(QtCore.QRect(210, 860, 31, 41)) self.label_20.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_20.setText("") self.label_20.setObjectName("label_20") self.label_22 = QtWidgets.QLabel(self.centralwidget) self.label_22.setGeometry(QtCore.QRect(310, 860, 31, 41)) self.label_22.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_22.setText("") self.label_22.setObjectName("label_22") self.label_23 = QtWidgets.QLabel(self.centralwidget) self.label_23.setGeometry(QtCore.QRect(460, 860, 31, 41)) self.label_23.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_23.setText("") self.label_23.setObjectName("label_23") self.label_25 = QtWidgets.QLabel(self.centralwidget) self.label_25.setGeometry(QtCore.QRect(60, 860, 31, 41)) self.label_25.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_25.setText("") self.label_25.setObjectName("label_25") self.label_26 = QtWidgets.QLabel(self.centralwidget) self.label_26.setGeometry(QtCore.QRect(360, 860, 31, 41)) self.label_26.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_26.setText("") self.label_26.setObjectName("label_26") self.label_12 = QtWidgets.QLabel(self.centralwidget) self.label_12.setGeometry(QtCore.QRect(460, 90, 221, 31)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(14) font.setBold(True) font.setWeight(75) self.label_12.setFont(font) self.label_12.setStyleSheet("color: rgb(255, 255, 255);") self.label_12.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.label_12.setObjectName("label_12") self.label_13 = QtWidgets.QLabel(self.centralwidget) self.label_13.setGeometry(QtCore.QRect(50, 160, 341, 21)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(14) font.setBold(True) font.setWeight(75) self.label_13.setFont(font) self.label_13.setStyleSheet("color: rgb(255, 255, 255);") self.label_13.setAlignment(QtCore.Qt.AlignCenter) self.label_13.setObjectName("label_13") self.label_21 = QtWidgets.QLabel(self.centralwidget) self.label_21.setGeometry(QtCore.QRect(380, 90, 61, 31)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(14) font.setBold(True) font.setWeight(75) 
self.label_21.setFont(font) self.label_21.setStyleSheet("color: rgb(255, 255, 255);") self.label_21.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_21.setObjectName("label_21") self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget) self.pushButton_3.setGeometry(QtCore.QRect(10, 70, 81, 61)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(18) self.pushButton_3.setFont(font) self.pushButton_3.setStyleSheet("background-color: rgb(122, 122, 122);\n" "color: rgb(255, 255, 255);") self.pushButton_3.setObjectName("pushButton_3") self.graphicsView = QtWidgets.QGraphicsView(self.centralwidget) self.graphicsView.setGeometry(QtCore.QRect(70, 200, 171, 211)) self.graphicsView.setObjectName("graphicsView") self.label_24 = QtWidgets.QLabel(self.centralwidget) self.label_24.setGeometry(QtCore.QRect(70, 420, 171, 31)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(8) font.setBold(False) font.setWeight(50) self.label_24.setFont(font) self.label_24.setStyleSheet("color: rgb(255, 255, 255);") self.label_24.setAlignment(QtCore.Qt.AlignCenter) self.label_24.setObjectName("label_24") self.label_31 = QtWidgets.QLabel(self.centralwidget) self.label_31.setGeometry(QtCore.QRect(70, 460, 171, 21)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(8) font.setBold(False) font.setWeight(50) self.label_31.setFont(font) self.label_31.setStyleSheet("color: rgb(255, 255, 255);") self.label_31.setAlignment(QtCore.Qt.AlignCenter) self.label_31.setObjectName("label_31") self.label_36 = QtWidgets.QLabel(self.centralwidget) self.label_36.setGeometry(QtCore.QRect(610, 10, 31, 41)) self.label_36.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_36.setText("") self.label_36.setObjectName("label_36") self.label_37 = QtWidgets.QLabel(self.centralwidget) self.label_37.setGeometry(QtCore.QRect(560, 10, 31, 41)) self.label_37.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_37.setText("") self.label_37.setObjectName("label_37") self.label_38 = QtWidgets.QLabel(self.centralwidget) self.label_38.setGeometry(QtCore.QRect(660, 10, 31, 41)) self.label_38.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_38.setText("") self.label_38.setObjectName("label_38") self.label_39 = QtWidgets.QLabel(self.centralwidget) self.label_39.setGeometry(QtCore.QRect(560, 860, 31, 41)) self.label_39.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_39.setText("") self.label_39.setObjectName("label_39") self.label_40 = QtWidgets.QLabel(self.centralwidget) self.label_40.setGeometry(QtCore.QRect(660, 860, 31, 41)) self.label_40.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_40.setText("") self.label_40.setObjectName("label_40") self.label_41 = QtWidgets.QLabel(self.centralwidget) self.label_41.setGeometry(QtCore.QRect(610, 860, 31, 41)) self.label_41.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_41.setText("") self.label_41.setObjectName("label_41") self.label_44 = QtWidgets.QLabel(self.centralwidget) self.label_44.setGeometry(QtCore.QRect(60, 510, 281, 21)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(14) font.setBold(True) font.setWeight(75) self.label_44.setFont(font) self.label_44.setStyleSheet("color: rgb(255, 255, 255);") self.label_44.setAlignment(QtCore.Qt.AlignCenter) self.label_44.setObjectName("label_44") self.graphicsView_2 = QtWidgets.QGraphicsView(self.centralwidget) self.graphicsView_2.setGeometry(QtCore.QRect(260, 200, 171, 211)) 
self.graphicsView_2.setObjectName("graphicsView_2") self.graphicsView_3 = QtWidgets.QGraphicsView(self.centralwidget) self.graphicsView_3.setGeometry(QtCore.QRect(640, 200, 171, 211)) self.graphicsView_3.setObjectName("graphicsView_3") self.graphicsView_4 = QtWidgets.QGraphicsView(self.centralwidget) self.graphicsView_4.setGeometry(QtCore.QRect(450, 200, 171, 211)) self.graphicsView_4.setObjectName("graphicsView_4") self.graphicsView_5 = QtWidgets.QGraphicsView(self.centralwidget) self.graphicsView_5.setGeometry(QtCore.QRect(830, 200, 171, 211)) self.graphicsView_5.setObjectName("graphicsView_5") self.label_27 = QtWidgets.QLabel(self.centralwidget) self.label_27.setGeometry(QtCore.QRect(260, 420, 171, 31)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(8) font.setBold(False) font.setWeight(50) self.label_27.setFont(font) self.label_27.setStyleSheet("color: rgb(255, 255, 255);") self.label_27.setAlignment(QtCore.Qt.AlignCenter) self.label_27.setObjectName("label_27") self.label_32 = QtWidgets.QLabel(self.centralwidget) self.label_32.setGeometry(QtCore.QRect(260, 460, 171, 21)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(8) font.setBold(False) font.setWeight(50) self.label_32.setFont(font) self.label_32.setStyleSheet("color: rgb(255, 255, 255);") self.label_32.setAlignment(QtCore.Qt.AlignCenter) self.label_32.setObjectName("label_32") self.label_33 = QtWidgets.QLabel(self.centralwidget) self.label_33.setGeometry(QtCore.QRect(640, 460, 171, 21)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(8) font.setBold(False) font.setWeight(50) self.label_33.setFont(font) self.label_33.setStyleSheet("color: rgb(255, 255, 255);") self.label_33.setAlignment(QtCore.Qt.AlignCenter) self.label_33.setObjectName("label_33") self.label_28 = QtWidgets.QLabel(self.centralwidget) self.label_28.setGeometry(QtCore.QRect(640, 420, 171, 31)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(8) font.setBold(False) font.setWeight(50) self.label_28.setFont(font) self.label_28.setStyleSheet("color: rgb(255, 255, 255);") self.label_28.setAlignment(QtCore.Qt.AlignCenter) self.label_28.setObjectName("label_28") self.label_29 = QtWidgets.QLabel(self.centralwidget) self.label_29.setGeometry(QtCore.QRect(450, 420, 171, 31)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(8) font.setBold(False) font.setWeight(50) self.label_29.setFont(font) self.label_29.setStyleSheet("color: rgb(255, 255, 255);") self.label_29.setAlignment(QtCore.Qt.AlignCenter) self.label_29.setObjectName("label_29") self.label_34 = QtWidgets.QLabel(self.centralwidget) self.label_34.setGeometry(QtCore.QRect(450, 460, 171, 21)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(8) font.setBold(False) font.setWeight(50) self.label_34.setFont(font) self.label_34.setStyleSheet("color: rgb(255, 255, 255);") self.label_34.setAlignment(QtCore.Qt.AlignCenter) self.label_34.setObjectName("label_34") self.label_35 = QtWidgets.QLabel(self.centralwidget) self.label_35.setGeometry(QtCore.QRect(830, 460, 171, 21)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(8) font.setBold(False) font.setWeight(50) self.label_35.setFont(font) self.label_35.setStyleSheet("color: rgb(255, 255, 255);") self.label_35.setAlignment(QtCore.Qt.AlignCenter) self.label_35.setObjectName("label_35") self.label_30 = QtWidgets.QLabel(self.centralwidget) self.label_30.setGeometry(QtCore.QRect(830, 420, 171, 31)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(8) font.setBold(False) 
font.setWeight(50) self.label_30.setFont(font) self.label_30.setStyleSheet("color: rgb(255, 255, 255);") self.label_30.setAlignment(QtCore.Qt.AlignCenter) self.label_30.setObjectName("label_30") self.label_42 = QtWidgets.QLabel(self.centralwidget) self.label_42.setGeometry(QtCore.QRect(450, 810, 171, 21)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(8) font.setBold(False) font.setWeight(50) self.label_42.setFont(font) self.label_42.setStyleSheet("color: rgb(255, 255, 255);") self.label_42.setAlignment(QtCore.Qt.AlignCenter) self.label_42.setObjectName("label_42") self.graphicsView_6 = QtWidgets.QGraphicsView(self.centralwidget) self.graphicsView_6.setGeometry(QtCore.QRect(450, 550, 171, 211)) self.graphicsView_6.setObjectName("graphicsView_6") self.graphicsView_7 = QtWidgets.QGraphicsView(self.centralwidget) self.graphicsView_7.setGeometry(QtCore.QRect(830, 550, 171, 211)) self.graphicsView_7.setObjectName("graphicsView_7") self.label_43 = QtWidgets.QLabel(self.centralwidget) self.label_43.setGeometry(QtCore.QRect(830, 810, 171, 21)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(8) font.setBold(False) font.setWeight(50) self.label_43.setFont(font) self.label_43.setStyleSheet("color: rgb(255, 255, 255);") self.label_43.setAlignment(QtCore.Qt.AlignCenter) self.label_43.setObjectName("label_43") self.graphicsView_8 = QtWidgets.QGraphicsView(self.centralwidget) self.graphicsView_8.setGeometry(QtCore.QRect(260, 550, 171, 211)) self.graphicsView_8.setObjectName("graphicsView_8") self.label_45 = QtWidgets.QLabel(self.centralwidget) self.label_45.setGeometry(QtCore.QRect(260, 810, 171, 21)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(8) font.setBold(False) font.setWeight(50) self.label_45.setFont(font) self.label_45.setStyleSheet("color: rgb(255, 255, 255);") self.label_45.setAlignment(QtCore.Qt.AlignCenter) self.label_45.setObjectName("label_45") self.label_46 = QtWidgets.QLabel(self.centralwidget) self.label_46.setGeometry(QtCore.QRect(260, 770, 171, 31)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(8) font.setBold(False) font.setWeight(50) self.label_46.setFont(font) self.label_46.setStyleSheet("color: rgb(255, 255, 255);") self.label_46.setAlignment(QtCore.Qt.AlignCenter) self.label_46.setObjectName("label_46") self.label_47 = QtWidgets.QLabel(self.centralwidget) self.label_47.setGeometry(QtCore.QRect(640, 810, 171, 21)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(8) font.setBold(False) font.setWeight(50) self.label_47.setFont(font) self.label_47.setStyleSheet("color: rgb(255, 255, 255);") self.label_47.setAlignment(QtCore.Qt.AlignCenter) self.label_47.setObjectName("label_47") self.label_48 = QtWidgets.QLabel(self.centralwidget) self.label_48.setGeometry(QtCore.QRect(70, 770, 171, 31)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(8) font.setBold(False) font.setWeight(50) self.label_48.setFont(font) self.label_48.setStyleSheet("color: rgb(255, 255, 255);") self.label_48.setAlignment(QtCore.Qt.AlignCenter) self.label_48.setObjectName("label_48") self.graphicsView_9 = QtWidgets.QGraphicsView(self.centralwidget) self.graphicsView_9.setGeometry(QtCore.QRect(640, 550, 171, 211)) self.graphicsView_9.setObjectName("graphicsView_9") self.graphicsView_10 = QtWidgets.QGraphicsView(self.centralwidget) self.graphicsView_10.setGeometry(QtCore.QRect(70, 550, 171, 211)) self.graphicsView_10.setObjectName("graphicsView_10") self.label_49 = QtWidgets.QLabel(self.centralwidget) 
self.label_49.setGeometry(QtCore.QRect(830, 770, 171, 31)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(8) font.setBold(False) font.setWeight(50) self.label_49.setFont(font) self.label_49.setStyleSheet("color: rgb(255, 255, 255);") self.label_49.setAlignment(QtCore.Qt.AlignCenter) self.label_49.setObjectName("label_49") self.label_50 = QtWidgets.QLabel(self.centralwidget) self.label_50.setGeometry(QtCore.QRect(450, 770, 171, 31)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(8) font.setBold(False) font.setWeight(50) self.label_50.setFont(font) self.label_50.setStyleSheet("color: rgb(255, 255, 255);") self.label_50.setAlignment(QtCore.Qt.AlignCenter) self.label_50.setObjectName("label_50") self.label_51 = QtWidgets.QLabel(self.centralwidget) self.label_51.setGeometry(QtCore.QRect(640, 770, 171, 31)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(8) font.setBold(False) font.setWeight(50) self.label_51.setFont(font) self.label_51.setStyleSheet("color: rgb(255, 255, 255);") self.label_51.setAlignment(QtCore.Qt.AlignCenter) self.label_51.setObjectName("label_51") self.label_52 = QtWidgets.QLabel(self.centralwidget) self.label_52.setGeometry(QtCore.QRect(70, 810, 171, 21)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(8) font.setBold(False) font.setWeight(50) self.label_52.setFont(font) self.label_52.setStyleSheet("color: rgb(255, 255, 255);") self.label_52.setAlignment(QtCore.Qt.AlignCenter) self.label_52.setObjectName("label_52") self.label_53 = QtWidgets.QLabel(self.centralwidget) self.label_53.setGeometry(QtCore.QRect(910, 860, 31, 41)) self.label_53.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_53.setText("") self.label_53.setObjectName("label_53") self.label_54 = QtWidgets.QLabel(self.centralwidget) self.label_54.setGeometry(QtCore.QRect(860, 860, 31, 41)) self.label_54.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_54.setText("") self.label_54.setObjectName("label_54") self.label_55 = QtWidgets.QLabel(self.centralwidget) self.label_55.setGeometry(QtCore.QRect(810, 860, 31, 41)) self.label_55.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_55.setText("") self.label_55.setObjectName("label_55") self.label_56 = QtWidgets.QLabel(self.centralwidget) self.label_56.setGeometry(QtCore.QRect(710, 860, 31, 41)) self.label_56.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_56.setText("") self.label_56.setObjectName("label_56") self.label_57 = QtWidgets.QLabel(self.centralwidget) self.label_57.setGeometry(QtCore.QRect(760, 860, 31, 41)) self.label_57.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_57.setText("") self.label_57.setObjectName("label_57") self.label_58 = QtWidgets.QLabel(self.centralwidget) self.label_58.setGeometry(QtCore.QRect(1060, 860, 31, 41)) self.label_58.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_58.setText("") self.label_58.setObjectName("label_58") self.label_59 = QtWidgets.QLabel(self.centralwidget) self.label_59.setGeometry(QtCore.QRect(1010, 860, 31, 41)) self.label_59.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_59.setText("") self.label_59.setObjectName("label_59") self.label_60 = QtWidgets.QLabel(self.centralwidget) self.label_60.setGeometry(QtCore.QRect(960, 860, 31, 41)) self.label_60.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_60.setText("") self.label_60.setObjectName("label_60") self.label_61 = QtWidgets.QLabel(self.centralwidget) 
self.label_61.setGeometry(QtCore.QRect(960, 10, 31, 41)) self.label_61.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_61.setText("") self.label_61.setObjectName("label_61") self.label_62 = QtWidgets.QLabel(self.centralwidget) self.label_62.setGeometry(QtCore.QRect(1010, 10, 31, 41)) self.label_62.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_62.setText("") self.label_62.setObjectName("label_62") self.label_63 = QtWidgets.QLabel(self.centralwidget) self.label_63.setGeometry(QtCore.QRect(910, 10, 31, 41)) self.label_63.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_63.setText("") self.label_63.setObjectName("label_63") self.label_64 = QtWidgets.QLabel(self.centralwidget) self.label_64.setGeometry(QtCore.QRect(710, 10, 31, 41)) self.label_64.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_64.setText("") self.label_64.setObjectName("label_64") self.label_65 = QtWidgets.QLabel(self.centralwidget) self.label_65.setGeometry(QtCore.QRect(860, 10, 31, 41)) self.label_65.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_65.setText("") self.label_65.setObjectName("label_65") self.label_66 = QtWidgets.QLabel(self.centralwidget) self.label_66.setGeometry(QtCore.QRect(810, 10, 31, 41)) self.label_66.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_66.setText("") self.label_66.setObjectName("label_66") self.label_67 = QtWidgets.QLabel(self.centralwidget) self.label_67.setGeometry(QtCore.QRect(760, 10, 31, 41)) self.label_67.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_67.setText("") self.label_67.setObjectName("label_67") self.label_68 = QtWidgets.QLabel(self.centralwidget) self.label_68.setGeometry(QtCore.QRect(1060, 10, 31, 41)) self.label_68.setStyleSheet("background-color: rgb(255, 255, 255);") self.label_68.setText("") self.label_68.setObjectName("label_68") # MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) self.pushButton_3.clicked.connect(MainWindow.goMain) # type: ignore QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow")) self.label_12.setText(_translate("MainWindow", "\'s recommendation")) self.label_13.setText(_translate("MainWindow", "Customized movies Best 5")) self.label_21.setText(_translate("MainWindow", "ID")) self.pushButton_3.setText(_translate("MainWindow", "←")) self.label_24.setText(_translate("MainWindow", "영화 제목")) self.label_31.setText(_translate("MainWindow", "장르")) self.label_44.setText(_translate("MainWindow",
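# Typical driver for a pyuic5-generated class such as Ui_MainWindow above (a sketch,
# not part of the generated file).  The window class must provide goMain because
# setupUi wires pushButton_3 ("←") to it; here it just closes the window as a
# placeholder for switching back to the application's main page.
import sys
from PyQt5 import QtWidgets

class RecommendPage2(QtWidgets.QMainWindow):
    def __init__(self):
        super(RecommendPage2, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)

    def goMain(self):
        self.close()  # the real application would navigate to its main page here

if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    window = RecommendPage2()
    window.show()
    sys.exit(app.exec_())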
from parse import Lexer, Parser, Token, State, NFA, Handler, HandlerTree, NFATreeNode from random import randrange import numpy as np from functools import reduce import os import re bio_graph = "alibaba.graph.txt" samplegraph = "papergraph.txt" sampleq = "testqueriespaper.txt" sampleMS = "testMSqueries.txt" randomqueries = "samplerandomqueries.txt" bio_queries = "bio_queries.txt" bio_queries_1S = "bio_queries_single_src.txt" # from regex github project def compile(p, debug=False): def print_tokens(tokens): for t in tokens: print(t) lexer = Lexer(p) parser = Parser(lexer) tokens = parser.parse() handler = Handler() if debug: print_tokens(tokens) nfa_stack = [] for t in tokens: handler.handlers[t.name](t, nfa_stack) assert len(nfa_stack) == 1 return nfa_stack.pop() def makeParseTree(p, debug=False): def print_tokens(tokens): for t in tokens: print(t) lexer = Lexer(p) parser = Parser(lexer) tokens = parser.parse() handler = HandlerTree() if debug: print_tokens(tokens) nfa_stack = [] for t in tokens: handler.handlers[t.name](t, nfa_stack) assert len(nfa_stack) == 1 return nfa_stack.pop() def loadgraph(gfname): ''' load graph from file input: file where each line has an edge as: node1 node2 label output: graph data structure: dict{ node:[(node2,label), (node3,label)...], node2:[] ...} ''' grafile = open(gfname) thegraph = dict() cnt = 0 for line in grafile: cnt += 1 if (cnt % 10000 == 0): print(cnt) if (len(line) <= 1): continue tup = line.split() node1, node2, label = tup[0], tup[1], tup[2] thegraph.setdefault(node1, []).append((node2, label)) thegraph.setdefault(node2, []) grafile.close() return thegraph def loadgraphTxt(edgeList): ''' load graph from a list of edges node1, edgelabel, node2 input: list of tuples (node1, label, node2) output: graph data structure: dict{ node:[(node2,label), (node3,label)...], node2:[] ...} ''' thegraph = dict() cnt = 0 for tup in edgeList: cnt += 1 if (cnt % 10000 == 0): print(cnt) node1, label, node2 = tup[0], tup[1], tup[2] thegraph.setdefault(node1, []).append((node2, label)) thegraph.setdefault(node2, []) return thegraph def inoutdegrees(g): ingraph = dict() for node in g.keys(): for (othernode, label) in g[node]: ingraph.setdefault(othernode, []).append((node, label)) ingraph.setdefault(node, []) inout = [] for node in g.keys(): outdeg = len(g[node]) indeg = len(ingraph[node]) inout.append((indeg, outdeg)) return inout def reducerfun(t1, t2): # reducer function # expects two tuples of length 3, merges them if the 3rd element is the same, if (t1[-1][2] == t2[2]): # first element is a list of 3-element tuples, second is a 3-element tuple middle = t1[-1][1] # set of values in middle for given value in 3rd middle.add(t2[1]) # add value from second tuple return t1[:-1] + [(t1[-1][0], middle, t1[-1][2])] else: return t1 + [(t2[0], set(t2[1]), t2[2])] def bfs(graph, NFA, start): ''' This is the main algorithm where the product automaton is constructed and searched on the fly. 
''' visited, queue = set(), [(start, NFA.start)] edgelist = [] # list of traversed edges graphsolutions = set() broadcasts = set() while queue: vertex = queue.pop(0) if vertex not in visited: visited.add(vertex) vgraph, vautom = vertex # this step to be modified to follow specific edge labels # if vertex is terminal, add to solution list if (vautom.is_end): graphsolutions.add(vgraph) # get epsilon-transitions eps_states = [(vgraph, veps) for veps in vautom.epsilon] trans_states = [] # get labeled transitions # record them as candidate broadcasts bccandidates = [(vgraph, lbl, vautom.transitions[lbl].name) for lbl in vautom.transitions.keys()] if (len(bccandidates) > 0): bccandidates.sort(key=lambda tup: tup[2]) # sort by destination initial = bccandidates.pop(0) # initial value is removed from list reducedbc = reduce(reducerfun, bccandidates, [ (initial[0], set([initial[1]]), initial[2])]) # build reducing initializer from initial tuple # reducedbc is a list of tuples (fromnode, set(labels), to automstate) reducedhashable = [(tup[0], "|".join(sorted(list(tup[1])))) for tup in reducedbc] # now list of tuples (fromnode, "l1|l2|l3...|ln") -> to automstate left out broadcasts.update(reducedhashable) # for each neighbouring node in graph for (vg2, outlabel) in graph[vgraph]: if (outlabel in vautom.transitions): vautom2 = vautom.transitions[outlabel] trans_states.append((vg2, vautom2)) edgelist.append((vgraph, outlabel, vg2)) trans_states.extend(eps_states) queue.extend([s for s in trans_states if s not in visited]) return graphsolutions, visited, list( set(edgelist)), broadcasts # set of graph nodes in terminal nodes of product automaton; list of visited nodes; list of traversed edges; list of broadcast queries def bfsEstimator(probabilities, graphsize, NFA): ''' This is the main algorithm where the product automaton is constructed and searched on the fly. In this one we use a random artificial graph to estimate the selectivity of a query. Probabilities is a dict {edge label L: probability that two nodes are connected with an edge labeled L} graphsize is the size of the graph. 
''' visited, queue = set(), [(0, NFA.start)] # the start node is zero, which means some arbitrary node generatedgraph = dict() # keep track of the graph as we've generated it gengraphlabels = dict() # for each node, keep track of which labels we've generated for each node # dict {node:[label1, label2...]} graphsolutions = set() broadcasts = set() edgelist = [] while queue: if (len(queue) > 100000) or len(visited) > 100000: print("queue length:" + len(queue) + len(visited)) raise Exception vertex = queue.pop(0) if vertex not in visited: visited.add(vertex) vgraph, vautom = vertex # if vertex is terminal, add to solution list if (vautom.is_end): graphsolutions.add(vgraph) # get epsilon-transitions eps_states = [(vgraph, veps) for veps in vautom.epsilon] trans_states = [] # get labeled transitions # record them as candidate broadcasts bccandidates = [(vgraph, lbl, vautom.transitions[lbl].name) for lbl in vautom.transitions.keys()] if (len(bccandidates) > 0): bccandidates.sort(key=lambda tup: tup[2]) # sort by destination initial = bccandidates.pop(0) # initial value is removed from list reducedbc = reduce(reducerfun, bccandidates, [ (initial[0], set([initial[1]]), initial[2])]) # build reducing initializer from initial tuple # reducedbc is a list of tuples (fromnode, set(labels), to automstate) reducedhashable = [(tup[0], "|".join(sorted(list(tup[1])))) for tup in reducedbc] # now list of tuples (fromnode, "l1|l2|l3...|ln") -> to automstate left out broadcasts.update(reducedhashable) # randomly generate outgoing nodes from here, only for edges of interest # TODO: should we actually keep track of all generated edges? for label in (vautom.transitions.keys()): # these are the outgoing labels we're interested in # have we generated this yet? if (vgraph not in gengraphlabels) or (label not in gengraphlabels[vgraph]): # no! # we're doing it now, so remember it gengraphlabels.setdefault(vgraph, []).append(label) # how many outgoing edges with this label from this node? binomial random variable howmany = np.random.binomial(graphsize, probabilities[label]) # binomial (n,p) if (howmany > 0): print("generating " + str(howmany) + " edges!!") # this seems really slow # whichones = np.random.permutation(graphsize)[:howmany] #choose those outgoing edges without replacement whichones = set() while len(whichones) < howmany: whichones.add(np.random.randint(graphsize)) generatedgraph.setdefault(vgraph, []).extend([(V, label) for V in whichones]) # else: yes we've generated it and can just use it from before # now we've got randomly generated outgoing arcs from our node of interest, labeled with labels of interest! # now follow them in the standard way! # for each neighbouring node in graph for (vg2, outlabel) in generatedgraph.get(vgraph, []): # in case the above generated nothing (a probabilities issue) print("vg2! 
" + vg2 + " " + outlabel) if (outlabel in vautom.transitions): vautom2 = vautom.transitions[outlabel] trans_states.append((vg2, vautom2)) edgelist.append((vgraph, outlabel, vg2)) trans_states.extend(eps_states) queue.extend([s for s in trans_states if s not in visited]) return graphsolutions, visited, list(set(edgelist)), broadcasts def bfsEstimatorBigram(probabilities, graphsize, NFA): ''' same as above, except probabilities are given as `bigrams', probabilities of edges based on previous edge crossed ''' visited, queue = set(), [(0, NFA.start, 0, 'INIT')] # the start node is zero, which means some arbitrary node generatedgraph = dict() # keep track of the graph as we've generated it gengraphlabels = dict() # for each node, keep track of which labels we've generated for each node # dict {node:[label1, label2...]} graphsolutions = set() broadcasts = set() edgelist = [] while queue: # if (len(queue)>100000) or len(visited)>100000: # print "queue length:", len(queue), len(visited) # raise Exception vertex = queue.pop(0) if (vertex[0], vertex[1]) not in visited: visited.add((vertex[0], vertex[1])) vgraph, vautom, prevnode, prevlabel = vertex # if vertex is terminal, add to solution list if (vautom.is_end): graphsolutions.add(vgraph) # get epsilon-transitions eps_states = [(vgraph, veps, prevnode, prevlabel) for veps in vautom.epsilon] trans_states = [] # get labeled transitions # temporarily removing BC computation # record them as candidate broadcasts # bccandidates = [(vgraph, lbl, vautom.transitions[lbl].name) for lbl in vautom.transitions.keys()] # if (len(bccandidates)>0): # bccandidates.sort(key=lambda tup: tup[2]) #sort by destination # initial
""" Author: <NAME> Modified from https://github.com/gurkirt/realtime-action-detection Licensed under The MIT License [see LICENSE for details] """ import os import torch import torch.nn as nn import torch.optim as optim import torch.nn.init as init import argparse from torch.autograd import Variable import torch.utils.data as data from data import v2, ActionDetection, NormliseBoxes, detection_collate, CLASSES, BaseTransform from utils.augmentations import SSDAugmentation from layers.modules import MultiboxLoss from layers.functions import PriorBox from layers import MatchPrior from AMTNet import AMTNet import numpy as np import time, pdb from utils.evaluation import evaluate_detections from layers.box_utils import nms, decode_seq from utils import AverageMeter from torch.optim.lr_scheduler import MultiStepLR # from torchviz import make_dot, make_dot_from_trace def str2bool(v): return v.lower() in ("yes", "true", "t", "1") parser = argparse.ArgumentParser(description='AMTNet detection training script') parser.add_argument('--version', default='v2', help='conv11_2(v2) or pool6(v1) as last layer') parser.add_argument('--basenet', default='vgg16_reducedfc.pth', help='pretrained base model') parser.add_argument('--dataset', default='ucf24', help='pretrained base model') parser.add_argument('--train_split', default=1, type=int, help='Split id') parser.add_argument('--ssd_dim', default=300, type=int, help='Input Size for SSD') # only support 300 now parser.add_argument('--seq_len', default=2, type=int, help='Input sequence length ') parser.add_argument('--seq_gap', default=0, type=int, help='Gap between the frame of sequence') parser.add_argument('--fusion_type', default='cat', type=str, help='Fusion type to fuse from sequence of frames; options are SUM, CAT and NONE') # parser.add_argument('--input_type_base', default='rgb', type=str, help='INput tyep default rgb can take flow (brox or fastOF) as well') parser.add_argument('--input_type_extra', default='brox', type=str, help='INput tyep default brox can take flow (brox or fastOF) as well') parser.add_argument('--input_frames_base', default=1, type=int, help='Number of input frame, default for rgb is 1') parser.add_argument('--input_frames_extra', default=5, type=int, help='Number of input frame, default for flow is 5') parser.add_argument('--jaccard_threshold', default=0.5, type=float, help='Min Jaccard index for matching') parser.add_argument('--batch_size', default=4, type=int, help='Batch size for training') parser.add_argument('--num_workers','-j', default=4, type=int, help='Number of workers used in dataloading') parser.add_argument('--max_iter', default=40000, type=int, help='Number of training iterations') parser.add_argument('--val_step', default=10000, type=int, help='Number of training iterations') parser.add_argument('--cuda', default=1, type=str2bool, help='Use cuda to train model') parser.add_argument('--ngpu', default=1, type=int, help='Use cuda to train model') parser.add_argument('--lr', '--learning-rate', default=0.0005, type=float, help='initial learning rate') parser.add_argument('--momentum', default=0.9, type=float, help='momentum') parser.add_argument('--stepvalues', default='10000,30000', type=str, help='step points for learning rate drop') parser.add_argument('--weight_decay', default=1e-4, type=float, help='Weight decay for SGD') parser.add_argument('--gamma', default=0.1, type=float, help='Gamma update for SGD at for stepwise schedule') parser.add_argument('--visdom', default=False, type=str2bool, help='Use visdom to 
for loss visualization') parser.add_argument('--vis_port', default=8095, type=int, help='Port for Visdom Server') parser.add_argument('--data_root', default='~/data/', help='Location of where in data is located like images and annotation file') parser.add_argument('--save_root', default='~/cache/', help='Location to where we wanr save the checkpoints of models') parser.add_argument('--iou_thresh', default=0.5, type=float, help='Evaluation threshold') parser.add_argument('--conf_thresh', default=0.01, type=float, help='Confidence threshold for evaluation') parser.add_argument('--nms_thresh', default=0.45, type=float, help='NMS threshold') parser.add_argument('--default_mult', default=1.0, type=float, help='NMS threshold') parser.add_argument('--topk', default=50, type=int, help='topk for evaluation') parser.add_argument('--man_seed', default=123, type=int, help='manula seed') args = parser.parse_args() import socket import getpass username = getpass.getuser() hostname = socket.gethostname() print('\n\n ', username, ' is using ', hostname, '\n\n') if hostname == 'mars': args.data_root = '/mnt/mars-fast/datasets/' args.save_root = '/mnt/mars-gamma/' args.vis_port = 8097 elif hostname in ['sun']: args.data_root = '/mnt/sun-gamma/' args.save_root = '/mnt/sun-gamma/' args.vis_port = 8096 elif hostname == 'mercury': args.data_root = '/mnt/mercury-fast/datasets/' args.save_root = '/mnt/mercury-beta/' args.vis_port = 8098 elif username == 'gurkirt' and hostname.startswith('comp'): args.data_root = '/home/gurkirt/datasets/' args.save_root = '/home/gurkirt/cache/' args.vis_port = 8097 visdom=False # python train.py --seq_len=2 --num_workers=4 --batch_size=16 --ngpu=2 --fusion_type=NONE --input_type_base=brox --input_frames_base=5 --stepvalues=30000,50000 --max_iter=60000 --val_step=10000 --lr=0.001 torch.set_default_tensor_type('torch.FloatTensor') np.random.seed(args.man_seed) torch.manual_seed(args.man_seed) if args.cuda: torch.cuda.manual_seed_all(args.man_seed) def print_node(gdf): node_fns = gdf.next_functions for fn in node_fns: print(fn) print_node(fn[0][0]) def main(): args.cfg = v2 args.train_sets = 'train' args.test_sets = 'test' kd = 3 args.means = (104, 117, 123) num_classes = len(CLASSES[args.dataset]) + 1 # only support multiclass datasets, not multilabel args.num_classes = num_classes args.stepvalues = [int(val) for val in args.stepvalues.split(',')] args.loss_reset_step = 30 # args.val_step = 30000 args.print_step = 10 args.fusion_type = args.fusion_type.lower() args.fusion = args.fusion_type in ['sum','cat','mean'] ## Define the experiment Name will used for save directory and ENV for visdom if not args.fusion: args.exp_name = 'AMTNet-{}-s{:d}-{}-sl{:02d}sg{:02d}-bs{:02d}-lr{:05d}'.format(args.dataset, args.train_split, args.input_type_base, args.seq_len, args.seq_gap, args.batch_size, int(args.lr * 100000)) else: args.exp_name = 'AMTNet-{}-s{:d}-{}-{}-{}-sl{:02d}sg{:02d}-bs{:02d}-lr{:05d}'.format(args.dataset, args.train_split, args.fusion_type, args.input_type_base, args.input_type_extra, args.seq_len, args.seq_gap, args.batch_size,int(args.lr * 100000)) num_feat_multiplier = {'cat': 2, 'sum': 1, 'mean': 1, 'none': 1} # fusion type can one of the above keys args.fmd = [512, 1024, 512, 256, 256, 256] args.kd = 3 args.fusion_num_muliplier = num_feat_multiplier[args.fusion_type] ## DEFINE THE NETWORK net = AMTNet(args) if args.fusion: base_weights = torch.load(args.save_root +'weights/AMTNet_single_stream_{:s}_s{:02d}.pth'.format(args.input_type_base, args.train_split)) extra_weights 
= torch.load(args.save_root + '/weights/AMTNet_single_stream_{:s}_s{:02d}.pth'.format(args.input_type_extra, args.train_split)) print('Loading base network...') net.core_base.load_my_state_dict(base_weights, input_frames=args.input_frames_base) net.core_extra.load_my_state_dict(extra_weights, input_frames=args.input_frames_extra) else: base_weights = torch.load(args.data_root +'/weights/{}-ssd300_ucf24_120000.pth'.format(args.input_type_base)) net.core_base.load_my_state_dict(base_weights, input_frames=args.input_frames_base) args.data_root += args.dataset + '/' args.save_root += args.dataset + '/' net = net.cuda() def xavier(param): init.xavier_uniform_(param) def weights_init(m): if isinstance(m, nn.Conv2d): xavier(m.weight.data) m.bias.data.zero_() elif isinstance(m, nn.Linear): xavier(m.weight.data) m.bias.data.zero_() print('Initializing weights for HEADs...') net.loc.apply(weights_init) net.conf.apply(weights_init) args.save_root = args.save_root + 'cache/' + args.exp_name + '/' if not os.path.isdir(args.save_root): os.makedirs(args.save_root) if args.ngpu>1: print('\nLets do dataparallel\n\n') net = torch.nn.DataParallel(net) parameter_dict = dict(net.named_parameters()) # Get parmeter of network in dictionary format wtih name being key params = [] #Set different learning rate to bias layers and set their weight_decay to 0 mult = 1; decay = 0 for name, param in parameter_dict.items(): if name.find('bias') > -1: mult = 2.0; decay = 0 else: mult = 1.0; decay = 1 if name.find('vgg')> -1 or name.find('extra')>-1 or name.find('L2Norm')>-1: mult = mult/args.seq_len # print(name, 'layer parameters will be trained @ {}'.format(args.lr*mult)) params += [{'params':[param], 'lr': args.lr*mult, 'weight_decay':args.weight_decay*decay}] optimizer = optim.SGD(params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay) criterion = MultiboxLoss() scheduler = MultiStepLR(optimizer, milestones=args.stepvalues, gamma=args.gamma) # Get proior or anchor boxes with torch.no_grad(): priorbox = PriorBox(v2, args.seq_len) priors = priorbox.forward() train(args, net, priors, optimizer, criterion, scheduler) def train(args, net, priors, optimizer, criterion, scheduler): log_file = open(args.save_root+"training.log", "w", 1) log_file.write(args.exp_name+'\n') for arg in sorted(vars(args)): print(arg, getattr(args, arg)) log_file.write(str(arg)+': '+str(getattr(args, arg))+'\n') net.train() # loss counters batch_time = AverageMeter() losses = AverageMeter() loc_losses = AverageMeter() cls_losses = AverageMeter() print('Loading Dataset...') train_dataset = ActionDetection(args, args.train_sets, SSDAugmentation(args.ssd_dim, args.means), NormliseBoxes(), anno_transform=MatchPrior(priors, args.cfg['variance'])) log_file.write(train_dataset.print_str) print(train_dataset.print_str) val_dataset = ActionDetection(args, args.test_sets, BaseTransform(args.ssd_dim, args.means), NormliseBoxes(), full_test=False) log_file.write(val_dataset.print_str) # print(val_dataset.print_str) epoch_size = len(train_dataset) // args.batch_size print('Training SSD on', train_dataset.name) if args.visdom: import visdom viz = visdom.Visdom(env=args.exp_name, port=args.vis_port) # initialize visdom loss plot lot = viz.line( X=torch.zeros((1,)).cpu(), Y=torch.zeros((1, 6)).cpu(), opts=dict( xlabel='Iteration', ylabel='Loss', title='Current SSD Training Loss', legend=['REG', 'CLS', 'AVG', 'S-REG', ' S-CLS', ' S-AVG'] ) ) # initialize visdom meanAP and class APs plot legends = ['meanAP'] for cls in CLASSES[args.dataset]: 
legends.append(cls) val_lot = viz.line( X=torch.zeros((1,)).cpu(), Y=torch.zeros((1,args.num_classes)).cpu(), opts=dict( xlabel='Iteration', ylabel='Mean AP', title='Current SSD Validation mean AP', legend=legends ) ) batch_iterator = None train_data_loader = data.DataLoader(train_dataset, args.batch_size, num_workers=args.num_workers, shuffle=True, collate_fn=detection_collate, pin_memory=True) val_data_loader = data.DataLoader(val_dataset, args.batch_size, num_workers=args.num_workers, shuffle=False, collate_fn=detection_collate, pin_memory=True) itr_count = 0 torch.cuda.synchronize() t0 = time.perf_counter() for iteration in range(args.max_iter + 1): if (not batch_iterator) or (iteration % epoch_size == 0): # create batch iterator batch_iterator = iter(train_data_loader) # load train data images, _ , prior_gt_labels, prior_gt_locations, _, _ = next(batch_iterator) # images, ground_truths, _ , _, num_mt, img_indexs # pdb.set_trace() images = [img.cuda(0, non_blocking=True) for img in images if not isinstance(img, list)] prior_gt_labels = prior_gt_labels.cuda(0, non_blocking=True) prior_gt_locations = prior_gt_locations.cuda(0, non_blocking=True) # forward cls_out, reg_out = net(images) optimizer.zero_grad() loss_l, loss_c = criterion(cls_out, reg_out, prior_gt_labels, prior_gt_locations) loss = loss_l + loss_c loss.backward() optimizer.step() scheduler.step() # pdb.set_trace() loc_loss = loss_l.item() conf_loss = loss_c.item() # print('Loss data type ',type(loc_loss)) loc_losses.update(loc_loss) cls_losses.update(conf_loss) losses.update((loc_loss + conf_loss)/2.0) if iteration == 103: loc_losses.reset() cls_losses.reset() losses.reset() batch_time.reset() if iteration % args.print_step == 0: if args.visdom and iteration>100: losses_list = [loc_losses.val, cls_losses.val, losses.val, loc_losses.avg, cls_losses.avg, losses.avg] viz.line(X=torch.ones((1, 6)).cpu() * iteration, Y=torch.from_numpy(np.asarray(losses_list)).unsqueeze(0).cpu(), win=lot, update='append') torch.cuda.synchronize() t1 = time.perf_counter() batch_time.update(t1 - t0) print_line = 'Itration {:02d}/{:06d}/{:06d} loc-loss {:.3f}({:.3f}) cls-loss {:.3f}({:.3f}) ' \ 'average-loss {:.3f}({:.3f}) Timer {:0.3f}({:0.3f})'.format(iteration//epoch_size, iteration, args.max_iter, loc_losses.val, loc_losses.avg, cls_losses.val, cls_losses.avg, losses.val, losses.avg, batch_time.val, batch_time.avg) torch.cuda.synchronize() t0 = time.perf_counter() log_file.write(print_line+'\n') print(print_line) itr_count += 1 if itr_count % args.loss_reset_step == 0 and itr_count > 0: loc_losses.reset() cls_losses.reset() losses.reset() batch_time.reset() print('Reset ', args.exp_name,' after', itr_count*args.print_step) itr_count = 0 if (iteration % args.val_step == 0 or iteration in [1000, args.max_iter]) and iteration>0: torch.cuda.synchronize() tvs = time.perf_counter() print('Saving state, iter:', iteration) torch.save(net.state_dict(), args.save_root + 'AMTNet_' + repr(iteration) + '.pth') net.eval() # switch net to evaluation mode with torch.no_grad(): mAP, ap_all, ap_strs = validate(args, net, priors, val_data_loader, val_dataset, iteration, iou_thresh=args.iou_thresh) for ap_str in ap_strs: print(ap_str) log_file.write(ap_str+'\n') ptr_str = '\nMEANAP:::=>'+str(mAP)+'\n' print(ptr_str) log_file.write(ptr_str) if args.visdom: aps = [mAP] for ap in ap_all: aps.append(ap) viz.line( X=torch.ones((1, args.num_classes)).cpu() * iteration, Y=torch.from_numpy(np.asarray(aps)).unsqueeze(0).cpu(), win=val_lot, update='append' ) net.train() # 
Switch net back to training mode
            torch.cuda.synchronize()
            t0 = time.perf_counter()
            prt_str = '\nValidation TIME::: {:0.3f}\n\n'.format(t0 - tvs)
            print(prt_str)
            log_file.write(prt_str)

    log_file.close()


def validate(args, net, priors, val_data_loader, val_dataset, iteration_num, iou_thresh=0.5):
    """Test a SSD network on an image database."""
    print('Validating at ', iteration_num)
    num_images = len(val_dataset)
    num_classes = args.num_classes
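# The optimiser setup above assigns a different learning rate and weight decay
# per parameter: bias terms get twice the base LR and no weight decay, and
# backbone parameters (names containing 'vgg', 'extra' or 'L2Norm') have their
# LR divided by the sequence length. A condensed sketch of that rule, assuming
# only torch; `build_param_groups` and the toy module are illustrative and not
# part of the original script.
import torch.nn as nn
import torch.optim as optim

def build_param_groups(net, base_lr, weight_decay, seq_len):
    """Return SGD parameter groups following the LR/decay policy used above."""
    groups = []
    for name, param in net.named_parameters():
        if 'bias' in name:
            mult, decay = 2.0, 0.0        # biases: double LR, no weight decay
        else:
            mult, decay = 1.0, 1.0
        if any(key in name for key in ('vgg', 'extra', 'L2Norm')):
            mult /= seq_len               # backbone layers shared across the frame sequence
        groups.append({'params': [param],
                       'lr': base_lr * mult,
                       'weight_decay': weight_decay * decay})
    return groups

# Toy usage with a stand-in module (the real network is AMTNet):
toy_net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.Conv2d(8, 8, 3))
toy_optim = optim.SGD(build_param_groups(toy_net, base_lr=5e-4,
                                         weight_decay=1e-4, seq_len=2),
                      lr=5e-4, momentum=0.9)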
dialog.SetAutoLayout()') def mock_setsize(self, *args): print('called dialog.SetSize()') def mock_Layout(self, *args): print('called dialog.Layout()') def mock_create_actions(self, *args): print('called dialog.create_actions()') mockbase = types.SimpleNamespace(app_title='title', opts={'Keywords': ['x', 'y']}) mockparent = types.SimpleNamespace(nt_icon='icon', base=mockbase) monkeypatch.setattr(gui.wx.Dialog, '__init__', mock_init) monkeypatch.setattr(gui.wx.Dialog, 'SetTitle', mock_SetTitle) monkeypatch.setattr(gui.wx.Dialog, 'SetIcon', mock_SetIcon) # monkeypatch.setattr(gui.wx.Dialog, 'SetAffirmativeId', mock_setaffirmativeid) # monkeypatch.setattr(gui.wx.Dialog, 'SetSize', mock_SetSize) monkeypatch.setattr(gui.wx.Dialog, 'Layout', mock_Layout) monkeypatch.setattr(gui.wx.Dialog, 'SetSizer', mock_setsizer) monkeypatch.setattr(gui.wx.Dialog, 'SetSize', mock_setsize) monkeypatch.setattr(gui.wx.Dialog, 'SetAutoLayout', mock_setautolayout) monkeypatch.setattr(gui.wx.Dialog, 'CreateButtonSizer', mock_createbuttons) monkeypatch.setattr(gui.wx, 'BoxSizer', MockBoxSizer) # monkeypatch.setattr(gui.wx, 'FlexGridSizer', MockGridSizer) monkeypatch.setattr(gui.wx, 'StaticText', MockStaticText) monkeypatch.setattr(gui.wx, 'Button', MockButton) monkeypatch.setattr(gui.wx, 'ListBox', MockListBox) # monkeypatch.setattr(gui.wx, 'TextCtrl', MockTextCtrl) monkeypatch.setattr(gui.KeywordsDialog, 'create_actions', mock_create_actions) testobj = gui.KeywordsDialog(mockparent, '') assert hasattr(testobj, 'helptext') # testobj = gui.KeywordsDialog(mockparent, keywords) assert capsys.readouterr().out == ('called wxDialog.__init__()\n' "called dialog.SetTitle() with args " "`('title - w_tags',)`\n" "called dialog.SetIcon() with args `('icon',)`\n" 'called ListBox.__init__()\n' 'called listbox.bind()\n' 'called StaticText.__init__()\n' 'called Button.__init__()\n' 'called Button.Bind()\n' 'called Button.__init__()\n' 'called Button.Bind()\n' 'called Button.__init__()\n' 'called Button.Bind()\n' 'called Button.__init__()\n' 'called Button.Bind()\n' 'called ListBox.__init__()\n' 'called listbox.bind()\n' 'called dialog.create_actions()\n' 'called listbox.append() with arg `x`\n' 'called listbox.append() with arg `y`\n' 'called BoxSizer.__init__(`vert`)\n' 'called BoxSizer.__init__(`hori`)\n' 'called BoxSizer.__init__(`vert`)\n' 'called StaticText.__init__()\n' 'called vert sizer.Add()\n' 'called vert sizer.Add()\n' 'called hori sizer.Add()\n' 'called BoxSizer.__init__(`vert`)\n' 'called vert sizer.AddStretchSpacer()\n' 'called vert sizer.Add()\n' 'called vert sizer.Add()\n' 'called vert sizer.Add()\n' 'called vert sizer.AddSpacer()\n' 'called vert sizer.Add()\n' 'called vert sizer.Add()\n' 'called vert sizer.AddStretchSpacer()\n' 'called hori sizer.Add()\n' 'called BoxSizer.__init__(`vert`)\n' 'called StaticText.__init__()\n' 'called vert sizer.Add()\n' 'called vert sizer.Add()\n' 'called hori sizer.Add()\n' 'called vert sizer.Add()\n' 'called BoxSizer.__init__(`hori`)\n' 'called dialog.createbuttons()\n' 'called vert sizer.Add()\n' 'called vert sizer.Add()\n' 'called dialog.SetSizer()\n' 'called dialog.SetAutoLayout()\n' 'called vert sizer.Fit()\n' 'called vert sizer.SetSizeHints()\n' 'called dialog.Layout()\n' 'called dialog.SetSize()\n') def test_init_2(self, monkeypatch, capsys): "test calling with keywordi(s) already associated" def mock_init(self, *args): print('called wxDialog.__init__()') def mock_SetTitle(self, *args): print('called dialog.SetTitle() with args `{}`'.format(args)) def mock_SetIcon(self, 
*args): print('called dialog.SetIcon() with args `{}`'.format(args)) def mock_createbuttons(self, *args): print('called dialog.createbuttons()') def mock_setaffirmativeid(self, *args): print('called dialog.SetAffirmativeId()') def mock_setsizer(self, *args): print('called dialog.SetSizer()') def mock_setautolayout(self, *args): print('called dialog.SetAutoLayout()') def mock_setsize(self, *args): print('called dialog.SetSize()') def mock_Layout(self, *args): print('called dialog.Layout()') def mock_create_actions(self, *args): print('called dialog.create_actions()') mockbase = types.SimpleNamespace(app_title='title', opts={'Keywords': ['x', 'y']}) mockparent = types.SimpleNamespace(nt_icon='icon', base=mockbase) monkeypatch.setattr(gui.wx.Dialog, '__init__', mock_init) monkeypatch.setattr(gui.wx.Dialog, 'SetTitle', mock_SetTitle) monkeypatch.setattr(gui.wx.Dialog, 'SetIcon', mock_SetIcon) # monkeypatch.setattr(gui.wx.Dialog, 'SetAffirmativeId', mock_setaffirmativeid) # monkeypatch.setattr(gui.wx.Dialog, 'SetSize', mock_SetSize) monkeypatch.setattr(gui.wx.Dialog, 'Layout', mock_Layout) monkeypatch.setattr(gui.wx.Dialog, 'SetSizer', mock_setsizer) monkeypatch.setattr(gui.wx.Dialog, 'SetSize', mock_setsize) monkeypatch.setattr(gui.wx.Dialog, 'SetAutoLayout', mock_setautolayout) monkeypatch.setattr(gui.wx.Dialog, 'CreateButtonSizer', mock_createbuttons) monkeypatch.setattr(gui.wx, 'BoxSizer', MockBoxSizer) # monkeypatch.setattr(gui.wx, 'FlexGridSizer', MockGridSizer) monkeypatch.setattr(gui.wx, 'StaticText', MockStaticText) monkeypatch.setattr(gui.wx, 'Button', MockButton) monkeypatch.setattr(gui.wx, 'ListBox', MockListBox) # monkeypatch.setattr(gui.wx, 'TextCtrl', MockTextCtrl) monkeypatch.setattr(gui.KeywordsDialog, 'create_actions', mock_create_actions) testobj = gui.KeywordsDialog(mockparent, '', ['y']) assert hasattr(testobj, 'helptext') assert capsys.readouterr().out == ('called wxDialog.__init__()\n' "called dialog.SetTitle() with args " "`('title - w_tags',)`\n" "called dialog.SetIcon() with args `('icon',)`\n" 'called ListBox.__init__()\n' 'called listbox.bind()\n' 'called StaticText.__init__()\n' 'called Button.__init__()\n' 'called Button.Bind()\n' 'called Button.__init__()\n' 'called Button.Bind()\n' 'called Button.__init__()\n' 'called Button.Bind()\n' 'called Button.__init__()\n' 'called Button.Bind()\n' 'called ListBox.__init__()\n' 'called listbox.bind()\n' 'called dialog.create_actions()\n' 'called listbox.append() with arg `y`\n' 'called listbox.append() with arg `x`\n' 'called BoxSizer.__init__(`vert`)\n' 'called BoxSizer.__init__(`hori`)\n' 'called BoxSizer.__init__(`vert`)\n' 'called StaticText.__init__()\n' 'called vert sizer.Add()\n' 'called vert sizer.Add()\n' 'called hori sizer.Add()\n' 'called BoxSizer.__init__(`vert`)\n' 'called vert sizer.AddStretchSpacer()\n' 'called vert sizer.Add()\n' 'called vert sizer.Add()\n' 'called vert sizer.Add()\n' 'called vert sizer.AddSpacer()\n' 'called vert sizer.Add()\n' 'called vert sizer.Add()\n' 'called vert sizer.AddStretchSpacer()\n' 'called hori sizer.Add()\n' 'called BoxSizer.__init__(`vert`)\n' 'called StaticText.__init__()\n' 'called vert sizer.Add()\n' 'called vert sizer.Add()\n' 'called hori sizer.Add()\n' 'called vert sizer.Add()\n' 'called BoxSizer.__init__(`hori`)\n' 'called dialog.createbuttons()\n' 'called vert sizer.Add()\n' 'called vert sizer.Add()\n' 'called dialog.SetSizer()\n' 'called dialog.SetAutoLayout()\n' 'called vert sizer.Fit()\n' 'called vert sizer.SetSizeHints()\n' 'called dialog.Layout()\n' 'called 
dialog.SetSize()\n') def test_create_actions(self, monkeypatch, capsys): def mock_init(self, *args, **kwargs): print('called dialog.__init__()') def mock_set_accels(self, *args, **kwargs): print('called dialog.SetAcceleratorTable()') monkeypatch.setattr(gui.KeywordsDialog, '__init__', mock_init) monkeypatch.setattr(gui.KeywordsDialog, 'SetAcceleratorTable', mock_set_accels) monkeypatch.setattr(gui.wx, 'MenuItem', MockMenuItem) monkeypatch.setattr(gui.wx, 'AcceleratorEntry', MockAcceleratorEntry) monkeypatch.setattr(gui.wx, 'AcceleratorTable', MockAcceleratorTable) mockbase = types.SimpleNamespace(app_title='title', opts={'Keywords': ['x', 'y']}) mockparent = types.SimpleNamespace(nt_icon='icon', base=mockbase) testobj = gui.KeywordsDialog(mockparent, '') testobj.create_actions() assert capsys.readouterr().out == ('called dialog.__init__()\n' 'called MenuItem.__init__()\n' 'called menuitem.Bind()\n' 'called menuitem.GetId()\n' 'called AcceleratorEntry.__init__()\n' 'called MockAcceleratorEntry.FromString()\n' 'called MenuItem.__init__()\n' 'called menuitem.Bind()\n' 'called menuitem.GetId()\n' 'called AcceleratorEntry.__init__()\n' 'called MockAcceleratorEntry.FromString()\n' 'called MenuItem.__init__()\n' 'called menuitem.Bind()\n' 'called menuitem.GetId()\n' 'called AcceleratorEntry.__init__()\n' 'called MockAcceleratorEntry.FromString()\n' 'called MenuItem.__init__()\n' 'called menuitem.Bind()\n' 'called menuitem.GetId()\n' 'called AcceleratorEntry.__init__()\n' 'called MockAcceleratorEntry.FromString()\n' 'called MenuItem.__init__()\n' 'called menuitem.Bind()\n' 'called menuitem.GetId()\n' 'called AcceleratorEntry.__init__()\n' 'called MockAcceleratorEntry.FromString()\n' 'called AcceleratorTable.__init__()\n' 'called dialog.SetAcceleratorTable()\n') def test_activate_left(self, monkeypatch, capsys): def mock_init(self, *args, **kwargs): print('called dialog.__init__()') def mock_activate(self, *args): print('called dialog._activate(`{}`)'.format(args[0])) monkeypatch.setattr(gui.KeywordsDialog, '__init__', mock_init) monkeypatch.setattr(gui.KeywordsDialog, '_activate', mock_activate) testobj = gui.KeywordsDialog('parent', '') testobj.fromlist = 'fromlist' testobj.activate_left('event') assert capsys.readouterr().out == ('called dialog.__init__()\n' 'called dialog._activate(`fromlist`)\n') def test_activate_right(self, monkeypatch, capsys): def mock_init(self, *args, **kwargs): print('called dialog.__init__()') def mock_activate(self, *args): print('called dialog._activate(`{}`)'.format(args[0])) monkeypatch.setattr(gui.KeywordsDialog, '__init__', mock_init) monkeypatch.setattr(gui.KeywordsDialog, '_activate', mock_activate) testobj = gui.KeywordsDialog('parent', '') testobj.tolist = 'tolist' testobj.activate_right('event') assert capsys.readouterr().out == ('called dialog.__init__()\n' 'called dialog._activate(`tolist`)\n') def test_activate(self, monkeypatch, capsys): "test when list-to-activate can be activated" def mock_init(self, *args, **kwargs): print('called dialog.__init__()') monkeypatch.setattr(gui.KeywordsDialog, '__init__', mock_init) testobj = gui.KeywordsDialog('parent', '') testobj._activate(MockListBox()) assert capsys.readouterr().out == ('called dialog.__init__()\n' 'called ListBox.__init__()\n' 'called listbox.SetSelection(`1`)\n' 'called listbox.SetFocus()\n') def test_activate_2(self, monkeypatch, capsys): "test when nothing was previously selected" def mock_init(self, *args, **kwargs): print('called dialog.__init__()') monkeypatch.setattr(gui.KeywordsDialog, 
'__init__', mock_init) monkeypatch.setattr(MockListBox, 'GetSelections', lambda x: None) testobj = gui.KeywordsDialog('parent', '') testobj._activate(MockListBox()) assert capsys.readouterr().out == ('called dialog.__init__()\n' 'called ListBox.__init__()\n' 'called listbox.SetSelection(`0`)\n' 'called listbox.SetFocus()\n') def test_activate_3(self, monkeypatch, capsys): "test activating fromlist fails" def mock_init(self, *args, **kwargs): print('called dialog.__init__()') def mock_activate_left(self, *args, **kwargs): print('called dialog.activate_left()') def mock_activate_right(self, *args, **kwargs): print('called dialog.activate_right()') def mock_setselection(self, *args): raise gui.wx._core.wxAssertionError monkeypatch.setattr(gui.KeywordsDialog, '__init__', mock_init) monkeypatch.setattr(gui.KeywordsDialog, 'activate_left', mock_activate_left) monkeypatch.setattr(gui.KeywordsDialog, 'activate_right', mock_activate_right) monkeypatch.setattr(MockListBox, 'SetSelection', mock_setselection) testobj = gui.KeywordsDialog('parent', '') testobj.fromlist = MockListBox() testobj.tolist = MockListBox() testobj._activate(testobj.fromlist) assert capsys.readouterr().out == ('called dialog.__init__()\n' 'called ListBox.__init__()\n' 'called ListBox.__init__()\n' 'called dialog.activate_right()\n') def test_activate_4(self, monkeypatch, capsys): "test activating fromlist fails" def mock_init(self, *args, **kwargs): print('called dialog.__init__()') def mock_activate_left(self, *args, **kwargs): print('called dialog.activate_left()') def mock_activate_right(self, *args, **kwargs): print('called dialog.activate_right()') def mock_setselection(self, *args): raise gui.wx._core.wxAssertionError monkeypatch.setattr(gui.KeywordsDialog, '__init__', mock_init) monkeypatch.setattr(gui.KeywordsDialog, 'activate_left', mock_activate_left) monkeypatch.setattr(gui.KeywordsDialog, 'activate_right', mock_activate_right) monkeypatch.setattr(MockListBox, 'SetSelection', mock_setselection) testobj = gui.KeywordsDialog('parent', '') testobj.fromlist = MockListBox() testobj.tolist = MockListBox() testobj._activate(testobj.tolist) assert capsys.readouterr().out == ('called dialog.__init__()\n' 'called ListBox.__init__()\n' 'called ListBox.__init__()\n' 'called dialog.activate_left()\n') def test_move_right(self, monkeypatch, capsys): def mock_init(self, *args, **kwargs): print('called dialog.__init__()') def mock_moveitem(self, *args): print('called dialog._moveitem(`{}`, `{}`)'.format(args[0], args[1])) monkeypatch.setattr(gui.KeywordsDialog, '__init__', mock_init) monkeypatch.setattr(gui.KeywordsDialog, '_moveitem', mock_moveitem) testobj = gui.KeywordsDialog('parent', '') testobj.fromlist = 'fromlist' testobj.tolist = 'tolist' testobj.move_right('event') assert capsys.readouterr().out == ('called dialog.__init__()\n' 'called dialog._moveitem(`fromlist`, `tolist`)\n') def test_move_left(self, monkeypatch, capsys): def mock_init(self, *args, **kwargs): print('called dialog.__init__()') def mock_moveitem(self, *args): print('called dialog._moveitem(`{}`, `{}`)'.format(args[0], args[1])) monkeypatch.setattr(gui.KeywordsDialog, '__init__', mock_init) monkeypatch.setattr(gui.KeywordsDialog, '_moveitem', mock_moveitem) testobj = gui.KeywordsDialog('parent', '') testobj.fromlist = 'fromlist' testobj.tolist = 'tolist' testobj.move_left('event') assert capsys.readouterr().out == ('called dialog.__init__()\n' 'called dialog._moveitem(`tolist`, `fromlist`)\n') def test_moveitem(self, monkeypatch, capsys): def 
mock_init(self, *args, **kwargs): print('called dialog.__init__()') monkeypatch.setattr(gui.KeywordsDialog, '__init__', mock_init) testobj = gui.KeywordsDialog('parent', '') from_ = MockListBox() to = MockListBox() testobj._moveitem(from_, to) assert capsys.readouterr().out == ('called dialog.__init__()\n' 'called ListBox.__init__()\n' 'called ListBox.__init__()\n' 'delete item 1 from listbox\n' 'called listbox.GetCount()\n' "insert `['value 1 from listbox']` into listbox\n") def test_add_trefw(self, monkeypatch, capsys): "test adding new keyword" def mock_init(self, *args, **kwargs): print('called dialog.__init__()') self.parent = args[0] def mock_textinit(self, *args, **kwargs): print('called MockTextDialog.__init__() with args `{}`'.format(kwargs)) mockbase = types.SimpleNamespace(app_title='title', opts={'Keywords': ['x', 'y']}) mockparent = types.SimpleNamespace(nt_icon='icon', base=mockbase) monkeypatch.setattr(gui.KeywordsDialog, '__init__', mock_init) monkeypatch.setattr(MockTextDialog, '__init__', mock_textinit) monkeypatch.setattr(gui.wx, 'TextEntryDialog', MockTextDialog) testobj = gui.KeywordsDialog(mockparent, '') testobj.tolist = MockListBox() testobj.add_trefw('event') assert capsys.readouterr().out == ('called dialog.__init__()\n' 'called ListBox.__init__()\n' 'called MockTextDialog.__init__() with args' " `{'caption': 'title', 'message': 't_newtag'}`\n" 'called listbox.append() with arg `entered value`\n' 'called dialog.Destroy()\n') def test_add_trefw_2(self, monkeypatch, capsys): "test canceling adding new keyword" def mock_init(self, *args, **kwargs): print('called dialog.__init__()') self.parent = args[0] def mock_textinit(self, *args, **kwargs): print('called MockTextDialog.__init__() with args `{}`'.format(kwargs)) mockbase = types.SimpleNamespace(app_title='title', opts={'Keywords': ['x', 'y']}) mockparent = types.SimpleNamespace(nt_icon='icon', base=mockbase) monkeypatch.setattr(gui.KeywordsDialog, '__init__', mock_init) monkeypatch.setattr(MockTextDialog, '__init__', mock_textinit) monkeypatch.setattr(MockTextDialog, 'ShowModal', lambda x: gui.wx.ID_CANCEL) monkeypatch.setattr(gui.wx, 'TextEntryDialog', MockTextDialog) testobj = gui.KeywordsDialog(mockparent, '') testobj.tolist = MockListBox() testobj.add_trefw('event') assert capsys.readouterr().out == ('called dialog.__init__()\n' 'called ListBox.__init__()\n' 'called MockTextDialog.__init__() with args' " `{'caption': 'title', 'message': 't_newtag'}`\n" 'called dialog.Destroy()\n') def test_keys_help(self, monkeypatch, capsys): def mock_init(self, *args, **kwargs): print('called dialog.__init__()') self.parent = args[0] self.helptext = args[1] def mock_setaffirmativeid(self, *args): print('called dialog.SetAffirmativeId()') def mock_settitle(self, *args): print('called dialog.SetTitle()') def mock_setsizer(self, *args): print('called dialog.SetSizer()') def mock_setautolayout(self, *args): print('called dialog.SetAutoLayout()') def mock_showmodal(self, *args): print('called dialog.ShowModal()') def mock_layout(self, *args): print('called dialog.Layout()') monkeypatch.setattr(gui.wx.Dialog, 'SetAffirmativeId', mock_setaffirmativeid) monkeypatch.setattr(gui.wx.Dialog, 'SetTitle', mock_settitle) monkeypatch.setattr(gui.wx.Dialog, 'SetSizer', mock_setsizer) monkeypatch.setattr(gui.wx.Dialog, 'SetAutoLayout', mock_setautolayout) monkeypatch.setattr(gui.wx.Dialog, 'Layout', mock_layout) monkeypatch.setattr(MockDialog, 'ShowModal', mock_showmodal) monkeypatch.setattr(gui.wx, 'Dialog', MockDialog) 
monkeypatch.setattr(gui.wx, 'BoxSizer', MockBoxSizer) monkeypatch.setattr(gui.wx, 'FlexGridSizer', MockGridSizer) monkeypatch.setattr(gui.wx, 'StaticText', MockStaticText) monkeypatch.setattr(gui.wx, 'Button', MockButton) mockbase = types.SimpleNamespace(app_title='title', opts={'Keywords': ['x', 'y']}) mockparent = types.SimpleNamespace(nt_icon='icon', base=mockbase) monkeypatch.setattr(gui.KeywordsDialog, '__init__', mock_init) testobj = gui.KeywordsDialog(mockparent, (('x',
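# The dialog tests above all follow the same pattern: every wx method that
# would touch the GUI is monkeypatched with a stub that only prints the call,
# and the test then compares capsys output against the expected call sequence.
# A minimal self-contained sketch of that pattern, using a plain stand-in
# class instead of gui.wx.Dialog (Widget, show and the test name below are
# illustrative, not from the original test module):
class Widget:
    def SetTitle(self, title):
        raise RuntimeError('real GUI call; tests patch this away')

def show(widget, title):
    widget.SetTitle(title)

def test_show_calls_settitle(monkeypatch, capsys):
    def mock_settitle(self, *args):
        print('called Widget.SetTitle() with args `{}`'.format(args))
    # Swap the GUI method for the recording stub, as with mock_SetTitle above.
    monkeypatch.setattr(Widget, 'SetTitle', mock_settitle)
    show(Widget(), 'w_tags')
    assert capsys.readouterr().out == (
        "called Widget.SetTitle() with args `('w_tags',)`\n")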
= 4 #************************# #----------------------------------------------------------------------------------- #----------------------------------------------------------------------------------- # # ERROR_MESSAGE = "" try: project = FormProject.objects.get(pk=kwargs['project_pk']) formtype = FormType.objects.get(pk=kwargs['form_type_pk']) except: raise Http404("Project Does Not Exist!") if project.pk == request.user.permissions.project.pk: counter = Counter() counter.reset() kwargs.update({'access_level':TEMPLATE_ACCESS_LEVEL}) kwargs.update({'user_access':request.user.permissions.access_level}) kwargs.update({'user_project':request.user.permissions.project}) kwargs.update({'project':project}) kwargs.update({'form':'False'}) kwargs.update({'formtype':formtype}) kwargs.update({'toolbar_title_code': 'FormType_' + kwargs['form_type_pk']}) kwargs.update({'deletable': 'False'}) else: #If anything goes wrong in the process, return an error in the json HTTP Response SECURITY_log_security_issues(request.user, 'admin.py - ' + str(sys._getframe().f_code.co_name), 'Trying to access another project.', request.META) return HttpResponse(render_to_response('maqluengine/admin_warning.html', kwargs, RequestContext(request))) #Check our user's session and access level if SECURITY_check_user_permissions(ACCESS_LEVEL, request.user.permissions.access_level): return HttpResponse(render_to_response('maqluengine/new_rtype_importer.html', kwargs, RequestContext(request))) else: ERROR_MESSAGE += "Error: You do not have permission to view this page" #If anything goes wrong in the process, return an error in the json HTTP Response SECURITY_log_security_issues(request.user, 'admin.py - ' + str(sys._getframe().f_code.co_name), ERROR_MESSAGE, request.META) kwargs.update({'ERROR_MESSAGE': ERROR_MESSAGE}) return HttpResponse(render_to_response('maqluengine/admin_error.html', kwargs, RequestContext(request))) #=====================================================================================# # ACCESS LEVEL : 1 TEMPLATE_ACCESS_LEVEL : 5 PROJECT_HOME() #=====================================================================================# def project_home(self, request, **kwargs): #************************# ACCESS_LEVEL = 1 TEMPLATE_ACCESS_LEVEL = 5 #************************# #----------------------------------------------------------------------------------- # This view delivers the project overview of users/stats etc. Only a level 5 admin can edit # --the info on this screen. Although the access level is set to 5 on this view, we allow all # --project users to see this page. Access to modifications are prohibited in the template # --using this access_level passed to the **kwargs however, e.g. 
save buttons/delete buttons/delete # --will not be generated if someone isn't level 5 ERROR_MESSAGE = "" try: project = FormProject.objects.get(pk=kwargs['project_pk']) except: raise Http404("Project Does Not Exist!") if request.user.permissions.project.pk == project.pk: kwargs.update({'access_level':TEMPLATE_ACCESS_LEVEL}) kwargs.update({'user_access':request.user.permissions.access_level}) kwargs.update({'user_project':request.user.permissions.project}) kwargs.update({'project':project}) kwargs.update({'toolbar_title_code': 'Project_' + kwargs['project_pk']}) kwargs.update({'form':'False'}) kwargs.update({'deletable': 'False'}) else: #If anything goes wrong in the process, return an error in the json HTTP Response SECURITY_log_security_issues(request.user, 'admin.py - ' + str(sys._getframe().f_code.co_name), 'Trying to access another project.', request.META) return HttpResponse(render_to_response('maqluengine/admin_warning.html', kwargs, RequestContext(request))) #Check our user's session and access level if SECURITY_check_user_permissions(ACCESS_LEVEL, request.user.permissions.access_level): return HttpResponse(render_to_response('maqluengine/project_control_panel.html', kwargs, RequestContext(request))) else: ERROR_MESSAGE += "Error: You do not have permission to view this page" #If anything goes wrong in the process, return an error in the json HTTP Response SECURITY_log_security_issues(request.user, 'admin.py - ' + str(sys._getframe().f_code.co_name), ERROR_MESSAGE, request.META) kwargs.update({'ERROR_MESSAGE': ERROR_MESSAGE}) return HttpResponse(render_to_response('maqluengine/admin_error.html', kwargs, RequestContext(request))) #=====================================================================================# # ACCESS LEVEL : 3 TEMPLATE_ACCESS_LEVEL : 3 EDIT_FORM_TYPE() *RECYCLING #=====================================================================================# def edit_form_type(self, request, **kwargs): #************************# ACCESS_LEVEL = 3 TEMPLATE_ACCESS_LEVEL = 3 #************************# #------------------------------------------------------------------------------------------------------ # This view just displays the form type editor page. 
Only a level 3 access can see and use this page # --It's not necessary for any lower access to view this page ERROR_MESSAGE = "" try: project = FormProject.objects.get(pk=kwargs['project_pk']) formtype = FormType.objects.get(pk=kwargs['form_type_pk']) except: raise Http404("Project Does Not Exist!") #*** RECYCLING BIN *** Return a 404 Error if the requsted model instance is flagged for deletion (in the recycling bin) if formtype.flagged_for_deletion == True: raise Http404("This Page Does Not Exist!") #Make sure the user is trying to access their project and not another project #If they are trying to access another project--warn them their action has been logged #after redirecting them to a warning page if project.pk == request.user.permissions.project.pk and formtype.project.pk == request.user.permissions.project.pk: counter = Counter() counter.reset() kwargs.update({'access_level':TEMPLATE_ACCESS_LEVEL}) kwargs.update({'user_access':request.user.permissions.access_level}) kwargs.update({'user_project':request.user.permissions.project}) kwargs.update({'counter':counter}) kwargs.update({'project':project}) kwargs.update({'formtype':formtype}) kwargs.update({'form':'False'}) kwargs.update({'toolbar_title_code': 'FormType_' + kwargs['form_type_pk']}) kwargs.update({'deletable': 'True'}) else: #If anything goes wrong in the process, return an error in the json HTTP Response SECURITY_log_security_issues(request.user, 'admin.py - ' + str(sys._getframe().f_code.co_name), 'Trying to access another project.', request.META) return HttpResponse(render_to_response('maqluengine/admin_warning.html', kwargs, RequestContext(request))) #Check our user's session and access level if SECURITY_check_user_permissions(ACCESS_LEVEL, request.user.permissions.access_level): return HttpResponse(render_to_response('maqluengine/edit_form_type.html', kwargs, RequestContext(request))) else: ERROR_MESSAGE += "Error: You do not have permission to view this page" #If anything goes wrong in the process, return an error in the json HTTP Response SECURITY_log_security_issues(request.user, 'admin.py - ' + str(sys._getframe().f_code.co_name), ERROR_MESSAGE, request.META) kwargs.update({'ERROR_MESSAGE': ERROR_MESSAGE}) return HttpResponse(render_to_response('maqluengine/admin_error.html', kwargs, RequestContext(request))) #=====================================================================================# # ACCESS LEVEL : 4 TEMPLATE_ACCESS_LEVEL : 4 NEW_FORM_TYPE() #=====================================================================================# def new_form_type(self, request, **kwargs): #************************# ACCESS_LEVEL = 4 TEMPLATE_ACCESS_LEVEL = 4 #************************# #----------------------------------------------------------------------------------------------- # This view show the new form type creator template. It allows users to create new form types # --for their project. Because it is creating a new form type it is limited only to those with # --level 4 access. 
ERROR_MESSAGE = "" try: project = FormProject.objects.get(pk=kwargs['project_pk']) except: raise Http404("Project Does Not Exist!") #Make sure the user is trying to access their project and not another project #If they are trying to access another project--warn them their action has been logged #after redirecting them to a warning page if project.pk == request.user.permissions.project.pk: kwargs.update({'access_level':TEMPLATE_ACCESS_LEVEL}) kwargs.update({'user_access':request.user.permissions.access_level}) kwargs.update({'user_project':request.user.permissions.project}) kwargs.update({'toolbar_title_code': 'NewFormType_none'}) kwargs.update({'project':project}) kwargs.update({'form':'False'}) kwargs.update({'deletable': 'False'}) else: #If anything goes wrong in the process, return an error in the json HTTP Response SECURITY_log_security_issues(request.user, 'admin.py - ' + str(sys._getframe().f_code.co_name), 'Trying to access another project.', request.META) return HttpResponse(render_to_response('maqluengine/admin_warning.html', kwargs, RequestContext(request))) #Check our user's session and access level if SECURITY_check_user_permissions(ACCESS_LEVEL, request.user.permissions.access_level): return HttpResponse(render_to_response('maqluengine/new_form_type.html', kwargs, RequestContext(request))) else: ERROR_MESSAGE += "Error: You do not have permission to view this page" #If anything goes wrong in the process, return an error in the json HTTP Response SECURITY_log_security_issues(request.user, 'admin.py - ' + str(sys._getframe().f_code.co_name), ERROR_MESSAGE, request.META) kwargs.update({'ERROR_MESSAGE': ERROR_MESSAGE}) return HttpResponse(render_to_response('maqluengine/admin_error.html', kwargs, RequestContext(request))) #=====================================================================================# # ACCESS LEVEL : 1 TEMPLATE_ACCESS_LEVEL : 2 EDIT_FORM() *RECYCLING #=====================================================================================# def edit_form(self, request, **kwargs): #************************# ACCESS_LEVEL = 1 TEMPLATE_ACCESS_LEVEL = 2 #************************# #----------------------------------------------------------------------------------------------- # This view shows the page to edit an existing form. 
Any project user can view this, but only level 2 # --and above can use its functionality to submit data ERROR_MESSAGE = "" try: form = Form.objects.get(pk=kwargs['form_pk']) form_type = FormType.objects.get(pk=kwargs['form_type_pk']) project = FormProject.objects.get(pk=kwargs['project_pk']) except: raise Http404("Form does not exist") #Do something with request here #*** RECYCLING BIN *** Return a 404 Error if the requsted model instance is flagged for deletion (in the recycling bin) if form.flagged_for_deletion == True: raise Http404("This Page Does Not Exist!") #Make sure the user is trying to access their project and not another project #If they are trying to access another project--warn them their action has been logged #after redirecting them to a warning page if project.pk == request.user.permissions.project.pk and form.project.pk == request.user.permissions.project.pk and form.form_type.pk == form_type.pk: counter = Counter() counter.reset() #Temp function to make displaying the page much faster--remove the database query hits from the actual .html file #Need a list of this form's rtypes and a list of this forms rtype values print >>sys.stderr, "TIMER FOR FORM EDIT A" #*** RECYCLING BIN *** We need to make sure all the RTYPES and RVALS(by their RTYPE) are filtered out by their deletion flags frat_list = form_type.formrecordattributetype_set.all().filter(flagged_for_deletion=False) frav_list = form.formrecordattributevalue_set.all().filter(record_attribute_type__flagged_for_deletion=False) frrt_list = form_type.ref_to_parent_formtype.all().filter(flagged_for_deletion=False) frrv_list = form.ref_to_parent_form.all().filter(record_reference_type__flagged_for_deletion=False) kwargs.update({'api_urls':get_api_endpoints()}) kwargs.update({'frat_list':frat_list}) kwargs.update({'frav_list':frav_list}) kwargs.update({'frrt_list':frrt_list}) kwargs.update({'frrv_list':frrv_list}) print >>sys.stderr, "TIMER FOR FORM EDIT A" kwargs.update({'access_level':TEMPLATE_ACCESS_LEVEL}) kwargs.update({'user_access':request.user.permissions.access_level}) kwargs.update({'user_project':request.user.permissions.project}) kwargs.update({'formtype':form_type}) kwargs.update({'form':form}) kwargs.update({'project':project}) kwargs.update({'counter':counter}) kwargs.update({'toolbar_title_code': 'Form_' + kwargs['form_pk']}) kwargs.update({'deletable': 'True'}) else: SECURITY_log_security_issues(request.user, 'admin.py - ' + str(sys._getframe().f_code.co_name), 'Trying to access another project.', request.META) return HttpResponse(render_to_response('maqluengine/admin_warning.html', kwargs, RequestContext(request))) #Check our user's session and access level if SECURITY_check_user_permissions(ACCESS_LEVEL, request.user.permissions.access_level): return HttpResponse(render_to_response('maqluengine/edit_form.html', kwargs, RequestContext(request))) else: ERROR_MESSAGE += "Error: You do not have permission to view this page" #If anything goes wrong in the process, return an error in the json HTTP Response SECURITY_log_security_issues(request.user, 'admin.py - ' + str(sys._getframe().f_code.co_name), ERROR_MESSAGE, request.META) kwargs.update({'ERROR_MESSAGE': ERROR_MESSAGE}) return HttpResponse(render_to_response('maqluengine/admin_error.html', kwargs, RequestContext(request))) #=====================================================================================# # ACCESS LEVEL : 2 TEMPLATE_ACCESS_LEVEL : 2 NEW_FORM() *RECYCLING 
#=====================================================================================# def new_form(self, request, **kwargs): #************************# ACCESS_LEVEL = 2 TEMPLATE_ACCESS_LEVEL = 2 #************************# #----------------------------------------------------------------------------------------------- # This view shows the page to edit an existing form. Any project user can view this, but only level 2 # --and above can use its functionality to submit data ERROR_MESSAGE = "" try: form_type =
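# Every view above repeats the same three-step guard: confirm the user belongs
# to the requested project (otherwise log the incident and show
# admin_warning.html), check the numeric access level with
# SECURITY_check_user_permissions (otherwise log and show admin_error.html),
# and only then render the real template. A condensed sketch of that shared
# logic; `check_project_access` is an illustrative helper, not a function from
# the original module, and it assumes the module's own SECURITY_* helpers and
# permission model.
def check_project_access(request, project, required_level, view_name):
    """Return (allowed, error_template) following the per-view convention."""
    perms = request.user.permissions
    if project.pk != perms.project.pk:
        SECURITY_log_security_issues(request.user, 'admin.py - ' + view_name,
                                     'Trying to access another project.',
                                     request.META)
        return False, 'maqluengine/admin_warning.html'
    if not SECURITY_check_user_permissions(required_level, perms.access_level):
        SECURITY_log_security_issues(request.user, 'admin.py - ' + view_name,
                                     'Error: You do not have permission to view this page',
                                     request.META)
        return False, 'maqluengine/admin_error.html'
    return True, None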
[], None code = item[2][:4] for entry in Sandhi_Convt.antas: if code[0] == entry[0]: krdDetail.anta = AmaraKosha_Database_Queries.iscii_unicode(entry[2] + '³ÚÏÚÆè£', requested_script) break # print('krdGener Sufcode: qry %s code %s'%(qry,code)) cols, dataSufcode = AmaraKosha_Database_Queries.sqlQuery(qry, code, duplicate=False, maxrows=0) # print(('krdGener sufcode %s cols %s\n%s')%(code, cols, dataSufcode)) krdDetail.erb, krdDetail.erb_iscii = item[erbInColumn], item[erbInColumn + 1] krdDetail.sabda = AmaraKosha_Database_Queries.iscii_unicode(item[sabdaInColumn], requested_script) krdDetail.linga = AmaraKosha_Database_Queries.iscii_unicode(Sandhi_Convt.lingas[int(code[1])], requested_script) if dataSufcode != [] and len(dataSufcode[0]) > 2: suffixes = str(dataSufcode[0][2]).split(' ') else: return [], None # from VB SplitAndDisplay routine subforms = [] for sufcode in suffixes: subforms.append(Sandhi_Convt.Convt(sufcode)) # print('subforms %s'%([erb+item for item in subforms])) # print([Kosha_Subanta_Synonyms_Queries.iscii_unicode(erb+item) for item in subforms]) subforms_with_sandhi = [AmaraKosha_Database_Queries.iscii_unicode(Sandhi_Convt.Sandhi(krdDetail.erb_iscii + item), requested_script) for item in subforms] # print([Sandhi_Convt.Sandhi(erb + item) for item in subforms]) forms += [subforms_with_sandhi[0:3], subforms_with_sandhi[3:6], subforms_with_sandhi[6:9], subforms_with_sandhi[9:12], subforms_with_sandhi[12:15], subforms_with_sandhi[15:18], subforms_with_sandhi[18:21], subforms_with_sandhi[21:24]] krdDatas.append(getAnalysedinfo(krdDetail, dhatuNo, requested_script)) # print('no. of items %i subforms with sandhi %s' % (len(forms), forms)) # for item in krdDatas: # attributes = inspect.getmembers(item, lambda a: not (inspect.isroutine(a))) # print([a for a in attributes if not (a[0].startswith('__') and a[0].endswith('__'))]) return forms, krdDatas def getAnalysedinfo(krdDetail: krdData, dhatuNo: str, requested_script=1): # from VB GetAnalysedInfo routine qry = 'Select * from Sdhatu where field1=?' cols, dataAnalysed = AmaraKosha_Database_Queries.sqlQuery(qry, dhatuNo, maxrows=0, script=requested_script) if dataAnalysed == []: # qry failed due to iscii akshara in non-Devanagari ... try Devanagari cols, dataAnalysed = AmaraKosha_Database_Queries.sqlQuery(qry, dhatuNo, maxrows=0) # print('getAnalysedinfo cols %s\nData Analysed %s'%(cols, dataAnalysed)) # for item in dataAnalysed: arthas, karmas, arthas_iscii = [], [], [] for item in dataAnalysed: # there will be only one record! 
krdDetail.verb = item[cols.index('Field2')] krdDetail.nijverb = transliterate_lines(item[cols.index('Field3')], IndianLanguages[requested_script - 1]) krdDetail.sanverb = transliterate_lines(item[cols.index('Field4')], IndianLanguages[requested_script - 1]) krdDetail.verb_iscii, krdDetail.nijverb_iscii, krdDetail.sanverb_iscii = item[cols.index('Field2')+1], item[cols.index('Field3')+1], item[cols.index('Field4')+1] krdDetail.GPICode = item[cols.index('Field9')] krdDetail.gana = transliterate_lines(Tganas[int(krdDetail.GPICode[0])], IndianLanguages[requested_script - 1]) krdDetail.padi = transliterate_lines(Tpadis[int(krdDetail.GPICode[1]) - 1], IndianLanguages[requested_script - 1]) krdDetail.it = transliterate_lines(Tyits[int(krdDetail.GPICode[2]) - 1], IndianLanguages[requested_script - 1]) krdDetail.CombinedM = item[cols.index('Field10')] arthas_karmas = item[cols.index('Field8')].split('/') arthas_karmas_iscii = item[cols.index('Field8')+1].split('/') # print('getAnalysedinfo arthas_karmas %s'%arthas_karmas) arthas += [transliterate_lines(word[:-2], Transliterate.IndianLanguages[requested_script - 1]) for word in arthas_karmas] arthas_iscii += [word[:-2] for word in arthas_karmas_iscii] karmaCodes = [int(word[len(word)-1]) - 1 for word in arthas_karmas] krdDetail.karmaCode = ''.join([str(code) for code in karmaCodes]) karmas += [transliterate_lines(Tkarmas[karma], Transliterate.IndianLanguages[requested_script - 1]) for karma in karmaCodes if karma < len(Tkarmas)] krdDetail.meaning = ' '.join(arthas) krdDetail.meaning_iscii = ' '.join(arthas_iscii) krdDetail.karma = ' '.join(karmas) return krdDetail def krdanta_SortedList_KrDantavyayam(dhatuNo: str, DhatuVidah: str, KrdantaVidah: str, KrdMode: str, dataDhatu: List[str], cols_dataDhatu: List[str], requested_script=1): KrdCodeDicts = {"विध्यर्थः": {"तव्य": "a", "अनीयर्": "a", "य": "c"}, "भूतः": {"तव्य": "d", "अनीयर्": "e"}, "वर्तमानः": {"तव्य": "f", "अनीयर्": "g"}, "भविष्यत्": {"तव्य": "h", "अनीयर्": "i"}, "कृदव्ययम्": {"तव्य": "A", "अनीयर्": "B"}} if not KrdantaVidah == "कृदव्ययम्":KrdCode = KrdCodeDicts[KrdantaVidah][KrdMode] + {"केवलकृदन्तः": "1", "णिजन्तः": "2", "सन्नन्तः": "3"}[DhatuVidah] else: KrdCode = {"केवलकृदन्तः": "1", "णिजन्तः": "2", "सन्नन्तः": "3"}[DhatuVidah] + KrdCodeDicts[KrdantaVidah][KrdMode] krdDatas = [] forms = [] qry = 'select * from krudav where field2 = ? and field1 = ?' 
cols, datakrdAvyaya = AmaraKosha_Database_Queries.sqlQuery(qry, (KrdCode, dhatuNo), duplicate=False, maxrows=0, script=requested_script) for item in datakrdAvyaya: krdDataInstance = krdData() krdDataInstance.sabda = item[cols.index('Field3')] # print(cols_dataDhatu.index('Field2'), len(dataDhatu)) krdDataInstance.dhatuVidhah = DhatuVidah krdDataInstance.krdantaVidhah = KrdantaVidah krdDataInstance.pratyayaVidhah = KrdMode krdDataInstance.verb = dataDhatu[0][cols_dataDhatu.index('Field2')] krdDataInstance.nijverb = dataDhatu[0][cols_dataDhatu.index('Field3')] krdDataInstance.sanverb = dataDhatu[0][cols_dataDhatu.index('Field4')] krdDataInstance.GPICode = dataDhatu[0][cols_dataDhatu.index('Field9')] krdDataInstance.gana = Tganas[int(krdDataInstance.GPICode[0])] krdDataInstance.padi = Tpadis[int(krdDataInstance.GPICode[1]) - 1] krdDataInstance.it = Tyits[int(krdDataInstance.GPICode[2]) - 1] krdDataInstance.combinedM = dataDhatu[0][cols_dataDhatu.index('Field10')] arthas_karmas = dataDhatu[0][cols_dataDhatu.index('Field8')].split('/') krdDataInstance.arthas = [word[:-2] for word in arthas_karmas] krdDataInstance.karmas = [int(word[len(word) - 1]) for word in arthas_karmas] krdDataInstance.karmas = [Tkarmas[karma] for karma in krdDataInstance.karmas] # print('arthas %s\nkarmas %s'%(krdDataInstance.arthas, krdDataInstance.karmas)) krdDatas.append(krdDataInstance) return krdDatas def subanta_Analysis(word, requested_script=1): # print('word %s %s'%(word, AmaraKosha_Database_Queries.iscii_unicode(word))) if word != '' and ord(word[len(word) - 1]) == 162: word = word[:len(word) - 1] + 'Ìè' qry ='select * from subfin where Finform=?' cols_subfin, dbdata_subfin = AmaraKosha_Database_Queries.sqlQuery(qry, word, maxrows=0, script=requested_script) for row in dbdata_subfin: codes = row[cols_subfin.index('Code')] subDetail = subantaDetails() subDetails = [] for codeset in codes.split(' '): subDetail.vibvach = int(codeset.split(',')[1]) - 1 subDetail.vib = Sandhi_Convt.vibstr[subDetail.vibvach//3] subDetail.vach = Sandhi_Convt.vachstr[subDetail.vibvach%3] for entry in Sandhi_Convt.antas: if codeset[0] == entry[0]: subDetail.anta = entry[2] # equivalent of Right$(antas(i), Len(antas(i)) - 2) in VB code if codeset[0] == 'A': subDetail.anta += "ÚÆè£" else: subDetail.anta += "³ÚÏÚÆè£" break subDetail.linga = Sandhi_Convt.lingas[int(codeset[1:2])] subDetail.wtype = 1 qry = 'select * from fincode where code like ?' 
cols_fincode, dbdata_fincode = AmaraKosha_Database_Queries.sqlQuery(qry, codeset[0]+'__', maxrows=0, script=requested_script) for sufrec in dbdata_fincode: if sufrec[cols_fincode.index('code')][:3] == codeset[0]: subDetail.base = sufrec[cols_fincode.index('finroot')] subDetail.det = sufrec[cols_fincode.index('code')] subDetail.erb = None subDetails.append(subDetail) word_visandhi = Sandhi_Convt.visandhi(word) # ic.ic(word, word_visandhi, AmaraKosha_Database_Queries.iscii_unicode(word),AmaraKosha_Database_Queries.iscii_unicode(word_visandhi)) # print('subAnaly-visandhi %s %s'%(word_visandhi,AmaraKosha_Database_Queries.iscii_unicode(word_visandhi))) halanth = chr(232) subDetails = [] for i in range(len(word_visandhi)-1): # print('visandhi left %s'%word_visandhi[::-1][i]) if not word_visandhi[::-1][i] == halanth: subDetail = subantaDetails() subDetail.erb = Sandhi_Convt.Sandhi(word_visandhi[:-(i+1)]) subDetail.suf = Sandhi_Convt.Sandhi(word_visandhi[len(word_visandhi)-(i+1):]) # print('subAnaly 1 erb %s -> %s(%s) suf %s -> %s(%s)'%(word_visandhi[:-(i+1)], subDetail.erb, AmaraKosha_Database_Queries.iscii_unicode(subDetail.erb), word_visandhi[len(word_visandhi)-(i+1):], subDetail.suf, AmaraKosha_Database_Queries.iscii_unicode(subDetail.suf))) # splits += 1 subDetails.append(subDetail) # print('subAnaly 2 erb', [AmaraKosha_Database_Queries.iscii_unicode(det.erb) for det in subDetails], 'suf', [[det.suf,AmaraKosha_Database_Queries.iscii_unicode(det.suf)] for det in subDetails]) subforms_with_sandhi = [] subRecs = [] for subDetailsRec in subDetails: # print('subAnaly erb %s(%s) suf %s(%s)' % (subDetailsRec.erb, AmaraKosha_Database_Queries.iscii_unicode(subDetailsRec.erb), subDetailsRec.suf, AmaraKosha_Database_Queries.iscii_unicode(subDetailsRec.suf))) for j, scode in enumerate(Sandhi_Convt.Suffix): if scode != '' and subDetailsRec.suf == scode: chCode = Sandhi_Convt.decode(j) # ic.ic(j, scode, AmaraKosha_Database_Queries.iscii_unicode(scode), chCode) qry = 'select * from subanta where erb = ?' if "'" in subDetailsRec.erb: param = subDetailsRec.erb.split("'") param = param[0] + "''" + param[1] else: param = subDetailsRec.erb # print('param %s %s'%(param, AmaraKosha_Database_Queries.iscii_unicode(param))) cols_subanta, dbdata_subanta = AmaraKosha_Database_Queries.sqlQuery(qry, param, maxrows=0, script=requested_script) for subantaDetailRec in dbdata_subanta: if subDetailsRec.erb == subantaDetailRec[cols_subanta.index('Erb') + 1]: qry = 'select * from sufcode where code=?' 
cols_sufcode, dbSufcode = AmaraKosha_Database_Queries.sqlQuery(qry, subantaDetailRec[cols_subanta.index('Code')][:4], maxrows=0, script=requested_script) subforms = [] for sufrec in dbSufcode: if sufrec[cols_sufcode.index('Code')] == subantaDetailRec[cols_subanta.index('Code')][:4]: sufixes = sufrec[cols_sufcode.index('SufStr')] if chCode in sufixes: for sufcode in sufixes.split(' '): if '/' in sufcode: ss = '' for s in sufcode.split('/'): ss += '/' + Sandhi_Convt.Convt(s) ss = ss[1:] else: ss = Sandhi_Convt.Convt(sufcode) if ss != '': subforms.append(ss) for subformItemNo, subformItem in enumerate(subforms): for tstr in subformItem.split('/'): if Sandhi_Convt.Sandhi(subDetailsRec.erb + tstr) == word: subDetail = subantaDetails() subDetail.vibvach = subformItemNo subDetail.vib = AmaraKosha_Database_Queries.iscii_unicode(Sandhi_Convt.vibstr[subformItemNo // 3], requested_script) subDetail.vach = AmaraKosha_Database_Queries.iscii_unicode(Sandhi_Convt.vachstr[subformItemNo % 3], requested_script) subDetail.base = subantaDetailRec[cols_subanta.index('Base') + 1] subDetail.erb = subantaDetailRec[cols_subanta.index('Erb') + 1] subDetail.det = subantaDetailRec[cols_subanta.index('Code') + 1] for entry in Sandhi_Convt.antas: if subantaDetailRec[cols_subanta.index('Code')][0] == entry[0]: subDetail.anta = entry[2] # equivalent of Right$(antas(i), Len(antas(i)) - 2) in VB code if subantaDetailRec[cols_subanta.index('Code')][0] == 'A': subDetail.anta += "ÚÆè£" else: subDetail.anta += "³ÚÏÚÆè£" break subDetail.anta = AmaraKosha_Database_Queries.iscii_unicode(subDetail.anta, requested_script) subDetail.linga = AmaraKosha_Database_Queries.iscii_unicode(Sandhi_Convt.lingas[int(subantaDetailRec[cols_subanta.index('Code')][1:2])], requested_script) subDetail.rupam = AmaraKosha_Database_Queries.iscii_unicode(Sandhi_Convt.Sandhi(subDetailsRec.erb + tstr), requested_script) subRecs.append(subDetail) # print([erb+item for item in subforms]) # print([AmaraKosha_Database_Queries.iscii_unicode(subDetails[i].erb+item) for item in subforms]) subforms_with_sandhi += [AmaraKosha_Database_Queries.iscii_unicode(Sandhi_Convt.Sandhi(subDetailsRec.erb + item + ' '), script=requested_script) for item in subforms] # print(len(subforms_with_sandhi),subforms_with_sandhi) if subforms_with_sandhi == []: # return [], subDetails[0].base, subDetail.anta, subDetail.linga, '', subDetails[0].vib, subDetails[0].vach raise Exception('Subanta Forms for ' + AmaraKosha_Database_Queries.iscii_unicode(word) + ' not found in Database') else: forms = [subforms_with_sandhi[0:3], subforms_with_sandhi[3:6], subforms_with_sandhi[6:9], subforms_with_sandhi[9:12], subforms_with_sandhi[12:15], subforms_with_sandhi[15:18], subforms_with_sandhi[18:21], # subforms_with_sandhi[21:24]] list(map(lambda word: 'हे ' + word, subforms_with_sandhi[0:3]))] # return forms, subDetail.anta, subDetail.linga, subforms_with_sandhi[0], subDetail.vib, subDetail.vach, subDetail.base, subDetail.erb, subDetail.det, subDetail.vibvach return forms, subRecs def krdanta_Analysis(word, requested_script=1): word_visandhi = Sandhi_Convt.visandhi(word) halanth = chr(232) krdAnalyDetails, krdDetails = [], [] forms = [] for i in range(len(word_visandhi)-1): if not word_visandhi[::-1][i] == halanth: krdAnalyDetail = krdAnalyData() krdAnalyDetail.erb = Sandhi_Convt.Sandhi(word_visandhi[:-(i + 1)]) krdAnalyDetail.suf = Sandhi_Convt.Sandhi(word_visandhi[len(word_visandhi) - (i + 1):]) # print('krdAnaly erb %s -> %s(%s) suf %s -> %s(%s)'%(word_visandhi[:-(i+1)], 
krdAnalyDetail.erb,AmaraKosha_Database_Queries.iscii_unicode(krdAnalyDetail.erb), word_visandhi[len(word_visandhi)-(i+1):], krdAnalyDetail.suf, AmaraKosha_Database_Queries.iscii_unicode(krdAnalyDetail.suf))) krdAnalyDetails.append(krdAnalyDetail) subforms_with_sandhi = [] for krdAnalyDetail in krdAnalyDetails: for j, scode in enumerate(Sandhi_Convt.Suffix): if scode != '' and krdAnalyDetail.suf == scode: chCode = Sandhi_Convt.decode(j) # print('chcode %s j %s scode %s'%(chCode, j, scode)) qry = 'select * from Krud where Field1 = ?' cols_krdanta, dataKrdanta = AmaraKosha_Database_Queries.sqlQuery(qry, krdAnalyDetail.erb, maxrows=0) for krdDetailRec in dataKrdanta: qry = 'select * from Sufcode where code=?' code = krdDetailRec[2][:4] # print('krdAnaly Sufcode: qry %s code %s' % (qry, code)) cols_sufcode, dataSufcode = AmaraKosha_Database_Queries.sqlQuery(qry, code, duplicate=False, maxrows=0) subforms = [] for sufrec in dataSufcode: if sufrec[cols_sufcode.index('Code')] == code: sufixes = sufrec[cols_sufcode.index('SufStr')] if chCode in sufixes: for sufcode in sufixes.split(' '): subforms.append(Sandhi_Convt.Convt(sufcode)) # print('subforms %s'%subforms) for subformItemNo, subformItem in enumerate(subforms): for tstr in subformItem.split('/'): if Sandhi_Convt.Sandhi(krdAnalyDetail.erb + tstr) ==
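# The subanta_Analysis / krdanta_Analysis routines above share one splitting step:
# after Sandhi_Convt.visandhi() undoes sandhi, the word is cut at every position
# counted from the right that is not occupied by the halanth marker (chr(232) in
# ISCII), and each (stem, suffix) pair is re-sandhied and checked against the
# suffix tables. A minimal stand-alone sketch of that step, using a plain ASCII
# stand-in for the halanth byte and a transliterated word instead of ISCII input:

def candidate_splits(word, halanth="~"):
    """Yield (stem, suffix) candidates, skipping cuts that land on a halanth."""
    splits = []
    for i in range(len(word) - 1):
        # word[::-1][i] is the i-th character counted from the end of the word
        if word[::-1][i] != halanth:
            stem = word[:-(i + 1)]
            suffix = word[len(word) - (i + 1):]
            splits.append((stem, suffix))
    return splits

# Hypothetical example; the real code additionally re-applies Sandhi_Convt.Sandhi()
# to both halves before looking them up in the subanta / Krud tables:
print(candidate_splits("ramena"))
# [('ramen', 'a'), ('rame', 'na'), ('ram', 'ena'), ('ra', 'mena'), ('r', 'amena')]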
<filename>py2df/utils.py import json import typing import collections import nbtlib as nbt from array import array if typing.TYPE_CHECKING: # avoid cyclic import from .typings import Numeric Docable = typing.Union[typing.Callable, type] IterOrSingleDocable = typing.Union[Docable, typing.Iterable[Docable]] def remove_u200b_from_doc(obj: IterOrSingleDocable, *other_objs: IterOrSingleDocable) -> None: """ Remove ``\\u200b`` from a class/method's docstring. Parameters ---------- obj : Union[Union[Callable, :class:`type`], Iterable[Union[Callable, :class:`type`]]] Can be either a class/method or an iterable of classes/methods from whose documentation ``\\u200b`` will be removed. other_objs: Union[Union[Callable, :class:`type`], Iterable[Union[Callable, :class:`type`]]] Any other objects (or iterables thereof) to follow the same procedure. Returns ------- None None """ if isinstance(obj, collections.Iterable) and not isinstance(obj, type): for clz in obj: remove_u200b_from_doc(clz) else: the_doc = obj.__doc__ if "\u200b" in the_doc: obj.__doc__ = the_doc.replace("\u200b", "") for o in other_objs: remove_u200b_from_doc(o) class _DoNotFlatten: def __init__(self, val): self.val = val def flatten( *args: typing.Any, allow_iterables: typing.Iterable[typing.Type[typing.Iterable]] = tuple(), except_iterables: typing.Iterable[typing.Type[typing.Iterable]] = tuple(), max_depth: typing.Optional[int] = None, keep_iterables: typing.Iterable[typing.Type[typing.Iterable]] = tuple(), curr_depth: int = 0 ) -> list: """ Flatten a list or iterable of arbitrary length. >>> from py2df.utils import flatten >>> flatten(1, 2, ['b', 'a' , ['c', 'd']], 3) [1, 2, 'b', 'a', 'c', 'd', 3] Parameters ---------- args : Any Items and lists to be combined into a single list. allow_iterables : Iterable[:class:`type`], optional An iterable (list, etc.) which specifies the types (classes) of Iterables to flatten, besides :class:`list` and :class:`tuple` (which are always checked by default) - they will be checked with an ``isinstance()`` call. Defaults to ``tuple()`` (empty tuple - none). except_iterables : Iterable[:class:`type`], optional An iterable (list, etc.) which specifies the types (classes) of Iterables to NOT be flattened; i.e., all will be flattened except the given ones. This could have serious side-effects, so choose wisely. Defaults to ``tuple()`` (empty tuple - none). If using this, **it is recommended to set `max_depth` ** . **This parameter overrides `allow_iterables`. ** max_depth : Optional[int], optional An integer that represents the maximum depth until which the list is flattened, or None for no limit. Defaults to None. keep_iterables : Iterable[:class:`type`], optional List of iterable types to keep; i.e., flatten them, but keep them there regardless (one position before). E.g.:: >>> flatten(1, (1, 2), keep_iterables=[tuple]) [1, (1, 2), 1, 2] Returns ------- :class:`list` Resulting list. Warnings -------- Pick the iterables in the ``allow_iterables`` list wisely, because **any instance of it will be flattened**. This can produce unexpected results when accepting, for example, :class:`str` as a valid Iterable to be flattened. This warning also applies (even more so) to ``except_iterables``: If it is specified, make sure to set ``max_depth`` in order to avoid further problems. Notes ----- Credits to NLTK authors for this function's original code. 
""" x = [] if except_iterables and allow_iterables: allow_iterables = tuple() for el in args: is_iterable = isinstance(el, collections.Iterable) do_keep = keep_iterables and isinstance(el, tuple(keep_iterables)) and curr_depth == 0 # prevent dupes if do_keep: # if we should keep it in the list x.append(el) if ( (is_iterable and except_iterables and isinstance(el, tuple(except_iterables))) # if iterable in "except", or (not except_iterables and not isinstance(el, (list, tuple, *(allow_iterables or [])))) # !"accept"... ): el = [el] # make it an one-element iterable for the for loop to work. for item in el: if do_keep and item == el: continue item_is_iter = isinstance(item, collections.Iterable) if ( # if this is a valid iterable according to the given parameters, then flatten it (curr_depth < max_depth if max_depth else True) # do not flatten any further than max depth. and ( # if this is a valid iterable (not in "except" or in "accept"), flatten it! (item_is_iter and except_iterables and not isinstance(item, tuple(except_iterables))) or (not except_iterables and isinstance(item, (list, tuple, *(allow_iterables or tuple())))) ) ): if keep_iterables and isinstance(item, tuple(keep_iterables)): x.append(item) x.extend( flatten( # flatten item, allow_iterables=allow_iterables, except_iterables=except_iterables, max_depth=max_depth, curr_depth=curr_depth + 1, keep_iterables=keep_iterables ) ) else: # don't flatten x.append(item) # if keep_iterables and curr_depth == 0 and False: # # print(f"bruh moment {curr_depth=} {len(list(filter(lambda t: type(t) == _DoNotFlatten, x)))}") # return [el.val if type(el) == _DoNotFlatten else el for el in x] return x AnyNumber = typing.Union[int, float] @typing.overload def clamp(num: int, min_: int, max_: int) -> int: ... @typing.overload def clamp(num: float, min_: float, max_: float) -> float: ... @typing.overload def clamp(num: AnyNumber, min_: AnyNumber, max_: AnyNumber) -> AnyNumber: ... def clamp(num: AnyNumber, min_: AnyNumber, max_: AnyNumber) -> AnyNumber: """Clamps a number (int, float) between two bounds, inclusively. Parameters ---------- num : Union[:class:`int`, :class:`float`] Number to be clamped. min_ : Union[:class:`int`, :class:`float`] Lower bound; the minimum value that this number can be, and is returned if ``distance <= min_`` holds. max_ : Union[:class:`int`, :class:`float`] Upper bound; the maximum value that this number can be, and is returned if ``distance => max_`` holds. Returns ------- Union[:class:`int`, :class:`float`] The clamped number. """ return min(max(min_, num), max_) T = typing.TypeVar("T") def all_attr_eq(a: T, b: T) -> bool: """ Checks if two objects are equal by comparing their types and each of their attributes. Parameters ---------- a : Any An object to compare. b : Any Another object to compare equality. Returns ------- :class:`bool` Whether or not the objects are equal (if their types and attributes are all equal). """ return type(a) == type(b) and all( getattr(a, attr) == getattr(b, attr) for attr in getattr( a.__class__, "__slots__", a.__dict__ ) or a.__class__.__dict__ ) K = typing.TypeVar("K") V = typing.TypeVar("V") def select_dict( obj: typing.Dict[K, V], *keys: typing.Union[str, typing.Iterable[str]], ignore_missing: bool = False ) -> typing.Dict[K, V]: """ Selects certain keys from a dict. Parameters ---------- obj : :class:`dict` The dictionary from which to select keys. keys : Union[:class:`str`, Iterable[:class:`str`]] Key, keys or iterables of keys to select. 
ignore_missing : :class:`bool`, optional Whether or not to ignore missing attributes in the dictionary (i.e., accept trying to select a key that is not there). This defaults to False. (If False, it will raise a KeyError.) Returns ------- :class:`dict` A dictionary with only the given keys. """ flattened_keys = flatten(keys, except_iterables=(str,), max_depth=2) def select_key(k: K) -> typing.Tuple[K, V]: return (k, obj[k]) if ignore_missing: def filter_key(k: K) -> bool: return k in obj gen = map(select_key, filter(filter_key, flattened_keys)) else: gen = map(select_key, flattened_keys) return {k: v for k, v in gen} @typing.overload # String => str def nbt_to_python(obj: nbt.String, convert_items: bool = True) -> str: ... @typing.overload # Int/Long/Short/Byte => int def nbt_to_python(obj: typing.Union[nbt.Int, nbt.Long, nbt.Short, nbt.Byte], convert_items: bool = True) -> int: ... @typing.overload # Float/Double => float def nbt_to_python(obj: typing.Union[nbt.Float, nbt.Double], convert_items: bool = True) -> float: ... @typing.overload # Compound => dict def nbt_to_python(obj: nbt.Compound, convert_items: bool = True) -> dict: ... ItemType = typing.TypeVar("ItemType") if hasattr(typing, "Literal"): # py 3.8 TrueLiteral: "typing.Literal[True]" = typing.Literal[True] FalseLiteral: "typing.Literal[False]" = typing.Literal[False] else: # support for py <3.8 - just accept any bool TrueLiteral = FalseLiteral = typing.cast(typing.Any, bool) # type: typing.Type[bool] @typing.overload # convert_items is False; List def nbt_to_python(obj: nbt.List[ItemType], convert_items: FalseLiteral) -> typing.List[ItemType]: ... @typing.overload # convert_items is True; List def nbt_to_python( obj: nbt.List, convert_items: TrueLiteral = True ) -> typing.List[typing.Union[str, dict, list, array, int, float]]: ... @typing.overload # arrays def nbt_to_python( obj: typing.Union[nbt.ByteArray, nbt.LongArray, nbt.IntArray], convert_items: bool = True ) -> array: ... @typing.overload # general case def nbt_to_python( obj: nbt.tag.Base, convert_items: bool = True ) -> typing.Union[str, dict, list, array, int, float]: ... def nbt_to_python(obj: nbt.tag.Base, convert_items: bool = True) -> typing.Union[str, dict, list, array, int, float]: """ Converts a NBT object (instance of :class:`nbtlib.tag.Base`, i.e., any NBT-related class) into its Python raw type equivalent. 
Example:: >>> from py2df.utils import nbt_to_python >>> import nbtlib >>> converted = nbt_to_python(nbtlib.Byte(5)) >>> converted 5 >>> type(converted) <class 'int'> The full relation is: +-------------------------+----------------------------------------------------------------------------------------+ | Equivalent raw type | NBT Type | +=========================+========================================================================================+ | :class:`str` | :class:`nbtlib.String` | +-------------------------+----------------------------------------------------------------------------------------+ | :class:`int` | :class:`nbtlib.Int`, :class:`nbtlib.Long`, :class:`nbtlib.Short`, :class:`nbtlib.Byte` | +-------------------------+----------------------------------------------------------------------------------------+ | :class:`float` | :class:`nbtlib.Float`, :class:`nbtlib.Double` | +-------------------------+----------------------------------------------------------------------------------------+ | :class:`dict` | :class:`nbtlib.Compound` | +-------------------------+----------------------------------------------------------------------------------------+ | :class:`list` | :class:`nbtlib.List` | +-------------------------+----------------------------------------------------------------------------------------+ | :class:`~array.array` | :class:`nbtlib.ByteArray`, :class:`nbtlib.IntArray`, :class:`nbtlib.LongArray` | +-------------------------+----------------------------------------------------------------------------------------+ Parameters ---------- obj : :class:`nbtlib.tag.Base` The NBT object to convert. convert_items : :class:`bool`, optional Whether or not should convert all items of list, array and dict-related types to python raw
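# A short usage sketch for the helpers defined above (flatten, clamp, select_dict),
# assuming the py2df package is importable (flatten relies on collections.Iterable,
# so this needs a Python version that still provides it). Expected values follow
# the docstring examples and the code paths shown above.

from py2df.utils import flatten, clamp, select_dict

assert flatten(1, 2, ['b', 'a', ['c', 'd']], 3) == [1, 2, 'b', 'a', 'c', 'd', 3]
assert flatten(1, (1, 2), keep_iterables=[tuple]) == [1, (1, 2), 1, 2]

# clamp() bounds a number inclusively between min_ and max_
assert clamp(10, 0, 5) == 5
assert clamp(-3, 0, 5) == 0

# select_dict() keeps only the requested keys; ignore_missing=True drops absent ones
assert select_dict({'a': 1, 'b': 2, 'c': 3}, 'a', 'c') == {'a': 1, 'c': 3}
assert select_dict({'a': 1}, 'a', 'z', ignore_missing=True) == {'a': 1}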
its output. It compute mentions scores. Args: mention_emb: tf.float64, [num_mentions, emb], a tensor that contains of embeddings of specific mentions Returns: [num_mentions, 1] Output of the fully-connected network, that compute the mentions scores. """ with tf.variable_scope("mention_scores"): return utils.ffnn(mention_emb, self.opt["ffnn_depth"], self.opt["ffnn_size"], 1, self.dropout) # [num_mentions, 1] def softmax_loss(self, antecedent_scores, antecedent_labels): """ Computes the value of the loss function using antecedent_scores and antecedent_labels. Practically standard softmax function. Args: antecedent_scores: tf.float64, [num_mentions, max_ant + 1], output of fully-connected network that compute antecedent scores. antecedent_labels: True labels for antecedent. Returns: [num_mentions] The value of loss function. """ gold_scores = antecedent_scores + tf.log(tf.cast(antecedent_labels, tf.float64)) # [num_mentions, max_ant + 1] marginalized_gold_scores = tf.reduce_logsumexp(gold_scores, [1]) # [num_mentions] log_norm = tf.reduce_logsumexp(antecedent_scores, [1]) # [num_mentions] return log_norm - marginalized_gold_scores # [num_mentions] def get_antecedent_scores(self, mention_emb, mention_scores, antecedents, antecedents_len, mention_speaker_ids, genre_emb): """ Forms a new tensor using special features, mentions embeddings, mentions scores, etc. and passes it through a fully-connected network that compute antecedent scores. Args: mention_emb: [num_mentions, emb], a tensor that contains of embeddings of specific mentions mention_scores: [num_mentions, 1], Output of the fully-connected network, that compute the mentions scores. antecedents: [] get from C++ function antecedents_len: [] get from C++ function mention_speaker_ids: [num_mentions, speaker_emb_size], tf.float64, Speaker IDs. genre_emb: [genre_emb_size], tf.float64, Genre Returns: tf.float64, [num_mentions, max_ant + 1], antecedent scores. 
""" num_mentions = utils.shape(mention_emb, 0) max_antecedents = utils.shape(antecedents, 1) feature_emb_list = [] if self.opt["use_metadata"]: antecedent_speaker_ids = tf.gather(mention_speaker_ids, antecedents) # [num_mentions, max_ant] same_speaker = tf.equal(tf.expand_dims(mention_speaker_ids, 1), antecedent_speaker_ids) # [num_mentions, max_ant] speaker_pair_emb = tf.gather(tf.get_variable("same_speaker_emb", [2, self.opt["feature_size"]], dtype=tf.float64), tf.to_int32(same_speaker)) # [num_mentions, max_ant, emb] feature_emb_list.append(speaker_pair_emb) tiled_genre_emb = tf.tile(tf.expand_dims(tf.expand_dims(genre_emb, 0), 0), [num_mentions, max_antecedents, 1]) # [num_mentions, max_ant, emb] feature_emb_list.append(tiled_genre_emb) if self.opt["use_features"]: target_indices = tf.range(num_mentions) # [num_mentions] mention_distance = tf.expand_dims(target_indices, 1) - antecedents # [num_mentions, max_ant] mention_distance_bins = self.distance_bins(mention_distance) # [num_mentions, max_ant] mention_distance_bins.set_shape([None, None]) mention_distance_emb = tf.gather(tf.get_variable("mention_distance_emb", [10, self.opt["feature_size"]], dtype=tf.float64), mention_distance_bins) # [num_mentions, max_ant] feature_emb_list.append(mention_distance_emb) feature_emb = tf.concat(feature_emb_list, 2) # [num_mentions, max_ant, emb] feature_emb = tf.nn.dropout(feature_emb, self.dropout) # [num_mentions, max_ant, emb] antecedent_emb = tf.gather(mention_emb, antecedents) # [num_mentions, max_ant, emb] target_emb_tiled = tf.tile(tf.expand_dims(mention_emb, 1), [1, max_antecedents, 1]) # [num_mentions, max_ant, emb] similarity_emb = antecedent_emb * target_emb_tiled # [num_mentions, max_ant, emb] pair_emb = tf.concat([target_emb_tiled, antecedent_emb, similarity_emb, feature_emb], 2) # [num_mentions, max_ant, emb] with tf.variable_scope("iteration"): with tf.variable_scope("antecedent_scoring"): antecedent_scores = utils.ffnn(pair_emb, self.opt["ffnn_depth"], self.opt["ffnn_size"], 1, self.dropout) # [num_mentions, max_ant, 1] antecedent_scores = tf.squeeze(antecedent_scores, 2) # [num_mentions, max_ant] antecedent_mask = tf.log( tf.sequence_mask(antecedents_len, max_antecedents, dtype=tf.float64)) # [num_mentions, max_ant] antecedent_scores += antecedent_mask # [num_mentions, max_ant] antecedent_scores += tf.expand_dims(mention_scores, 1) + tf.gather(mention_scores, antecedents) # [num_mentions, max_ant] antecedent_scores = tf.concat([tf.zeros([utils.shape(mention_scores, 0), 1], dtype=tf.float64), antecedent_scores], 1) # [num_mentions, max_ant + 1] return antecedent_scores # [num_mentions, max_ant + 1] def flatten_emb_by_sentence(self, emb, text_len_mask): """ Create boolean mask for emb tensor. Args: emb: Some embeddings tensor with rank 2 or 3 text_len_mask: A mask tensor representing the first N positions of each row. Returns: emb tensor after mask applications. """ num_sentences = tf.shape(emb)[0] max_sentence_length = tf.shape(emb)[1] emb_rank = len(emb.get_shape()) if emb_rank == 2: flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length]) elif emb_rank == 3: flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length, utils.shape(emb, 2)]) else: raise ValueError("Unsupported rank: {}".format(emb_rank)) return tf.boolean_mask(flattened_emb, text_len_mask) def encode_sentences(self, text_emb, text_len, text_len_mask): """ Passes the input tensor through bi_LSTM. 
Args: text_emb: [num_sentences, max_sentence_length, emb], text code in tensor text_len: tf.int32, [Amount of sentences] text_len_mask: boolean mask for text_emb Returns: [num_sentences, max_sentence_length, emb], output of bi-LSTM after boolean mask application """ num_sentences = tf.shape(text_emb)[0] max_sentence_length = tf.shape(text_emb)[1] # Transpose before and after for efficiency. inputs = tf.transpose(text_emb, [1, 0, 2]) # [max_sentence_length, num_sentences, emb] with tf.variable_scope("fw_cell"): cell_fw = utils.CustomLSTMCell(self.opt["lstm_size"], num_sentences, self.dropout) preprocessed_inputs_fw = cell_fw.preprocess_input(inputs) with tf.variable_scope("bw_cell"): cell_bw = utils.CustomLSTMCell(self.opt["lstm_size"], num_sentences, self.dropout) preprocessed_inputs_bw = cell_bw.preprocess_input(inputs) preprocessed_inputs_bw = tf.reverse_sequence(preprocessed_inputs_bw, seq_lengths=text_len, seq_dim=0, batch_dim=1) state_fw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_fw.initial_state.c, [num_sentences, 1]), tf.tile(cell_fw.initial_state.h, [num_sentences, 1])) state_bw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_bw.initial_state.c, [num_sentences, 1]), tf.tile(cell_bw.initial_state.h, [num_sentences, 1])) with tf.variable_scope("lstm"): with tf.variable_scope("fw_lstm"): fw_outputs, fw_states = tf.nn.dynamic_rnn(cell=cell_fw, inputs=preprocessed_inputs_fw, sequence_length=text_len, initial_state=state_fw, time_major=True) with tf.variable_scope("bw_lstm"): bw_outputs, bw_states = tf.nn.dynamic_rnn(cell=cell_bw, inputs=preprocessed_inputs_bw, sequence_length=text_len, initial_state=state_bw, time_major=True) bw_outputs = tf.reverse_sequence(bw_outputs, seq_lengths=text_len, seq_dim=0, batch_dim=1) text_outputs = tf.concat([fw_outputs, bw_outputs], 2) text_outputs = tf.transpose(text_outputs, [1, 0, 2]) # [num_sentences, max_sentence_length, emb] return self.flatten_emb_by_sentence(text_outputs, text_len_mask) def get_predicted_antecedents(self, antecedents, antecedent_scores): """ Forms a list of predicted antecedent labels Args: antecedents: [] get from C++ function antecedent_scores: [num_mentions, max_ant + 1] output of fully-connected network that compute antecedent_scores Returns: a list of predicted antecedent labels """ predicted_antecedents = [] for i, index in enumerate(np.argmax(antecedent_scores, axis=1) - 1): if index < 0: predicted_antecedents.append(-1) else: predicted_antecedents.append(antecedents[i, index]) return predicted_antecedents def get_predictions_and_loss(self, word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids): """ Connects all elements of the network to one complete graph, that compute mentions spans independently And passes through it the tensors that came to the input of placeholders. Args: word_emb: [Amount of sentences, Amount of words in sentence (max len), self.embedding_size], float64, Text embeddings. char_index: [Amount of words, Amount of chars in word (max len), char_embedding_size], tf.int32, Character indices. text_len: tf.int32, [Amount of sentences] speaker_ids: [Amount of independent speakers], tf.int32, Speaker IDs. 
genre: [Amount of independent genres], tf.int32, Genre is_training: tf.bool gold_starts: tf.int32, [Amount of gold mentions] gold_ends: tf.int32, [Amount of gold mentions] cluster_ids: tf.int32, [Amount of independent clusters] Returns:[candidate_starts, candidate_ends, candidate_mention_scores, mention_starts, mention_ends, antecedents, antecedent_scores], loss List of predictions and scores, and Loss function value """ self.dropout = 1 - (tf.cast(is_training, tf.float64) * self.opt["dropout_rate"]) self.lexical_dropout = 1 - (tf.cast(is_training, tf.float64) * self.opt["lexical_dropout_rate"]) num_sentences = tf.shape(word_emb)[0] max_sentence_length = tf.shape(word_emb)[1] text_emb_list = [word_emb] if self.opt["char_embedding_size"] > 0: char_emb = tf.gather( tf.get_variable("char_embeddings", [len(self.char_dict), self.opt["char_embedding_size"]]), char_index, tf.float64) # [num_sentences, max_sentence_length, max_word_length, emb] flattened_char_emb = tf.reshape(char_emb, [num_sentences * max_sentence_length, utils.shape(char_emb, 2), utils.shape(char_emb, 3)]) # [num_sentences * max_sentence_length, max_word_length, emb] flattened_aggregated_char_emb = utils.cnn(flattened_char_emb, self.opt["filter_widths"], self.opt[ "filter_size"]) # [num_sentences * max_sentence_length, emb] aggregated_char_emb = tf.reshape(flattened_aggregated_char_emb, [num_sentences, max_sentence_length, utils.shape(flattened_aggregated_char_emb, 1)]) # [num_sentences, max_sentence_length, emb] text_emb_list.append(aggregated_char_emb) text_emb = tf.concat(text_emb_list, 2) text_emb = tf.nn.dropout(text_emb, self.lexical_dropout) text_len_mask = tf.sequence_mask(text_len, maxlen=max_sentence_length) text_len_mask = tf.reshape(text_len_mask, [num_sentences * max_sentence_length]) text_outputs = self.encode_sentences(text_emb, text_len, text_len_mask) text_outputs = tf.nn.dropout(text_outputs, self.dropout) genre_emb = tf.gather(tf.get_variable("genre_embeddings", [len(self.genres), self.opt["feature_size"]], dtype=tf.float64), genre) # [emb] sentence_indices = tf.tile(tf.expand_dims(tf.range(num_sentences), 1), [1, max_sentence_length]) # [num_sentences, max_sentence_length] flattened_sentence_indices = self.flatten_emb_by_sentence(sentence_indices, text_len_mask) # [num_words] flattened_text_emb = self.flatten_emb_by_sentence(text_emb, text_len_mask) # [num_words] candidate_starts, candidate_ends = self.spans( sentence_indices=flattened_sentence_indices, max_width=self.max_mention_width) candidate_starts.set_shape([None]) candidate_ends.set_shape([None]) candidate_mention_emb = self.get_mention_emb(flattened_text_emb, text_outputs, candidate_starts, candidate_ends) # [num_candidates, emb] candidate_mention_scores = self.get_mention_scores(candidate_mention_emb) # [num_mentions, 1] candidate_mention_scores = tf.squeeze(candidate_mention_scores, 1) # [num_mentions] k = tf.to_int32(tf.floor(tf.to_float(tf.shape(text_outputs)[0]) * self.opt["mention_ratio"])) predicted_mention_indices = self.extract_mentions(candidate_mention_scores, candidate_starts, candidate_ends, k) # ([k], [k]) predicted_mention_indices.set_shape([None]) mention_starts = tf.gather(candidate_starts, predicted_mention_indices) # [num_mentions] mention_ends = tf.gather(candidate_ends, predicted_mention_indices) # [num_mentions] mention_emb = tf.gather(candidate_mention_emb, predicted_mention_indices) # [num_mentions, emb] mention_scores = tf.gather(candidate_mention_scores, predicted_mention_indices) # [num_mentions] # mention_start_emb = 
tf.gather(text_outputs, mention_starts) # [num_mentions, emb] # mention_end_emb = tf.gather(text_outputs, mention_ends) # [num_mentions, emb] mention_speaker_ids = tf.gather(speaker_ids, mention_starts) # [num_mentions] max_antecedents = self.opt["max_antecedents"] antecedents, antecedent_labels, antecedents_len = self.get_antecedents(mention_starts, mention_ends, gold_starts, gold_ends, cluster_ids, max_antecedents) # ([num_mentions, max_ant], [num_mentions, max_ant + 1], [num_mentions] antecedents.set_shape([None, None]) antecedent_labels.set_shape([None, None]) antecedents_len.set_shape([None]) antecedent_scores = self.get_antecedent_scores(mention_emb, mention_scores, antecedents, antecedents_len, mention_speaker_ids, genre_emb) # [num_mentions, max_ant + 1] loss = self.softmax_loss(antecedent_scores, antecedent_labels) # [num_mentions] loss = tf.reduce_sum(loss) # [] return [candidate_starts, candidate_ends, candidate_mention_scores, mention_starts, mention_ends, antecedents, antecedent_scores], loss def get_predicted_clusters(self, mention_starts, mention_ends, predicted_antecedents): """ Creates a list of clusters, as in dict from observation, and dict mentions with a list of clusters to which they belong. They are necessary for inference mode and marking a new conll documents without last column. Args: mention_starts: tf.float64, [Amount of mentions] mention_ends: tf.float64, [Amount of mentions] predicted_antecedents: [len antecedent scores] Returns: predicted_clusters = [[(),(),()],[(),()]] list like, with mention id mention_to_predicted = {mentions id: [(),(),()], ...} """ mention_to_predicted = {} predicted_clusters = [] for i, predicted_index in enumerate(predicted_antecedents): if predicted_index < 0: continue assert i > predicted_index predicted_antecedent = (int(mention_starts[predicted_index]), int(mention_ends[predicted_index])) if predicted_antecedent in mention_to_predicted: predicted_cluster = mention_to_predicted[predicted_antecedent] else: predicted_cluster = len(predicted_clusters) predicted_clusters.append([predicted_antecedent]) mention_to_predicted[predicted_antecedent] = predicted_cluster mention = (int(mention_starts[i]), int(mention_ends[i])) predicted_clusters[predicted_cluster].append(mention) mention_to_predicted[mention] = predicted_cluster predicted_clusters = [tuple(pc) for pc in predicted_clusters] mention_to_predicted = {m: predicted_clusters[i] for m, i in mention_to_predicted.items()} return predicted_clusters, mention_to_predicted def init_from_saved(self, saver): """ Load model from saved checkpoint.
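# softmax_loss() above is the marginalized log-likelihood of the end-to-end
# coreference model: for every mention it sums probability mass over all gold
# antecedents (including the dummy antecedent in column 0) rather than a single
# target. A NumPy re-statement of the same computation, handy for sanity-checking
# the TensorFlow graph on toy inputs:

import numpy as np

def softmax_loss_np(antecedent_scores, antecedent_labels):
    """antecedent_scores: [num_mentions, max_ant + 1] floats;
    antecedent_labels:  same shape, boolean gold-antecedent mask."""
    with np.errstate(divide="ignore"):          # log(0) -> -inf is intentional here
        gold_scores = antecedent_scores + np.log(antecedent_labels.astype(float))
    marginalized_gold = np.logaddexp.reduce(gold_scores, axis=1)    # [num_mentions]
    log_norm = np.logaddexp.reduce(antecedent_scores, axis=1)       # [num_mentions]
    return log_norm - marginalized_gold                             # [num_mentions]

# Toy check: one mention whose only gold antecedent is the dummy (column 0),
# while a non-gold candidate (column 1) scores higher -> a clearly positive loss.
scores = np.array([[0.0, 2.0, -1.0]])
labels = np.array([[True, False, False]])
print(softmax_loss_np(scores, labels))          # ~[2.17]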
<filename>src/ama/validator.py<gh_stars>1-10 # Copyright 2013-2014, <NAME>, <EMAIL> # pylint: disable=unused-argument """Provides access to a registry of validation functions. Functions are returned via the :func:`get_validator` function and can be refined by passing a specification which alters what passes the validation. All validators throw :class:`TypeError` if the value's type cannot be validated and :class:`ValueError` if the value fails validation. ======================== ====================================================== Validator Name Tests that the value... ======================== ====================================================== ``nonempty`` is not None or an empty string ``constant`` always returns the same value ``str`` can be converted to a string ``int`` can be converted to an integer value ``float`` can be converted to a floating point value ``bool`` can be converted to a boolean value ``yesno`` matches one of ``yes``, ``y``, ``no``, ``n`` with any case plus 1, 0, True and False ``re`` matches the regular expression. ``path`` is a valid path ``date`` is a valid date ``time`` is a valid time ``color`` is a valid RGB or RGB hex color ``email`` is a valid email address ======================== ====================================================== """ from __future__ import absolute_import, division, print_function, unicode_literals import os import sys import csv import gettext import glob import shutil import string import tempfile from io import StringIO from datetime import datetime, date, time from functools import partial try: from pkg_resources import load_entry_point except: load_entry_point = lambda x: None import re try: import pyisemail PYISEMAIL = True except ImportError: PYISEMAIL = False if sys.version_info < (3, 0): gettext.install('ama', unicode=True) #pylint: disable=unexpected-keyword-arg else: gettext.install('ama') DEFAULT_TIME_FORMAT = '%H:%M' DEFAULT_DATE_FORMAT = '%Y-%m-%d' if sys.version_info >= (3, 0): str_type = str csv.register_dialect('ama', delimiter='|') else: str_type = basestring csv.register_dialect('ama', delimiter=b'|') def str_to_elems(string): if sys.version_info < (3, 0) and isinstance(string, basestring): string = string.decode('UTF-8') ds = StringIO(string) reader = csv.reader(ds, dialect=csv.get_dialect('ama')) for row in reader: for elem in row: yield elem def str_to_kwargs(string, allowed=None): kwargs = {} for elem in str_to_elems(string): option, value = elem.split('=') if not allowed or option in allowed: kwargs[option] = value return kwargs def NonEmpty(*args, **kwargs): """Create a validator that checks that any value is provided""" msg = kwargs.get('message', _('Please enter anything.')) def validate(value): if not value: raise ValueError(msg) return value return validate def Constant(*args, **kwargs): """Create a validator that always return the same value.""" def validate(value): return args[0] return validate def OneOf(*args, **kwargs): """Create a validator that checks that the value is one of the those provided.""" def validate(value): msg = _('Value must be one of %s') if value in args: return value else: raise ValueError(msg % ', '.join(args)) return validate def Str(*args, **kwargs): """Create a validator that checks that the value is a valid string according to the `spec` :param spec: The specification to check the string against. 
Can be either None Anything that can be converted to a string passes The string ``nonempty`` a string of length greater than 1 passes A string of `argument=value` pairs separated by commas. Checks the string matches based on the arguments specified The following arguments can be specified. | ``min`` - The minimum number of characters | ``max`` - The maximum number of characters e.g. "min=3,max=6" means the string must be between 3 and 6 characters long. :type spec: str """ def validate(value, **kwargs): if value is None or value=='': return '' try: value = str(value) except: raise ValueError(_('Unable to convert value to string')) length = len(value) if 'min' in kwargs: min_ = int(kwargs['min']) if length < min_: raise ValueError(_('String must be at least %d characters') % min_) if 'max' in kwargs: max_ = int(kwargs['max']) if length > max_: raise ValueError(_('String must be a maximum of %d characters') % max_) return value if args and args[0] == 'nonempty': return NonEmpty() else: return partial(validate, **kwargs) def Int(*args, **kwargs): """Create a validator that checks that the value is a valid integer according to the `spec` :param spec: The specification to check the integer against. Can be either None Anything that is an integer passes. e.g. 1 and "1" are valid integers but 1.2, "1.2" or "chas" are not. A string of `argument=value` pairs separated by commas. Alters how the integer is validated. The following arguments can be specified. | ``min`` - The minimum value | ``max`` - The maximum value e.g. "min=3,max=6" means the value must be between 3 and 6. :type spec: str """ def validate(value, **kwargs): msg = _('Invalid integer value') if isinstance(value, float): raise TypeError(msg) if isinstance(value, str_type): decimal = kwargs.get('decimal', '.') if decimal in value: raise ValueError(msg) try: value = int(value) except: raise ValueError(msg) if 'min' in kwargs: min_ = int(kwargs['min']) if value < min_: raise ValueError('Integer value less than minimum %d' % min_) if 'max' in kwargs: max_ = int(kwargs['max']) if value > max_: raise ValueError('Integer value greater than maximum %d' % max_) return value return partial(validate, **kwargs) def Float(*args, **kwargs): """Create a validator that checks that the value is a valid float according to the `spec` :param spec: The specification to check the float against. Can be either None Anything that is a float passes. e.g. 1.2 and "1.2" are valid floats but 1, "1" or "dave" are not. A string of `argument=value` pairs separated by commas. Alters how the float is validated. The following arguments can be specified. | ``min`` - The minimum value | ``max`` - The maximum value | ``decimal`` - The character to consider as the decimal separator | ``nocoerce`` - Disable coercing int to float e.g. "min=3.1,max=6.0" means the value must be between 3.1 and 6.0; "decimal=\\\\," means that "33,234" is a valid float. 
:type spec: str """ def validate(value, **kwargs): msg = _('Invalid floating point value') if 'nocoerce' in kwargs and isinstance(value, int): raise TypeError(msg) if isinstance(value, str_type): decimal = kwargs.get('decimal', '.') if 'nocoerce' in kwargs and decimal not in value: raise ValueError(msg) elif decimal != '.': value = value.replace(decimal, '.') try: value = float(value) except: raise ValueError(msg) if 'min' in kwargs: min_ = float(kwargs['min']) if value < min_: raise ValueError('Float value less than minimum %f' % min_) if 'max' in kwargs: max_ = float(kwargs['max']) if value > max_: raise ValueError('Float value greater than maximum %f' % max_) return value return partial(validate, **kwargs) def Number(*args, **kwargs): """Create a validator that checks that the value is a valid number according to the `spec` :param spec: The specification to check the integer against. Can be either None Anything that is a number passes. A string of `argument=value` pairs separated by commas. Check s the integer matches based on the arguments specified The following arguments can be specified. | ``min`` - The minimum value | ``max`` - The maximum value | ``decimal`` - The character to consider as the decimal separator e.g. "min=3,max=6" means the value must be between 3 and 6. :type spec: str """ def validate(value, **kwargs): msg = _('Invalid number') if isinstance(value, str_type): decimal = kwargs.get('decimal', '.') if decimal != '.': value = value.replace(decimal, '.') try: value = float(value) except ValueError: raise ValueError(msg) if 'min' in kwargs: min_ = float(kwargs['min']) if value < min_: raise ValueError('Float value less than minimum %d' % min_) if 'max' in kwargs: max_ = float(kwargs['max']) if value > max_: raise ValueError('Float value greater than maximum %d' % max_) return value return partial(validate, **kwargs) def Bool(*args, **kwargs): """Create a validator that checks that the value is a valid bool.""" def validate(value): msg = _('Invalid boolean value') true_values = ['true', '1', 'yes', 'y'] false_values = ['false', '0', 'no', 'n'] if isinstance(value, bool): return value elif isinstance(value, int): return bool(value) elif str(value).lower() in true_values: return True elif str(value).lower() in false_values: return False else: raise ValueError(msg) return validate def Regex(*args, **kwargs): """Create a validator that checks that the value matches a regular expression. """ # if no regex provided just check that the value can be converted to a string if len(args) == 0: return lambda value: str(value) def validate(value, **kwargs): msg = _('Please enter a string which matches the regex') regex = kwargs.pop('regex', None) if regex: m = re.match(regex, value) if m is not None: return value else: raise ValueError('%s %s' % (msg, regex)) else: return value kwargs['regex'] = args[0] return partial(validate, **kwargs) def Path(*args, **kwargs): """Create a validator that checks that the value is a valid path. The meaning of valid is determined
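# A minimal usage sketch of the validator factories above, calling them directly
# with keyword arguments (the spec-string parsing done by get_validator() /
# str_to_kwargs() is bypassed here). The import path is an assumption based on
# the module living at src/ama/validator.py:

from ama.validator import Int, Float, Bool

int_between = Int(min='3', max='6')     # returns partial(validate, min='3', max='6')
print(int_between('4'))                 # -> 4
try:
    int_between(9)
except ValueError as err:
    print(err)                          # Integer value greater than maximum 6

float_comma = Float(decimal=',')        # treat ',' as the decimal separator
print(float_comma('3,5'))               # -> 3.5

yes_no = Bool()
print(yes_no('Yes'), yes_no(0))         # -> True False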
# -*- coding: utf-8 -*- # Copyright (c) 2004-2015 Alterra, Wageningen-UR # <NAME> (<EMAIL>), Juli 2015 # from __future__ import print_function """Implementation of AgroManager and related classes for agromanagement actions in PCSE. Available classes: * CropCalendar: A class for handling cropping calendars * TimedEventDispatcher: A class for handling timed events (e.g. events connected to a date) * StateEventDispatcher: A class for handling state events (e.g. events that happen when a state variable reaches a certain values. * AgroManager: A class for handling all agromanagement events which encapsulates the CropCalendar and Timed/State events. """ from datetime import date, timedelta import logging from collections import Counter from .base_classes import DispatcherObject, VariableKiosk, SimulationObject, ParameterProvider, AncillaryObject from .traitlets import HasTraits, Float, Int, Instance, Enum, Bool, List, Dict, Unicode from . import exceptions as exc from .util import ConfigurationLoader from . import signals from . import exceptions as exc def cmp2(x, y): """ Compare two values and return sign Surrogate for cmp() function in Python2 """ return (x > y) - (x < y) def check_date_range(day, start, end): """returns True if start <= day < end Optionally, end may be None. in that case return True if start <= day :param day: the date that will be checked :param start: the start date of the range :param end: the end date of the range or None :return: True/False """ if end is None: return start <= day else: return start <= day < end class CropCalendar(HasTraits, DispatcherObject): """A crop calendar for managing the crop cycle. A `CropCalendar` object is responsible for storing, checking, starting and ending the crop cycle. The crop calendar is initialized by providing the parameters needed for defining the crop cycle. At each time step the instance of `CropCalendar` is called and at dates defined by its parameters it initiates the appropriate actions: - sowing/emergence: A `crop_start` signal is dispatched including the parameters needed to start the new crop simulation object - maturity/harvest: the crop cycle is ended by dispatching a `crop_finish` signal with the appropriate parameters. 
:param kiosk: The PCSE VariableKiosk instance :param crop_name: String identifying the crop :param variety_name: String identifying the variety :param crop_start_date: Start date of the crop simulation :param crop_start_type: Start type of the crop simulation ('sowing', 'emergence') :param crop_end_date: End date of the crop simulation :param crop_end_type: End type of the crop simulation ('harvest', 'maturity', 'earliest') :param max_duration: Integer describing the maximum duration of the crop cycle :return: A CropCalendar Instance """ # Characteristics of the crop cycle crop_name = Unicode() variety_name = Unicode() crop_start_date = Instance(date) crop_start_type = Enum(["sowing", "emergence"]) crop_end_date = Instance(date) crop_end_type = Enum(["maturity", "harvest", "earliest"]) max_duration = Int() # system parameters kiosk = Instance(VariableKiosk) parameterprovider = Instance(ParameterProvider) mconf = Instance(ConfigurationLoader) logger = Instance(logging.Logger) # Counter for duration of the crop cycle duration = Int(0) in_crop_cycle = Bool(False) def __init__(self, kiosk, crop_name=None, variety_name=None, crop_start_date=None, crop_start_type=None, crop_end_date=None, crop_end_type=None, max_duration=None): # set up logging loggername = "%s.%s" % (self.__class__.__module__, self.__class__.__name__) self.logger = logging.getLogger(loggername) self.kiosk = kiosk self.crop_name = crop_name self.variety_name = variety_name self.crop_start_date = crop_start_date self.crop_start_type = crop_start_type self.crop_end_date = crop_end_date self.crop_end_type = crop_end_type self.max_duration = max_duration self._connect_signal(self._on_CROP_FINISH, signal=signals.crop_finish) def validate(self, campaign_start_date, next_campaign_start_date): """Validate the crop calendar internally and against the interval for the agricultural campaign. :param campaign_start_date: start date of this campaign :param next_campaign_start_date: start date of the next campaign """ # Check that crop_start_date is before crop_end_date crop_end_date = self.crop_end_date if self.crop_end_type == "maturity": crop_end_date = self.crop_start_date + timedelta(days=self.max_duration) if self.crop_start_date >= crop_end_date: msg = "crop_end_date before or equal to crop_start_date for crop '%s'!" raise exc.PCSEError(msg % (self.crop_start_date, self.crop_end_date)) # check that crop_start_date is within the campaign interval r = check_date_range(self.crop_start_date, campaign_start_date, next_campaign_start_date) if r is not True: msg = "Start date (%s) for crop '%s' vareity '%s' not within campaign window (%s - %s)." % \ (self.crop_start_date, self.crop_name, self.variety_name, campaign_start_date, next_campaign_start_date) raise exc.PCSEError(msg) def __call__(self, day): """Runs the crop calendar to determine if any actions are needed. 
:param day: a date object for the current simulation day :param drv: the driving variables at this day :return: None """ if self.in_crop_cycle: self.duration += 1 # Start of the crop cycle if day == self.crop_start_date: # Start a new crop self.duration = 0 self.in_crop_cycle = True msg = "Starting crop (%s) with variety (%s) on day %s" % (self.crop_name, self.variety_name, day) self.logger.info(msg) self._send_signal(signal=signals.crop_start, day=day, crop_name=self.crop_name, variety_name=self.variety_name, crop_start_type=self.crop_start_type, crop_end_type=self.crop_end_type) # end of the crop cycle finish_type = None if self.in_crop_cycle: # Check if crop_end_date is reached for CROP_END_TYPE harvest/earliest if self.crop_end_type in ["harvest", "earliest"]: if day == self.crop_end_date: finish_type = "harvest" # Check for forced stop because maximum duration is reached if self.in_crop_cycle and self.duration == self.max_duration: finish_type = "max_duration" # If finish condition is reached send a signal to finish the crop if finish_type is not None: self.in_crop_cycle = False self._send_signal(signal=signals.crop_finish, day=day, finish_type=finish_type, crop_delete=True) def _on_CROP_FINISH(self): """Register that crop has reached the end of its cycle. """ self.in_crop_cycle = False def get_end_date(self): """Return the end date of the crop cycle. This is either given as the harvest date or calculated as crop_start_date + max_duration :return: a date object """ if self.crop_end_type in ["harvest", 'earliest']: return self.crop_end_date else: return self.crop_start_date + timedelta(days=self.max_duration) def get_start_date(self): """Returns the start date of the cycle. This is always self.crop_start_date :return: the start date """ return self.crop_start_date class TimedEventsDispatcher(HasTraits, DispatcherObject): """Takes care handling events that are connected to a date. Events are handled by dispatching a signal (taken from the `signals` module) and providing the relevant parameters with the signal. TimedEvents can be most easily understood when looking at the definition in the agromanagement file. The following section (in YAML) provides the definition of two instances of TimedEventsDispatchers:: TimedEvents: - event_signal: irrigate name: Timed irrigation events comment: All irrigation amounts in mm events_table: - 2000-01-01: {irrigation_amount: 20} - 2000-01-21: {irrigation_amount: 50} - 2000-03-18: {irrigation_amount: 30} - 2000-03-19: {irrigation_amount: 25} - event_signal: apply_npk name: Timed N/P/K application table comment: All fertilizer amounts in kg/ha events_table: - 2000-01-10: {N_amount : 10, P_amount: 5, K_amount: 2} - 2000-01-31: {N_amount : 30, P_amount: 15, K_amount: 12} - 2000-03-25: {N_amount : 50, P_amount: 25, K_amount: 22} - 2000-04-05: {N_amount : 70, P_amount: 35, K_amount: 32} Each TimedEventDispatcher is defined by an `event_signal`, an optional name, an optional comment and the events_table. The events_table is list which provides for each date the parameters that should be dispatched with the given event_signal. 
""" event_signal = None events_table = List() days_with_events = Instance(Counter) kiosk = Instance(VariableKiosk) logger = Instance(logging.Logger) name = Unicode() comment = Unicode() def __init__(self, kiosk, event_signal, name, comment, events_table): """Initialising a TimedEventDispatcher :param kiosk: an instance of the VariableKiosk :param event_signal: the signal to be dispatched when the event occurs (from pcse.signals) :param name: the name of the event dispatcher :param comment: A comment that will be used in log message :param events_table: The events table, the structure here is a list of dicts, with each dict having only one key/value with the key being the date of the event and the value a dict of parameter values that should be dispatched with the signal. """ # set up logging loggername = "%s.%s" % (self.__class__.__module__, self.__class__.__name__) self.logger = logging.getLogger(loggername) self.kiosk = kiosk self.events_table = events_table self.name = name self.comment = comment # get signal from signals module if not hasattr(signals, event_signal): msg = "Signal '%s' not defined in pcse.signals module." raise exc.PCSEError(msg % event_signal) # self.event_signal = getattr(signals, event_signal) self.event_signal = getattr(signals, event_signal) # Build a counter for the days with events. self.days_with_events = Counter() for ev in self.events_table: self.days_with_events.update(ev.keys()) # Check if there are days with two or more events under the # same signal which is not allowed. multi_days = [] for day, count in self.days_with_events.items(): if count > 1: multi_days.append(day) if multi_days: msg = "Found days with more than 1 event for events table '%s' on days: %s" raise exc.PCSEError(msg % (self.name, multi_days)) def validate(self, campaign_start_date, next_campaign_start_date): """Validates the timed events
is None: self.method = [] else: self.method = method self.baseMethod = baseMethod def factory(*args_, **kwargs_): if CurrentSubclassModule_ is not None: subclass = getSubclassFromModule_( CurrentSubclassModule_, PayWidget) if subclass is not None: return subclass(*args_, **kwargs_) if PayWidget.subclass: return PayWidget.subclass(*args_, **kwargs_) else: return PayWidget(*args_, **kwargs_) factory = staticmethod(factory) def get_method(self): return self.method def set_method(self, method): self.method = method def add_method(self, value): self.method.append(value) def insert_method_at(self, index, value): self.method.insert(index, value) def replace_method_at(self, index, value): self.method[index] = value def get_baseMethod(self): return self.baseMethod def set_baseMethod(self, baseMethod): self.baseMethod = baseMethod def get_memo(self): return self.memo def set_memo(self, memo): self.memo = memo def get_target(self): return self.target def set_target(self, target): self.target = target def get_autoSubmit(self): return self.autoSubmit def set_autoSubmit(self, autoSubmit): self.autoSubmit = autoSubmit def get_testMode(self): return self.testMode def set_testMode(self, testMode): self.testMode = testMode def get_embeddedAppId(self): return self.embeddedAppId def set_embeddedAppId(self, embeddedAppId): self.embeddedAppId = embeddedAppId def hasContent_(self): if ( self.method or self.baseMethod is not None or super(PayWidget, self).hasContent_() ): return True else: return False def export(self, outfile, level, namespaceprefix_='', name_='PayWidget', namespacedef_='', pretty_print=True): imported_ns_def_ = GenerateDSNamespaceDefs_.get('PayWidget') if imported_ns_def_ is not None: namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' if self.original_tagname_ is not None: name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='PayWidget') if self.hasContent_(): outfile.write('>%s' % (eol_, )) self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='PayWidget', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='PayWidget'): super(PayWidget, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='PayWidget') if self.memo is not None and 'memo' not in already_processed: already_processed.add('memo') outfile.write(' memo=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.memo), input_name='memo')), )) if self.target is not None and 'target' not in already_processed: already_processed.add('target') outfile.write(' target=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.target), input_name='target')), )) if not self.autoSubmit and 'autoSubmit' not in already_processed: already_processed.add('autoSubmit') outfile.write(' autoSubmit="%s"' % self.gds_format_boolean(self.autoSubmit, input_name='autoSubmit')) if self.testMode and 'testMode' not in already_processed: already_processed.add('testMode') outfile.write(' testMode="%s"' % self.gds_format_boolean(self.testMode, input_name='testMode')) if self.embeddedAppId is not None and 'embeddedAppId' not in already_processed: 
already_processed.add('embeddedAppId') outfile.write(' embeddedAppId=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.embeddedAppId), input_name='embeddedAppId')), )) def exportChildren(self, outfile, level, namespaceprefix_='', name_='PayWidget', fromsubclass_=False, pretty_print=True): super(PayWidget, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print) if pretty_print: eol_ = '\n' else: eol_ = '' for method_ in self.method: method_.export(outfile, level, namespaceprefix_, name_='method', pretty_print=pretty_print) if self.baseMethod is not None: self.baseMethod.export(outfile, level, namespaceprefix_, name_='baseMethod', pretty_print=pretty_print) def build(self, node): already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('memo', node) if value is not None and 'memo' not in already_processed: already_processed.add('memo') self.memo = value value = find_attr_value_('target', node) if value is not None and 'target' not in already_processed: already_processed.add('target') self.target = value value = find_attr_value_('autoSubmit', node) if value is not None and 'autoSubmit' not in already_processed: already_processed.add('autoSubmit') if value in ('true', '1'): self.autoSubmit = True elif value in ('false', '0'): self.autoSubmit = False else: raise_parse_error(node, 'Bad boolean attribute') value = find_attr_value_('testMode', node) if value is not None and 'testMode' not in already_processed: already_processed.add('testMode') if value in ('true', '1'): self.testMode = True elif value in ('false', '0'): self.testMode = False else: raise_parse_error(node, 'Bad boolean attribute') value = find_attr_value_('embeddedAppId', node) if value is not None and 'embeddedAppId' not in already_processed: already_processed.add('embeddedAppId') self.embeddedAppId = value super(PayWidget, self).buildAttributes(node, attrs, already_processed) def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'method': obj_ = PaymentMethod.factory() obj_.build(child_) self.method.append(obj_) obj_.original_tagname_ = 'method' elif nodeName_ == 'baseMethod': class_obj_ = self.get_class_obj_(child_, BasePaymentMethod) obj_ = class_obj_.factory() obj_.build(child_) self.baseMethod = obj_ obj_.original_tagname_ = 'baseMethod' super(PayWidget, self).buildChildren(child_, node, nodeName_, True) # end class PayWidget class Form(GeneratedsSuper): subclass = None superclass = None def __init__(self, positiveButtonCaption=None, positiveButtonConfirmation=None, negativeButtonCaption=None, negativeButtonConfirmation=None, widget=None, javascriptValidation=None): self.original_tagname_ = None self.positiveButtonCaption = _cast(None, positiveButtonCaption) self.positiveButtonConfirmation = _cast(None, positiveButtonConfirmation) self.negativeButtonCaption = _cast(None, negativeButtonCaption) self.negativeButtonConfirmation = _cast(None, negativeButtonConfirmation) self.widget = widget self.javascriptValidation = javascriptValidation def factory(*args_, **kwargs_): if CurrentSubclassModule_ is not None: subclass = getSubclassFromModule_( CurrentSubclassModule_, Form) if subclass is not None: return subclass(*args_, **kwargs_) if Form.subclass: return Form.subclass(*args_, **kwargs_) else: return 
Form(*args_, **kwargs_) factory = staticmethod(factory) def get_widget(self): return self.widget def set_widget(self, widget): self.widget = widget def get_javascriptValidation(self): return self.javascriptValidation def set_javascriptValidation(self, javascriptValidation): self.javascriptValidation = javascriptValidation def get_positiveButtonCaption(self): return self.positiveButtonCaption def set_positiveButtonCaption(self, positiveButtonCaption): self.positiveButtonCaption = positiveButtonCaption def get_positiveButtonConfirmation(self): return self.positiveButtonConfirmation def set_positiveButtonConfirmation(self, positiveButtonConfirmation): self.positiveButtonConfirmation = positiveButtonConfirmation def get_negativeButtonCaption(self): return self.negativeButtonCaption def set_negativeButtonCaption(self, negativeButtonCaption): self.negativeButtonCaption = negativeButtonCaption def get_negativeButtonConfirmation(self): return self.negativeButtonConfirmation def set_negativeButtonConfirmation(self, negativeButtonConfirmation): self.negativeButtonConfirmation = negativeButtonConfirmation def hasContent_(self): if ( self.widget is not None or self.javascriptValidation is not None ): return True else: return False def export(self, outfile, level, namespaceprefix_='', name_='Form', namespacedef_='', pretty_print=True): imported_ns_def_ = GenerateDSNamespaceDefs_.get('Form') if imported_ns_def_ is not None: namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' if self.original_tagname_ is not None: name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Form') if self.hasContent_(): outfile.write('>%s' % (eol_, )) self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='Form', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Form'): if self.positiveButtonCaption is not None and 'positiveButtonCaption' not in already_processed: already_processed.add('positiveButtonCaption') outfile.write(' positiveButtonCaption=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.positiveButtonCaption), input_name='positiveButtonCaption')), )) if self.positiveButtonConfirmation is not None and 'positiveButtonConfirmation' not in already_processed: already_processed.add('positiveButtonConfirmation') outfile.write(' positiveButtonConfirmation=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.positiveButtonConfirmation), input_name='positiveButtonConfirmation')), )) if self.negativeButtonCaption is not None and 'negativeButtonCaption' not in already_processed: already_processed.add('negativeButtonCaption') outfile.write(' negativeButtonCaption=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.negativeButtonCaption), input_name='negativeButtonCaption')), )) if self.negativeButtonConfirmation is not None and 'negativeButtonConfirmation' not in already_processed: already_processed.add('negativeButtonConfirmation') outfile.write(' negativeButtonConfirmation=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.negativeButtonConfirmation), input_name='negativeButtonConfirmation')), )) def 
exportChildren(self, outfile, level, namespaceprefix_='', name_='Form', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.widget is not None: self.widget.export(outfile, level, namespaceprefix_, name_='widget', pretty_print=pretty_print) if self.javascriptValidation is not None: showIndent(outfile, level, pretty_print) outfile.write('<javascriptValidation>%s</javascriptValidation>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.javascriptValidation), input_name='javascriptValidation')), eol_)) def build(self, node): already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('positiveButtonCaption', node) if value is not None and 'positiveButtonCaption' not in already_processed: already_processed.add('positiveButtonCaption') self.positiveButtonCaption = value value = find_attr_value_('positiveButtonConfirmation', node) if value is not None and 'positiveButtonConfirmation' not in already_processed: already_processed.add('positiveButtonConfirmation') self.positiveButtonConfirmation = value value = find_attr_value_('negativeButtonCaption', node) if value is not None and 'negativeButtonCaption' not in already_processed: already_processed.add('negativeButtonCaption') self.negativeButtonCaption = value value = find_attr_value_('negativeButtonConfirmation', node) if value is not None and 'negativeButtonConfirmation' not in already_processed: already_processed.add('negativeButtonConfirmation') self.negativeButtonConfirmation = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'widget': class_obj_ = self.get_class_obj_(child_, Widget) obj_ = class_obj_.factory() obj_.build(child_) self.widget = obj_ obj_.original_tagname_ = 'widget' elif nodeName_ == 'javascriptValidation': javascriptValidation_ = child_.text javascriptValidation_ = self.gds_validate_string(javascriptValidation_, node, 'javascriptValidation') self.javascriptValidation = javascriptValidation_ # end class Form class FormMessage(FlowElement): subclass = None superclass = FlowElement def __init__(self, id=None, member=None, brandingKey=None, autoLock=None, vibrate=None, alertType=None, alertIntervalType=None, positiveReference=None, negativeReference=None, content=None, form=None, attachment=None): self.original_tagname_ = None super(FormMessage, self).__init__(id, ) self.member = _cast(None, member) self.brandingKey = _cast(None, brandingKey) self.autoLock = _cast(bool, autoLock) self.vibrate = _cast(bool, vibrate) self.alertType = _cast(None, alertType) self.alertIntervalType = _cast(None, alertIntervalType) self.positiveReference = _cast(None, positiveReference) self.negativeReference = _cast(None, negativeReference) self.content = content self.form = form if attachment is None: self.attachment = [] else: self.attachment = attachment def factory(*args_, **kwargs_): if CurrentSubclassModule_ is not None: subclass = getSubclassFromModule_( CurrentSubclassModule_, FormMessage) if subclass is not None: return subclass(*args_, **kwargs_) if FormMessage.subclass: return FormMessage.subclass(*args_, **kwargs_) else: return FormMessage(*args_, **kwargs_) factory = staticmethod(factory) def get_content(self): return self.content def set_content(self, content): self.content = content def get_form(self): return 
self.form def set_form(self, form): self.form = form def get_attachment(self): return self.attachment def set_attachment(self, attachment): self.attachment = attachment def add_attachment(self, value): self.attachment.append(value) def insert_attachment_at(self, index, value): self.attachment.insert(index, value) def replace_attachment_at(self, index, value): self.attachment[index] = value def get_member(self): return self.member def set_member(self, member): self.member = member def get_brandingKey(self): return self.brandingKey def set_brandingKey(self, brandingKey): self.brandingKey = brandingKey def get_autoLock(self): return self.autoLock def set_autoLock(self, autoLock): self.autoLock = autoLock def get_vibrate(self): return self.vibrate def set_vibrate(self, vibrate): self.vibrate = vibrate def get_alertType(self): return self.alertType def set_alertType(self, alertType): self.alertType = alertType def get_alertIntervalType(self): return self.alertIntervalType def set_alertIntervalType(self, alertIntervalType): self.alertIntervalType = alertIntervalType
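# Illustrative usage sketch (not part of the generated module above). The
# PayWidget / Form / FormMessage classes follow the usual generateDS.py
# pattern: factory() to construct, build(node) to populate from an XML element,
# export() to serialize back. The module name and XML snippet are assumptions.
import sys
import xml.etree.ElementTree as ET
# from messaging_xml import Form   # hypothetical module holding the generated classes

def roundtrip_form(xml_text):
    """Build a Form from an XML string and echo it back to stdout."""
    root = ET.fromstring(xml_text)            # e.g. '<form positiveButtonCaption="OK"/>'
    form = Form.factory()                     # factory() honours registered subclasses
    form.build(root)                          # read attributes and child elements
    form.export(sys.stdout, 0, name_='form')  # serialize back to XML
    return form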
<gh_stars>1-10 # -*- coding: utf-8-*- # Encoding cookie added by Mu Editor """Board -- an n-dimensional board with support for iteration, containership and slicing Boards can have any number of dimensions, any of which can be infinite. Boards can be sliced [:1, :2], returning a linked-copy, or copied (.copy), returning a snapshot copy. Boards can be iterated over for coordinates or data (.iterdata). There are also convenience functions to determine neighbours across all dimensions (.neighbours), the bounding box of occupied data (.occupied), all the coordinates in a space in n-dimensions (.itercoords) and others. """ # testing # # The semantics of 3.x range are broadly equivalent # to xrange in 2.7 # try: range = xrange except NameError: pass try: long except NameError: long = int import os, sys import functools import itertools import io try: from PIL import Image, ImageDraw, ImageFont except ImportError: Image = None class _Infinity(int): def __new__(meta): return sys.maxsize def __str__(self): return "Infinity" def __repr__(self): return "<Infinity>" def __eq__(self, other): return other == self.size def __lt__(self, other): return False def __gt__(self, other): return True Infinity = _Infinity() class _Empty(object): def __repr__(self): return "<Empty>" def __bool__(self): return False __nonzero__ = __bool__ Empty = _Empty() class BaseDimension(object): def __repr__(self): return "<{}>".format(self.__class__.__name__) class Dimension(BaseDimension): is_finite = True is_infinite = False def __init__(self, size): self._size = size self._range = range(size) def __iter__(self): return iter(self._range) def __eq__(self, other): return isinstance(self, type(other)) and self._size == other._size def __repr__(self): return "<{}: {}>".format(self.__class__.__name__, self._size) def __len__(self): return self._size def __contains__(self, item): return item in self._range def __getitem__(self, item): if isinstance(item, (int, long)): return self._range[item] elif isinstance(item, slice): return self._range[item.start, item.stop, item.step] else: raise TypeError("{} can only be indexed by int or slice".format(self.__class__.__name__)) class _InfiniteDimension(BaseDimension): chunk_size = 10 is_finite = False is_infinite = True def __iter__(self): return itertools.count() def __repr__(self): return "<Infinite Dimension>" def __eq__(self, other): # # Ensure that any infinite dimension is equal to any other # return isinstance(other, self.__class__) def __contains__(self, item): # # An infinite dimension includes any non-negative coordinate # if item < 0: return False return True def __len__(self): return Infinity def __getitem__(self, item): if isinstance(item, (int, long)): if item == 0: return 0 elif item == -1: return Infinity else: raise IndexError("Infinite dimensions can only return first & last items") elif isinstance(item, slice): # # If the request is for an open-ended slice, # just return the same infinite dimension. 
# if item.stop is None: return self else: return range(*item.indices(item.stop)) else: raise TypeError("{} can only be indexed by int or slice".format(self.__class__.__name__)) InfiniteDimension = _InfiniteDimension() def _centred_coord(outer_size, inner_size): """Given an outer and an inner size, calculate the top-left coordinates which the inner image should position at to be centred within the outer image """ outer_w, outer_h = outer_size inner_w, inner_h = inner_size return round((outer_w - inner_w) / 2), round((outer_h - inner_h) / 2) def text_sprite(font_name="arial", colour="#0000ff"): """Text sprite generator callback from Board.paint Convert the object to text of approximately the right size for the cell being painted. Typically this will be used for one or two letter objects, but it will work for any object which can meaningfully be converted to text """ def _text_sprite(obj, size): # # Very roughly, one point is three quarters of # a pixel. We pick a point size which will fill # the smaller edge of the cell (if it's not square) # point_size = round(min(size) * 0.75) # # Create a new transparent image to hold the # text. Draw the text into it in blue, centred, # using the font requested, and return the resulting image # image = Image.new("RGBA", size, (255, 255, 255, 0)) draw = ImageDraw.Draw(image) font = ImageFont.truetype("%s.ttf" % font_name, point_size) text = str(obj) draw.text(_centred_coord(size, font.getsize(text)), text, font=font, fill=colour) return image return _text_sprite def imagefile_sprite(directory=".", extension=".png"): """Image sprite generator callback for Board.paint Given the text form of an object, look for an image file in the stated directory [default: current] and return it, scaled to size. """ def _imagefile_sprite(obj, size): image = Image.open(os.path.join(directory, "%s%s" % (obj, extension))) image.thumbnail(size) return image return _imagefile_sprite class Board(object): """Board - represent a board of n dimensions, each possibly infinite. A location on the board is represented as an n-dimensional coordinate, matching the dimensionality originally specified. The board is addressed by index with a coordinate: b = Board((4, 4)) b[2, 2] = "*" b.draw() """ class BoardError(Exception): pass class InvalidDimensionsError(BoardError): pass class OutOfBoundsError(BoardError): pass def __init__(self, dimension_sizes, _global_board=None, _offset_from_global=None): """Set up a n-dimensional board """ if not dimension_sizes: raise self.InvalidDimensionsError("The board must have at least one dimension") try: iter(dimension_sizes) except TypeError: raise self.InvalidDimensionsError("Dimensions must be iterable (eg a tuple), not {}".format(type(dimension_sizes).__name__)) if any(d <= 0 for d in dimension_sizes): raise self.InvalidDimensionsError("Each dimension must be >= 1") self.dimensions = [InfiniteDimension if size == Infinity else Dimension(size) for size in dimension_sizes] # # This can be a sub-board of another board: a slice. # If that's the case, the boards share a common data structure # and this one is offset from the other. # NB this means that if a slice is taken of a slice, the offset must itself be offset! 
# self._data = {} if _global_board is None else _global_board self._offset_from_global = _offset_from_global or tuple(0 for _ in self.dimensions) self._sprite_cache = {} def __repr__(self): return "<{} ({})>".format( self.__class__.__name__, ", ".join(("Infinity" if d.is_infinite else str(len(d))) for d in self.dimensions) ) def __eq__(self, other): return \ self.dimensions == other.dimensions and \ dict(self.iterdata()) == dict(other.iterdata()) def __len__(self): # # Return the total number of positions on the board. If any of # the dimensions is infinite, the total will be Infinity # if any(d.is_infinite for d in self.dimensions): return Infinity else: return functools.reduce(lambda a, b: a * b, (len(d) for d in self.dimensions)) def __bool__(self): return any(coord for coord in self._data if self._is_in_bounds(coord)) __nonzero__ = __bool__ @property def is_offset(self): """Is this board offset from a different board?""" return any(o for o in self._offset_from_global) @property def has_finite_dimensions(self): """Does this board have at least one finite dimension?""" return any(d.is_finite for d in self.dimensions) @property def has_infinite_dimensions(self): """Does this board have at least one infinite dimension?""" return any(d.is_infinite for d in self.dimensions) def dumped(self): is_offset = any(o for o in self._offset_from_global) if is_offset: offset = " offset by {}".format(self._offset_from_global) else: offset = "" yield repr(self) + offset yield "{" for coord, value in sorted(self.iterdata()): if is_offset: global_coord = " => {}".format(self._to_global(coord)) else: global_coord = "" data = " [{}]".format(self[coord] if self[coord] is not None else "") yield " {}{}{}".format(coord, global_coord, data) yield "}" def dump(self, outf=sys.stdout): for line in self.dumped(): outf.write(line + "\n") def _is_in_bounds(self, coord): """Is a given coordinate within the space of this board? """ if len(coord) != len(self.dimensions): raise self.InvalidDimensionsError( "Coordinate {} has {} dimensions; the board has {}".format(coord, len(coord), len(self.dimensions))) return all(c in d for (c, d) in zip(coord, self.dimensions)) def _check_in_bounds(self, coord): """If a given coordinate is not within the space of this baord, raise an OutOfBoundsError """ if not self._is_in_bounds(coord): raise self.OutOfBoundsError("{} is out of bounds for {}".format(coord, self)) def __contains__(self, coord): """Implement <coord> in <board> """ return self._is_in_bounds(coord) def __iter__(self): """Implement for <coord> in <board> Iterate over all combinations of coordinates. If you need data, use iterdata(). """ # If all the dimensions are finite (the simplest and most common # situation) just use itertools.product. # If any dimension is infinite, we can't use itertools.product # directly because it consumes its arguments in order to make # up the axes for its Cartesian join. Instead, we chunk through # any infinite dimensions, while repeating the finite ones. 
if any(d.is_infinite for d in self.dimensions): start, chunk = 0, InfiniteDimension.chunk_size while True: iterators = [d[start:start+chunk] if d[-1] == Infinity else iter(d) for d in self.dimensions] for coord in itertools.product(*iterators): yield coord start += chunk else: for coord in itertools.product(*self.dimensions): yield coord def _to_global(self, coord): return tuple(c + o for (c, o) in zip(coord, self._offset_from_global)) def _from_global(self, coord): return tuple(c - o for (c, o) in zip(coord, self._offset_from_global)) def iterdata(self): """Implement: for (<coord>, <data>) in <board> Generate the list of data
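# A small usage sketch for the Board class above (illustrative only; assumes
# the module is importable, e.g. `from board import Board, Infinity`). It only
# exercises behaviour described in the class docstrings: coordinate indexing,
# containment, len() and iterdata().
b = Board((4, 4))                 # finite 4x4 board
b[2, 2] = "*"                     # place data at a coordinate (as in the docstring)
print((2, 2) in b)                # True: coordinate is within bounds
print(len(b))                     # 16: total number of positions
print(dict(b.iterdata()))         # {(2, 2): '*'}: only occupied cells carry data

ib = Board((3, Infinity))         # second dimension is unbounded
print(len(ib))                    # the Infinity sentinel (sys.maxsize)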
<filename>source/py4dlib/objects.py # -*- coding: utf-8 -*- # # objects.py # py4dlib # # Created by <NAME> on 2012-09-26. # Copyright 2012 Berg Media. All rights reserved. # # <EMAIL> # # pylint: disable-msg=F0401 '''py4dlib.objects -- components for working with CINEMA 4D's objects.''' import os import re __version__ = (0, 5) __date__ = '2012-09-27' __updated__ = '2013-08-10' DEBUG = 0 or ('DebugLevel' in os.environ and os.environ['DebugLevel'] > 0) TESTRUN = 0 or ('TestRunLevel' in os.environ and os.environ['TestRunLevel'] > 0) import pprint pp = pprint.PrettyPrinter() PP = pp.pprint PF = pp.pformat try: import c4d #@UnresolvedImport from c4d import documents #@UnresolvedImport except ImportError: if TESTRUN == 1: pass from py4dlib.utils import UnescapeUnicode, EscapeUnicode, FuzzyCompareStrings, deprecated from py4dlib.maths import BBox from py4dlib.mesh import CalcGravityCenter class ObjectIterator(object): """ Iterator over specific objects in the object manager tree. Using a depth first traversal scheme, return a tuple in the form (op, lvl), where op is a c4d.BaseObject representing the current object and lvl is an integer indicating the current depth level. :param start_obj: the object whose hierarchy should be iterated over :param stop_obj: an object or a list of objects at which traversal should stop (optional) :param children_only: if True, iterate through the sub-hierarchy under start_obj and stop as soon as start_obj's parent or stop_obj (if given) is reached. This excludes start_obj from the iteration. :param startlvl: base indentation level """ def __init__(self, start_obj, stop_obj=None, children_only=True, startlvl=-1): super(ObjectIterator, self).__init__() self.curobj = start_obj # determine depth level within the hierarchy of start_obj op = start_obj while op: startlvl += 1 op = op.GetUp() self.curlvl = startlvl self.children_only = children_only if children_only: self.stop_objs = [start_obj] self.init = False else: self.stop_objs = [] self.init = True if stop_obj and isinstance(stop_obj, list): self.stop_objs.extend(stop_obj) elif stop_obj and isinstance(stop_obj, c4d.BaseObject): self.stop_objs.append(stop_obj) def __iter__(self): return self # next() becomes __next__() in later Pythons def next(self): op = self.curobj if self.init is True: self.init = False return (self.curobj, self.curlvl) if op == None: raise StopIteration if op.GetDown(): if op.GetNext() in self.stop_objs or \ op.GetDown() in self.stop_objs: raise StopIteration self.curlvl += 1 self.curobj = op.GetDown() return (self.curobj, self.curlvl) if op in self.stop_objs: raise StopIteration if self.stop_objs is None: while not op.GetNext() and op.GetUp(): self.curlvl -= 1 op = op.GetUp() else: while not op.GetNext() and op.GetUp(): if (op in self.stop_objs) or \ (op.GetUp() in self.stop_objs): raise StopIteration self.curlvl -= 1 op = op.GetUp() if op.GetNext(): if op.GetNext() in self.stop_objs: raise StopIteration self.curobj = op.GetNext() return (self.curobj, self.curlvl) else: raise StopIteration return (self.curobj, self.curlvl) class ObjectEntry(object): """ Wraps ``c4d.BaseObject``s and makes them hashable, so they can be used as keys in dictionaries. """ def __init__(self, op, lvl=-1, parents=None): """ :param op: the object to wrap. :type op: ``c4d.BaseObject`` :param lvl: the depth level within the hierarchy. 
:type lvl: ``int`` :param parents: a list of parent objects :type parents: ``list<c4d.BaseObject>`` """ super(ObjectEntry, self).__init__() self.op = op self.name = op.GetName() curlvl = lvl if curlvl < 0: while op: curlvl += 1 op = op.GetUp() self.curlvl = curlvl self.lvl = curlvl self.parents = parents def __str__(self): return ('%s%s' % (' ' * 4 * self.lvl, self.name)) def __repr__(self): return ("%s (%s)" % (self.name, self.op.GetTypeName())) def __hash__(self): return hash(self.name) ^ self.lvl def __cmp__(self, other): try: return cmp(self.op, other.op) except (AttributeError, TypeError): return NotImplemented class ObjectHierarchy(object): """ Represents a hierarchical group structure in the object manager. Can be used to create a Pythonic snapshot of the current scene so as to provide easy access to specifc sets of objects. Starting with root object stores a list of ``c4d.BaseObjects`` for each depth level in a dictionary. Each list is indexed by a concatenation of its parent names. The concat character is a forward slash, which forms a Unix like filepath as seen with the object manager's address bar widget. Additionally, a small subset of X-Path like functionality is provided with the ``get()`` function, namely the subset that coincides with syntax for wildcard and regular epxression expansion. This makes it easy to select a subset of objects, based on parent-name relationships. :param filter_type: only recognize objects of this c4d type :param bool children_only: see :py:class:`ObjectIterator` """ def __init__(self, root_obj=None, filter_type=None, children_only=False): super(ObjectHierarchy, self).__init__() if root_obj is None: doc = c4d.documents.GetActiveDocument() root_obj = doc.GetFirstObject() children_only = False self.children_only = children_only self.root = root_obj self.maxlvl = -1 sep = '/' hierarchy = {} for op, lvl in ObjectIterator(root_obj, children_only=children_only): if ((filter_type is None) or (filter_type and op.GetType() == filter_type)): plist = [] opp = op i = lvl # IGNORE:W0612 while opp: opp = opp.GetUp() if opp: oppname = opp.GetName() i -= 1 pentry = oppname plist.append(pentry) if len(plist) == 0: plist.append(op.GetName()) plist.reverse() parent_path = sep.join(plist) if parent_path not in hierarchy: hierarchy[parent_path] = [] hierarchy[parent_path].append(op) if lvl > self.maxlvl: self.maxlvl = lvl self.sep = sep self.entries = hierarchy def _strxform(self): result = "{" for k, v in self.entries.items(): result += "%r: [" % k for op in v: result += "%r, " % op.GetName() result = result[:-2] # chop off remainder "," result += "], " result = result[:-2] # chop off remainder "," result += "}" return result def __str__(self): return self._strxform() def __repr__(self): return repr(self.entries) def PPrint(self, stop_obj=None, filter_type=None, tabsize=4): """Print an indented, tree-like representation of an object manager hierarchy.""" lvl = 0 total = handled = 0 if stop_obj is None and self.children_only: stop_obj = self.root for op, lvl in ObjectIterator(self.root, stop_obj, children_only=self.children_only): total += 1 indent = lvl * tabsize * ' ' if not filter_type or (filter_type and op.GetType() == filter_type): handled += 1 print("%s%s" % (indent, op.GetName())) filtered = (total - handled) if total == 1: s = "" else: s = "s" if filtered > 0: fstr = " (%d filtered)" % filtered else: fstr = "" print("processed %d object%s%s" % (handled, s, fstr)) def Get(self, path, strict=True): """ Get a list of ``c4d.BaseObject``s for the key path given by 
'path'. Key path can contain wildcards (``*`` or ``?``) or regular expression syntax. Prepend a '!' to 'path' if you want to forego wildcard expansion and thus ensure it is used as a verbatim regular expression pattern instead. Note that if 'strict' is True, 'path' must match the whole key it is tested against. Otherwise it is sufficient if the path is contained by the key. Returns a list of all objects for which 'path', expanded, matched a concatenated parent path. Returns an empty list if no objects could be located for 'path'. """ results = [] try: path = UnescapeUnicode(path.strip()) except UnicodeEncodeError: path = path.strip() if path[-1] == self.sep: path = path[:-1] if '..' in path: comps = path.split(self.sep) resolved_comps = [] skip = False for comp in reversed(comps): if comp == '..': skip = True continue if skip: skip = False continue resolved_comps.append(comp) resolved_comps.reverse() path = self.sep.join(resolved_comps) if path[0] == '!': # hint to take path as a verbatim re pattern pat = path[1:] else: # wildcard version pat = re.escape(path) # go back one escape level pat = path.replace(r'\\', '\\') pat = pat.replace('?', '.').replace('*', '.*?') if strict is True: pat = '^%s$' % pat func = re.match else: pat = '%s' % pat func = re.search keys = [key for key in list(self.entries.keys()) if func(pat, UnescapeUnicode(key), flags=re.UNICODE)] if DEBUG: print("path = %r" % (path)) print("pat = %r" % (pat)) print("keys = %r" % (keys)) try: for key in keys: result = self.entries.get(key) results.extend(result) except KeyError: pass return results def Select(obj): if not obj.GetBit(c4d.BIT_ACTIVE): obj.ToggleBit(c4d.BIT_ACTIVE) return obj.GetBit(c4d.BIT_ACTIVE) def SelectAdd(obj): """ Same as select(obj) but uses a slightly different mechanism. See also BaseDocument.SetSelection(sel, mode). """ doc = obj.GetDocument() doc.SetActiveObject(obj, c4d.SELECTION_ADD) def SelectGroupMembers(grp): doc = documents.GetActiveDocument() for obj in grp: # add each group member to the selection # so we can group them in the object
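# Usage sketch for ObjectIterator / ObjectHierarchy above. This runs only
# inside CINEMA 4D's Python environment (the c4d module is not available
# elsewhere); the "*/Cube*" path is an illustrative example, not a real scene.
# import c4d
# from py4dlib.objects import ObjectIterator, ObjectHierarchy

def dump_scene(doc):
    root = doc.GetFirstObject()
    # Depth-first walk, printing an indented tree of object names.
    for op, lvl in ObjectIterator(root, children_only=False):
        print("    " * lvl + op.GetName())
    # Snapshot the hierarchy and query it with wildcard paths, much like the
    # object manager's address bar widget.
    oh = ObjectHierarchy(root)
    oh.PPrint()
    return oh.Get("*/Cube*")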
for i in ts] mm = 3 out = [] feature_list.extend(['SampleEn_'+str(i) for i in range(mm + 1)]) if len(ts) >= (mm+1)*2: res = sampen2(ts, mm=mm, normalize=True) for ii in res: if ii[1] is None: out.append(100) else: out.append(ii[1]) return out else: return [0] * (mm + 1) def CDF(ts): ''' analysis of cumulative distribution functions [17], ''' n_bins = 60 hist, _ = np.histogram(ts, range=(100, 400), bins=n_bins) cdf = np.cumsum(hist)/len(ts) cdf_density = np.sum(cdf) / n_bins feature_list.extend(['CDF_cdf_density']) return [cdf_density] def CoeffOfVariation(ts): ''' analysis of cumulative distribution functions [17], ''' if len(ts) >= 3: tmp_ts = ts[1:-1] if np.mean(tmp_ts) == 0: coeff_ts = 0.0 else: coeff_ts = np.std(tmp_ts) / np.mean(tmp_ts) else: coeff_ts = 0.0 if len(ts) >= 4: tmp_ts = ts[1:-1] tmp_ts = np.diff(tmp_ts) if np.mean(tmp_ts) == 0: coeff_dts = 0.0 else: coeff_dts = np.std(tmp_ts) / np.mean(tmp_ts) else: coeff_dts = 0.0 feature_list.extend(['CoeffOfVariation_coeff_ts', 'CoeffOfVariation_coeff_dts']) return [coeff_ts, coeff_dts] def MAD(ts): ''' thresholding on the median absolute deviation (MAD) of RR intervals [18] ''' ts_median = np.median(ts) mad = np.median([np.abs(ii - ts_median) for ii in ts]) feature_list.extend(['MAD_mad']) return [mad] def QRSBasicStat(ts): feature_list.extend(['QRSBasicStat_Mean', 'QRSBasicStat_HR', 'QRSBasicStat_Count', 'QRSBasicStat_Range', 'QRSBasicStat_Var', 'QRSBasicStat_Skew', 'QRSBasicStat_Kurtosis', 'QRSBasicStat_Median', 'QRSBasicStat_Min', 'QRSBasicStat_p_5', 'QRSBasicStat_p_25', 'QRSBasicStat_p_75', 'QRSBasicStat_p_95', 'QRSBasicStat_range_95_5', 'QRSBasicStat_range_75_25']) if len(ts) >= 3: ts = ts[1:-1] Mean = np.mean(ts) if Mean == 0: HR = 0 else: HR = 1 / Mean Count = len(ts) Range = max(ts) - min(ts) Var = np.var(ts) Skew = stats.skew(ts) Kurtosis = stats.kurtosis(ts) Median = np.median(ts) Min = min(ts) p_5 = np.percentile(ts, 5) p_25 = np.percentile(ts, 25) p_75 = np.percentile(ts, 75) p_95 = np.percentile(ts, 95) range_95_5 = p_95 - p_5 range_75_25 = p_75 - p_25 return [Mean, HR, Count, Range, Var, Skew, Kurtosis, Median, Min, p_5, p_25, p_75, p_95, range_95_5, range_75_25] else: return [0.0] * 15 def QRSBasicStatPointMedian(ts): feature_list.extend(['QRSBasicStatPointMedian_Mean', 'QRSBasicStatPointMedian_HR', 'QRSBasicStatPointMedian_Count', 'QRSBasicStatPointMedian_Range', 'QRSBasicStatPointMedian_Var', 'QRSBasicStatPointMedian_Skew', 'QRSBasicStatPointMedian_Kurtosis', 'QRSBasicStatPointMedian_Median', 'QRSBasicStatPointMedian_Min', 'QRSBasicStatPointMedian_p_25', 'QRSBasicStatPointMedian_p_75']) ts = ThreePointsMedianPreprocess(ts) Mean = np.mean(ts) if Mean == 0: HR = 0 else: HR = 1 / Mean Count = len(ts) if Count != 0: Range = max(ts) - min(ts) Var = np.var(ts) Skew = stats.skew(ts) Kurtosis = stats.kurtosis(ts) Median = np.median(ts) Min = min(ts) p_25 = np.percentile(ts, 25) p_75 = np.percentile(ts, 75) else: Range = 0.0 Var = 0.0 Skew = 0.0 Kurtosis = 0.0 Median = 0.0 Min = 0.0 p_25 = 0.0 p_75 = 0.0 return [Mean, HR, Count, Range, Var, Skew, Kurtosis, Median, Min, p_25, p_75] def QRSBasicStatDeltaRR(ts): feature_list.extend(['QRSBasicStatDeltaRR_Mean', 'QRSBasicStatDeltaRR_HR', 'QRSBasicStatDeltaRR_Count', 'QRSBasicStatDeltaRR_Range', 'QRSBasicStatDeltaRR_Var', 'QRSBasicStatDeltaRR_Skew', 'QRSBasicStatDeltaRR_Kurtosis', 'QRSBasicStatDeltaRR_Median', 'QRSBasicStatDeltaRR_Min', 'QRSBasicStatDeltaRR_p_25', 'QRSBasicStatDeltaRR_p_75']) if len(ts) >= 4: ts = ts[1:-1] ts = np.diff(ts) Mean = np.mean(ts) if Mean == 0: HR = 0 
else: HR = 1 / Mean Count = len(ts) Range = max(ts) - min(ts) Var = np.var(ts) Skew = stats.skew(ts) Kurtosis = stats.kurtosis(ts) Median = np.median(ts) Min = min(ts) p_25 = np.percentile(ts, 25) p_75 = np.percentile(ts, 75) return [Mean, HR, Count, Range, Var, Skew, Kurtosis, Median, Min, p_25, p_75] else: return [0.0] * 11 def QRSYuxi(ts): ''' pars: tol = 0.05 define if two QRS intervals are matched ''' tol = 0.05 feature_list.extend(['QRSYuxi']) if len(ts) >= 3: ts = ts[1:-1] avg_RR = np.median(ts) matched = [False] * len(ts) for i in range(len(ts)): seg_1 = ts[i] if abs(seg_1 - avg_RR) / avg_RR <= tol: matched[i] = True elif abs(seg_1 - 2 * avg_RR) / (2 * avg_RR) <= tol: matched[i] = True for i in range(len(ts)): if matched[i] is False: if i == 0: seg_2_forward = ts[i] else: seg_2_forward = ts[i-1] + ts[i] if i == len(ts)-1: seg_2_backward = ts[i] else: seg_2_backward = ts[i] + ts[i+1] if abs(seg_2_forward - 2 * avg_RR) / (2 * avg_RR) <= tol: matched[i] = True elif abs(seg_2_forward - 3 * avg_RR) / (3 * avg_RR) <= tol: matched[i] = True elif abs(seg_2_backward - 2 * avg_RR) / (2 * avg_RR) <= tol: matched[i] = True elif abs(seg_2_backward - 3 * avg_RR) / (3 * avg_RR) <= tol: matched[i] = True return [sum(matched) / len(matched)] else: return [0.0] * 1 def Variability(ts): ''' Variability(Time Domain) & Poincare plot https://zh.wikipedia.org/wiki/%E5%BF%83%E7%8E%87%E8%AE%8A%E7%95%B0%E5%88%86%E6%9E%90 compute SDNN, NN50 count, pNN50 [14] Atrial fibrillation detection by heart rate variability in Poincare plot Stepping: the mean stepping increment of the inter-beat intervals Dispersion: how spread the points in Poincaré plot are distributed around the diagonal line ''' feature_list.extend(['Variability_SDNN', 'Variability_NN50', 'Variability_pNN50', 'Variability_Stepping', 'Variability_Dispersion']) if len(ts) >= 3: ts = ts[1:-1] SDNN = np.std(ts) freq = 300 timelen = freq * 0.05 if len(ts) < 3: NN50 = 0 pNN50 = 0 Stepping = 0 Dispersion = 0 else: NN = [abs(ts[x + 1] - ts[x]) for x in range(len(ts) - 1)] NN50 = sum([x > timelen for x in NN]) pNN50 = float(NN50) / len(ts) Stepping = (sum([(NN[x] ** 2 + NN[x + 1] ** 2) ** 0.5 for x in range(len(NN) - 1)]) / (len(NN) - 1)) / (sum(ts) / len(ts)) Dispersion = (sum([x ** 2 for x in NN]) / (2 * len(NN)) - sum(NN) ** 2 / (2 * (len(NN)) ** 2)) ** 0.5 / ((-ts[0] - 2 * ts[-1] + 2 * sum(ts)) / (2 * len(NN))) return [SDNN, NN50, pNN50, Stepping, Dispersion] else: return [0.0] * 5 ################################################## ### get features NO!!!!! ################################################## def GetShortFeature(table): ''' rows of table is 330000+ no use now ''' features = [] step = 0 for ts in table: row = [] row.extend(ShortBasicStat(ts)) # row.extend(ShortZeroCrossing(ts)) features.append(row) step += 1 if step % 100000 == 0: print('extracting ...') print('extract DONE') return features ################################################## ### center waves ### get features YES!!!!! 
### very slow, do not run everytime ################################################## def dist(ts1, ts2): dist_num = np.linalg.norm(np.array(ts1) - np.array(ts2)) return dist_num def resample_unequal(ts, length): resampled = [0.0] * length resampled_idx = list(np.linspace(0.0, len(ts)-1, length)) for i in range(length): idx_i = resampled_idx[i] low_idx = int(np.floor(idx_i)) low_weight = abs(idx_i - np.ceil(idx_i)) high_idx = int(np.ceil(idx_i)) high_weight = abs(idx_i - np.floor(idx_i)) resampled[i] = low_weight * ts[low_idx] + high_weight * ts[high_idx] # print(idx_i, resampled[i], low_weight, high_weight) # break return resampled #if __name__ == '__main__': def GetShortCenterWave(table, pid_list, long_pid_list): # table = short_data # pid_list = short_pid # long_pid_list = QRS_pid ''' find majority mean short wave, or center wave, resample to fixed length pars: resampled_length n_clusters radius ''' print('extract GetShortMajorityMeanWave begin') fout = open('../data/center_wave_euclid_direct.csv', 'w') resampled_length = 1000 n_clusters = 3 radius = 1 features = [] pid_short_dic = {} ### build pid_short_dic: (pid -> short_waves) for i in range(len(pid_list)): if pid_list[i] in pid_short_dic.keys(): pid_short_dic[pid_list[i]].append(table[i]) else: pid_short_dic[pid_list[i]] = [table[i]] step = 0 for pid in long_pid_list: ### select pid who has more than 2 short waves if pid in pid_short_dic.keys() and len(pid_short_dic[pid]) > 5: ### sub_table contains all short_waves of pid sub_table = pid_short_dic[pid] sub_table_resampled = [resample_unequal(i, resampled_length) for i in sub_table] ### construct distance matrix of short waves n_short = len(sub_table) dist_mat = np.zeros([n_short, n_short]) for i in range(n_short): dist_mat[i, i] = 0.0 for j in range(i+1, n_short): # tmp_dist = fastdtw(sub_table[i], sub_table[j], radius=radius)[0] tmp_dist = dist(sub_table_resampled[i], sub_table_resampled[j]) dist_mat[i, j] = tmp_dist dist_mat[j, i] = tmp_dist dist_mat_dist = np.sum(dist_mat, axis=1) resampled_center_wave = sub_table_resampled[np.argsort(dist_mat_dist)[0]] ### clustering vis distance matrix # sc = SpectralClustering(n_clusters=n_clusters, affinity='precomputed') # sc.fit(dist_mat) # ### find the most common labels # majority_labels = Counter(sc.labels_).most_common(1)[0][0] # ###### TODO: how to find the center of a dist matrix or a graph # ### selected_short_idx is int # selected_short_idx = np.array(list(range(n_short)))[np.array(sc.labels_) == majority_labels] # ### sub array of dists sum # majority_dist_mat_dist = np.sum(dist_mat, axis=1)[selected_short_idx] # ### min dists sum of sub array # majority_mean_idx = selected_short_idx[np.argsort(majority_dist_mat_dist)[0]] ## center_wave = sub_table[majority_mean_idx] # resampled_center_wave = sub_table_resampled[majority_mean_idx] # ###### TODO: resample # resampled_idx = [int(i) for i in list(np.linspace(0.0, len(center_wave)-1, resampled_length))] # resampled_center_wave = [center_wave[i] for i in
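# Standalone sketch of the time-domain HRV quantities computed by Variability()
# above (SDNN, NN50, pNN50). RR intervals are expressed in samples at 300 Hz,
# so the 50 ms threshold is 300 * 0.05 = 15 samples. The RR series below is
# made-up toy data for illustration.
import numpy as np

def hrv_time_domain(rr, freq=300):
    rr = rr[1:-1]                       # drop the first/last interval, as above
    sdnn = np.std(rr)
    nn = np.abs(np.diff(rr))            # successive RR differences
    nn50 = int(np.sum(nn > freq * 0.05))
    pnn50 = nn50 / len(rr)
    return sdnn, nn50, pnn50

print(hrv_time_domain(np.array([210, 220, 200, 260, 215, 225, 205])))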
) : Ooooo00 = lisp_db_for_lookups . lookup_cache ( map_request . target_eid , False ) else : Ooooo00 = lisp_db_for_lookups . lookup_cache ( map_request . target_group , False ) if ( Ooooo00 ) : Ooooo00 = Ooooo00 . lookup_source_cache ( map_request . target_eid , False ) if 91 - 91: OoOoOO00 + OoOoOO00 I11i11i1 = map_request . print_prefix ( ) if 73 - 73: i11iIiiIii . OoO0O00 + ooOoO0o if ( Ooooo00 == None ) : lprint ( "Database-mapping entry not found for requested EID {}" . format ( green ( I11i11i1 , False ) ) ) if 77 - 77: ooOoO0o . I11i + OoooooooOO return if 100 - 100: ooOoO0o . oO0o % I1ii11iIi11i . IiII * IiII - o0oOOo0O0Ooo if 49 - 49: iIii1I11I1II1 % Ii1I / OoooooooOO - II111iiii . Ii1I Oo00O0o = Ooooo00 . print_eid_tuple ( ) if 28 - 28: OoooooooOO / I1Ii111 / i1IIi lprint ( "Found database-mapping EID-prefix {} for requested EID {}" . format ( green ( Oo00O0o , False ) , green ( I11i11i1 , False ) ) ) if 35 - 35: IiII / iIii1I11I1II1 - I1IiiI - OoO0O00 * O0 if 97 - 97: Oo0Ooo . i1IIi if 56 - 56: Ii1I if 2 - 2: i1IIi % oO0o + O0 - OoO0O00 if 34 - 34: ooOoO0o + oO0o - Oo0Ooo oO00o0o0O = map_request . itr_rlocs [ 0 ] if ( oO00o0o0O . is_private_address ( ) and lisp_nat_traversal ) : oO00o0o0O = source if 50 - 50: o0oOOo0O0Ooo + Oo0Ooo + i1IIi if 79 - 79: Ii1I / II111iiii . I1ii11iIi11i oOO000 = map_request . nonce oO0O0o00oOo = lisp_nonce_echoing oOoo0oO = map_request . keys if 82 - 82: IiII . O0 . iIii1I11I1II1 / ooOoO0o / OoooooooOO / OoooooooOO Ooooo00 . map_replies_sent += 1 if 98 - 98: O0 . oO0o * O0 IIii1i = lisp_build_map_reply ( Ooooo00 . eid , Ooooo00 . group , Ooooo00 . rloc_set , oOO000 , LISP_NO_ACTION , 1440 , map_request . rloc_probe , oOoo0oO , oO0O0o00oOo , True , ttl ) if 87 - 87: iII111i + iII111i + iII111i % I11i if 2 - 2: OOooOOo * O0 - OoOoOO00 * I1Ii111 - oO0o + I1ii11iIi11i if 47 - 47: ooOoO0o + I1ii11iIi11i if 40 - 40: OoooooooOO if 20 - 20: OOooOOo / O0 if 51 - 51: ooOoO0o - I1Ii111 * oO0o if 47 - 47: Oo0Ooo % OoO0O00 * Ii1I / OoOoOO00 if 1 - 1: I1IiiI if 68 - 68: ooOoO0o if 68 - 68: I11i % IiII if 1 - 1: I1IiiI + OOooOOo - OOooOOo * O0 + o0oOOo0O0Ooo * OOooOOo if 48 - 48: ooOoO0o - iII111i + I1ii11iIi11i * I1Ii111 % ooOoO0o * OoO0O00 if 28 - 28: i1IIi / iII111i + OOooOOo if 89 - 89: Oo0Ooo + II111iiii * OoO0O00 + Oo0Ooo % II111iiii if 59 - 59: O0 + Oo0Ooo if 63 - 63: OoO0O00 / I1IiiI / oO0o . Ii1I / i1IIi if ( map_request . rloc_probe and len ( lisp_sockets ) == 4 ) : ii1i11iiII = ( oO00o0o0O . is_private_address ( ) == False ) ooOoOo0O = oO00o0o0O . print_address_no_iid ( ) if ( ( ii1i11iiII and lisp_rtr_list . has_key ( ooOoOo0O ) ) or sport == 0 ) : lisp_encapsulate_rloc_probe ( lisp_sockets , oO00o0o0O , None , IIii1i ) return if 50 - 50: I11i . I11i % I1IiiI - i1IIi if 63 - 63: OoO0O00 . iII111i if 28 - 28: ooOoO0o . Oo0Ooo - OoooooooOO - I1Ii111 - OoooooooOO - oO0o if 25 - 25: I11i / I1Ii111 . i11iIiiIii % i1IIi if 21 - 21: O0 * IiII . iII111i / iII111i % i11iIiiIii / I11i if 15 - 15: o0oOOo0O0Ooo / OoO0O00 - i1IIi lisp_send_map_reply ( lisp_sockets , IIii1i , oO00o0o0O , sport ) return if 30 - 30: OoO0O00 / ooOoO0o % ooOoO0o if 40 - 40: i1IIi . iIii1I11I1II1 * OoOoOO00 if 83 - 83: iIii1I11I1II1 + Ii1I - Ii1I % II111iiii if 82 - 82: O0 if 18 - 18: iII111i . IiII . I1IiiI if 40 - 40: IiII / oO0o + OoooooooOO / iII111i / II111iiii + i1IIi if 33 - 33: I11i + I1ii11iIi11i + i11iIiiIii * I1IiiI % oO0o % OoooooooOO def lisp_rtr_process_map_request ( lisp_sockets , map_request , source , sport , ttl ) : if 4 - 4: OoO0O00 . I1IiiI - O0 % iII111i . 
OOooOOo if 69 - 69: OoooooooOO if 19 - 19: O0 + iIii1I11I1II1 / OoOoOO00 / oO0o + II111iiii - OOooOOo if 70 - 70: i1IIi * o0oOOo0O0Ooo + I1Ii111 . ooOoO0o - O0 + i11iIiiIii oO00o0o0O = map_request . itr_rlocs [ 0 ] if ( oO00o0o0O . is_private_address ( ) ) : oO00o0o0O = source oOO000 = map_request . nonce if 81 - 81: iIii1I11I1II1 - OoO0O00 . i11iIiiIii OOo0O0O0o0 = map_request . target_eid O0o00oOOOO00 = map_request . target_group if 4 - 4: o0oOOo0O0Ooo / OoO0O00 - I11i ooo0oo = [ ] for OOOOOo0O0oOO in [ lisp_myrlocs [ 0 ] , lisp_myrlocs [ 1 ] ] : if ( OOOOOo0O0oOO == None ) : continue oOo00O = lisp_rloc ( ) oOo00O . rloc . copy_address ( OOOOOo0O0oOO ) oOo00O . priority = 254 ooo0oo . append ( oOo00O ) if 99 - 99: OoO0O00 * I11i if 33 - 33: I1Ii111 % IiII * OOooOOo - I1Ii111 oO0O0o00oOo = lisp_nonce_echoing oOoo0oO = map_request . keys if 100 - 100: ooOoO0o . i11iIiiIii * Oo0Ooo - i11iIiiIii IIii1i = lisp_build_map_reply ( OOo0O0O0o0 , O0o00oOOOO00 , ooo0oo , oOO000 , LISP_NO_ACTION , 1440 , True , oOoo0oO , oO0O0o00oOo , True , ttl ) lisp_send_map_reply ( lisp_sockets , IIii1i , oO00o0o0O , sport ) return if 72 - 72: oO0o + I11i . OoooooooOO if 84 - 84: oO0o * oO0o - i1IIi + ooOoO0o if 83 - 83: i1IIi if 85 - 85: i11iIiiIii / OoO0O00 / oO0o if 12 - 12: iII111i % OOooOOo % i1IIi if 17 - 17: IiII if 63 - 63: ooOoO0o . i11iIiiIii / iIii1I11I1II1 if 8 - 8: i11iIiiIii . IiII * iIii1I11I1II1 * I1IiiI * Ii1I * i11iIiiIii if 24 - 24: I1IiiI * I11i - o0oOOo0O0Ooo / iII111i + IiII - I1ii11iIi11i if 53 - 53: I11i / I1IiiI - iIii1I11I1II1 - o0oOOo0O0Ooo * OoOoOO00 def lisp_get_private_rloc_set ( target_site_eid , seid , group ) : ooo0oo = target_site_eid . registered_rlocs if 86 - 86: iIii1I11I1II1 - I1Ii111 OoO0OOOOO0OO = lisp_site_eid_lookup ( seid , group , False ) if ( OoO0OOOOO0OO == None ) : return ( ooo0oo ) if 5 - 5: o0oOOo0O0Ooo if 58 - 58: oO0o * II111iiii * Oo0Ooo - I1IiiI % iII111i if 77 - 77: I11i / iII111i * o0oOOo0O0Ooo % iIii1I11I1II1 if 26 - 26: i1IIi / OoO0O00 / IiII oO00OoO0O0O = None I1III = [ ] for iIII in ooo0oo : if ( iIII . is_rtr ( ) ) : continue if ( iIII . rloc . is_private_address ( ) ) : I1iiI1 = copy . deepcopy ( iIII ) I1III . append ( I1iiI1 ) continue if 74 - 74: I1ii11iIi11i / i11iIiiIii - II111iiii . Oo0Ooo / ooOoO0o oO00OoO0O0O = iIII break if 55 - 55: OoO0O00 % IiII if ( oO00OoO0O0O == None ) : return ( ooo0oo ) oO00OoO0O0O = oO00OoO0O0O . rloc . print_address_no_iid ( ) if 93 - 93: OoO0O00 . I1ii11iIi11i / OOooOOo % OoooooooOO + i1IIi + I1Ii111 if 94 - 94: II111iiii + i11iIiiIii % Ii1I / ooOoO0o * OoOoOO00 if 68 - 68: O0 / Oo0Ooo / iIii1I11I1II1 if 63 - 63: I1Ii111 + iII111i iI1III = None for iIII in OoO0OOOOO0OO . registered_rlocs : if ( iIII . is_rtr ( ) ) :
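# The handler above (lisp_rtr_process_map_request) is machine-obfuscated. The
# sketch below is a readable paraphrase of the same logic with descriptive
# names, provided for reference only; it is not part of the original module.
def rtr_process_map_request(lisp_sockets, map_request, source, sport, ttl):
    # Reply to the first ITR-RLOC, falling back to the packet source when the
    # listed RLOC is a private (NATed) address.
    itr_rloc = map_request.itr_rlocs[0]
    if itr_rloc.is_private_address():
        itr_rloc = source

    nonce = map_request.nonce
    eid, group = map_request.target_eid, map_request.target_group

    # Advertise this RTR's own locators (IPv4 and IPv6) at low priority 254.
    rloc_set = []
    for local_rloc in (lisp_myrlocs[0], lisp_myrlocs[1]):
        if local_rloc is None:
            continue
        rloc = lisp_rloc()
        rloc.rloc.copy_address(local_rloc)
        rloc.priority = 254
        rloc_set.append(rloc)

    packet = lisp_build_map_reply(eid, group, rloc_set, nonce, LISP_NO_ACTION,
                                  1440, True, map_request.keys,
                                  lisp_nonce_echoing, True, ttl)
    lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)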
<reponame>robertmacdavid/up4-abstract<gh_stars>1-10 # Copyright 2020-2021 Open Networking Foundation # Copyright 2021-present Princeton University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------------ # SPGWU TESTS # # To run all tests: # make check TEST=spgwu # # To run a specific test case: # make check TEST=spgwu.<TEST CLASS NAME> # # For example: # make check TEST=spgwu.GtpuEncapDownlinkTest # ------------------------------------------------------------------------------ from time import sleep from lib.base_test import pkt_route, pkt_decrement_ttl, P4RuntimeTest, \ autocleanup, print_inline from ptf.testutils import group from ptf import testutils as testutils from scapy.contrib import gtp from scapy.layers.inet import IP, TCP, UDP, ICMP, Ether from scapy.layers.inet6 import IPv6 #from scapy.all import IP, IPv6, TCP, UDP, ICMP, Ether from lib.convert import encode from spgwu_base import GtpuBaseTest, UDP_GTP_PORT, GTPU_EXT_PSC_TYPE_DL, \ GTPU_EXT_PSC_TYPE_UL from unittest import skip from lib.extra_headers import CpuHeader CPU_CLONE_SESSION_ID = 99 FSEID_BITWIDTH = 96 UE_IPV4 = "172.16.17.32" ENODEB_IPV4 = "172.16.58.3" S1U_IPV4 = "192.168.127.12" SGW_IPV4 = "192.168.127.12" PDN_IPV4 = "172.16.17.32" SWITCH_MAC = "AA:AA:AA:00:00:01" ENODEB_MAC = "BB:BB:BB:00:00:01" PDN_MAC = "CC:CC:CC:00:00:01" @group("gtpu") class GtpuDecapUplinkTest(GtpuBaseTest): """ Tests that a packet received from a UE gets decapsulated and routed. """ def runTest(self): # Test with different type of packets. for pkt_type in self.supported_l4: print_inline("%s ... " % pkt_type) pkt = getattr(testutils, "simple_%s_packet" % pkt_type)(eth_src=ENODEB_MAC, eth_dst=SWITCH_MAC, ip_src=UE_IPV4, ip_dst=PDN_IPV4) pkt = self.gtpu_encap(pkt, ip_src=ENODEB_IPV4, ip_dst=S1U_IPV4) self.testPacket(pkt) @autocleanup def testPacket(self, pkt): if gtp.GTP_U_Header not in pkt: raise AssertionError("Packet given to decap test is not encapsulated!") # build the expected decapsulated packet exp_pkt = self.gtpu_decap(pkt) dst_mac = PDN_MAC # Expected pkt should have routed MAC addresses and decremented hop # limit (TTL). pkt_route(exp_pkt, dst_mac) pkt_decrement_ttl(exp_pkt) # PDR counter ID ctr_id = self.new_counter_id() # program all the tables self.add_entries_for_uplink_pkt(pkt, exp_pkt, self.port1, self.port2, ctr_id, drop=False) # read pre and post-QoS packet and byte counters self.read_pdr_counters(ctr_id) # send packet and verify it is decapsulated and routed testutils.send_packet(self, self.port1, pkt) testutils.verify_packet(self, exp_pkt, self.port2) # Check if pre and post-QoS packet and byte counters incremented self.verify_counters_increased(ctr_id, 1, len(pkt), 1, len(pkt)) @group("gtpu") class GtpuEncapDownlinkTest(GtpuBaseTest): """ Tests that a packet received from the internet/core gets encapsulated and forwarded. """ def runTest(self): # Test with different type of packets. for pkt_type in self.supported_l4: print_inline("%s ... 
" % pkt_type) pkt = getattr(testutils, "simple_%s_packet" % pkt_type)(eth_src=PDN_MAC, eth_dst=SWITCH_MAC, ip_src=PDN_IPV4, ip_dst=UE_IPV4) self.testPacket(pkt) @autocleanup def testPacket(self, pkt): # build the expected encapsulated packet exp_pkt = pkt.copy() dst_mac = ENODEB_MAC # Should be encapped too obv. exp_pkt = self.gtpu_encap(exp_pkt, ip_src=S1U_IPV4, ip_dst=ENODEB_IPV4) # Expected pkt should have routed MAC addresses and decremented hop # limit (TTL). pkt_route(exp_pkt, dst_mac) pkt_decrement_ttl(exp_pkt) # PDR counter ID ctr_id = self.new_counter_id() # program all the tables self.add_entries_for_downlink_pkt(pkt, exp_pkt, self.port1, self.port2, ctr_id, drop=False) # read pre and post-QoS packet and byte counters self.read_pdr_counters(ctr_id) # send packet and verify it is decapsulated and routed testutils.send_packet(self, self.port1, pkt) testutils.verify_packet(self, exp_pkt, self.port2) # Check if pre and post-QoS packet and byte counters incremented self.verify_counters_increased(ctr_id, 1, len(pkt), 1, len(pkt)) @group("gtpu") class GtpuDropUplinkTest(GtpuBaseTest): """ Tests that a packet received from a UE gets decapsulated and dropped because of FAR rule. """ def runTest(self): # Test with different type of packets. for pkt_type in self.supported_l4: print_inline("%s ... " % pkt_type) pkt = getattr(testutils, "simple_%s_packet" % pkt_type)(eth_src=ENODEB_MAC, eth_dst=SWITCH_MAC, ip_src=UE_IPV4, ip_dst=PDN_IPV4) pkt = self.gtpu_encap(pkt, ip_src=ENODEB_IPV4, ip_dst=S1U_IPV4) self.testPacket(pkt) @autocleanup def testPacket(self, pkt): if gtp.GTP_U_Header not in pkt: raise AssertionError("Packet given to decap test is not encapsulated!") # build the expected decapsulated packet exp_pkt = self.gtpu_decap(pkt) dst_mac = self.random_mac_addr() # Expected pkt should have routed MAC addresses and decremented hop # limit (TTL). pkt_route(exp_pkt, dst_mac) pkt_decrement_ttl(exp_pkt) # PDR counter ID ctr_id = self.new_counter_id() # program all the tables self.add_entries_for_uplink_pkt(pkt, exp_pkt, self.port1, self.port2, ctr_id, drop=True) # read pre and post-QoS packet and byte counters self.read_pdr_counters(ctr_id) # send packet and verify it is dropped testutils.send_packet(self, self.port1, pkt) testutils.verify_no_other_packets(self) # Check if pre-QoS packet and byte counters incremented, # and verify the post-QoS counters did not increment self.verify_counters_increased(ctr_id, 1, len(pkt), 0, 0) @group("gtpu") class GtpuDropDownlinkTest(GtpuBaseTest): """ Tests that a packet received from the internet/core gets dropped because of FAR rule. """ def runTest(self): # Test with different type of packets. for pkt_type in self.supported_l4: print_inline("%s ... " % pkt_type) pkt = getattr(testutils, "simple_%s_packet" % pkt_type)(eth_src=PDN_MAC, eth_dst=SWITCH_MAC, ip_src=PDN_IPV4, ip_dst=UE_IPV4) self.testPacket(pkt) @autocleanup def testPacket(self, pkt): # build the expected encapsulated packet exp_pkt = pkt.copy() dst_mac = ENODEB_MAC # force recomputation of checksum after routing/ttl decrement del pkt[IP].chksum # Should be encapped too obv. exp_pkt = self.gtpu_encap(exp_pkt) # Expected pkt should have routed MAC addresses and decremented hop # limit (TTL). 
pkt_route(exp_pkt, dst_mac) pkt_decrement_ttl(exp_pkt) # PDR counter ID ctr_id = self.new_counter_id() # program all the tables self.add_entries_for_downlink_pkt(pkt, exp_pkt, self.port1, self.port2, ctr_id, drop=True) # read pre and post-QoS packet and byte counters self.read_pdr_counters(ctr_id) # send packet and verify it is dropped testutils.send_packet(self, self.port1, pkt) testutils.verify_no_other_packets(self) # Check if pre-QoS packet and byte counters incremented, # and verify the post-QoS counters did not increment self.verify_counters_increased(ctr_id, 1, len(pkt), 0, 0) class GtpuDdnDigestTest(GtpuBaseTest): """ Tests that the switch sends digests for buffering FARs. """ def runTest(self): # Test with different type of packets. for pkt_type in self.supported_l4: print_inline("%s ... " % pkt_type) pkt = getattr(testutils, "simple_%s_packet" % pkt_type)(eth_src=PDN_MAC, eth_dst=SWITCH_MAC, ip_src=PDN_IPV4, ip_dst=UE_IPV4) self.testPacket(pkt) @autocleanup def testPacket(self, pkt): # Wait up to 1 seconds before sending duplicate digests for the same FSEID. self.set_up_ddn_digest(ack_timeout_ns=1 * 10**9) # Build the expected encapsulated pkt that we would receive as output without buffering. # The actual pkt will be dropped, but we still need it to populate FAR with tunneling info. exp_pkt = pkt.copy() exp_pkt = self.gtpu_encap(exp_pkt, ip_src=S1U_IPV4, ip_dst=ENODEB_IPV4) pkt_route(exp_pkt, ENODEB_MAC) pkt_decrement_ttl(exp_pkt) # PDR counter ID. ctr_id = self.new_counter_id() # Program all the tables. fseid = 0xBEEF self.add_entries_for_downlink_pkt(pkt, exp_pkt, self.port1, self.port2, ctr_id, buffer=True, session_id=fseid) # Read pre and post-QoS packet and byte counters. self.read_pdr_counters(ctr_id) # Send 1st packet. testutils.send_packet(self, self.port1, pkt) # Only pre-QoS counters should increase self.verify_counters_increased(ctr_id, 1, len(pkt), 0, 0) # Verify that we have received the DDN digest exp_digest_data = self.helper.build_p4data_struct( [self.helper.build_p4data_bitstring(encode(fseid, FSEID_BITWIDTH))]) self.verify_digest_list("ddn_digest_t", exp_digest_data) # Send 2nd packet immediately, verify counter increase but NO digest should be generated. self.read_pdr_counters(ctr_id) testutils.send_packet(self, self.port1, pkt) self.verify_counters_increased(ctr_id, 1, len(pkt), 0, 0) self.verify_no_other_digest_list(timeout=1) # Send third packet after waiting at least ack_timeout_ns. # We should receive a new digest. sleep(1.1) self.read_pdr_counters(ctr_id) testutils.send_packet(self, self.port1, pkt) self.verify_counters_increased(ctr_id, 1, len(pkt), 0, 0) self.verify_digest_list("ddn_digest_t", exp_digest_data) # All packets should have been buffered, not forwarded. testutils.verify_no_other_packets(self) class GtpEndMarkerPacketOutTest(GtpuBaseTest): """ Tests that the switch can route end-marker packet-outs like regular packets, i.e., by rewriting MAC addresses and forwarding to an egress port. 
""" @autocleanup def runTest(self): # gtp_type=254 -> end marker pkt = Ether(src=0x0, dst=0x0) / \ IP(src=S1U_IPV4, dst=ENODEB_IPV4) / \ UDP(sport=UDP_GTP_PORT, dport=UDP_GTP_PORT, chksum=0) / \ gtp.GTPHeader(gtp_type=254, teid=1) # Expect routed packet exp_pkt = pkt.copy() exp_pkt[Ether].src = SWITCH_MAC exp_pkt[Ether].dst = ENODEB_MAC pkt_decrement_ttl(exp_pkt) outport = self.port2 self.add_routing_entry(ip_prefix=exp_pkt[IP].dst + '/32', src_mac=exp_pkt[Ether].src, dst_mac=exp_pkt[Ether].dst, egress_port=outport) self.send_packet_out(self.helper.build_packet_out(pkt, {"reserved": 0})) testutils.verify_packet(self, exp_pkt, outport) @group("gtpu") class AclPuntTest(GtpuBaseTest): """ Test that the ACL table punts a packet to the CPU """ def runTest(self): # Test with different type of packets. for pkt_type in self.supported_l4[:1]: print_inline("%s ... " % pkt_type) pkt = getattr(testutils, "simple_%s_packet" % pkt_type)() self.testPacket(pkt) @autocleanup def testPacket(self, pkt): # exp_pkt = CpuHeader(port_num=self.port1) / pkt exp_pkt = pkt exp_pkt_in_msg = self.helper.build_packet_in( exp_pkt, metadata={ "ingress_port": self.port1, "_pad": 0 }) self.add_device_mac(pkt[Ether].dst) self.add_cpu_clone_session() self.add_acl_entry(clone_to_cpu=True, eth_type=pkt[Ether].type, ipv4_src=pkt[IP].src, ipv4_dst=pkt[IP].dst, ipv4_proto=pkt[IP].proto) testutils.send_packet(self, self.port1, pkt) self.verify_packet_in(exp_pkt_in_msg) @group("gtpu") class GtpuEncapPscDownlinkTest(GtpuBaseTest): """ Tests that a packet received from the internet/core gets encapsulated and forwarded with PDU Session Container extension header. """ def runTest(self): # Test with different type of packets. for pkt_type in self.supported_l4: print_inline("%s ... " % pkt_type) pkt = getattr(testutils, "simple_%s_packet" % pkt_type)(eth_src=PDN_MAC, eth_dst=SWITCH_MAC, ip_src=PDN_IPV4, ip_dst=UE_IPV4) self.testPacket(pkt) @autocleanup def testPacket(self, pkt): # build the expected encapsulated packet exp_pkt = pkt.copy() dst_mac = ENODEB_MAC # Encap with PSC ext header and given QFI exp_pkt = self.gtpu_encap(exp_pkt, ip_src=S1U_IPV4, ip_dst=ENODEB_IPV4, ext_psc_type=GTPU_EXT_PSC_TYPE_DL, ext_psc_qfi=1)
<gh_stars>1-10 """Rules for describing impingement. Impingement is when something obstructs a region or a clear line of sight between two points. For example, suppose we have two entities that we think are a label/value pair. We may ask that the space between them be unimpinged, meaning that there are no other entities occupying that space. If the space is impinged upon, then maybe it's unlikely that the entities we've chosen for the label/value pair are correct. """ from dataclasses import dataclass from itertools import chain from typing import Callable, Optional, Tuple from uuid import uuid4 from ..bp_logging import bp_logging from ..document import DocRegion, Document, build_words_ez_doc_region, get_pages from ..entity import Entity, Page from ..functional import pairs from ..geometry import BBox, Interval from ..impingement import Impingement from ..rule import AtomScore, DegreeError, Lenience, Predicate, RuleScore from ..rules.spatial import Orientation IMPINGEMENT_SMALL_INSET = 0.25 IMPINGEMENT_LARGE_INSET = 1.0 @dataclass(frozen=True) class BoxUnimpinged(Predicate): """Says that a particular document region is not impinged upon. If we say that a box is unimpinged in the horizontal direction, this means that if we are standing on the left side of the box, we can see clear through to the right side -- there is nothing in the way, impinging our view. Impingement in the vertical direction is defined analogously. For multipage documents, the document pages will be assumed to be left-aligned when measuring impingement across pages. Args: doc_region_getter: Defines the document region which should be unimpinged in terms of a document and some number of entities. direction: The direction in which we are measuring impingement. Should be one of the values of the Orientation enumeration. degree_: The number of entities that doc_region_getter takes as argument. This should be a positive integer. illegal_characters: Normally, all impinging text is counted. If this is set, only these characters will be considered "illegal". maximum_impingement: If the total impingement (a number between 0 and 1) is above this number, will return a score of 0. """ def __init__( self, direction: Orientation, degree_: int = 2, illegal_characters: Optional[str] = None, maximum_impingement: float = 1.0, name: str = 'box_unimpinged', uuid: Optional[str] = None, ): if degree_ < 1: raise DegreeError(f'box_unimpinged degree must be at least 1, not {degree_}') super().__init__( uuid = str(uuid4()) if uuid is None else uuid, name = name, ) object.__setattr__(self, 'direction', direction) object.__setattr__(self, 'degree_', degree_) object.__setattr__(self, 'illegal_characters', illegal_characters) object.__setattr__(self, 'maximum_impingement', maximum_impingement) direction: Orientation degree_: int illegal_characters: Optional[str] maximum_impingement: float def leniency(self) -> float: return float(Lenience.LOW) @property def degree(self) -> int: return self.degree_ def doc_region_getter(self, doc: Document, *Es: Entity) \ -> Optional[DocRegion]: raise NotImplementedError def get_opacity(self, text: Optional[str]) -> float: if text is None or len(text) == 0: return 0.0 if self.illegal_characters is None: return 1.0 return sum(1 for c in text if c in self.illegal_characters) / len(text) def score(self, entities: Tuple[Entity, ...], doc: Document) -> RuleScore: doc_region = self.doc_region_getter(doc, *entities) # An empty or invalid box is considered to be unimpinged. 
if doc_region is None or not doc_region.bbox.non_empty: return AtomScore(1) def projection(doc_region: DocRegion) -> Interval: if self.direction == Orientation.VERTICAL: return doc_region.bbox.ix else: assert self.direction == Orientation.HORIZONTAL return doc_region.bbox.iy impingement_interval = Impingement(projection(doc_region)) box_defining_words = frozenset( chain.from_iterable(entity.entity_words() for entity in entities)) words_ez_doc_region = build_words_ez_doc_region(doc) for E in words_ez_doc_region.ts_intersecting(doc_region): assert len(tuple(E.entity_words())) == 1 if tuple(E.entity_words())[0] not in box_defining_words: E_doc_region = DocRegion.build(doc, E.bbox) assert E_doc_region impingement_interval.incorporate_subdivision( projection(E_doc_region), self.get_opacity(E.entity_text)) if impingement_interval.total_impingement > self.maximum_impingement: return AtomScore(0) return AtomScore(1 - impingement_interval.total_impingement) def _erode_if_possible(interval: Interval, amount: float) -> Interval: eroded_interval = interval.eroded(amount) if eroded_interval is None: return interval return eroded_interval def _space_between_vertically(E1: Entity, E2: Entity, doc: Document, spanning: bool) -> Optional[DocRegion]: """The vertical space between two entities.""" if spanning: ix = Interval.spanning_intervals([E1.bbox.ix, E2.bbox.ix]) else: intersection = Interval.intersection([E1.bbox.ix, E2.bbox.ix]) if intersection is None: return None ix = intersection return DocRegion.build(doc, BBox.build( _erode_if_possible(ix, IMPINGEMENT_SMALL_INSET * doc.median_line_height()), _erode_if_possible( Interval(E1.bbox.iy.b, E2.bbox.iy.a), IMPINGEMENT_SMALL_INSET * doc.median_line_height()))) def _space_between_horizontally(E1: Entity, E2: Entity, doc: Document, spanning: bool) -> Optional[DocRegion]: """The horizontal space between two entities.""" if spanning: iy = Interval.spanning_intervals([E1.bbox.iy, E2.bbox.iy]) else: intersection = Interval.intersection([E1.bbox.iy, E2.bbox.iy]) if intersection is None: return None iy = intersection return DocRegion.build(doc, BBox.build( _erode_if_possible( Interval(E1.bbox.ix.b, E2.bbox.ix.a), IMPINGEMENT_SMALL_INSET * doc.median_line_height()), _erode_if_possible(iy, IMPINGEMENT_SMALL_INSET * doc.median_line_height()))) def get_page_for_edge(E: Entity, doc: Document) -> Page: entity_pages = get_pages(E, doc) assert len(entity_pages) > 0 if len(entity_pages) > 1: # FIXME: Check the impingement on both pages, not just the first page. bp_logging.warning(f'entity {E} spans multiple pages, using first page ' 'for page edge impingement rules') return entity_pages[0] # FIXME: When we support multiline clustering across pages, these edge rules # will need to be updated. 
def _space_between_top_edge(doc: Document, E: Entity) -> Optional[DocRegion]: """The vertical space between an entity and the top edge of the page.""" page = get_page_for_edge(E, doc) return DocRegion.build(doc, BBox.build(E.bbox.ix, Interval(page.bbox.iy.a, E.bbox.iy.a))) def _space_between_bottom_edge(doc: Document, E: Entity) -> Optional[DocRegion]: """The vertical space between an entity and the bottom edge of the page.""" page = get_page_for_edge(E, doc) return DocRegion.build(doc, BBox.build(E.bbox.ix, Interval(E.bbox.iy.b, page.bbox.iy.b))) def _space_between_left_edge(doc: Document, E: Entity) -> Optional[DocRegion]: """The horizontal space between an entity and the left edge of the page.""" page = get_page_for_edge(E, doc) return DocRegion.build(doc, BBox.build( Interval(page.bbox.ix.a, E.bbox.ix.a), _erode_if_possible( E.bbox.iy, IMPINGEMENT_SMALL_INSET * doc.median_line_height()))) def _space_between_right_edge(doc: Document, E: Entity) -> Optional[DocRegion]: """The horizontal space between an entity and the right edge of the page.""" page = get_page_for_edge(E, doc) return DocRegion.build(doc, BBox.build( Interval(E.bbox.ix.b, page.bbox.width), _erode_if_possible( E.bbox.iy, IMPINGEMENT_SMALL_INSET * doc.median_line_height()))) @dataclass(frozen=True) class NothingBetweenHorizontally(BoxUnimpinged): spanning: bool def __init__( self, name: str = 'nothing_between_horizontally', uuid: Optional[str] = None, direction: Orientation = Orientation.HORIZONTAL, degree_: int = 2, spanning: bool = False, illegal_characters: Optional[str] = None, maximum_impingement: float = 1.0, ): super().__init__( uuid = str(uuid4()) if uuid is None else uuid, name = name, direction = direction, degree_ = degree_, illegal_characters = illegal_characters, maximum_impingement = maximum_impingement, ) object.__setattr__(self, 'spanning', spanning) def doc_region_getter(self, doc: Document, *Es: Entity) \ -> Optional[DocRegion]: assert len(Es) == 2 E1, E2 = Es return _space_between_horizontally(E1, E2, doc, self.spanning) @dataclass(frozen=True) class NothingBetweenVertically(BoxUnimpinged): spanning: bool def __init__( self, name: str = 'nothing_between_vertically', uuid: Optional[str] = None, direction: Orientation = Orientation.VERTICAL, degree_: int = 2, spanning: bool = False, illegal_characters: Optional[str] = None, maximum_impingement: float = 1.0, ): super().__init__( uuid = str(uuid4()) if uuid is None else uuid, name = name, direction = direction, degree_ = degree_, illegal_characters = illegal_characters, maximum_impingement = maximum_impingement, ) object.__setattr__(self, 'spanning', spanning) def doc_region_getter(self, doc: Document, *Es: Entity) \ -> Optional[DocRegion]: assert len(Es) == 2 E1, E2 = Es return _space_between_vertically(E1, E2, doc, self.spanning) LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" @dataclass(frozen=True) class NoWordsBetweenVertically(BoxUnimpinged): spanning: bool """Says that there are no words in the vertical space between two entities. Example: This can be useful in the context of a table, where a column has several numeric values under its heading. 
So something like this would be ok: Amounts <- label we are looking for $50.00 $25.00 $30.00 <- value we are looking for """ def __init__( self, name: str = 'no_words_between_vertically', uuid: Optional[str] = None, direction: Orientation = Orientation.VERTICAL, degree_: int = 2, spanning: bool = False, illegal_characters: str = LETTERS, maximum_impingement: float = 0.5, ): super().__init__( uuid = str(uuid4()) if uuid is None else uuid, name = name, direction = direction, degree_ = degree_, illegal_characters = illegal_characters, maximum_impingement = maximum_impingement, ) object.__setattr__(self, 'spanning', spanning) def doc_region_getter(self, doc: Document, *Es: Entity) \ -> Optional[DocRegion]: assert len(Es) == 2 E1, E2 = Es return _space_between_vertically(E1, E2, doc, self.spanning) @dataclass(frozen=True) class NoWordsBetweenHorizontally(BoxUnimpinged): spanning: bool """Says that there are no words in the horizontal space between two entities. Similar to `NoWordsBetweenVertically`. """ def __init__( self, name: str = 'no_words_between_horizontally', uuid: Optional[str] = None, direction: Orientation = Orientation.HORIZONTAL, degree_: int = 2, spanning: bool = False, illegal_characters: str = LETTERS, maximum_impingement: float = 0.5, ): super().__init__( uuid = str(uuid4()) if uuid is None else uuid, name = name, direction = direction, degree_ = degree_, illegal_characters = illegal_characters, maximum_impingement = maximum_impingement, ) object.__setattr__(self, 'spanning', spanning) def doc_region_getter(self, doc: Document, *Es: Entity) \ -> Optional[DocRegion]: assert len(Es) == 2 E1, E2 = Es return _space_between_horizontally(E1, E2, doc, self.spanning) def nothing_between_vertically_custom( spanning: bool = False, illegal_characters: Optional[str] = None, maximum_impingement: float = 1.0,) -> Predicate: return NothingBetweenVertically( 'nothing_between_vertically', None, Orientation.VERTICAL, 2, spanning, illegal_characters, maximum_impingement) def nothing_between_horizontally_custom( spanning: bool = False, illegal_characters: Optional[str] = None, maximum_impingement:
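        float = 1.0,) -> Predicate:
    # NOTE: the original file is cut off mid-signature above; this completion is an
    # inferred sketch mirroring nothing_between_vertically_custom(), not source text.
    return NothingBetweenHorizontally(
        'nothing_between_horizontally', None, Orientation.HORIZONTAL, 2,
        spanning, illegal_characters, maximum_impingement)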
#!/usr/bin/python # postProcessVels.py # Author: <NAME> # All rights reserved def postProcessVels(east_vels_list_path, ice_gmt_path, rock_gmt_path, dem_grd_path): import datetime; import os; import re; import subprocess; import sys; sys.path.append("/data/akm/Python"); from season import *; assert os.path.exists(east_vels_list_path), "\n***** ERROR: \"" + east_vels_list_path + "\" not found, exiting...\n"; assert os.path.exists(ice_gmt_path), "\n***** ERROR: \"" + ice_gmt_path + "\" not found, exiting...\n"; assert os.path.exists(rock_gmt_path), "\n***** ERROR: \"" + rock_gmt_path + "\" not found, exiting...\n"; assert os.path.exists(dem_grd_path), "\n***** ERROR: \"" + dem_grd_path + "\" not found, exiting...\n"; M_SCRIPTS_DIR = "/data/akm/MATLAB/Adam_Cleaner"; REF_UTM_ZONE = "41"; UTM_LETTER = "X"; VEL_PATH_COL = 0; REGION_COL = 1; UTM_COL = 2; SCALE = "500000"; # VEL_MAX = "5"; # TOL = "0.2"; # NUMDIF = "2"; # For INO velocities VEL_MAX = "5"; TOL = "0.2"; NUMDIF = "3"; num_pairs = {}; infile = open(east_vels_list_path, "r"); for line in infile: if line[0] == "#": continue; elements = line.strip().split(); east_vel_path = elements[VEL_PATH_COL]; north_vel_path = east_vel_path.replace("east", "north"); utm_zone = elements[UTM_COL]; if not os.path.exists(east_vel_path): print("\n***** Warning: \"" + east_vel_path + "\" not found, skipping...\n"); continue; east_vel_name = east_vel_path[east_vel_path.rfind("/") + 1 : ]; pair_dates = east_vel_name[re.search("\d{14}_\d{14}", east_vel_name).start(0) : re.search("\d{14}_\d{14}", east_vel_name).end(0)]; east_pair_type = east_vel_name[re.search("\d{14}_\d{14}", east_vel_name).end(0) + 1 : east_vel_name.rfind(".")]; if pair_dates[0:2] != "19" and pair_dates[0:2] != "20": pair_dates = pair_dates[4:8] + pair_dates[0:2] + pair_dates[2:4] + pair_dates[8:14] + "_" + pair_dates[19:23] + pair_dates[15:17] + pair_dates[17:19] + pair_dates[23:29]; key = pair_dates + "_" + east_pair_type; if key not in num_pairs: num_pairs[key] = 1; else: east_pair_type = east_pair_type + "_" + str(num_pairs[key]); num_pairs[key] += 1; north_pair_type = east_pair_type.replace("east", "north"); mag_pair_type = east_pair_type.replace("eastxyz", "mag"); new_east_name = pair_dates + "_" + east_pair_type; new_north_name = pair_dates + "_" + north_pair_type; new_mag_name = pair_dates + "_" + mag_pair_type; new_east_vel_path = pair_dates + "_" + east_pair_type + ".grd"; new_north_vel_path = pair_dates + "_" + north_pair_type + ".grd"; if utm_zone == REF_UTM_ZONE: if not os.path.exists(new_east_vel_path): cmd = "\ngdalwarp -of GTiff -srcnodata \"0\" -dstnodata \"nan\" " + east_vel_path + " temp_east.tif\n"; cmd += "\ngdalwarp -of GTiff -srcnodata \"0\" -dstnodata \"nan\" " + north_vel_path + " temp_north.tif\n"; cmd += "\ngdal_translate -of GMT -a_nodata \"nan\" temp_east.tif " + new_east_vel_path + "\n"; cmd += "\ngdal_translate -of GMT -a_nodata \"nan\" temp_north.tif " + new_north_vel_path + "\n"; cmd += "\ngrdsample -T " + new_east_vel_path + " -G" + new_east_vel_path + "\n"; cmd += "\ngrdsample -T " + new_north_vel_path + " -G" + new_north_vel_path + "\n"; subprocess.call(cmd,shell=True); os.remove("temp_east.tif"); os.remove("temp_north.tif"); else: if not os.path.exists(new_east_vel_path): cmd = "\ngdalwarp -of GTiff -t_srs '+proj=utm +zone=" + utm_zone + " +datum=WGS84 +north' -srcnodata \"0\" -dstnodata \"nan\" " + east_vel_path + " temp_east.tif\n"; cmd += "\ngdalwarp -of GTiff -t_srs '+proj=utm +zone=" + utm_zone + " +datum=WGS84 +north' -srcnodata \"0\" -dstnodata 
\"nan\" " + north_vel_path + " temp_north.tif\n"; cmd += "\ngdalwarp -of GTiff -t_srs '+proj=utm +zone=" + REF_UTM_ZONE + " +datum=WGS84 +north' -srcnodata \"nan\" -dstnodata \"nan\" temp_east.tif temp_east_warp.tif\n"; cmd += "\ngdalwarp -of GTiff -t_srs '+proj=utm +zone=" + REF_UTM_ZONE + " +datum=WGS84 +north' -srcnodata \"nan\" -dstnodata \"nan\" temp_north.tif temp_north_warp.tif\n"; cmd += "\ngdal_translate -of GMT -a_nodata \"nan\" temp_east_warp.tif " + new_east_vel_path + "\n"; cmd += "\ngdal_translate -of GMT -a_nodata \"nan\" temp_north_warp.tif " + new_north_vel_path + "\n"; cmd += "\ngrdsample -T " + new_east_vel_path + " -G" + new_east_vel_path + "\n"; cmd += "\ngrdsample -T " + new_north_vel_path + " -G" + new_north_vel_path + "\n"; subprocess.call(cmd, shell=True); os.remove("temp_east.tif"); os.remove("temp_north.tif"); os.remove("temp_east_warp.tif"); os.remove("temp_north_warp.tif"); cmd = "\ngrdinfo " + new_east_vel_path + "\n"; pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; info = pipe.read(); pipe.close(); min_x = info[re.search("x_min: ",info).end(0) : re.search("x_min: -*\d+\.*\d*",info).end(0)]; max_x = info[re.search("x_max: ",info).end(0) : re.search("x_max: -*\d+\.*\d*",info).end(0)]; min_y = info[re.search("y_min: ",info).end(0) : re.search("y_min: -*\d+\.*\d*",info).end(0)]; max_y = info[re.search("y_max: ",info).end(0) : re.search("y_max: -*\d+\.*\d*",info).end(0)]; cmd = "\necho \"" + min_x + " " + max_y + "\\n" + max_x + " " + min_y + "\" | mapproject -Ju" + REF_UTM_ZONE + UTM_LETTER + "/1:1 -F -C -I\n"; pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout; geo_ul_x, geo_ul_y, geo_lr_x, geo_lr_y = pipe.read().split(); pipe.close(); R = "-R" + min_x + "/" + min_y + "/" + max_x + "/" + max_y + "r"; geoR = "-R" + geo_ul_x + "/" + geo_lr_y + "/" + geo_lr_x + "/" + geo_ul_y + "r"; J = "-Jx1:" + SCALE; geoJ = "-Ju" + REF_UTM_ZONE + "/1:" + SCALE; rock_grd_path = pair_dates + "_" + east_pair_type.replace("eastxyz","") + "_off_ice.grd"; rock_grd_path = rock_grd_path.replace("__","_") east_rr_path = new_east_name + "_rr.grd"; north_rr_path = new_north_name + "_rr.grd"; mag_rr_path = new_mag_name + "_rr.grd"; east_rock_path = new_east_name + "_off_ice.grd"; north_rock_path = new_north_name + "_off_ice.grd"; east_filt_path = new_east_name + "_filt.grd"; north_filt_path = new_north_name + "_filt.grd"; mag_filt_path = new_mag_name + "_filt.grd"; if not os.path.exists(east_rr_path): print("\n***** Removing ramp, creating \"" + east_rr_path + "\" and \"" + north_rr_path + "\"...\n"); if not os.path.exists(rock_grd_path): cmd = "\ngrdmask " + ice_gmt_path + " -N1/NaN/NaN -R" + new_east_vel_path + " -Gtemp_outside_ice.grd\n"; cmd += "\ngrdmask " + rock_gmt_path + " -NNaN/NaN/1 -R" + new_east_vel_path + " -Gtemp_inside_rock.grd\n"; cmd += "\ngrdmath temp_outside_ice.grd temp_inside_rock.grd AND = " + rock_grd_path + "\n"; subprocess.call(cmd, shell=True); os.remove("temp_outside_ice.grd"); os.remove("temp_inside_rock.grd"); if not os.path.exists("noiseremoval.m"): os.symlink(M_SCRIPTS_DIR + "/noiseremoval.m", "noiseremoval.m"); os.symlink(M_SCRIPTS_DIR + "/remloners.m", "remloners.m"); os.symlink(M_SCRIPTS_DIR + "/remnoise.m", "remnoise.m"); os.symlink(M_SCRIPTS_DIR + "/grdread2.m", "grdread2.m"); os.symlink(M_SCRIPTS_DIR + "/grdwrite2.m", "grdwrite2.m"); cmd = "\nmatlab -nodesktop -nosplash -r \"noiseremoval(5,0.3,3,'" + new_east_vel_path + "','" + new_north_vel_path + "'); exit;\"\n"; subprocess.call(cmd, shell=True); 
os.remove("noiseremoval.m"); os.remove("remloners.m"); os.remove("remnoise.m"); os.remove("grdread2.m"); os.remove("grdwrite2.m"); cmd = "\ngrdmath " + east_filt_path + " " + rock_grd_path + " OR = " + east_rock_path + "\n"; cmd += "\ngrdmath " + north_filt_path + " " + rock_grd_path + " OR = " + north_rock_path + "\n"; subprocess.call(cmd, shell=True); os.remove(east_filt_path); os.remove(north_filt_path); from removeTrendNoOutlines import *; removeTrendNoOutlines(new_east_vel_path, east_rock_path, "-2", "2"); removeTrendNoOutlines(new_north_vel_path, north_rock_path, "-2", "2"); # removeTrendNoOutlines(new_east_vel_path, east_rock_path, "-10", "10"); # removeTrendNoOutlines(new_north_vel_path, north_rock_path, "-10", "10"); os.remove(east_rock_path); os.remove(north_rock_path); cmd = "\ngrdmath " + east_rr_path + " " + north_rr_path + " HYPOT --IO_NC4_CHUNK_SIZE=c = " + mag_rr_path + "\n"; subprocess.call(cmd, shell=True); else: print("\n***** \"" + east_rr_path + "\" already exists, assuming results already ramp-removed...\n"); mag_rr_ps_path = new_mag_name + "_rr.ps"; cmd = "\nmakecpt -Cjet -T0/" + VEL_MAX + "/0.01 --COLOR_BACKGROUND=white --COLOR_NAN=white --COLOR_FOREGROUND=white > mag.cpt\n"; cmd += "\ngrdimage " + mag_rr_path + " " + J + " " + R + " -Cmag.cpt -P -K > " + mag_rr_ps_path + "\n"; cmd += "\npsxy " + ice_gmt_path + " " + J + " " + R + " -W1p,black -O -K >> " + mag_rr_ps_path + "\n"; cmd += "\npsxy " + rock_gmt_path + " " + J + " " + R + " -W1p,black -O -K >> " + mag_rr_ps_path + "\n"; cmd += "\npsbasemap " + geoJ + " " + geoR + " -Bf1a1g1:\"Longitude\":/a0.5g0.5:\"\"::,::.\"\":WeSn --MAP_FRAME_TYPE=inside --FORMAT_GEO_MAP=ddd:mmF --FONT_ANNOT_PRIMARY=12p,1,black --MAP_GRID_PEN_PRIMARY=0.25p,100/100/100,- -O -K >> " + mag_rr_ps_path + "\n"; cmd += "\npsbasemap " + geoJ + " " + geoR + " -Lfx3c/2c/76/5k+jr+u+p0.5,black+gwhite --FONT_ANNOT_PRIMARY=12p,1,black --FONT_LABEL=12p,1,black -O -K >> " + mag_rr_ps_path + "\n"; cmd += "\npsscale -D3c/4c/3c/0.1c -Cmag.cpt -B1:\"Speed\":/:\"m day@+-1@+\": --FONT_ANNOT_PRIMARY=12p,1,black --FONT_LABEL=12p,1,black -O >> " + mag_rr_ps_path + "\n"; cmd += "\nps2raster -A -Tf " + mag_rr_ps_path + "\n"; subprocess.call(cmd, shell=True); os.remove(mag_rr_ps_path); east_filt_path = east_rr_path[ : east_rr_path.rfind(".")] + "_filt.grd"; north_filt_path = north_rr_path[ : north_rr_path.rfind(".")] + "_filt.grd"; mag_filt_path = mag_rr_path[ : mag_rr_path.rfind(".")] + "_filt.grd"; if not os.path.exists(east_filt_path): print("\n***** Filtering results, creating \"" + east_filt_path + "\" and \"" + north_filt_path + "\"...\n"); if not os.path.exists("noiseremoval.m"): os.symlink(M_SCRIPTS_DIR + "/noiseremoval.m", "noiseremoval.m"); os.symlink(M_SCRIPTS_DIR + "/remloners.m", "remloners.m"); os.symlink(M_SCRIPTS_DIR + "/remnoise.m", "remnoise.m"); os.symlink(M_SCRIPTS_DIR + "/grdread2.m", "grdread2.m"); os.symlink(M_SCRIPTS_DIR + "/grdwrite2.m", "grdwrite2.m"); cmd = "\nmatlab -nodesktop -nosplash -r \"noiseremoval(" + \ VEL_MAX + "," + TOL + "," + NUMDIF + ",'" + east_rr_path + "','" + north_rr_path + "'); exit;\"\n"; cmd += "\ngrdmath " + east_filt_path + " " + north_filt_path + " HYPOT --IO_NC4_CHUNK_SIZE=c = " + mag_filt_path + "\n"; subprocess.call(cmd, shell=True); os.remove("noiseremoval.m"); os.remove("remloners.m"); os.remove("remnoise.m"); os.remove("grdread2.m"); os.remove("grdwrite2.m"); else: print("\n***** \"" + east_filt_path + "\" already exists, assuming results already filtered...\n"); mag_filt_ps_path = new_mag_name + 
"_rr_filt.ps"; cmd = "\nmakecpt -Chaxby -T0/" + VEL_MAX + "/0.01 --COLOR_BACKGROUND=white --COLOR_NAN=white --COLOR_FOREGROUND=white > mag.cpt\n"; cmd += "\ngrdimage " + mag_filt_path + " " + J + " " + R + " -Cmag.cpt -P -K > " + mag_filt_ps_path + "\n"; cmd += "\npsxy " + ice_gmt_path + " " + J + " " + R + " -W1p,black -O -K >> " + mag_filt_ps_path + "\n"; cmd += "\npsxy " + rock_gmt_path + " " + J + " " + R + " -W1p,black -O -K >> " + mag_filt_ps_path + "\n"; cmd += "\npsbasemap " + geoJ + " " + geoR + " -Bf1a1g1:\"Longitude\":/a0.5g0.5:\"\"::,::.\"\":WeSn --MAP_FRAME_TYPE=inside --FORMAT_GEO_MAP=ddd:mmF --FONT_ANNOT_PRIMARY=12p,1,black --MAP_GRID_PEN_PRIMARY=0.25p,100/100/100,- -O -K >> " + mag_filt_ps_path + "\n"; cmd += "\npsbasemap " + geoJ + " " + geoR + " -Lfx3c/2c/76/5k+jr+u+p0.5,black+gwhite --FONT_ANNOT_PRIMARY=12p,1,black --FONT_LABEL=12p,1,black -O -K >> " + mag_filt_ps_path + "\n"; cmd += "\npsscale -D3c/4c/3c/0.1c -Cmag.cpt -B1:\"Speed\":/:\"m day@+-1@+\": --FONT_ANNOT_PRIMARY=12p,1,black --FONT_LABEL=12p,1,black -O >> " + mag_filt_ps_path + "\n"; cmd += "\nps2raster -A -Tf " + mag_filt_ps_path + "\n"; subprocess.call(cmd, shell=True); os.remove(mag_filt_ps_path); mag_txt_path = mag_filt_path[ : mag_filt_path.rfind(".")] + ".txt"; if not os.path.exists(mag_txt_path): season_days = season(pair_dates); date1 = pair_dates[re.search("\d{14}_\d{14}", pair_dates).start(0) : re.search("\d{14}_\d{14}", pair_dates).start(0) + 14]; date2 = pair_dates[re.search("\d{14}_\d{14}", pair_dates).start(0) + 15 : re.search("\d{14}_\d{14}", pair_dates).end(0)]; if date1[0:2] != "19" and date1[0:2] != "20": date1
Value is scaled with factor 10: NOx Index = value / 10 *Note: If this value is unknown, 0x7FFF is returned. During the first 10..11 seconds after power-on or device reset, this value will be 0x7FFF as well.* :rtype: tuple :raise ~sensirion_i2c_driver.errors.I2cChecksumError: If a received CRC was wrong. """ # check and remove CRCs checked_data = Sen5xI2cCmdBase.interpret_response(self, data) # convert raw received data into proper data types mass_concentration_pm1p0 = int(unpack(">H", checked_data[0:2])[0]) # uint16 mass_concentration_pm2p5 = int(unpack(">H", checked_data[2:4])[0]) # uint16 mass_concentration_pm4p0 = int(unpack(">H", checked_data[4:6])[0]) # uint16 mass_concentration_pm10p0 = int(unpack(">H", checked_data[6:8])[0]) # uint16 ambient_humidity = int(unpack(">h", checked_data[8:10])[0]) # int16 ambient_temperature = int(unpack(">h", checked_data[10:12])[0]) # int16 voc_index = int(unpack(">h", checked_data[12:14])[0]) # int16 nox_index = int(unpack(">h", checked_data[14:16])[0]) # int16 return mass_concentration_pm1p0, \ mass_concentration_pm2p5, \ mass_concentration_pm4p0, \ mass_concentration_pm10p0, \ ambient_humidity, \ ambient_temperature, \ voc_index, \ nox_index class Sen5xI2cCmdStartFanCleaning(Sen5xI2cCmdBase): """ Start Fan Cleaning I²C Command Starts the fan cleaning manually. The "data ready"-flag will be cleared immediately and during the next few seconds, no new measurement results will be available (old values will be returned). Once the cleaning is finished, the "data ready"-flag will be set and new measurement results will be available. When executing this command while cleaning is already active, the command does nothing. If you stop the measurement while fan cleaning is active, the cleaning will be aborted immediately. .. note:: This command is only available in measure mode with PM measurement enabled, i.e. only if the fan is already running. In any other state, this command does nothing. """ def __init__(self): """ Constructor. """ super(Sen5xI2cCmdStartFanCleaning, self).__init__( command=0x5607, tx_data=None, rx_length=None, read_delay=0.0, timeout=0, post_processing_time=0.02, ) class Sen5xI2cCmdGetTemperatureOffsetParameters(Sen5xI2cCmdBase): """ Get Temperature Offset Parameters I²C Command Gets the temperature offset parameters from the device. """ def __init__(self): """ Constructor. """ super(Sen5xI2cCmdGetTemperatureOffsetParameters, self).__init__( command=0x60B2, tx_data=None, rx_length=9, read_delay=0.02, timeout=0, post_processing_time=0.0, ) def interpret_response(self, data): """ Validates the CRCs of the received data from the device and returns the interpreted data. :param bytes data: Received raw bytes from the read operation. :return: - offset (int) - Constant temperature offset scaled with factor 200 (T [°C] = value / 200). - slope (int) - Normalized temperature offset slope scaled with factor 10000 (applied factor = value / 10000). - time_constant (int) - Time constant [s] how fast the slope and offset are applied. After the specified value in seconds, 63% of the new slope and offset are applied. :rtype: tuple :raise ~sensirion_i2c_driver.errors.I2cChecksumError: If a received CRC was wrong. 
""" # check and remove CRCs checked_data = Sen5xI2cCmdBase.interpret_response(self, data) # convert raw received data into proper data types offset = int(unpack(">h", checked_data[0:2])[0]) # int16 slope = int(unpack(">h", checked_data[2:4])[0]) # int16 time_constant = int(unpack(">H", checked_data[4:6])[0]) # uint16 return offset, \ slope, \ time_constant class Sen5xI2cCmdSetTemperatureOffsetParameters(Sen5xI2cCmdBase): """ Set Temperature Offset Parameters I²C Command Sets the temperature offset parameters for the device. """ def __init__(self, offset, slope, time_constant): """ Constructor. :param int offset: Constant temperature offset scaled with factor 200 (T [°C] = value / 200). The default value is 0. :param int slope: Normalized temperature offset slope scaled with factor 10000 (applied factor = value / 10000). The default value is 0. :param int time_constant: Time constant [s] how fast the new slope and offset will be applied. After the specified value in seconds, 63% of the new slope and offset are applied. A time constant of zero means the new values will be applied immediately (within the next measure interval of 1 second). """ super(Sen5xI2cCmdSetTemperatureOffsetParameters, self).__init__( command=0x60B2, tx_data=b"".join([pack(">h", offset), pack(">h", slope), pack(">H", time_constant)]), rx_length=None, read_delay=0.0, timeout=0, post_processing_time=0.02, ) class Sen5xI2cCmdGetWarmStartParameter(Sen5xI2cCmdBase): """ Get Warm Start Parameter I²C Command Gets the warm start parameter from the device. """ def __init__(self): """ Constructor. """ super(Sen5xI2cCmdGetWarmStartParameter, self).__init__( command=0x60C6, tx_data=None, rx_length=3, read_delay=0.02, timeout=0, post_processing_time=0.0, ) def interpret_response(self, data): """ Validates the CRCs of the received data from the device and returns the interpreted data. :param bytes data: Received raw bytes from the read operation. :return: Warm start behavior as a value in the range from 0 (cold start) to 65535 (warm start). :rtype: int :raise ~sensirion_i2c_driver.errors.I2cChecksumError: If a received CRC was wrong. """ # check and remove CRCs checked_data = Sen5xI2cCmdBase.interpret_response(self, data) # convert raw received data into proper data types warm_start = int(unpack(">H", checked_data[0:2])[0]) # uint16 return warm_start class Sen5xI2cCmdSetWarmStartParameter(Sen5xI2cCmdBase): """ Set Warm Start Parameter I²C Command Sets the warm start parameter for the device. .. note:: This parameter can be changed in any state of the device (and the getter immediately returns the new value), but it is applied only the next time starting a measurement, i.e. when sending a "Start Measurement" command! So the parameter needs to be set *before* a warm-start measurement is started. """ def __init__(self, warm_start): """ Constructor. :param int warm_start: Warm start behavior as a value in the range from 0 (cold start) to 65535 (warm start). The default value is 0. """ super(Sen5xI2cCmdSetWarmStartParameter, self).__init__( command=0x60C6, tx_data=b"".join([pack(">H", warm_start)]), rx_length=None, read_delay=0.0, timeout=0, post_processing_time=0.02, ) class Sen5xI2cCmdGetVocAlgorithmTuningParameters(Sen5xI2cCmdBase): """ Get VOC Algorithm Tuning Parameters I²C Command Gets the currently set tuning parameters of the VOC algorithm. """ def __init__(self): """ Constructor. 
""" super(Sen5xI2cCmdGetVocAlgorithmTuningParameters, self).__init__( command=0x60D0, tx_data=None, rx_length=18, read_delay=0.02, timeout=0, post_processing_time=0.0, ) def interpret_response(self, data): """ Validates the CRCs of the received data from the device and returns the interpreted data. :param bytes data: Received raw bytes from the read operation. :return: - index_offset (int) - VOC index representing typical (average) conditions. - learning_time_offset_hours (int) - Time constant to estimate the VOC algorithm offset from the history in hours. Past events will be forgotten after about twice the learning time. - learning_time_gain_hours (int) - Time constant to estimate the VOC algorithm gain from the history in hours. Past events will be forgotten after about twice the learning time. - gating_max_duration_minutes (int) - Maximum duration of gating in minutes (freeze of estimator during high VOC index signal). Zero disables the gating. - std_initial (int) - Initial estimate for standard deviation. Lower value boosts events during initial learning period, but may result in larger device-to-device variations. - gain_factor (int) - Gain factor to amplify or to attenuate the VOC index output. :rtype: tuple :raise ~sensirion_i2c_driver.errors.I2cChecksumError: If a received CRC was wrong. """ # check and remove CRCs checked_data = Sen5xI2cCmdBase.interpret_response(self, data) # convert raw received data into proper data types index_offset = int(unpack(">h", checked_data[0:2])[0]) # int16 learning_time_offset_hours = int(unpack(">h", checked_data[2:4])[0]) # int16 learning_time_gain_hours = int(unpack(">h", checked_data[4:6])[0]) # int16 gating_max_duration_minutes = int(unpack(">h", checked_data[6:8])[0]) # int16 std_initial = int(unpack(">h", checked_data[8:10])[0]) # int16 gain_factor = int(unpack(">h", checked_data[10:12])[0]) # int16 return index_offset, \ learning_time_offset_hours, \ learning_time_gain_hours, \ gating_max_duration_minutes, \ std_initial, \ gain_factor class Sen5xI2cCmdSetVocAlgorithmTuningParameters(Sen5xI2cCmdBase): """ Set VOC Algorithm Tuning Parameters I²C Command Sets the tuning parameters of the VOC algorithm. .. note:: This command is available only in idle mode. In measure mode, this command has no effect. In addition, it has no effect if at least one parameter is outside the specified range. """ def __init__(self, index_offset, learning_time_offset_hours, learning_time_gain_hours, gating_max_duration_minutes, std_initial, gain_factor): """ Constructor. :param int index_offset: VOC index representing typical (average) conditions. Allowed values are in range 1..250. The default value is 100. :param int learning_time_offset_hours: Time constant to estimate the VOC algorithm offset from the history in hours. Past events will be forgotten after about twice the learning time. Allowed values are in range 1..1000. The default value is 12 hours. :param int learning_time_gain_hours: Time constant to estimate the VOC algorithm gain from the history in hours. Past events will be forgotten after about twice the learning time. Allowed values are in range 1..1000. The default value is 12 hours. :param int gating_max_duration_minutes: Maximum duration of gating in minutes (freeze of estimator during high VOC index signal). Set to zero to disable the gating. Allowed values are in range 0..3000. The default value is 180 minutes. :param int std_initial: Initial estimate for standard deviation. Lower value boosts events during initial learning period, but may result in
<gh_stars>0 import logging import os import random from PIL import Image, ImageDraw import PIL import PIL.ImageFont import PIL.ImageColor from pdfminer.converter import PDFPageAggregator from pdfminer.layout import LAParams from pdfminer.layout import LTTextBoxHorizontal from pdfminer.layout import LTRect, LTChar from pdfminer.layout import LTAnon, LTComponent from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter from pdfminer.pdfinterp import PDFTextExtractionNotAllowed from pdfminer.pdfparser import PDFParser, PDFDocument from log_setup import logging_setup logger = logging.getLogger('pdf_table_parser').getChild(__name__) file_path = os.path.dirname(os.path.realpath(__file__)) path = os.path.join(file_path, '') data_path = os.path.join(path, '..', 'data') class PdfTableParser: def __init__(self, filepath): self.filepath = os.path.normpath(filepath) self.layouts = [] self.rows = [] self.headers = [] def get_page_layouts(self): with open(self.filepath, 'rb') as fp: logger.info('Opening PDF file: {file}'.format(file=self.filepath)) parser = PDFParser(fp) doc = PDFDocument() parser.set_document(doc) doc.set_parser(parser) doc.initialize("") if not doc.is_extractable: raise PDFTextExtractionNotAllowed resource_manager = PDFResourceManager() device = PDFPageAggregator(resource_manager, laparams=LAParams(heuristic_word_margin=True, word_margin=0, char_margin=0.5)) interpreter = PDFPageInterpreter(resource_manager, device) self.layouts = [] for page in doc.get_pages(): interpreter.process_page(page) self.layouts.append(device.get_result()) logger.info('Opened PDF with {n} pages'.format(n=len(self.layouts))) def parse_layouts(self): for l in self.layouts: p = LayoutTableParser(l, self.filepath) p.to_tables() self.rows += p.rows self.identify_headers() self.unique_headers() def parse_first_layouts(self): p = LayoutTableParser(self.layouts[5]) p.to_tables() def save_output(self): import csv with open(self.filepath + '.csv', 'w', newline='', encoding="utf-8") as csv_file: writer = csv.writer(csv_file, delimiter=',') if self.headers: header = [c.get_text().strip() if c is not None else '' for c in self.headers[0]['cells']] writer.writerow(header) for row in self.rows: line = [c.get_text().strip() if c is not None else '' for c in row['cells']] writer.writerow(line) def identify_headers(self): # TODO should identify headers much earlier before attempting to merge any tables logger.info('Attempting to identify headers') fonts = [] found = False for row in self.rows: for font in fonts: if (font['name'] == row['font']) and (font['size'] / row['font_size'] > 0.95) and (font['size'] / row['font_size'] < 1.05): font['count'] = font['count'] + 1 found = True if not found: fonts.append({'name': row['font'], 'size': row['font_size'], 'count': 1}) found = False logger.info('{n} fonts in pdf'.format(n=len(fonts))) header_fonts = [] for font in fonts: if font['count'] / len(self.rows) < 0.10: header_fonts.append(font) logger.info('{n} header fonts in pdf'.format(n=len(header_fonts))) headers = list(filter(lambda x: is_font_in_list(x['font'], x['font_size'], header_fonts), self.rows)) rows = list(filter(lambda x: not is_font_in_list(x['font'], x['font_size'], header_fonts), self.rows)) logger.info('{n} header rows in pdf'.format(n=len(headers))) logger.info('{n} rows in pdf'.format(n=len(rows))) self.rows = rows self.headers = headers def unique_headers(self): uniques = [] for header in self.headers: if not uniques: uniques.append(header) elif not any(map(lambda y: is_row_identical(header, y), 
uniques)): uniques.append(header) self.headers = uniques rows = list(filter(lambda x: not any(map(lambda y: is_row_identical(x, y), self.headers)), self.rows)) self.rows = rows logger.info('{n} unique headers'.format(n=len(self.headers))) for row in self.headers: line = [c.get_text() if c is not None else '' for c in row['cells']] logger.info(line) def is_row_identical(a, b): if len(a['cells']) == len(b['cells']): for n in range(len(a['cells'])): if (a['cells'][n].get_text() if a['cells'][n] is not None else '').lower().strip() != (b['cells'][n].get_text() if b['cells'][n] is not None else '').lower().strip(): # all()? return False return True else: return False class LayoutTableParser: def __init__(self, layout, filepath, debug=False): self.layout = layout self.rows = [] self.columns = [] self.colgroups = [] self.finalcols = [] self.finalcols2 = [] self.lines = [] self.texts = [] self.multi_col_boxes = [] self.drawDebug = debug self.filepath = filepath self.scale_factor = 10 self.h = int(self.layout.height * self.scale_factor) def to_tables(self): self.to_tables1() self.to_tables2() self.to_tables3() self.to_tables4() if self.drawDebug: im = Image.new('RGBA', (int(self.layout.width * self.scale_factor), int(self.layout.height * self.scale_factor)), 'white') draw = ImageDraw.Draw(im) self.draw_text(draw) for c in self.finalcols: draw.rectangle([int(c['contain'].bbox[0] * self.scale_factor), self.h - int(c['contain'].bbox[3] * self.scale_factor), int(c['contain'].bbox[2] * self.scale_factor), self.h - int(c['contain'].bbox[1] * self.scale_factor)], fill=None, outline=c['color'], width=5 * self.scale_factor) im.save('{file}.page{pn}.stage{sn}.png'.format(file=self.filepath, pn=self.layout.pageid, sn=4), "PNG") self.to_tables5() if self.drawDebug: im = Image.new('RGBA', (int(self.layout.width * self.scale_factor), int(self.layout.height * self.scale_factor)), 'white') draw = ImageDraw.Draw(im) self.draw_text(draw) for c in self.finalcols2: draw.rectangle([int(c['contain'].bbox[0] * self.scale_factor), self.h - int(c['contain'].bbox[3] * self.scale_factor), int(c['contain'].bbox[2] * self.scale_factor), self.h - int(c['contain'].bbox[1] * self.scale_factor)], fill=None, outline=c['color'], width=5 * self.scale_factor) im.save('{file}.page{pn}.stage{sn}.png'.format(file=self.filepath, pn=self.layout.pageid, sn=5), "PNG") self.to_tables6() if self.drawDebug: im = Image.new('RGBA', (int(self.layout.width * self.scale_factor), int(self.layout.height * self.scale_factor)), 'white') draw = ImageDraw.Draw(im) self.draw_text(draw) for c in self.finalcols2: draw.rectangle([int(c['contain'].bbox[0] * self.scale_factor), self.h - int(c['contain'].bbox[3] * self.scale_factor), int(c['contain'].bbox[2] * self.scale_factor), self.h - int(c['contain'].bbox[1] * self.scale_factor)], fill=None, outline=c['color'], width=5 * self.scale_factor) im.save('{file}.page{pn}.stage{sn}.png'.format(file=self.filepath, pn=self.layout.pageid, sn=6), "PNG") for c in self.multi_col_boxes: draw.rectangle([int(c.bbox[0] * self.scale_factor), self.h - int(c.bbox[3] * self.scale_factor), int(c.bbox[2] * self.scale_factor), self.h - int(c.bbox[1] * self.scale_factor)], fill=None, outline='black', width=5 * self.scale_factor) im.save('{file}.page{pn}.stage{sn}.png'.format(file=self.filepath, pn=self.layout.pageid, sn=6.1), "PNG") self.to_tables7() self.to_tables8() def draw_text(self, draw): for e in self.texts: char = e._objs[0]._objs[0] fontname = char.fontname font = PIL.ImageFont.truetype(font="arial.ttf", size=int(char.size 
* self.scale_factor)) for line in e: c = random.choice(list(PIL.ImageColor.colormap.keys())) draw.rectangle([int(line.bbox[0] * self.scale_factor), self.h - int(line.bbox[3] * self.scale_factor), int(line.bbox[2] * self.scale_factor), self.h - int(line.bbox[1] * self.scale_factor)], fill=None, outline=c, width=2 * self.scale_factor) c = random.choice(list(PIL.ImageColor.colormap.keys())) for char in line: if not isinstance(char, LTAnon): draw.text((int(char.bbox[0] * self.scale_factor), self.h - int(char.bbox[3] * self.scale_factor)), char.get_text(), fill='black', font=font) draw.rectangle([int(char.bbox[0] * self.scale_factor), self.h - int(char.bbox[3] * self.scale_factor), int(char.bbox[2] * self.scale_factor), self.h - int(char.bbox[1] * self.scale_factor)], fill=None, outline=c, width=int(0.5 * self.scale_factor)) def to_tables1(self): texts = [] rectangles = [] other = [] logger.info("Starting page {id}".format(id=self.layout.pageid)) #with open('page.png', 'r+') as imagefile: ##im = Image.new('RGBA', (int(self.layout.width * scale_factor), int(self.layout.height * scale_factor)), 'white') ##draw = ImageDraw.Draw(im) #draw.line((0, 0) + im.size, fill=128) #draw.line((0, im.size[1], im.size[0], 0), fill=128) for e in self.layout: if isinstance(e, LTTextBoxHorizontal): texts.append(e) char = e._objs[0]._objs[0] fontname = char.fontname #font = PIL.ImageFont.truetype(font="calibri.ttf", size=int(char.size * scale_factor)) #draw.text((int(e.bbox[0] * 100), h - int(e.bbox[3] * 100)), e.get_text(), fill='black', font=font) c = random.choice(list(PIL.ImageColor.colormap.keys())) ##draw.rectangle([int(e.bbox[0] * scale_factor), h - int(e.bbox[3] * scale_factor), int(e.bbox[2] * scale_factor), h - int(e.bbox[1] * scale_factor)], fill=None, outline=c, width=3 * scale_factor) logger.debug(e) elif isinstance(e, LTRect): rectangles.append(e) ##draw.rectangle([int(e.pts[0][0] * scale_factor), h - int(e.pts[2][1] * scale_factor), int(e.pts[2][0] * scale_factor), h - int(e.pts[0][1] * scale_factor)], fill=None, outline='black', width=e.linewidth) else: other.append(e) logger.debug(e) self.texts = texts # write to stdout ##im.save('page3.png', "PNG") def to_tables2(self): columns = [] # texts.sort(key=lambda x: x.width) for e in self.layout: if isinstance(e, LTTextBoxHorizontal): logger.info('Finding a column for box {i}'.format(i=e.index)) ##im2 = im.copy() ##d = ImageDraw.Draw(im2) col = None for c in columns: if (e.x1 < c['contain'].x1) and (e.x0 > c['contain'].x0): if (e.width / c['contain'].width) < 0.8: logger.info('Item too small, column may be several columns wide') else: logger.info('Item totally contained in column') logger.info('{ex1} < {cx1} and {ex0} > {cx0}'.format(ex1=e.x1, cx1=c['contain'].x1, ex0=e.x0, cx0=c['contain'].x0)) col = c col['boxes'].append(e) col['contain'].set_bbox((c['contain'].x0, min(c['contain'].y0, e.y0), c['contain'].x1, max(c['contain'].y1, e.y1))) ##d.rectangle([int(c['contain'].bbox[0] * scale_factor), h - int(c['contain'].bbox[3] * scale_factor), int(c['contain'].bbox[2] * scale_factor), h - int(c['contain'].bbox[1] * scale_factor)], fill=None, outline=c['color'], width=5 * scale_factor) break elif ((c['contain'].hoverlap(e) / c['contain'].width) > 0.9) and ((c['contain'].hoverlap(e) / c['contain'].width) < 1.1): logger.info('Item is within 10% of current col width') logger.info('Overlap of {hdist}, column width of {width}'.format(hdist=c['contain'].hoverlap(e), width=c['contain'].width)) col = c col['boxes'].append(e) 
col['contain'].set_bbox((min(c['contain'].x0, e.x0), min(c['contain'].y0, e.y0), max(c['contain'].x1, e.x1), max(c['contain'].y1, e.y1))) ##d.rectangle([int(c['contain'].bbox[0] * scale_factor), h - int(c['contain'].bbox[3] * scale_factor), int(c['contain'].bbox[2] * scale_factor), h - int(c['contain'].bbox[1] * scale_factor)], fill=None, outline=c['color'], width=5 * scale_factor) break if not col: logger.info('Creating new column') col = {'contain': LTComponent(e.bbox), 'boxes': list(e), 'color': random.choice(list(PIL.ImageColor.colormap.keys()))} columns.append(col) columns.sort(key=lambda x: x['contain'].width) ##d.rectangle([int(col['contain'].bbox[0] * scale_factor), h - int(col['contain'].bbox[3] * scale_factor), int(col['contain'].bbox[2] * scale_factor), h - int(col['contain'].bbox[1] * scale_factor)], fill=None, outline=col['color'], width=5 * scale_factor) ##im2.save('page0.{x}.png'.format(x=e.index), "PNG") self.columns = columns ##im3 = im.copy() ##draw2 = ImageDraw.Draw(im3) ##for c in columns: ## draw.rectangle([int(c['contain'].bbox[0] * scale_factor), h - int(c['contain'].bbox[3] * scale_factor), int(c['contain'].bbox[2] * scale_factor), h - int(c['contain'].bbox[1] * scale_factor)], fill=None, outline=c['color'], width=5 * scale_factor) ##im.save('page4.png', "PNG") def to_tables3(self): colgroups = [] for c in self.columns: colg = None for d in colgroups: if c is not d: if all(c['contain'].is_hoverlap(e['contain']) for e in d['cols']): d['cols'].append(c) colg = d break if not colg: colgroups.append({'contain': LTComponent(c['contain'].bbox), 'cols': [c]}) logger.info('{x} colgroups'.format(x=len(colgroups))) self.colgroups = colgroups def to_tables4(self): finalcols = [] multi_col_boxes = [] for g in self.colgroups: logger.info('colgroup with {x} columns'.format(x=len(g['cols']))) if len(g) == 0: continue elif len(g) == 1: finalcols.append(g['cols'][0]) elif all(col1['contain'].is_hoverlap(col2['contain']) for col1 in g['cols'] for col2 in g['cols']): logger.info('All cols have horizontal overlap') # all voverlap is less than the average line height # if all() x0 = min([col['contain'].x0 for col in g['cols']]) y0 = min([col['contain'].y0 for col in g['cols']]) x1 = max([col['contain'].x1 for col in g['cols']]) y1 = max([col['contain'].y1 for col in g['cols']]) g['cols'][0]['contain'].set_bbox((x0, y0, x1, y1)) for e in g['cols'][1:]: g['cols'][0]['boxes'] += e['boxes'] if all(not g['cols'][0]['contain'].is_hoverlap(col['contain']) for col in finalcols): finalcols.append(g['cols'][0]) else: boxes = list(filter(lambda x: all(not x.is_hoverlap(y['contain']) for y in finalcols), g['cols'][0]['boxes'])) if not len(boxes): logger.info('Engulfed column') for col in finalcols: if any(col['contain'].is_hoverlap(x) for x in g['cols'][0]['boxes']): col['multicol'] = True finalcols.append(g['cols'][0]) else: x0 = min([box.x0 for box in boxes]) y0 = min([box.y0 for box in boxes]) x1 = max([box.x1 for box in boxes]) y1 = max([box.y1 for box in boxes]) g['cols'][0]['contain'].set_bbox((x0, y0, x1, y1)) g['cols'][0]['boxes'] = boxes finalcols.append(g['cols'][0]) multi_col_boxes = list(filter(lambda x: any(x.is_hoverlap(y['contain']) for y in finalcols), g['cols'][0]['boxes'])) logger.info('{n} multi col boxes'.format(n=len(multi_col_boxes))) else: # multi-col logger.info('Need to split multi-col') g['cols'].sort(key=lambda x: x['contain'].width, reverse=True) #if c is not d: # if (d['contain'].x1 <= c['contain'].x1) and (d['contain'].x0 >= c['contain'].x0): # if 
((c['contain'].hoverlap(d['contain']) / c['contain'].width) < 0.9): # # solely contained column d that is less than 90% of the width of c # if c in columns: # columns.remove(c) # break # elif ((c['contain'].hoverlap(d['contain']) / c['contain'].width) > 0.9) and ((c['contain'].hoverlap(d['contain']) / c['contain'].width) < 1.1): # c['contain'].set_bbox((min(c['contain'].x0, d['contain'].x0), min(c['contain'].y0, d['contain'].y0), max(c['contain'].x1, d['contain'].x1), max(c['contain'].y1, d['contain'].y1))) # if d in columns: # columns.remove(d) self.finalcols = finalcols def to_tables5(self): finalcols2 = [] for col in self.finalcols: if 'multicol' in col and col['multicol']: continue else: finalcols2.append(col) for col in self.finalcols: if 'multicol' in col and col['multicol']: boxes = list(filter(lambda x: all(not x.is_hoverlap(y['contain'] if x is not col else True) for y in finalcols2), col['boxes'])) if not len(boxes): logger.info('Nested engulfed column, need to while with limit') else: x0 = min([box.x0 for box in boxes]) y0 = min([box.y0 for box in boxes]) x1 = max([box.x1 for box in boxes]) y1 = max([box.y1 for box in boxes]) col['contain'].set_bbox((x0, y0, x1, y1)) col['boxes'] = boxes finalcols2.append(col) multi_col_boxes = list(filter(lambda x: any(x.is_hoverlap(y['contain']) for y in finalcols2), col['boxes'])) logger.info('{n} multi col boxes'.format(n=len(multi_col_boxes))) self.finalcols2 = finalcols2 def to_tables6(self): multi_col_boxes = [] for c in self.finalcols2: c['boxes'] = [] # reacquire all text boxes that only overlap with a single column for t in self.texts: hoverlaps = 0 col = None for c in self.finalcols2: if t.hoverlap(c['contain']): hoverlaps += 1 col = c if hoverlaps == 1: col['boxes'].append(t) elif hoverlaps > 1: if t not in multi_col_boxes: multi_col_boxes.append(t) for c in self.finalcols2: if len(c['boxes']): x0 = min([box.x0 for box in c['boxes']]) y0 = min([box.y0 for box in c['boxes']]) x1 = max([box.x1 for box in c['boxes']]) y1 = max([box.y1 for box in
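# --- Hedged illustration (standalone, hypothetical data) ----------------------------
# The header detection in PdfTableParser.identify_headers() above treats fonts that
# occur in fewer than 10% of rows as "header fonts". This sketch shows the same idea
# in isolation, simplified to exact font-size matches (the original allows a ~5%
# size tolerance when comparing fonts).
def example_split_headers(rows):
    """rows: list of dicts carrying 'font' and 'font_size' keys."""
    from collections import Counter
    if not rows:
        return [], []
    counts = Counter((r['font'], r['font_size']) for r in rows)
    rare_fonts = {key for key, n in counts.items() if n / len(rows) < 0.10}
    headers = [r for r in rows if (r['font'], r['font_size']) in rare_fonts]
    body = [r for r in rows if (r['font'], r['font_size']) not in rare_fonts]
    return headers, body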
<gh_stars>1-10 r""" Contains two main base classes (for naming and sub/superSys) and their helper classes, functions, decorators. .. currentmodule:: quanguru.classes.base .. autosummary:: named qBase .. autosummary:: aliasClass keySearch aliasDict .. autosummary:: _auxiliaryClass _recurseIfList addDecorator .. |c| unicode:: U+2705 .. |x| unicode:: U+274C .. |w| unicode:: U+2000 ======================= ================== ============== ================ =============== **Function Name** **Docstrings** **Examples** **Unit Tests** **Tutorials** ======================= ================== ============== ================ =============== `named` |w| |w| |w| |c| |w| |w| |x| |w| |w| |c| |w| |w| |x| `qBase` |w| |w| |w| |c| |w| |w| |x| |w| |w| |c| |w| |w| |x| `aliasClass` |w| |w| |w| |c| |w| |w| |x| |w| |w| |c| |w| |w| |x| `keySearch` |w| |w| |w| |c| |w| |w| |x| |w| |w| |x| |w| |w| |x| `aliasDict` |w| |w| |w| |c| |w| |w| |x| |w| |w| |c| |w| |w| |x| `_auxiliaryClass` |w| |w| |w| |c| |w| |w| |x| |w| |w| |x| |w| |w| |x| `_recurseIfList` |w| |w| |w| |c| |w| |w| |x| |w| |w| |x| |w| |w| |x| `addDecorator` |w| |w| |w| |c| |w| |w| |x| |w| |w| |x| |w| |w| |x| ======================= ================== ============== ================ =============== """ from functools import wraps #import weakref import warnings import weakref from itertools import chain from typing import Hashable, Dict, Optional, List, Union, Any, Tuple, Mapping from .exceptions import raiseAttrType __all__ = [ 'qBase', 'named' ] class aliasClass: r""" aliasClass provides a flexible naming functionality for the qObjects. It is created to be used as the name attribute of qObjects and to work with the extended dictionary :class:`~aliasDict`. The default name of qObjects is assigned to be ``__name`` attribute, and the user assigned aliases for a qObject are stored in the ``__alias`` list. The string representation and hash value of an aliasClass objects is obtained from its name. """ __slots__ = ["__name", "__alias"] @raiseAttrType([str, type(None)], "name") def __init__(self, name: Optional[str] = None, alias: List = list) -> None: #pylint:disable=unsubscriptable-object self.__name: Optional[str] = name #pylint:disable=unsubscriptable-object r""" Protected name attribute of an aliasClass object, set&get through the :py:attr:`~aliasClass.name` property. Default is ``None``. It can be set to any string (which cannot be changed later, unless directly overwritting ``self._aliasClass__name``). """ #: list of aliases of an aliasClass objects, set&get through the :py:attr:`~aliasClass.alias` property self.__alias: List = [] if isinstance(alias, type) else alias if isinstance(alias, list) else [alias] @property def name(self) -> Union[str, None]: #pylint:disable=unsubscriptable-object r""" Getter of the name property, returns ``self.__name``. Setter of the name property, sets ``self.__name`` to given ``name`` provided that the ``self.__name is None`` and the given ``name`` is a string. This means that the name can only be a string and cannot be changed once set. Unless, of course, directly overwriting the protected attribute. 
Raises ------ TypeError Raised if given name is not string """ return self._aliasClass__name #pylint:disable = no-member @name.setter @raiseAttrType([str, type(None)]) def name(self, name: str) -> None: if self._aliasClass__name is None: #pylint:disable = no-member self._aliasClass__name = name #pylint:disable = no-member, assigning-non-slot else: warnings.warn("name cannot be changed") @property def alias(self) -> List: r""" Getter of the alias property, returns the alias list. Setter of the alias property, adds a new alias for the aliasClass object (if the given alias is not already in the list). """ return self._aliasClass__alias #pylint:disable = no-member @alias.setter def alias(self, ali: str) -> None: if ali not in self._aliasClass__alias: #pylint:disable = no-member self._aliasClass__alias.append(ali) #pylint:disable = no-member def __members(self) -> Tuple: r""" :returns: a tuple containing the name and all aliases """ return (self.name, *self._aliasClass__alias) #pylint:disable = no-member def _allStringSum(self) -> str: r""" Adds and returns all the strings in members """ sumStr = self.name for s in self._aliasClass__alias: sumStr += s return sumStr def __repr__(self) -> str: r""" representation of the object is equal to ``repr(self.name)``. """ return repr(self.name) def __str__(self) -> str: r""" string representation of the object is its name """ return self.name def __eq__(self, other: Union["aliasClass", str]) -> bool: #pylint:disable=unsubscriptable-object r""" Equality of any two aliasClass objects (or an aliasClass object to a string) is determined by comparing their names and all their aliases (or to given string), if at least one of them are the same (or the same as the given string), aliasClass objects (or the aliasClass object and the given string) are considereed to be equal. Parameters ---------- other : Union[aliasClass, str] aliasClass object or string to check the equality with self """ if type(other) is type(self): return any(it in self._aliasClass__members() for it in other._aliasClass__members())#pylint:disable = no-member return any(it == other for it in self._aliasClass__members()) #pylint:disable = no-member def __hash__(self) -> int: r""" Hash value of an aliasClass object is equal to hash of its name. """ return hash(self.name) def keySearch(obj: Dict, k: Any) -> Hashable: r""" Method to find a key or any other obj equal to the key in a ``dictionary.keys()``. This method is used in :class:`~aliasDict` class (extending ``dict`` class) to find the actual key when using :class:`~aliasClass` as the key, which returns equal for a specific string (its name) or any other string in its list of aliases. Parameters ---------- obj : Dict The dictionary to search the key k : Any The key to search in the dictionary (obj) :returns: the key, if the key itself or no equality is found in the dictionary keys. returns the equal key from the dictionary, if an equal key is found in the dictionary. """ # NOTE this returns the first match, meaning there can be more than one equality. Example, two string keys in the # dictionary and the given key is an aliasClass object with these keys in its members (tuple of its name and # aliasess) if k not in obj.keys(): for key in obj.keys(): if k == key: k = key break return k class aliasDict(dict): r""" Extending the dictionary class to treat the keys satisfying ``key1 == keys2`` as the same key. 
This functionality is implemented to use :class:`~aliasClass` objects as keys and to get the value by using the aliasClass object itself, its name, or any of its aliases as the key. NOTE no explicit tests for most of the extended methods, be careful in modifications. """ def __getitem__(self, k: Hashable) -> Any: r""" Gets the value from the dictionary for a given key or any of the keys that is equal to the given key. This enables to get a value using an :class:`~aliasClass` object itself, its name, or any any of it aliases. """ k = keySearch(self, k) return super().__getitem__(k) def get(self, key: Hashable, default: Optional[Any] = None) -> Any: #pylint:disable=unsubscriptable-object r""" Modified get method to be compatible with extended :meth:`~__getitem__` method. """ try: return self.__getitem__(key) except: #pylint:disable=bare-except # noqa: E722 return default def __setitem__(self, k: Hashable, v: Any) -> None: r""" Updates the value of a key in the dictionary, if the given key exists or any of the keys is equal to given key, otherwise creates an item (ie key:value pair) in the dictionary. This enables to set a value using an :class:`~aliasClass` object itself, its name, or any any of it aliases. """ # might need to overwrite update and setdefault k = keySearch(self, k) super().__setitem__(k, v) def __delitem__(self, k: Hashable) -> None: r""" Deletes the item for a given key or any of the keys that is equal to the given key. This enables to delete a value using an :class:`~aliasClass` object itself, its name, or any any of it aliases. """ k = keySearch(self, k) super().__delitem__(k) def __contains__(self, o: Hashable) -> bool: r""" Returns ``True`` if the key or any object equal to the key exists. This enables to ``return True`` for an :class:`~aliasClass` object itself, its name, or any of it aliases. """ return super().__contains__(keySearch(self, o)) #any([o == k for k in self.keys()]) def update(self, mapping: Optional[Mapping] = (), **kwargs) -> None: #pylint:disable=unsubscriptable-object r""" update method compatible with the extended get/set methods. """ if hasattr(mapping, "keys"): for k in mapping: self[k] = mapping[k] else: for
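# --- Hedged usage sketch (not part of the module above) -----------------------------
# Illustrates the alias-aware equality and dictionary behaviour described in the
# aliasClass/aliasDict docstrings. The names "qub" and "q1" are arbitrary examples.
def _example_alias_usage():
    a = aliasClass(name="qub")
    a.alias = "q1"
    assert a == "qub" and a == "q1"          # equal to its name and to any alias
    d = aliasDict({a: 42})
    assert d["qub"] == 42 and d["q1"] == 42 and d[a] == 42
    assert "q1" in d and "qub" in d
    return d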
no candidate found. The image cutout will be taken from this central area and so the "Image" tab will still have content. It will also be marked with a central red "X". The contour plot will still be produced from the cutout. .. figure:: figures/pick-contour-no-candidate.png :width: 400px :align: center :alt: Contour when no candidate found. Contour when no candidate found. All the other plots will be cleared. **The Settings Tab** .. figure:: figures/pick-settings.png :width: 400px :align: center :alt: Settings tab of Pick plugin "Settings" tab of ``Pick`` plugin. The "Settings" tab controls aspects of the search within the pick area: * The "Show Candidates" checkbox controls whether all detected sources are marked or not (as shown in the figure below). Additionally, if checked, then all the found objects are added to the pick log table when using the "Report" controls. * The "Draw type" parameter is used to choose the shape of the pick area to be drawn. * The "Radius" parameter sets the radius to be used when finding and evaluating bright peaks in the image. * The "Threshold" parameter is used to set a threshold for peak finding; if set to "None", then a reasonable default value will be chosen. * The "Min FWHM" and "Max FWHM" parameters can be used to eliminate certain sized objects from being candidates. * The "Ellipticity" parameter is used to eliminate candidates based on their asymmetry in shape. * The "Edge" parameter is used to eliminate candidates based on how close to the edge of the cutout they are. *NOTE: currently this works reliably only for non-rotated rectangular shapes.* * The "Max side" parameter is used to limit the size of the bounding box that can be used in the pick shape. Larger sizes take longer to evaluate. * The "Coordinate Base" parameter is an offset to apply to located sources. Set to "1" if you want sources pixel locations reported in a FITS-compliant manner and "0" if you prefer 0-based indexing. * The "Calc center" parameter is used to determine whether the center is calculated from FWHM fitting ("fwhm") or centroiding ("centroid"). * The "FWHM fitting" parameter is used to determine which function is is used for FWHM fitting ("gaussian" or "moffat"). The option to use "lorentz" is also available if "calc_fwhm_lib" is set to "astropy" in ``~/.ginga/plugin_Pick.cfg``. * The "Contour Interpolation" parameter is used to set the interpolation method used in rendering the background image in the "Contour" plot. * The "EE total radius" defines the radius (for encircled energy) and box half-width (for ensquared energy) in pixels where EE fraction is expected to be 1 (i.e., all the flux for a point-spread function is contained within). * The "EE sampling radius" is the radius in pixel used to sample the measured EE curves for reporting. The "Redo Pick" button will redo the search operation. It is convenient if you have changed some parameters and want to see the effect based on the current pick area without disturbing it. .. figure:: figures/pick-candidates.png :width: 600px :align: center :alt: The channel viewer when "Show Candidates" is checked. The channel viewer when "Show Candidates" is checked. 
**User Configuration** """ import threading import sys import traceback import time from collections import OrderedDict import numpy as np from ginga.gw import Widgets, Viewers from ginga.misc import Bunch from ginga.util import wcs, contour from ginga import GingaPlugin, cmap, trcalc try: from ginga.gw import Plot from ginga.util import plots have_mpl = True except ImportError: have_mpl = False region_default_width = 30 region_default_height = 30 __all__ = ['Pick'] class Pick(GingaPlugin.LocalPlugin): def __init__(self, fv, fitsimage): # superclass defines some variables for us, like logger super(Pick, self).__init__(fv, fitsimage) self.layertag = 'pick-canvas' self.pickimage = None self.pickcenter = None self.pick_qs = None self.pick_obj = None self._textlabel = 'Pick' self.contour_image = None self.contour_plot = None self.fwhm_plot = None self.radial_plot = None self.contour_interp_methods = trcalc.interpolation_methods # types of pick shapes that can be drawn self.drawtypes = ['box', 'squarebox', 'rectangle', 'circle', 'ellipse', 'freepolygon', 'polygon', ] # get Pick preferences prefs = self.fv.get_preferences() self.settings = prefs.create_category('plugin_Pick') self.settings.load(onError='silent') self.sync_preferences() self.pick_x1 = 0 self.pick_y1 = 0 self.pick_data = None self.pick_log = None self.dx = region_default_width self.dy = region_default_height # For offloading intensive calculation from graphics thread self.serialnum = 0 self.lock = threading.RLock() self.lock2 = threading.RLock() self.ev_intr = threading.Event() self._wd, self._ht = 400, 300 self._split_sizes = [self._ht, self._ht] self.last_rpt = [] self.rpt_dict = OrderedDict({}) self.rpt_cnt = 0 self.rpt_tbl = None self.rpt_mod_time = 0.0 self.rpt_wrt_time = 0.0 self.rpt_wrt_interval = self.settings.get('report_write_interval', 30.0) if self.iqcalc_lib == 'astropy': self.logger.debug('Using iqcalc_astropy') from ginga.util import iqcalc_astropy as iqcalc else: # Falls back to native self.logger.debug('Using native iqcalc') from ginga.util import iqcalc if not iqcalc.have_scipy: raise ImportError('Please install scipy to use this plugin') self.iqcalc = iqcalc.IQCalc(self.logger) self.copy_attrs = ['transforms', 'cutlevels'] if (self.settings.get('pick_cmap_name', None) is None and self.settings.get('pick_imap_name', None) is None): self.copy_attrs.append('rgbmap') self.dc = self.fv.get_draw_classes() canvas = self.dc.DrawingCanvas() canvas.enable_draw(True) canvas.enable_edit(True) canvas.set_drawtype(self.pickshape, color='cyan', linestyle='dash') canvas.set_callback('draw-event', self.draw_cb) canvas.set_callback('edit-event', self.edit_cb) canvas.add_draw_mode('move', down=self.btn_down, move=self.btn_drag, up=self.btn_up) canvas.register_for_cursor_drawing(self.fitsimage) canvas.set_surface(self.fitsimage) canvas.set_draw_mode('move') self.canvas = canvas self.have_mpl = have_mpl self.gui_up = False def sync_preferences(self): # Load various preferences self.pickcolor = self.settings.get('color_pick', 'green') self.pickshape = self.settings.get('shape_pick', 'box') if self.pickshape not in self.drawtypes: self.pickshape = 'box' self.candidate_color = self.settings.get('color_candidate', 'orange') self.quick_mode = self.settings.get('quick_mode', False) self.from_peak = self.settings.get('quick_from_peak', True) # Peak finding parameters and selection criteria self.max_side = self.settings.get('max_side', 1024) self.radius = self.settings.get('radius', 10) self.ee_total_radius = 
self.settings.get('ee_total_radius', 10.0) self.ee_sampling_radius = self.settings.get('ee_sampling_radius', 2.5) self.threshold = self.settings.get('threshold', None) self.min_fwhm = self.settings.get('min_fwhm', 1.5) self.max_fwhm = self.settings.get('max_fwhm', 50.0) self.min_ellipse = self.settings.get('min_ellipse', 0.5) self.edgew = self.settings.get('edge_width', 0.01) self.show_candidates = self.settings.get('show_candidates', False) # Report in 0- or 1-based coordinates coord_offset = self.fv.settings.get('pixel_coords_offset', 0.0) self.pixel_coords_offset = self.settings.get('pixel_coords_offset', coord_offset) self.center_algs = ['fwhm', 'centroid'] self.center_alg = self.settings.get('calc_center_alg', 'fwhm') self.fwhm_algs = ['gaussian', 'moffat'] self.iqcalc_lib = self.settings.get('calc_fwhm_lib', 'native') if self.iqcalc_lib == 'astropy': self.fwhm_algs.append('lorentz') self.fwhm_alg = self.settings.get('calc_fwhm_alg', 'gaussian') self.center_on_pick = self.settings.get('center_on_pick', False) # For controls self.delta_bg = self.settings.get('delta_bg', 0.0) self.delta_sky = self.settings.get('delta_sky', 0.0) self.delta_bright = self.settings.get('delta_bright', 0.0) # Formatting for reports self.do_record = self.settings.get('record_picks', False) columns = [("RA", 'ra_txt'), ("DEC", 'dec_txt'), ("Equinox", 'equinox'), ("X", 'x'), ("Y", 'y'), ("FWHM", 'fwhm'), ("FWHM_X", 'fwhm_x'), ("FWHM_Y", 'fwhm_y'), ("EE_circ", 'encircled_energy'), ("EE_sq", 'ensquared_energy'), ("EE_r", 'ee_sampling_radius'), ("Star Size", 'starsize'), ("Ellip", 'ellipse'), ("Background", 'background'), ("Sky Level", 'skylevel'), ("Brightness", 'brightness'), ("Time Local", 'time_local'), ("Time UT", 'time_ut'), ("RA deg", 'ra_deg'), ("DEC deg", 'dec_deg'), ] self.rpt_columns = self.settings.get('report_columns', columns) # For contour plot self.num_contours = self.settings.get('num_contours', 8) self.contour_size_max = self.settings.get('contour_size_limit', 100) self.contour_size_min = self.settings.get('contour_size_min', 30) self.contour_interpolation = self.settings.get('contour_interpolation', 'nearest') def build_gui(self, container): vtop = Widgets.VBox() vtop.set_border_width(4) box, sw, orientation = Widgets.get_oriented_box(container, orientation=self.settings.get('orientation', None)) box.set_border_width(4) box.set_spacing(2) paned = Widgets.Splitter(orientation=orientation) self.w.splitter = paned nb = Widgets.TabWidget(tabpos='bottom') self.w.nb1 = nb paned.add_widget(Widgets.hadjust(nb, orientation)) cm, im = self.fv.cm, self.fv.im # Set up "Image" tab viewer di = Viewers.CanvasView(logger=self.logger) width, height = self._wd, self._ht di.set_desired_size(width, height) di.enable_autozoom('override') di.enable_autocuts('off') di.set_zoom_algorithm('rate') di.set_zoomrate(1.6) settings = di.get_settings() settings.get_setting('zoomlevel').add_callback('set', self.zoomset, di) cmname = self.settings.get('pick_cmap_name', None) if cmname is not None: di.set_color_map(cmname) else: di.set_cmap(cm) imname = self.settings.get('pick_imap_name', None) if imname is not None: di.set_intensity_map(imname) else: di.set_imap(im) di.set_callback('none-move', self.detailxy) di.set_bg(0.4, 0.4, 0.4) # for debugging di.set_name('pickimage') di.show_mode_indicator(True) self.pickimage = di bd = di.get_bindings() bd.enable_pan(True) bd.enable_zoom(True) bd.enable_cuts(True) bd.enable_cmap(True) di.set_desired_size(width, height) p_canvas = di.get_canvas() tag = p_canvas.add(self.dc.Point(width / 
2, height / 2, 5, linewidth=1, color='red')) self.pickcenter = p_canvas.get_object_by_tag(tag) iw = Viewers.GingaViewerWidget(viewer=di) iw.resize(width, height) nb.add_widget(iw, title="Image") # Set up "Contour" tab viewer if contour.have_skimage: # Contour plot, Ginga-style ci = Viewers.CanvasView(logger=self.logger) width, height = 400, 300 ci.set_desired_size(width, height) ci.enable_autozoom('override') ci.enable_autocuts('override') ci.set_zoom_algorithm('rate') ci.set_zoomrate(1.6) ci.set_autocut_params('histogram') t_ = ci.get_settings() if self.contour_interpolation not in self.contour_interp_methods: self.contour_interpolation = 'basic' t_.set(interpolation=self.contour_interpolation) ci.set_bg(0.4, 0.4, 0.4) # for debugging ci.set_name('contour_image') self.contour_canvas = self.dc.DrawingCanvas() ci.get_canvas().add(self.contour_canvas) if cmap.has_cmap('RdYlGn_r'): ci.set_color_map('RdYlGn_r') else: ci.set_color_map('pastel') ci.show_color_bar(True) self.contour_image = ci bd = ci.get_bindings() bd.enable_pan(True) bd.enable_zoom(True) bd.enable_cuts(True) bd.enable_cmap(True) ci.set_desired_size(width, height) ci.show_mode_indicator(True) ciw = Viewers.GingaViewerWidget(viewer=ci) ciw.resize(width, height) nb.add_widget(ciw, title="Contour") if have_mpl: if not contour.have_skimage: # Contour plot self.contour_plot = plots.ContourPlot( logger=self.logger, width=width, height=height) self.contour_plot.add_axis(facecolor='black') pw = Plot.PlotWidget(self.contour_plot) pw.resize(width, height) self.contour_plot.enable(pan=True, zoom=True) self.contour_interp_methods = ('bilinear', 'nearest', 'bicubic') if self.contour_interpolation not in self.contour_interp_methods: self.contour_interpolation = 'nearest' self.contour_plot.interpolation = self.contour_interpolation nb.add_widget(pw, title="Contour") # FWHM gaussians plot self.fwhm_plot = plots.FWHMPlot(logger=self.logger, width=width, height=height) self.fwhm_plot.add_axis(facecolor='white') pw = Plot.PlotWidget(self.fwhm_plot) pw.resize(width, height) nb.add_widget(pw, title="FWHM") # Radial profile plot self.radial_plot = plots.RadialPlot(logger=self.logger, width=width, height=height) self.radial_plot.add_axis(facecolor='white') pw = Plot.PlotWidget(self.radial_plot) pw.resize(width, height) nb.add_widget(pw,
in channels])[sel] digital_min = np.array([float(fid.read(8).decode()) for ch in channels])[sel] digital_max = np.array([float(fid.read(8).decode()) for ch in channels])[sel] prefiltering = [fid.read(80).decode().strip(' \x00') for ch in channels][:-1] highpass = np.ravel([re.findall(r'HP:\s+(\w+)', filt) for filt in prefiltering]) lowpass = np.ravel([re.findall(r'LP:\s+(\w+)', filt) for filt in prefiltering]) # number of samples per record n_samps = np.array([int(fid.read(8).decode()) for ch in channels]) # Populate edf_info edf_info.update( ch_names=ch_names, data_offset=header_nbytes, digital_max=digital_max, digital_min=digital_min, highpass=highpass, sel=sel, lowpass=lowpass, meas_date=meas_date, n_records=n_records, n_samps=n_samps, nchan=nchan, subject_info=patient, physical_max=physical_max, physical_min=physical_min, record_length=record_length, subtype=subtype, tal_idx=tal_idx) fid.read(32 * nchan).decode() # reserved assert fid.tell() == header_nbytes fid.seek(0, 2) n_bytes = fid.tell() n_data_bytes = n_bytes - header_nbytes total_samps = (n_data_bytes // 3 if subtype == 'bdf' else n_data_bytes // 2) read_records = total_samps // np.sum(n_samps) if n_records != read_records: warn('Number of records from the header does not match the file ' 'size (perhaps the recording was not stopped before exiting).' ' Inferring from the file size.') edf_info['n_records'] = n_records = read_records if subtype == 'bdf': edf_info['dtype_byte'] = 3 # 24-bit (3 byte) integers edf_info['dtype_np'] = np.uint8 else: edf_info['dtype_byte'] = 2 # 16-bit (2 byte) integers edf_info['dtype_np'] = np.int16 return edf_info, orig_units def _read_gdf_header(fname, exclude): """Read GDF 1.x and GDF 2.x header info.""" edf_info = dict() events = None with open(fname, 'rb') as fid: version = fid.read(8).decode() gdftype_np = (None, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64, None, None, None, None, None, None, None, np.float32, np.float64) gdftype_byte = [np.dtype(x).itemsize if x is not None else 0 for x in gdftype_np] assert sum(gdftype_byte) == 42 edf_info['type'] = edf_info['subtype'] = version[:3] edf_info['number'] = float(version[4:]) meas_date = DATE_NONE # GDF 1.x # --------------------------------------------------------------------- if edf_info['number'] < 1.9: # patient ID pid = fid.read(80).decode('latin-1') pid = pid.split(' ', 2) patient = {} if len(pid) >= 2: patient['id'] = pid[0] patient['name'] = pid[1] # Recording ID meas_id = {} meas_id['recording_id'] = fid.read(80).decode().strip(' \x00') # date tm = fid.read(16).decode().strip(' \x00') try: if tm[14:16] == ' ': tm = tm[:14] + '00' + tm[16:] date = datetime.datetime(int(tm[0:4]), int(tm[4:6]), int(tm[6:8]), int(tm[8:10]), int(tm[10:12]), int(tm[12:14]), int(tm[14:16]) * pow(10, 4)) meas_date = (calendar.timegm(date.utctimetuple()), 0) except Exception: pass header_nbytes = np.fromfile(fid, np.int64, 1)[0] meas_id['equipment'] = np.fromfile(fid, np.uint8, 8)[0] meas_id['hospital'] = np.fromfile(fid, np.uint8, 8)[0] meas_id['technician'] = np.fromfile(fid, np.uint8, 8)[0] fid.seek(20, 1) # 20bytes reserved n_records = np.fromfile(fid, np.int64, 1)[0] # record length in seconds record_length = np.fromfile(fid, np.uint32, 2) if record_length[0] == 0: record_length[0] = 1. warn('Header information is incorrect for record length. 
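# Worked sketch of the record-count inference above; the numbers are invented for illustration and
# assume a BDF file (24-bit, i.e. 3-byte, samples).
header_nbytes = 4608                  # size of the header that was just parsed
n_bytes = 4608 + 3 * 1536000          # total file size as reported by fid.tell()
n_samps = [256, 256, 512]             # samples per record for the selected channels
subtype = 'bdf'                       # 'bdf' -> 3 bytes/sample, otherwise 2 bytes/sample

n_data_bytes = n_bytes - header_nbytes
total_samps = n_data_bytes // 3 if subtype == 'bdf' else n_data_bytes // 2
read_records = total_samps // sum(n_samps)    # 1536000 // 1024 == 1500 records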
' 'Default record length set to 1.') nchan = np.fromfile(fid, np.uint32, 1)[0] channels = list(range(nchan)) ch_names = [fid.read(16).decode('latin-1').strip(' \x00') for ch in channels] fid.seek(80 * len(channels), 1) # transducer units = [fid.read(8).decode('latin-1').strip(' \x00') for ch in channels] exclude = _find_exclude_idx(ch_names, exclude) sel = list() for i, unit in enumerate(units): if unit[:2] == 'uV': units[i] = 1e-6 else: units[i] = 1 sel.append(i) ch_names = [ch_names[idx] for idx in sel] physical_min = np.fromfile(fid, np.float64, len(channels)) physical_max = np.fromfile(fid, np.float64, len(channels)) digital_min = np.fromfile(fid, np.int64, len(channels)) digital_max = np.fromfile(fid, np.int64, len(channels)) prefiltering = [fid.read(80).decode().strip(' \x00') for ch in channels][:-1] highpass = np.ravel([re.findall(r'HP:\s+(\w+)', filt) for filt in prefiltering]) lowpass = np.ravel([re.findall('LP:\\s+(\\w+)', filt) for filt in prefiltering]) # n samples per record n_samps = np.fromfile(fid, np.int32, len(channels)) # channel data type dtype = np.fromfile(fid, np.int32, len(channels)) # total number of bytes for data bytes_tot = np.sum([gdftype_byte[t] * n_samps[i] for i, t in enumerate(dtype)]) # Populate edf_info edf_info.update( bytes_tot=bytes_tot, ch_names=ch_names, data_offset=header_nbytes, digital_min=digital_min, digital_max=digital_max, dtype_byte=[gdftype_byte[t] for t in dtype], dtype_np=[gdftype_np[t] for t in dtype], exclude=exclude, highpass=highpass, sel=sel, lowpass=lowpass, meas_date=meas_date, meas_id=meas_id, n_records=n_records, n_samps=n_samps, nchan=nchan, subject_info=patient, physical_max=physical_max, physical_min=physical_min, record_length=record_length, units=units) fid.seek(32 * edf_info['nchan'], 1) # reserved assert fid.tell() == header_nbytes # Event table # ----------------------------------------------------------------- etp = header_nbytes + n_records * edf_info['bytes_tot'] # skip data to go to event table fid.seek(etp) etmode = np.fromfile(fid, np.uint8, 1)[0] if etmode in (1, 3): sr = np.fromfile(fid, np.uint8, 3) event_sr = sr[0] for i in range(1, len(sr)): event_sr = event_sr + sr[i] * 2 ** (i * 8) n_events = np.fromfile(fid, np.uint32, 1)[0] pos = np.fromfile(fid, np.uint32, n_events) - 1 # 1-based inds typ = np.fromfile(fid, np.uint16, n_events) if etmode == 3: chn = np.fromfile(fid, np.uint16, n_events) dur = np.fromfile(fid, np.uint32, n_events) else: chn = np.zeros(n_events, dtype=np.int32) dur = np.ones(n_events, dtype=np.uint32) np.clip(dur, 1, np.inf, out=dur) events = [n_events, pos, typ, chn, dur] # GDF 2.x # --------------------------------------------------------------------- else: # FIXED HEADER handedness = ('Unknown', 'Right', 'Left', 'Equal') gender = ('Unknown', 'Male', 'Female') scale = ('Unknown', 'No', 'Yes', 'Corrected') # date pid = fid.read(66).decode() pid = pid.split(' ', 2) patient = {} if len(pid) >= 2: patient['id'] = pid[0] patient['name'] = pid[1] fid.seek(10, 1) # 10bytes reserved # Smoking / Alcohol abuse / drug abuse / medication sadm = np.fromfile(fid, np.uint8, 1)[0] patient['smoking'] = scale[sadm % 4] patient['alcohol_abuse'] = scale[(sadm >> 2) % 4] patient['drug_abuse'] = scale[(sadm >> 4) % 4] patient['medication'] = scale[(sadm >> 6) % 4] patient['weight'] = np.fromfile(fid, np.uint8, 1)[0] if patient['weight'] == 0 or patient['weight'] == 255: patient['weight'] = None patient['height'] = np.fromfile(fid, np.uint8, 1)[0] if patient['height'] == 0 or patient['height'] == 255: 
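# The event-table sample rate above is stored as a 3-byte little-endian unsigned integer;
# the accumulation loop over ``sr`` is equivalent to this sketch (example bytes are invented).
import numpy as np

sr = np.array([0x00, 0x01, 0x00], dtype=np.uint8)
event_sr = int(sr[0]) + int(sr[1]) * 2 ** 8 + int(sr[2]) * 2 ** 16   # -> 256
assert event_sr == int.from_bytes(sr.tobytes(), 'little')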
patient['height'] = None # Gender / Handedness / Visual Impairment ghi = np.fromfile(fid, np.uint8, 1)[0] patient['sex'] = gender[ghi % 4] patient['handedness'] = handedness[(ghi >> 2) % 4] patient['visual'] = scale[(ghi >> 4) % 4] # Recording identification meas_id = {} meas_id['recording_id'] = fid.read(64).decode().strip(' \x00') vhsv = np.fromfile(fid, np.uint8, 4) loc = {} if vhsv[3] == 0: loc['vertpre'] = 10 * int(vhsv[0] >> 4) + int(vhsv[0] % 16) loc['horzpre'] = 10 * int(vhsv[1] >> 4) + int(vhsv[1] % 16) loc['size'] = 10 * int(vhsv[2] >> 4) + int(vhsv[2] % 16) else: loc['vertpre'] = 29 loc['horzpre'] = 29 loc['size'] = 29 loc['version'] = 0 loc['latitude'] = \ float(np.fromfile(fid, np.uint32, 1)[0]) / 3600000 loc['longitude'] = \ float(np.fromfile(fid, np.uint32, 1)[0]) / 3600000 loc['altitude'] = float(np.fromfile(fid, np.int32, 1)[0]) / 100 meas_id['loc'] = loc date = np.fromfile(fid, np.uint64, 1)[0] if date != 0: date = datetime.datetime(1, 1, 1) + \ datetime.timedelta(date * pow(2, -32) - 367) meas_date = (calendar.timegm(date.utctimetuple()), 0) birthday = np.fromfile(fid, np.uint64, 1).tolist()[0] if birthday == 0: birthday = datetime.datetime(1, 1, 1) else: birthday = (datetime.datetime(1, 1, 1) + datetime.timedelta(birthday * pow(2, -32) - 367)) patient['birthday'] = birthday if patient['birthday'] != datetime.datetime(1, 1, 1, 0, 0): today = datetime.datetime.today() patient['age'] = today.year - patient['birthday'].year today = today.replace(year=patient['birthday'].year) if today < patient['birthday']: patient['age'] -= 1 else: patient['age'] = None header_nbytes = np.fromfile(fid, np.uint16, 1)[0] * 256 fid.seek(6, 1) # 6 bytes reserved meas_id['equipment'] = np.fromfile(fid, np.uint8, 8) meas_id['ip'] = np.fromfile(fid, np.uint8, 6) patient['headsize'] = np.fromfile(fid, np.uint16, 3) patient['headsize'] = np.asarray(patient['headsize'], np.float32) patient['headsize'] = np.ma.masked_array( patient['headsize'], np.equal(patient['headsize'], 0), None).filled() ref = np.fromfile(fid, np.float32, 3) gnd = np.fromfile(fid, np.float32, 3) n_records = np.fromfile(fid, np.int64, 1)[0] # record length in seconds record_length = np.fromfile(fid, np.uint32, 2) if record_length[0] == 0: record_length[0] = 1. warn('Header information is incorrect for record length. ' 'Default record length set to 1.') nchan = np.fromfile(fid, np.uint16, 1)[0] fid.seek(2, 1) # 2bytes reserved # Channels (variable header) channels = list(range(nchan)) ch_names = [fid.read(16).decode().strip(' \x00') for ch in channels] exclude = _find_exclude_idx(ch_names, exclude) fid.seek(80 * len(channels), 1) # reserved space fid.seek(6 * len(channels), 1) # phys_dim, obsolete """The Physical Dimensions are encoded as int16, according to: - Units codes : https://sourceforge.net/p/biosig/svn/HEAD/tree/trunk/biosig/doc/units.csv - Decimal factors codes: https://sourceforge.net/p/biosig/svn/HEAD/tree/trunk/biosig/doc/DecimalFactors.txt """ # noqa units = np.fromfile(fid, np.uint16, len(channels)).tolist() unitcodes = np.array(units[:]) sel = list() for i, unit in enumerate(units): if unit == 4275: # microvolts units[i] = 1e-6 elif unit == 512: # dimensionless units[i] = 1 elif unit == 0: units[i] = 1 # unrecognized else: warn('Unsupported physical dimension for channel %d ' '(assuming dimensionless). Please contact the ' 'MNE-Python developers for support.' 
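# The GDF 2.x timestamps decoded above are 64-bit fixed-point day counts: the upper 32 bits hold
# whole days (offset by 367 days relative to 0001-01-01) and the lower 32 bits a fraction of a day.
# A small round-trip sketch; the encoder is an illustrative inverse, not something the reader uses.
import datetime


def gdf2_time_to_datetime(raw):
    """Decode a raw 64-bit GDF 2.x timestamp, mirroring the expression used above."""
    return datetime.datetime(1, 1, 1) + datetime.timedelta(raw * pow(2, -32) - 367)


def datetime_to_gdf2_time(dt):
    """Encode a datetime as a 64-bit GDF 2.x timestamp (sub-microsecond rounding ignored)."""
    days = (dt - datetime.datetime(1, 1, 1)) / datetime.timedelta(days=1) + 367
    return int(round(days * 2 ** 32))


dt = datetime.datetime(2020, 6, 1, 12, 30)
roundtrip = gdf2_time_to_datetime(datetime_to_gdf2_time(dt))
assert abs(roundtrip - dt) < datetime.timedelta(seconds=1)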
% i) units[i] = 1 sel.append(i) ch_names = [ch_names[idx] for idx in sel] physical_min = np.fromfile(fid, np.float64, len(channels)) physical_max = np.fromfile(fid, np.float64, len(channels)) digital_min = np.fromfile(fid, np.float64, len(channels)) digital_max = np.fromfile(fid, np.float64, len(channels)) fid.seek(68 * len(channels), 1) # obsolete lowpass = np.fromfile(fid, np.float32, len(channels)) highpass =
maximum number of bytes to read :return: The string read. """ if self._from_ssl is None: raise TypeError("Connection sock was not None") if not isinstance(bufsiz, int): raise TypeError("bufsiz must be an integer") buf = _no_zero_allocator("char[]", bufsiz) result = _lib.BIO_read(self._from_ssl, buf, bufsiz) if result <= 0: self._handle_bio_errors(self._from_ssl, result) return _ffi.buffer(buf, result)[:] def bio_write(self, buf): """ If the Connection was created with a memory BIO, this method can be used to add bytes to the read end of that memory BIO. The Connection can then read the bytes (for example, in response to a call to :meth:`recv`). :param buf: The string to put into the memory BIO. :return: The number of bytes written """ buf = _text_to_bytes_and_warn("buf", buf) if self._into_ssl is None: raise TypeError("Connection sock was not None") with _ffi.from_buffer(buf) as data: result = _lib.BIO_write(self._into_ssl, data, len(data)) if result <= 0: self._handle_bio_errors(self._into_ssl, result) return result def renegotiate(self): """ Renegotiate the session. :return: True if the renegotiation can be started, False otherwise :rtype: bool """ if not self.renegotiate_pending(): _openssl_assert(_lib.SSL_renegotiate(self._ssl) == 1) return True return False def do_handshake(self): """ Perform an SSL handshake (usually called after :meth:`renegotiate` or one of :meth:`set_accept_state` or :meth:`set_connect_state`). This can raise the same exceptions as :meth:`send` and :meth:`recv`. :return: None. """ result = _lib.SSL_do_handshake(self._ssl) self._raise_ssl_error(self._ssl, result) def renegotiate_pending(self): """ Check if there's a renegotiation in progress, it will return False once a renegotiation is finished. :return: Whether there's a renegotiation in progress :rtype: bool """ return _lib.SSL_renegotiate_pending(self._ssl) == 1 def total_renegotiations(self): """ Find out the total number of renegotiations. :return: The number of renegotiations. :rtype: int """ return _lib.SSL_total_renegotiations(self._ssl) def connect(self, addr): """ Call the :meth:`connect` method of the underlying socket and set up SSL on the socket, using the :class:`Context` object supplied to this :class:`Connection` object at creation. :param addr: A remote address :return: What the socket's connect method returns """ _lib.SSL_set_connect_state(self._ssl) return self._socket.connect(addr) def connect_ex(self, addr): """ Call the :meth:`connect_ex` method of the underlying socket and set up SSL on the socket, using the Context object supplied to this Connection object at creation. Note that if the :meth:`connect_ex` method of the socket doesn't return 0, SSL won't be initialized. :param addr: A remove address :return: What the socket's connect_ex method returns """ connect_ex = self._socket.connect_ex self.set_connect_state() return connect_ex(addr) def accept(self): """ Call the :meth:`accept` method of the underlying socket and set up SSL on the returned socket, using the Context object supplied to this :class:`Connection` object at creation. :return: A *(conn, addr)* pair where *conn* is the new :class:`Connection` object created, and *address* is as returned by the socket's :meth:`accept`. """ client, addr = self._socket.accept() conn = Connection(self._context, client) conn.set_accept_state() return (conn, addr) def DTLSv1_listen(self): """ Call the OpenSSL function DTLSv1_listen on this connection. See the OpenSSL manual for more details. 
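# The memory-BIO methods above (bio_write()/bio_read()) are what you use when the TLS engine is
# driven over a transport you manage yourself. A sketch of the usual pump loop follows; the
# Context setup is an assumption (the method constant and certificate configuration depend on
# your pyOpenSSL version), and there is no real peer here.
from OpenSSL import SSL

ctx = SSL.Context(SSL.TLS_METHOD)     # older releases use e.g. SSL.SSLv23_METHOD
conn = SSL.Connection(ctx, None)      # socket=None -> the Connection is backed by memory BIOs
conn.set_connect_state()


def pump(conn, incoming=b""):
    """Feed bytes received from the peer in; return bytes that must be sent out."""
    if incoming:
        conn.bio_write(incoming)
    try:
        conn.do_handshake()
    except SSL.WantReadError:
        pass                          # handshake needs more data from the peer
    out = b""
    try:
        while True:
            out += conn.bio_read(4096)   # drain records destined for the peer
    except SSL.WantReadError:
        pass                          # memory BIO is empty
    return out


first_flight = pump(conn)             # e.g. the ClientHello bytes to hand to your own transport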
:return: None """ # Possible future extension: return the BIO_ADDR in some form. bio_addr = _lib.BIO_ADDR_new() try: result = _lib.DTLSv1_listen(self._ssl, bio_addr) finally: _lib.BIO_ADDR_free(bio_addr) # DTLSv1_listen is weird. A zero return value means 'didn't find a # ClientHello with valid cookie, but keep trying'. So basically # WantReadError. But it doesn't work correctly with _raise_ssl_error. # So we raise it manually instead. if self._cookie_generate_helper is not None: self._cookie_generate_helper.raise_if_problem() if self._cookie_verify_helper is not None: self._cookie_verify_helper.raise_if_problem() if result == 0: raise WantReadError() if result < 0: self._raise_ssl_error(self._ssl, result) def bio_shutdown(self): """ If the Connection was created with a memory BIO, this method can be used to indicate that *end of file* has been reached on the read end of that memory BIO. :return: None """ if self._from_ssl is None: raise TypeError("Connection sock was not None") _lib.BIO_set_mem_eof_return(self._into_ssl, 0) def shutdown(self): """ Send the shutdown message to the Connection. :return: True if the shutdown completed successfully (i.e. both sides have sent closure alerts), False otherwise (in which case you call :meth:`recv` or :meth:`send` when the connection becomes readable/writeable). """ result = _lib.SSL_shutdown(self._ssl) if result < 0: self._raise_ssl_error(self._ssl, result) elif result > 0: return True else: return False def get_cipher_list(self): """ Retrieve the list of ciphers used by the Connection object. :return: A list of native cipher strings. """ ciphers = [] for i in count(): result = _lib.SSL_get_cipher_list(self._ssl, i) if result == _ffi.NULL: break ciphers.append(_ffi.string(result).decode("utf-8")) return ciphers def get_client_ca_list(self): """ Get CAs whose certificates are suggested for client authentication. :return: If this is a server connection, the list of certificate authorities that will be sent or has been sent to the client, as controlled by this :class:`Connection`'s :class:`Context`. If this is a client connection, the list will be empty until the connection with the server is established. .. versionadded:: 0.10 """ ca_names = _lib.SSL_get_client_CA_list(self._ssl) if ca_names == _ffi.NULL: # TODO: This is untested. return [] result = [] for i in range(_lib.sk_X509_NAME_num(ca_names)): name = _lib.sk_X509_NAME_value(ca_names, i) copy = _lib.X509_NAME_dup(name) _openssl_assert(copy != _ffi.NULL) pyname = X509Name.__new__(X509Name) pyname._name = _ffi.gc(copy, _lib.X509_NAME_free) result.append(pyname) return result def makefile(self, *args, **kwargs): """ The makefile() method is not implemented, since there is no dup semantics for SSL connections :raise: NotImplementedError """ raise NotImplementedError( "Cannot make file object of OpenSSL.SSL.Connection" ) def get_app_data(self): """ Retrieve application data as set by :meth:`set_app_data`. :return: The application data """ return self._app_data def set_app_data(self, data): """ Set application data :param data: The application data :return: None """ self._app_data = data def get_shutdown(self): """ Get the shutdown state of the Connection. :return: The shutdown state, a bitvector of SENT_SHUTDOWN, RECEIVED_SHUTDOWN. """ return _lib.SSL_get_shutdown(self._ssl) def set_shutdown(self, state): """ Set the shutdown state of the Connection. :param state: bitvector of SENT_SHUTDOWN, RECEIVED_SHUTDOWN. 
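# shutdown() above returns False until the peer's close_notify has also been processed, so a
# graceful close is usually a two-step affair. A rough sketch, assuming a blocking, socket-backed
# Connection; ``_graceful_close`` is an illustrative helper name, not part of the API.
from OpenSSL import SSL


def _graceful_close(conn):
    if conn.shutdown():               # True -> both close_notify alerts already exchanged
        return
    try:
        conn.recv(4096)               # wait for the peer's close_notify
    except SSL.ZeroReturnError:
        pass                          # peer closed the TLS session cleanly
    conn.shutdown()                   # second call should now report completion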
:return: None """ if not isinstance(state, int): raise TypeError("state must be an integer") _lib.SSL_set_shutdown(self._ssl, state) def get_state_string(self): """ Retrieve a verbose string detailing the state of the Connection. :return: A string representing the state :rtype: bytes """ return _ffi.string(_lib.SSL_state_string_long(self._ssl)) def server_random(self): """ Retrieve the random value used with the server hello message. :return: A string representing the state """ session = _lib.SSL_get_session(self._ssl) if session == _ffi.NULL: return None length = _lib.SSL_get_server_random(self._ssl, _ffi.NULL, 0) _openssl_assert(length > 0) outp = _no_zero_allocator("unsigned char[]", length) _lib.SSL_get_server_random(self._ssl, outp, length) return _ffi.buffer(outp, length)[:] def client_random(self): """ Retrieve the random value used with the client hello message. :return: A string representing the state """ session = _lib.SSL_get_session(self._ssl) if session == _ffi.NULL: return None length = _lib.SSL_get_client_random(self._ssl, _ffi.NULL, 0) _openssl_assert(length > 0) outp = _no_zero_allocator("unsigned char[]", length) _lib.SSL_get_client_random(self._ssl, outp, length) return _ffi.buffer(outp, length)[:] def master_key(self): """ Retrieve the value of the master key for this session. :return: A string representing the state """ session = _lib.SSL_get_session(self._ssl) if session == _ffi.NULL: return None length = _lib.SSL_SESSION_get_master_key(session, _ffi.NULL, 0) _openssl_assert(length > 0) outp = _no_zero_allocator("unsigned char[]", length) _lib.SSL_SESSION_get_master_key(session, outp, length) return _ffi.buffer(outp, length)[:] def export_keying_material(self, label, olen, context=None): """ Obtain keying material for application use. :param: label - a disambiguating label string as described in RFC 5705 :param: olen - the length of the exported key material in bytes :param: context - a per-association context value :return: the exported key material bytes or None """ outp = _no_zero_allocator("unsigned char[]", olen) context_buf = _ffi.NULL context_len = 0 use_context = 0 if context is not None: context_buf = context context_len = len(context) use_context = 1 success = _lib.SSL_export_keying_material( self._ssl, outp, olen, label, len(label), context_buf, context_len, use_context, ) _openssl_assert(success == 1) return _ffi.buffer(outp, olen)[:] def sock_shutdown(self, *args, **kwargs): """ Call the :meth:`shutdown` method of the underlying socket. See :manpage:`shutdown(2)`. :return: What the socket's shutdown() method returns """ return self._socket.shutdown(*args, **kwargs) def get_certificate(self): """ Retrieve the local certificate (if any) :return: The local certificate """ cert = _lib.SSL_get_certificate(self._ssl) if cert != _ffi.NULL: _lib.X509_up_ref(cert) return X509._from_raw_x509_ptr(cert) return None def get_peer_certificate(self): """ Retrieve the other side's certificate (if any) :return: The peer's certificate """ cert = _lib.SSL_get_peer_certificate(self._ssl) if cert != _ffi.NULL: return X509._from_raw_x509_ptr(cert) return None @staticmethod def _cert_stack_to_list(cert_stack): """ Internal helper to convert a STACK_OF(X509) to a list of X509 instances. """ result = [] for i in range(_lib.sk_X509_num(cert_stack)): cert
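# export_keying_material() above implements RFC 5705 exporters: both peers derive the same bytes
# from the session when they use the same label and context. A short usage sketch, assuming
# ``conn`` is an already-established Connection.
label = b"EXPERIMENTAL my application"          # disambiguating label; choose your own
ikm = conn.export_keying_material(label, 32, context=b"session-binding")
assert len(ikm) == 32
# The 32 bytes can be used, for example, for channel binding between the application peers.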
return self._columns def _get_attrs_simple(self): return ['title'] def _get_attrs_complex_list(self): return ['columns'] def get_column_by_id(self, id_): for col in self.columns: if col.id == id_: return col return None def add_column(self, column): """ Add a column to the table :param column: (Column instance) """ if not isinstance(column, Column): raise TypeError( "Got type {x}. Expected Column type.".format(x=type(column))) BaseReportElement.is_unique(self, column.id) self._columns.append(column) def append_data(self, column_index, item): """ This should be deprecated in favor of `add_data_by_column_id`. Append datum to a column by column index :param column_index: (int) Index into internal column list :param item: (float, str) data item. """ if column_index < len(self._columns): self._columns[column_index].values.append(item) else: raise IndexError( "Unable to find index {i} in columns.".format(i=column_index)) def add_data_by_column_id(self, column_id, value): """Add a value to column. :param column_id: (str) Column id :param value: (float, str, int) """ if column_id in [c.id for c in self.columns]: # _columns should really be a dict # self._columns[column_id].values.append(value) for column in self.columns: if column_id == column.id: column.values.append(value) else: raise KeyError("Unable to Column with id '{i}' to assign value {v}".format( i=column_id, v=value)) @staticmethod def merge(tables): table_id = tables[0].id table_title = tables[0].title column_ids = sorted([col.id for col in tables[0].columns]) col_collisions = {col_id: [] for col_id in column_ids} for table in tables: assert table.id == table_id assert table.title == table_title assert sorted([col.id for col in table.columns]) == column_ids for col in table.columns: col_collisions[col.id].append(col) columns = {} for col_id, cols in col_collisions.items(): assert len(cols) == len(tables) columns[col_id] = Column.merge(cols) # order by table[0]'s column order: columns = [columns[col.id] for col in tables[0].columns] return Table(table_id, table_title, columns=columns) def to_csv(self, file_name, delimiter=','): if len(self.columns) == 0: return "" for column in self.columns: if len(column.values) != len(self.columns[0].values): raise ValueError("Column lengths differ ({i} versus {j}".format( i=len(column.values), j=len(self.columns[0].values))) with open(file_name, "w") as csv_out: writer = csv.writer(csv_out, delimiter=delimiter, lineterminator="\n") writer.writerow([c.header for c in self.columns]) for i in range(len(self.columns[0].values)): writer.writerow([str(c.values[i]) for c in self.columns]) def to_columns_d(self): """ Returns the columns as a list of dicts """ items = [] if self.columns: for i in range(self.columns[0].nvalues): dx = {} for c in self.columns: dx[c.id] = c.values[i] items.append(dx) return items class Column(BaseReportElement): """ A column consists of an id, header, and list of values. """ def __init__(self, id_, header=None, values=()): """ :param id_: (str) :param header: (str, None) Header of Column. 
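# Short usage sketch of the Table/Column API above. The import path is an assumption and the data
# are invented; add_data_by_column_id() appends one value to the named column per call.
from pbcommand.models.report import Table, Column

cols = [Column("movie", header="Movie"), Column("n_reads", header="Number of Reads")]
table = Table("read_stats", "Read Statistics", columns=cols)

for movie, n in [("m54006_1", 12345), ("m54006_2", 23456)]:
    table.add_data_by_column_id("movie", movie)
    table.add_data_by_column_id("n_reads", n)

table.to_csv("read_stats.csv")        # header row plus one row per value index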
""" BaseReportElement.__init__(self, id_) self._id = id_ self._header = header self._values = list(values) def __repr__(self): _d = dict(k=self.__class__.__name__, i=self.id, h=self.header, n=self.nvalues) return "<{k} id:{i} header:{h} nvalues:{n} >".format(**_d) @property def id(self): return self._id @property def header(self): return self._header @property def nvalues(self): return len(self.values) @property def values(self): return self._values def _get_attrs_simple(self): return ['header', 'values'] def _get_attrs_complex_list(self): return [] @staticmethod def merge(columns): column_id = columns[0].id column_header = columns[0].header values = [] for col in columns: assert col.id == column_id assert col.header == column_header values.extend(col.values) return Column(column_id, column_header, values=values) class Report(BaseReportElement): """ A report is a container for attributes, plotGroups, and tables. It can be serialized to json. """ def __init__(self, id_, title=None, tables=(), attributes=(), plotgroups=(), dataset_uuids=(), uuid=None, tags=()): """ :param id_: (str) Should be a string that identifies the report, like 'adapter'. :param title: Display name of report Defaults to the Report+id if None (added in 0.3.9) :param tables: (list of table instances) :param attributes: (list of attribute instances) :param plotgroups: (list of plot group instances) :param dataset_uuids: list[string] DataSet uuids of files used to generate the report :param uuid: the unique identifier for the Report :param tags: a list of strings """ BaseReportElement.__init__(self, id_) self._attributes = [] self._plotgroups = [] self._tables = [] self.title = "Report {i}".format(i=self.id) if title is None else title # FIXME(mkocher)(2016-3-30) Add validation to make sure it's a well formed value # this needs to be required self.uuid = uuid if uuid is not None else str(U.uuid4()) if tables: for table in tables: self.add_table(table) if attributes: for attr in attributes: self.add_attribute(attr) if plotgroups: for plotgroup in plotgroups: self.add_plotgroup(plotgroup) # Datasets that self._dataset_uuids = dataset_uuids self.tags = tags @property def dataset_uuids(self): return self._dataset_uuids def add_attribute(self, attribute): """Add an attribute to the report :param attribute: (Attribute instance) """ if not isinstance(attribute, Attribute): TypeError("Got type {x}. Expected Attribute type.".format( x=type(attribute))) BaseReportElement.is_unique(self, attribute.id) self._attributes.append(attribute) def add_plotgroup(self, plotgroup): """ Add a plotgroup to the report """ if not isinstance(plotgroup, PlotGroup): TypeError("Got type {x}. 
Expected Attribute type.".format( x=type(plotgroup))) BaseReportElement.is_unique(self, plotgroup.id) self._plotgroups.append(plotgroup) def add_table(self, table): """ Add a table to the report """ BaseReportElement.is_unique(self, table.id) self._tables.append(table) def __repr__(self): t = ",".join(self.tags) if self.tags else "" _d = dict(k=self.__class__.__name__, i=self.id, n=self.title, a=len(self.attributes), p=len(self.plotGroups), t=len(self.tables), u=self.uuid, g=t) return "<{k} id:{i} title:{n} uuid:{u} nattributes:{a} nplot_groups:{p} ntables:{t} tags:{g} >".format(**_d) @property def attributes(self): return self._attributes @property def plotGroups(self): return self._plotgroups @property def tables(self): return self._tables def _get_attrs_simple(self): return [] def _get_attrs_complex_list(self): return ['attributes', 'plotGroups', 'tables'] def get_attribute_by_id(self, id_): """Get an attribute by id. The id should NOT contain the root report id :returns: (None, Attribute) Example: report.get_attribute_by_id('nmovies') *NOT* report.get_attribute_by_id('overview.nmovies') """ for attr in self.attributes: if attr.id == id_: return attr return None def get_table_by_id(self, id_): for table in self.tables: if table.id == id_: return table return None def get_plotgroup_by_id(self, id_): for pg in self.plotGroups: if pg.id == id_: return pg return None def to_dict(self, id_parts=None): _d = dict(v=pbcommand.get_version(), t=datetime.datetime.now().isoformat()) d = BaseReportElement.to_dict(self, id_parts=id_parts) d['_comment'] = "Generated with pbcommand version {v} at {t}".format(**_d) # Required in 1.0.0 of the spec d['uuid'] = self.uuid d['title'] = self.title d['version'] = PB_REPORT_SCHEMA_VERSION d['dataset_uuids'] = list(set(self.dataset_uuids)) d['tags'] = self.tags return d def to_json(self): """Return a json string of the report""" from pbcommand.schemas import validate_pbreport try: s = _to_json_with_decoder(self.to_dict()) # FIXME(mkocher)(2016-6-20) Enable schema validation # this needs to be processed by the decoder, then validate the # dict # _ = validate_pbreport(json.loads(s)) return s except TypeError as e: msg = "Unable to serialize report due to {e} \n".format(e=e) log.error(msg) log.error("Object: " + pformat(self.to_dict())) raise def write_json(self, file_name): """ Serialized the report to a json file. :param file_name: (str) Path to write output json file to. """ with open(file_name, 'w') as f: f.write(self.to_json()) # log.info("Wrote report {r}".format(r=file_name)) @staticmethod def from_simple_dict(report_id, raw_d, namespace): """ Generate a Report with populated attributes, starting from a flat dictionary (without namespace). 
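# A minimal report with one attribute, reusing the ``table`` from the Table sketch earlier.
# The import path and the Attribute(id, value, name=...) signature are assumptions taken from the
# surrounding code.
from pbcommand.models.report import Report, Attribute

report = Report(
    "read_stats_report",
    title="Read Statistics Report",
    attributes=[Attribute("n_movies", 2, name="Number of Movies")],
    tables=[table],
)
report.write_json("read_stats_report.json")   # includes uuid, title, version and dataset_uuids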
""" attributes = [] for k, v in raw_d.items(): ns = "_".join([namespace, k.lower()]) # These can't be none for some reason if v is not None: a = Attribute(ns, v, name=k) attributes.append(a) else: warnings.warn("skipping null entry {k}->{v}".format(k=k, v=v)) return Report(report_id, attributes=attributes) @staticmethod def merge(reports): report_id = reports[0].id def _merge_attributes_d(attributes_list): attrs = OrderedDict() for ax in attributes_list: for a in ax: if a.id in attrs: attrs[a.id].append(a.value) else: attrs[a.id] = [a.value] return attrs def _merge_attributes_names(attributes_list): names = {} for ax in attributes_list: for a in ax: if a.id in names: assert names[a.id] == a.name else: names[a.id] = a.name return names def _attributes_to_table(attributes_list, table_id, title): attrs = _merge_attributes_d(attributes_list) labels = _merge_attributes_names(attributes_list) columns = [Column(k.lower(), header=labels[k], values=values) for k, values in attrs.items()] table = Table(table_id, title=title, columns=columns) return table def _sum_attributes(attributes_list): d = _merge_attributes_d(attributes_list) labels = _merge_attributes_names(attributes_list) return [Attribute(k, sum(values), name=labels[k]) for k, values in d.items()] def _merge_tables(tables): """Pass through singletons, Table.merge dupes""" id_collisions = defaultdict(list) merged = [] for tab in tables: id_collisions[tab.id].append(tab) for tabs in id_collisions.values(): if len(tabs) == 1: merged.append(tabs[0]) else: merged.append(Table.merge(tabs)) return merged attr_list = [] table_list = [] dataset_uuids = set() for report in reports: assert report.id == report_id attr_list.append(report.attributes) table_list.extend(report.tables) dataset_uuids.update(set(report.dataset_uuids)) table = _attributes_to_table(attr_list, 'chunk_metrics', "Chunk Metrics") tables = _merge_tables(table_list) tables.append(table) merged_attributes = _sum_attributes(attr_list) return Report(report_id, attributes=merged_attributes, tables=tables, dataset_uuids=sorted(list(dataset_uuids))) ######################################################################## # SPECIFICATION MODELS FS_RE = r"{([GMkp]{0,1})(:)([,]{0,1})([\.]{0,1})([0-9]*)([dfg]{1})}(.*)$" def validate_format(format_str): m = re.match(FS_RE, format_str) if m is None: raise ValueError("Format string '{s}' is uninterpretable".format( s=format_str)) return m def format_metric(format_str, value): """ Format a report metric (attribute or table column value) according to our in-house rules. These resemble Python format strings (plus optional suffix), but with the addition of optional scaling flags. """ if value is None: return "NA" elif format_str is None: return str(value) else: m = validate_format(format_str) if m.groups()[0] == 'p': value *= 100.0 elif m.groups()[0] == 'G': value /= 1000000000.0 elif m.groups()[0] == 'M':
""" This is a python client implementation of the STOMP protocol. It aims to be transport layer neutral. This module provides functions to create and parse STOMP messages in a programmatic fashion. The examples package contains two examples using twisted as the transport framework. Other frameworks can be used and I may add other examples as time goes on. The STOMP protocol specification maybe found here: * http://stomp.codehaus.org/Protocol I've looked at the stomp client by <NAME> and have based the message generation on how his client does it. The client can be found at the follow address however it isn't a dependancy. * http://www.briggs.net.nz/log/projects/stomppy In testing this library I run against ActiveMQ project. The server runs in java, however its fairly standalone and easy to set up. The projects page is here: * http://activemq.apache.org/ (c) <NAME>, 2007-07-26. License: http://www.apache.org/licenses/LICENSE-2.0 """ from __future__ import absolute_import from builtins import object import re import uuid import types import logging from . import utils from . import stompbuffer # This is used as a return from message responses functions. # It is used more for readability more then anything or reason. NO_RESPONSE_NEEDED = '' # For backwards compatibility NO_REPONSE_NEEDED = '' # The version of the protocol we implement. STOMP_VERSION = '1.0' # Message terminator: NULL = '\x00' # STOMP Spec v1.0 valid commands: VALID_COMMANDS = [ 'ABORT', 'ACK', 'BEGIN', 'COMMIT', 'CONNECT', 'CONNECTED', 'DISCONNECT', 'MESSAGE', 'SEND', 'SUBSCRIBE', 'UNSUBSCRIBE', 'RECEIPT', 'ERROR', ] try: stringTypes = (str, unicode) except NameError: stringTypes = (str,) def get_log(): return logging.getLogger("stomper") class FrameError(Exception): """Raise for problem with frame generation or parsing. """ class Frame(object): """This class is used to create or read STOMP message frames. The method pack() is used to create a STOMP message ready for transmission. The method unpack() is used to read a STOMP message into a frame instance. It uses the unpack_frame(...) function to do the initial parsing. The frame has three important member variables: * cmd * headers * body The 'cmd' is a property that represents the STOMP message command. When you assign this a check is done to make sure its one of the VALID_COMMANDS. If not then FrameError will be raised. The 'headers' is a dictionary which the user can added to if needed. There are no restrictions or checks imposed on what values are inserted. The 'body' is just a member variable that the body text is assigned to. """ def __init__(self): """Setup the internal state.""" self._cmd = '' self.body = '' self.headers = {} def getCmd(self): """Don't use _cmd directly!""" return self._cmd def setCmd(self, cmd): """Check the cmd is valid, FrameError will be raised if its not.""" cmd = cmd.upper() if cmd not in VALID_COMMANDS: raise FrameError("The cmd '%s' is not valid! It must be one of '%s' (STOMP v%s)." % ( cmd, VALID_COMMANDS, STOMP_VERSION) ) else: self._cmd = cmd cmd = property(getCmd, setCmd) def pack(self): """Called to create a STOMP message from the internal values. """ headers = ''.join( ['%s:%s\n' % (f, v) for f, v in sorted(self.headers.items())] ) stomp_message = "%s\n%s\n%s%s\n" % (self._cmd, headers, self.body, NULL) # import pprint # print "stomp_message: ", pprint.pprint(stomp_message) return stomp_message def unpack(self, message): """Called to extract a STOMP message into this instance. 
message: This is a text string representing a valid STOMP (v1.0) message. This method uses unpack_frame(...) to extract the information, before it is assigned internally. retuned: The result of the unpack_frame(...) call. """ if not message: raise FrameError("Unpack error! The given message isn't valid '%s'!" % message) msg = unpack_frame(message) self.cmd = msg['cmd'] self.headers = msg['headers'] # Assign directly as the message will have the null # character in the message already. self.body = msg['body'] return msg def unpack_frame(message): """Called to unpack a STOMP message into a dictionary. returned = { # STOMP Command: 'cmd' : '...', # Headers e.g. 'headers' : { 'destination' : 'xyz', 'message-id' : 'some event', : etc, } # Body: 'body' : '...1234...\x00', } """ body = [] returned = dict(cmd='', headers={}, body='') breakdown = message.split('\n') # Get the message command: returned['cmd'] = breakdown[0] breakdown = breakdown[1:] def headD(field): # find the first ':' everything to the left of this is a # header, everything to the right is data: index = field.find(':') if index: header = field[:index].strip() data = field[index+1:].strip() # print "header '%s' data '%s'" % (header, data) returned['headers'][header.strip()] = data.strip() def bodyD(field): field = field.strip() if field: body.append(field) # Recover the header fields and body data handler = headD for field in breakdown: # print "field:", field if field.strip() == '': # End of headers, it body data next. handler = bodyD continue handler(field) # Stich the body data together: # print "1. body: ", body body = "".join(body) returned['body'] = body.replace('\x00', '') # print "2. body: <%s>" % returned['body'] return returned def abort(transactionid): """STOMP abort transaction command. Rollback whatever actions in this transaction. transactionid: This is the id that all actions in this transaction. """ return "ABORT\ntransaction: %s\n\n\x00\n" % transactionid def ack(messageid, transactionid=None): """STOMP acknowledge command. Acknowledge receipt of a specific message from the server. messageid: This is the id of the message we are acknowledging, what else could it be? ;) transactionid: This is the id that all actions in this transaction will have. If this is not given then a random UUID will be generated for this. """ header = 'message-id: %s' % messageid if transactionid: header = 'message-id: %s\ntransaction: %s' % (messageid, transactionid) return "ACK\n%s\n\n\x00\n" % header def begin(transactionid=None): """STOMP begin command. Start a transaction... transactionid: This is the id that all actions in this transaction will have. If this is not given then a random UUID will be generated for this. """ if not transactionid: # Generate a random UUID: transactionid = uuid.uuid4() return "BEGIN\ntransaction: %s\n\n\x00\n" % transactionid def commit(transactionid): """STOMP commit command. Do whatever is required to make the series of actions permanent for this transactionid. transactionid: This is the id that all actions in this transaction. """ return "COMMIT\ntransaction: %s\n\n\x00\n" % transactionid def connect(username, password): """STOMP connect command. username, password: These are the needed auth details to connect to the message server. After sending this we will receive a CONNECTED message which will contain our session id. """ return "CONNECT\nlogin:%s\npasscode:%s\n\n\x00\n" % (username, password) def disconnect(): """STOMP disconnect command. 
Tell the server we finished and we'll be closing the socket soon. """ return "DISCONNECT\n\n\x00\n" def send(dest, msg, transactionid=None): """STOMP send command. dest: This is the channel we wish to subscribe to msg: This is the message body to be sent. transactionid: This is an optional field and is not needed by default. """ transheader = '' if transactionid: transheader = 'transaction: %s\n' % transactionid return "SEND\ndestination: %s\n%s\n%s\x00\n" % (dest, transheader, msg) def subscribe(dest, ack='auto'): """STOMP subscribe command. dest: This is the channel we wish to subscribe to ack: 'auto' | 'client' If the ack is set to client, then messages received will have to have an acknowledge as a reply. Otherwise the server will assume delivery failure. """ return "SUBSCRIBE\ndestination: %s\nack: %s\n\n\x00\n" % (dest, ack) def unsubscribe(dest): """STOMP unsubscribe command. dest: This is the channel we wish to subscribe to Tell the server we no longer wish to receive any further messages for the given subscription. """ return "UNSUBSCRIBE\ndestination:%s\n\n\x00\n" % dest class Engine(object): """This is a simple state machine to return a response to received message if needed. """ def __init__(self, testing=False): self.testing = testing self.log = logging.getLogger("stomper.Engine") self.sessionId = '' # Entry Format: # # COMMAND : Handler_Function # self.states = { 'CONNECTED' : self.connected, 'MESSAGE' : self.ack, 'ERROR' : self.error, 'RECEIPT' : self.receipt, } def react(self, msg): """Called to provide a response to a message if needed. msg: This is a dictionary as returned by unpack_frame(...) or it can be a straight STOMP message. This function will attempt to determine which an deal with it. returned: A message to return or an empty string. """ returned = "" # If its not a string assume its a dict. mtype = type(msg) if mtype in stringTypes: msg = unpack_frame(msg) elif mtype == dict: pass else: raise FrameError("Unknown message type '%s', I don't know
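# Sketch of a typical client exchange built from the frame-generation helpers and the Engine state
# machine above. There is no real transport here: the strings are simply what would be written to
# or read from the socket. The default MESSAGE handler acknowledges, so feeding a MESSAGE frame to
# react() typically returns an ACK frame to send back.
outgoing = [
    connect('bob', 'secret'),                   # CONNECT frame
    subscribe('/queue/inbox', ack='client'),    # SUBSCRIBE frame with client acknowledgement
    send('/queue/inbox', 'hello'),              # SEND frame
]

engine = Engine()
incoming = (
    "MESSAGE\n"
    "destination:/queue/inbox\n"
    "message-id:some-id-123\n"
    "\n"
    "hello\x00\n"
)
response = engine.react(incoming)
# ``response``, if non-empty, is the frame to write back to the server over your transport.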
<gh_stars>0 # # Copyright (C) EMC Corporation. All rights reserved. # # Module Name: # # create_dir_open_additional.py # # Abstract: # # Directory open tests: Directories which are created on the share have to be deleted before test execution otherwise # it will result in STATUS_OBJECT_NAME_COLLISION. # # Authors: <NAME> (<EMAIL>) # from pike.smb2 import * import pike.test import utils import unittest import pike.model class directory_open(pike.test.PikeTest): def test_01_open_directory_with_file_list_directory(self): try: print "\n--------------------Open_Directory_TC 01 --------------------" print "Test case to list the contents of the directory with FILE_LIST_DIRECTORY permission.\n" expected_status = "STATUS_SUCCESS" print "Expected status: ", expected_status print "Creating session and tree connect..." chan, tree = self.tree_connect() print "Session setup and Tree connect is successful." print "Open a directory named Directory_open1 with FILE_LIST_DIRECTORY permission:" directory_handle =chan.create(tree,"Directory_open1",access = pike.smb2.FILE_LIST_DIRECTORY|pike.smb2.FILE_ADD_FILE,disposition = pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_DIRECTORY_FILE ).result() print "Directory created." print "Create a normal file inside Directory_open1 with the name Child_file:" file_handle = chan.create(tree,"Directory_open1\Child_file",access = pike.smb2.FILE_READ_DATA,disposition = pike.smb2.FILE_CREATE,options=pike.smb2.FILE_NON_DIRECTORY_FILE ).result() print "File created inside Directory_open1 directory." print "Close the file handle:" chan.close(file_handle) print "Child_file handle closed." print "Verify whether Child_file is present inside the Directory_open1 directory:" names = map(lambda res: res.file_name,chan.query_directory(directory_handle)) self.assertIn("Child_file", names) print "Listing the contents of the directory completed" actual_status = "STATUS_SUCCESS" print "Close the directory handle:" chan.close(directory_handle) print "Directory_open1 handle closed." except Exception as e: actual_status = str(e) print "Actual status: ",actual_status self.assertIn(expected_status,actual_status) print "TC 01 Passed" def test_02_open_directory_without_file_list_directory(self): try: print "\n--------------------Open_Directory_TC 02 --------------------" print "Test case to list the contents of the directory without FILE_LIST_DIRECTORY permission.\n" expected_status = "STATUS_ACCESS_DENIED" print "Expected status: ", expected_status print "Creating session and tree connect..." chan, tree = self.tree_connect() print "Session setup and Tree connect is successful." print "Open a directory named Directory_open2 without FILE_LIST_DIRECTORY permission:" directory_handle =chan.create(tree,"Directory_open2",access = pike.smb2.FILE_WRITE_DATA,disposition = pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_DIRECTORY_FILE ).result() print "Directory created." print "Create a normal file inside Directory_open2 with the name Child_file:" file_handle = chan.create(tree,"Directory_open2\Child_file",access = pike.smb2.FILE_READ_ATTRIBUTES,disposition = pike.smb2.FILE_CREATE,options=pike.smb2.FILE_NON_DIRECTORY_FILE ).result() print "File created inside Directory_open2 directory." print "Close the file handle:" chan.close(file_handle) print "Child_file handle closed." 
print "Verify whether Child_file is present inside the Directory_open2 directory:" names = map(lambda res: res.file_name,chan.query_directory(directory_handle)) self.assertIn("Child_file", names) print "Listing the contents of the directory completed" actual_status = "STATUS_SUCCESS" print "Close the directory handle:" chan.close(directory_handle) print "Directory_open2 handle closed." except Exception as e: actual_status = str(e) print "Actual status: ",actual_status self.assertIn(expected_status,actual_status) print "TC 02 Passed" def test_03_open_Child_directory_without_file_list_directory(self): try: print "\n--------------------Open_Directory_TC 03 --------------------" print "Test case to list the contents of the child directory without FILE_LIST_DIRECTORY permission\n." expected_status = "STATUS_ACCESS_DENIED" print "Expected status: ", expected_status print "Creating session and tree connect..." chan, tree = self.tree_connect() print "Session setup and Tree connect is successful." print "Create a directory named Directory_open3 with FILE_LIST_DIRECTORY permission:" directory_handle = chan.create(tree,"Directory_open3",access = pike.smb2.FILE_LIST_DIRECTORY,disposition = pike.smb2.FILE_CREATE,options=pike.smb2.FILE_DIRECTORY_FILE ).result() print "Directory created." print "Create child directory inside Directory_open3, named Child_directory without FILE_LIST_DIRECTORY permission:" child_handle = chan.create(tree,"Directory_open3\Child_directory",access = pike.smb2.FILE_WRITE_DATA,disposition = pike.smb2.FILE_CREATE,options=pike.smb2.FILE_DIRECTORY_FILE ).result() print "Child_Directory created inside Directory_open3 directory." print "Create a normal file inside Directory_open3\Child_directory named Child_file:" file_handle = chan.create(tree,"Directory_open3\Child_directory\Child_file",access = pike.smb2.FILE_READ_ATTRIBUTES,disposition = pike.smb2.FILE_CREATE,options=pike.smb2.FILE_NON_DIRECTORY_FILE ).result() print "File created inside Child_directory." print "Close the file handle of Child_file:" chan.close(file_handle) print "Child_file handle closed." print "Verify whether listing the contents of Child_directory:" names = map(lambda res: res.file_name,chan.query_directory(child_handle)) self.assertIn("Child_directory", names) print "Listing the contents of the directory completed" actual_status = "STATUS_SUCCESS" print "Close the child directory handle:" chan.close(child_handle) print "Child_directory handle closed." print "Close the directory handle:" chan.close(directory_handle) print "Directory_open3 handle closed." except Exception as e: actual_status = str(e) print "Actual status: ",actual_status self.assertIn(expected_status,actual_status) print "TC 03 Passed" def test_04_open_directory_with_file_add_file(self): try: print "\n--------------------Open_Directory_TC 04 --------------------" print "Test case to open a directory with FILE_ADD_FILE in desired access, create a file and list the contents of directory\n." expected_status = "STATUS_SUCCESS" print "Expected status: ", expected_status print "Creating session and tree connect..." chan, tree = self.tree_connect() print "Session setup and Tree connect is successful." print "Open a directory named Directory_open4 with FILE_ADD_FILE permission:" directory_handle =chan.create(tree,"Directory_open4",access = pike.smb2.FILE_LIST_DIRECTORY|pike.smb2.FILE_ADD_FILE,disposition = pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_DIRECTORY_FILE ).result() print "Directory created." 
print "Create a normal file inside Directory_open4 named Child_file:" file_handle = chan.create(tree,"Directory_open4\Child_file",access = pike.smb2.FILE_READ_DATA,disposition = pike.smb2.FILE_CREATE,options=pike.smb2.FILE_NON_DIRECTORY_FILE ).result() print "File created inside Directory_open4 directory." print "Close the file handle:" chan.close(file_handle) print "Child_file handle closed." print "Verify whether Child_file is present inside the Directory_open4 directory:" names = map(lambda res: res.file_name,chan.query_directory(directory_handle)) self.assertIn("Child_file", names) print "Listing the contents of the directory completed" actual_status = "STATUS_SUCCESS" print "Close the directory handle:" chan.close(directory_handle) print "Directory_open4 handle closed." except Exception as e: actual_status = str(e) print "Actual status: ",actual_status self.assertIn(expected_status,actual_status) print "TC 04 Passed" def test_05_open_directory_without_file_add_file(self): try: print "\n--------------------Open_Directory_TC 05 --------------------" print "Test case to open a directory without FILE_ADD_FILE in desired access, create a file and list the contents of directory\n." expected_status = "STATUS_SUCCESS" print "Expected status: ", expected_status print "Creating session and tree connect..." chan, tree = self.tree_connect() print "Session setup and Tree connect is successful." print "Open a directory named Directory_open5 without FILE_ADD_FILE permission:" directory_handle =chan.create(tree,"Directory_open5",access = pike.smb2.FILE_LIST_DIRECTORY|pike.smb2.FILE_WRITE_DATA,disposition = pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_DIRECTORY_FILE ).result() print "Directory created." print "Create a normal file inside Directory_open5 named Child_file:" file_handle = chan.create(tree,"Directory_open5\Child_file",access = pike.smb2.FILE_WRITE_DATA,disposition = pike.smb2.FILE_CREATE,options=pike.smb2.FILE_NON_DIRECTORY_FILE ).result() print "File created inside Directory_open6 directory." print "Close the file handle:" chan.close(file_handle) print "Child_file handle closed." print "Verify whether Child_file is present inside the Directory_open5 directory:" names = map(lambda res: res.file_name,chan.query_directory(directory_handle)) self.assertIn("Child_file", names) print "Listing the contents of the directory completed" actual_status = "STATUS_SUCCESS" print "Close the directory handle:" chan.close(directory_handle) print "Directory_open5 handle closed." except Exception as e: actual_status = str(e) print "Actual status: ",actual_status self.assertIn(expected_status,actual_status) print "TC 05 Passed" def test_06_open_directory_without_read_attributes(self): try: print "\n--------------------Open_Directory_TC 06 --------------------" print "Test case to open a directory to read the attributes without FILE_READ_ATTRIBUTES permission." expected_status = "STATUS_ACCESS_DENIED" print "Expected status for this test: ",expected_status print "Creating session and tree connect..." chan, tree = self.tree_connect() print "Session setup and Tree connect is successful." print "Create a directory named Directory_open6 with FILE_WRITE_ATTRIBUTES permission:" directory_handle =chan.create(tree,"Directory_open6",access = pike.smb2.FILE_WRITE_ATTRIBUTES,disposition = pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_DIRECTORY_FILE ).result() print "Directory created." 
print "Reading attributes on the directory:" info = chan.query_file_info(directory_handle,pike.smb2.FILE_ALL_INFORMATION) print "Reading attributes of the directory completed" actual_status = "STATUS_SUCCESS" print "Close the directory handle:" chan.close(directory_handle) print "Directory_open6 handle closed." except Exception as e: actual_status = str(e) print "Actual status: ",actual_status self.assertIn(expected_status,actual_status) print "TC 06 Passed" def test_07_open_directory_without_write_attributes(self): try: print "\n--------------------Open_Directory_TC 07 --------------------" print "Test case to open a directory to write the attributes that does not have FILE_WRITE_ATTRIBUTES permission set." expected_status = "STATUS_ACCESS_DENIED" print "Expected status for this test: ",expected_status print "Creating session and tree connect..." chan, tree = self.tree_connect() print "Session setup and Tree connect is successful." print "Create a directory named Directory_open7 without FILE_WRITE_ATTRIBUTES permission:" directory_handle =chan.create(tree,"Directory_open7",access = pike.smb2.FILE_READ_ATTRIBUTES,disposition = pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_DIRECTORY_FILE ).result() print "Directory created." print "Reading attributes on the directory before changing them:" info = chan.query_file_info(directory_handle,pike.smb2.FILE_ALL_INFORMATION) print "Reading attributes of the directory completed" attribute_before = info.basic_information.file_attributes print "Directory FILE_ATTRIBUTE before changing : ",attribute_before print "Change the attributes of the directory:" with chan.set_file_info(directory_handle, pike.smb2.FileBasicInformation) as directory_info: directory_info.file_attributes = pike.smb2.FILE_ATTRIBUTE_READONLY print "Changing attributes of the directory completed." print "Query attributes on the directory after changing them:" info1 = chan.query_file_info(directory_handle,pike.smb2.FILE_ALL_INFORMATION) print "Querying attributes of the directory completed" attribute_after = info1.basic_information.file_attributes print "Directory FILE_ATTRIBUTE after changing : ",attribute_after actual_status = "STATUS_SUCCESS" print "Close the directory handle:" chan.close(directory_handle) print "Directory_open7 handle closed." except Exception as e: actual_status = str(e) print "Actual status: ",actual_status self.assertIn(expected_status,actual_status) print "TC 07 Passed" def test_08_open_directory_with_GENERIC_ALL(self): try: print "\n--------------------Open_Directory_TC 08 --------------------" print "Test case to open a directory with GENERIC_ALL permission and try related operations." expected_status = "STATUS_SUCCESS" print "Expected status: ", expected_status print "Creating session and tree connect..." chan, tree = self.tree_connect() print "Session setup and Tree connect is successful." print "Create a directory named Directory_open8 with GENERIC_ALL permission:" directory_handle =chan.create(tree,"Directory_open8",access = pike.smb2.GENERIC_ALL,disposition = pike.smb2.FILE_OPEN_IF,options=pike.smb2.FILE_DIRECTORY_FILE ).result() print "Directory created." print "Create a file inside Directory_open10 directory:" file_handle = chan.create(tree,"Directory_open8\Child_file1",access = pike.smb2.FILE_WRITE_DATA,disposition = pike.smb2.FILE_CREATE,options=pike.smb2.FILE_NON_DIRECTORY_FILE,share=pike.smb2.FILE_SHARE_DELETE ).result() print "File created inside Directory_open8 directory." 
print "Close the file handle:" chan.close(file_handle) print "Child_file1 file handle closed." print "Verifying FILE_ADD_FILE on Directory_open8 directory:" file_add_file =chan.create(tree,"Directory_open8\Child_file2",access = pike.smb2.GENERIC_READ,disposition = pike.smb2.FILE_CREATE,options=pike.smb2.FILE_NON_DIRECTORY_FILE ).result() print "File created inside Directory_open8 directory." print "Close the Child_file2 file handle:"
return sage_aven, sage_aven, @card("Syphon Mind") def syphon_mind(card, abilities): def syphon_mind(): return AbilityNotImplemented return syphon_mind, @card("Mistform Mutant") def mistform_mutant(card, abilities): def mistform_mutant(): return AbilityNotImplemented return mistform_mutant, @card("Gravel Slinger") def gravel_slinger(card, abilities): def gravel_slinger(): return AbilityNotImplemented def gravel_slinger(): return AbilityNotImplemented return gravel_slinger, gravel_slinger, @card("Threaten") def threaten(card, abilities): def threaten(): return AbilityNotImplemented return threaten, @card("Prowling Pangolin") def prowling_pangolin(card, abilities): def prowling_pangolin(): return AbilityNotImplemented return prowling_pangolin, @card("Righteous Cause") def righteous_cause(card, abilities): def righteous_cause(): return AbilityNotImplemented return righteous_cause, @card("Endemic Plague") def endemic_plague(card, abilities): def endemic_plague(): return AbilityNotImplemented def endemic_plague(): return AbilityNotImplemented return endemic_plague, endemic_plague, @card("Feeding Frenzy") def feeding_frenzy(card, abilities): def feeding_frenzy(): return AbilityNotImplemented return feeding_frenzy, @card("Haunted Cadaver") def haunted_cadaver(card, abilities): def haunted_cadaver(): return AbilityNotImplemented def haunted_cadaver(): return AbilityNotImplemented return haunted_cadaver, haunted_cadaver, @card("Insurrection") def insurrection(card, abilities): def insurrection(): return AbilityNotImplemented return insurrection, @card("Overwhelming Instinct") def overwhelming_instinct(card, abilities): def overwhelming_instinct(): return AbilityNotImplemented return overwhelming_instinct, @card("Shade's Breath") def shades_breath(card, abilities): def shades_breath(): return AbilityNotImplemented return shades_breath, @card("Crowd Favorites") def crowd_favorites(card, abilities): def crowd_favorites(): return AbilityNotImplemented def crowd_favorites(): return AbilityNotImplemented return crowd_favorites, crowd_favorites, @card("Graxiplon") def graxiplon(card, abilities): def graxiplon(): return AbilityNotImplemented return graxiplon, @card("Withering Hex") def withering_hex(card, abilities): def withering_hex(): return AbilityNotImplemented def withering_hex(): return AbilityNotImplemented def withering_hex(): return AbilityNotImplemented return withering_hex, withering_hex, withering_hex, @card("Airdrop Condor") def airdrop_condor(card, abilities): def airdrop_condor(): return AbilityNotImplemented def airdrop_condor(): return AbilityNotImplemented return airdrop_condor, airdrop_condor, @card("Gustcloak Skirmisher") def gustcloak_skirmisher(card, abilities): def gustcloak_skirmisher(): return AbilityNotImplemented def gustcloak_skirmisher(): return AbilityNotImplemented return gustcloak_skirmisher, gustcloak_skirmisher, @card("Disruptive Pitmage") def disruptive_pitmage(card, abilities): def disruptive_pitmage(): return AbilityNotImplemented def disruptive_pitmage(): return AbilityNotImplemented return disruptive_pitmage, disruptive_pitmage, @card("Symbiotic Wurm") def symbiotic_wurm(card, abilities): def symbiotic_wurm(): return AbilityNotImplemented return symbiotic_wurm, @card("Crown of Awe") def crown_of_awe(card, abilities): def crown_of_awe(): return AbilityNotImplemented def crown_of_awe(): return AbilityNotImplemented def crown_of_awe(): return AbilityNotImplemented return crown_of_awe, crown_of_awe, crown_of_awe, @card("Goblin Piledriver") def goblin_piledriver(card, 
abilities): def goblin_piledriver(): return AbilityNotImplemented def goblin_piledriver(): return AbilityNotImplemented return goblin_piledriver, goblin_piledriver, @card("Ascending Aven") def ascending_aven(card, abilities): def ascending_aven(): return AbilityNotImplemented def ascending_aven(): return AbilityNotImplemented def ascending_aven(): return AbilityNotImplemented return ascending_aven, ascending_aven, ascending_aven, @card("Unified Strike") def unified_strike(card, abilities): def unified_strike(): return AbilityNotImplemented return unified_strike, @card("Goblin Taskmaster") def goblin_taskmaster(card, abilities): def goblin_taskmaster(): return AbilityNotImplemented def goblin_taskmaster(): return AbilityNotImplemented return goblin_taskmaster, goblin_taskmaster, @card("Ixidor's Will") def ixidors_will(card, abilities): def ixidors_will(): return AbilityNotImplemented return ixidors_will, @card("Mistform Dreamer") def mistform_dreamer(card, abilities): def mistform_dreamer(): return AbilityNotImplemented def mistform_dreamer(): return AbilityNotImplemented return mistform_dreamer, mistform_dreamer, @card("<NAME>") def krosan_tusker(card, abilities): def krosan_tusker(): return AbilityNotImplemented def krosan_tusker(): return AbilityNotImplemented return krosan_tusker, krosan_tusker, @card("Centaur Glade") def centaur_glade(card, abilities): def centaur_glade(): return AbilityNotImplemented return centaur_glade, @card("Akroma's Blessing") def akromas_blessing(card, abilities): def akromas_blessing(): return AbilityNotImplemented def akromas_blessing(): return AbilityNotImplemented return akromas_blessing, akromas_blessing, @card("Leery Fogbeast") def leery_fogbeast(card, abilities): def leery_fogbeast(): return AbilityNotImplemented return leery_fogbeast, @card("Glarecaster") def glarecaster(card, abilities): def glarecaster(): return AbilityNotImplemented def glarecaster(): return AbilityNotImplemented return glarecaster, glarecaster, @card("Gustcloak Harrier") def gustcloak_harrier(card, abilities): def gustcloak_harrier(): return AbilityNotImplemented def gustcloak_harrier(): return AbilityNotImplemented return gustcloak_harrier, gustcloak_harrier, @card("Grassland Crusader") def grassland_crusader(card, abilities): def grassland_crusader(): return AbilityNotImplemented return grassland_crusader, @card("Barkhide Mauler") def barkhide_mauler(card, abilities): def barkhide_mauler(): return AbilityNotImplemented return barkhide_mauler, @card("Doubtless One") def doubtless_one(card, abilities): def doubtless_one(): return AbilityNotImplemented def doubtless_one(): return AbilityNotImplemented return doubtless_one, doubtless_one, @card("Riptide Chronologist") def riptide_chronologist(card, abilities): def riptide_chronologist(): return AbilityNotImplemented return riptide_chronologist, @card("Mistform Shrieker") def mistform_shrieker(card, abilities): def mistform_shrieker(): return AbilityNotImplemented def mistform_shrieker(): return AbilityNotImplemented def mistform_shrieker(): return AbilityNotImplemented return mistform_shrieker, mistform_shrieker, mistform_shrieker, @card("Screaming Seahawk") def screaming_seahawk(card, abilities): def screaming_seahawk(): return AbilityNotImplemented def screaming_seahawk(): return AbilityNotImplemented return screaming_seahawk, screaming_seahawk, @card("Ironfist Crusher") def ironfist_crusher(card, abilities): def ironfist_crusher(): return AbilityNotImplemented def ironfist_crusher(): return AbilityNotImplemented return 
ironfist_crusher, ironfist_crusher, @card("Erratic Explosion") def erratic_explosion(card, abilities): def erratic_explosion(): return AbilityNotImplemented return erratic_explosion, @card("Cruel Revival") def cruel_revival(card, abilities): def cruel_revival(): return AbilityNotImplemented return cruel_revival, @card("Shaleskin Bruiser") def shaleskin_bruiser(card, abilities): def shaleskin_bruiser(): return AbilityNotImplemented def shaleskin_bruiser(): return AbilityNotImplemented return shaleskin_bruiser, shaleskin_bruiser, @card("Silklash Spider") def silklash_spider(card, abilities): def silklash_spider(): return AbilityNotImplemented def silklash_spider(): return AbilityNotImplemented return silklash_spider, silklash_spider, @card("Gigapede") def gigapede(card, abilities): def gigapede(): return AbilityNotImplemented def gigapede(): return AbilityNotImplemented return gigapede, gigapede, @card("Wellwisher") def wellwisher(card, abilities): def wellwisher(): return AbilityNotImplemented return wellwisher, @card("Piety Charm") def piety_charm(card, abilities): def piety_charm(): return AbilityNotImplemented return piety_charm, @card("Seaside Haven") def seaside_haven(card, abilities): def seaside_haven(): return AbilityNotImplemented def seaside_haven(): return AbilityNotImplemented return seaside_haven, seaside_haven, @card("Fade from Memory") def fade_from_memory(card, abilities): def fade_from_memory(): return AbilityNotImplemented def fade_from_memory(): return AbilityNotImplemented return fade_from_memory, fade_from_memory, @card("Shared Triumph") def shared_triumph(card, abilities): def shared_triumph(): return AbilityNotImplemented def shared_triumph(): return AbilityNotImplemented return shared_triumph, shared_triumph, @card("Snarling Undorak") def snarling_undorak(card, abilities): def snarling_undorak(): return AbilityNotImplemented def snarling_undorak(): return AbilityNotImplemented return snarling_undorak, snarling_undorak, @card("Festering Goblin") def festering_goblin(card, abilities): def festering_goblin(): return AbilityNotImplemented return festering_goblin, @card("Dream Chisel") def dream_chisel(card, abilities): def dream_chisel(): return AbilityNotImplemented return dream_chisel, @card("Strongarm Tactics") def strongarm_tactics(card, abilities): def strongarm_tactics(): return AbilityNotImplemented return strongarm_tactics, @card("Wirewood Savage") def wirewood_savage(card, abilities): def wirewood_savage(): return AbilityNotImplemented return wirewood_savage, @card("Mobilization") def mobilization(card, abilities): def mobilization(): return AbilityNotImplemented def mobilization(): return AbilityNotImplemented return mobilization, mobilization, @card("Headhunter") def headhunter(card, abilities): def headhunter(): return AbilityNotImplemented def headhunter(): return AbilityNotImplemented return headhunter, headhunter, @card("Improvised Armor") def improvised_armor(card, abilities): def improvised_armor(): return AbilityNotImplemented def improvised_armor(): return AbilityNotImplemented def improvised_armor(): return AbilityNotImplemented return improvised_armor, improvised_armor, improvised_armor, @card("Run Wild") def run_wild(card, abilities): def run_wild(): return AbilityNotImplemented return run_wild, @card("Dwarven Blastminer") def dwarven_blastminer(card, abilities): def dwarven_blastminer(): return AbilityNotImplemented def dwarven_blastminer(): return AbilityNotImplemented return dwarven_blastminer, dwarven_blastminer, @card("Fever Charm") def 
fever_charm(card, abilities): def fever_charm(): return AbilityNotImplemented return fever_charm, @card("Dispersing Orb") def dispersing_orb(card, abilities): def dispersing_orb(): return AbilityNotImplemented return dispersing_orb, @card("Annex") def annex(card, abilities): def annex(): return AbilityNotImplemented def annex(): return AbilityNotImplemented return annex, annex, @card("Undead Gladiator") def undead_gladiator(card, abilities): def undead_gladiator(): return AbilityNotImplemented def undead_gladiator(): return AbilityNotImplemented return undead_gladiator, undead_gladiator, @card("Tephraderm") def tephraderm(card, abilities): def tephraderm(): return AbilityNotImplemented def tephraderm(): return AbilityNotImplemented return tephraderm, tephraderm, @card("Head Games") def head_games(card, abilities): def head_games(): return AbilityNotImplemented return head_games, @card("Animal Magnetism") def animal_magnetism(card, abilities): def animal_magnetism(): return AbilityNotImplemented return animal_magnetism, @card("Entrails Feaster") def entrails_feaster(card, abilities): def entrails_feaster(): return AbilityNotImplemented return entrails_feaster, @card("Skittish Valesk") def skittish_valesk(card, abilities): def skittish_valesk(): return AbilityNotImplemented def skittish_valesk(): return AbilityNotImplemented return skittish_valesk, skittish_valesk, @card("Aven Brigadier") def aven_brigadier(card, abilities): def aven_brigadier(): return AbilityNotImplemented def aven_brigadier(): return AbilityNotImplemented def aven_brigadier(): return AbilityNotImplemented return aven_brigadier, aven_brigadier, aven_brigadier, @card("<NAME>") def aven_soulgazer(card, abilities): def aven_soulgazer(): return AbilityNotImplemented def aven_soulgazer(): return AbilityNotImplemented return aven_soulgazer, aven_soulgazer, @card("Arcanis the Omnipotent") def arcanis_the_omnipotent(card, abilities): def arcanis_the_omnipotent(): return AbilityNotImplemented def arcanis_the_omnipotent(): return AbilityNotImplemented return arcanis_the_omnipotent, arcanis_the_omnipotent, @card("Dawning Purist") def dawning_purist(card, abilities): def dawning_purist(): return AbilityNotImplemented def dawning_purist(): return AbilityNotImplemented return dawning_purist, dawning_purist, @card("Tranquil Thicket") def tranquil_thicket(card, abilities): def tranquil_thicket(): return AbilityNotImplemented def tranquil_thicket(): return AbilityNotImplemented def tranquil_thicket(): return AbilityNotImplemented return tranquil_thicket, tranquil_thicket, tranquil_thicket, @card("Flamestick Courier") def flamestick_courier(card, abilities): def flamestick_courier(): return AbilityNotImplemented def flamestick_courier(): return AbilityNotImplemented return flamestick_courier, flamestick_courier, @card("Riptide Replicator") def riptide_replicator(card, abilities): def riptide_replicator(): return AbilityNotImplemented def riptide_replicator(): return AbilityNotImplemented def riptide_replicator(): return AbilityNotImplemented return riptide_replicator, riptide_replicator, riptide_replicator, @card("Charging Slateback") def charging_slateback(card, abilities): def charging_slateback(): return AbilityNotImplemented def charging_slateback(): return AbilityNotImplemented return charging_slateback, charging_slateback, @card("Crafty Pathmage") def crafty_pathmage(card, abilities): def crafty_pathmage(): return AbilityNotImplemented return crafty_pathmage, @card("Risky Move") def risky_move(card, abilities): def risky_move(): 
return AbilityNotImplemented def risky_move(): return AbilityNotImplemented return risky_move, risky_move, @card("Riptide Entrancer") def riptide_entrancer(card, abilities): def riptide_entrancer(): return AbilityNotImplemented def riptide_entrancer(): return AbilityNotImplemented return riptide_entrancer, riptide_entrancer, @card("Quicksilver Dragon") def quicksilver_dragon(card, abilities): def quicksilver_dragon(): return AbilityNotImplemented def quicksilver_dragon(): return AbilityNotImplemented def quicksilver_dragon(): return AbilityNotImplemented return quicksilver_dragon, quicksilver_dragon, quicksilver_dragon, @card("Elvish Scrapper") def elvish_scrapper(card, abilities): def elvish_scrapper(): return AbilityNotImplemented return elvish_scrapper, @card("Towering Baloth") def towering_baloth(card, abilities): def towering_baloth(): return AbilityNotImplemented return towering_baloth, @card("Aphetto Dredging") def aphetto_dredging(card, abilities): def aphetto_dredging(): return AbilityNotImplemented return aphetto_dredging, @card("Commando Raid") def commando_raid(card, abilities): def commando_raid(): return AbilityNotImplemented return commando_raid, @card("Explosive Vegetation") def explosive_vegetation(card, abilities): def explosive_vegetation(): return AbilityNotImplemented return explosive_vegetation, @card("Cryptic Gateway") def cryptic_gateway(card, abilities): def cryptic_gateway(): return AbilityNotImplemented return cryptic_gateway, @card("<NAME>") def anurid_murkdiver(card, abilities): def anurid_murkdiver(): return AbilityNotImplemented return anurid_murkdiver, @card("Ancestor's Prophet") def ancestors_prophet(card, abilities): def ancestors_prophet(): return AbilityNotImplemented return ancestors_prophet, @card("Walking Desecration") def walking_desecration(card, abilities): def walking_desecration(): return AbilityNotImplemented return walking_desecration, @card("Trade Secrets") def trade_secrets(card, abilities): def trade_secrets(): return AbilityNotImplemented return trade_secrets, @card("Sunfire Balm") def sunfire_balm(card, abilities): def sunfire_balm(): return AbilityNotImplemented def sunfire_balm(): return AbilityNotImplemented def sunfire_balm(): return AbilityNotImplemented return sunfire_balm, sunfire_balm, sunfire_balm, @card("Venomspout Brackus") def venomspout_brackus(card, abilities): def venomspout_brackus(): return AbilityNotImplemented def venomspout_brackus(): return AbilityNotImplemented return venomspout_brackus, venomspout_brackus, @card("Doom Cannon") def doom_cannon(card, abilities): def doom_cannon(): return AbilityNotImplemented def doom_cannon(): return AbilityNotImplemented return doom_cannon, doom_cannon, @card("Serpentine Basilisk") def serpentine_basilisk(card, abilities): def serpentine_basilisk(): return AbilityNotImplemented def serpentine_basilisk(): return AbilityNotImplemented return serpentine_basilisk, serpentine_basilisk, @card("Daru Healer") def daru_healer(card, abilities): def daru_healer(): return AbilityNotImplemented def daru_healer(): return AbilityNotImplemented return daru_healer, daru_healer, @card("Reminisce") def reminisce(card, abilities): def reminisce(): return AbilityNotImplemented return reminisce, @card("Riptide Shapeshifter") def riptide_shapeshifter(card, abilities): def riptide_shapeshifter(): return AbilityNotImplemented return riptide_shapeshifter, @card("Information Dealer") def information_dealer(card, abilities): def information_dealer(): return AbilityNotImplemented return information_dealer, 
@card("Crown of Suspicion") def crown_of_suspicion(card, abilities): def crown_of_suspicion(): return AbilityNotImplemented def crown_of_suspicion(): return AbilityNotImplemented def crown_of_suspicion(): return AbilityNotImplemented return crown_of_suspicion, crown_of_suspicion, crown_of_suspicion, @card("Chain of Plasma") def chain_of_plasma(card, abilities): def chain_of_plasma(): return AbilityNotImplemented return chain_of_plasma, @card("Death Pulse") def death_pulse(card, abilities): def death_pulse(): return AbilityNotImplemented def death_pulse(): return AbilityNotImplemented def death_pulse(): return AbilityNotImplemented return death_pulse, death_pulse, death_pulse, @card("Elvish Vanguard") def elvish_vanguard(card, abilities): def elvish_vanguard(): return AbilityNotImplemented return elvish_vanguard, @card("Brightstone Ritual") def brightstone_ritual(card, abilities): def brightstone_ritual(): return AbilityNotImplemented return brightstone_ritual, @card("Searing Flesh") def searing_flesh(card, abilities): def searing_flesh(): return AbilityNotImplemented return searing_flesh, @card("Soulless One") def soulless_one(card, abilities): def soulless_one(): return AbilityNotImplemented return soulless_one, @card("Peer Pressure") def peer_pressure(card, abilities): def peer_pressure(): return AbilityNotImplemented return peer_pressure, @card("Akroma's Vengeance") def akromas_vengeance(card,
params['booking_id'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='InlineResponse20012', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def delete_booking(self, id, **kwargs): """ Delete a booking by ID This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_booking(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: (required) :return: InlineResponse2003 If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.delete_booking_with_http_info(id, **kwargs) else: (data) = self.delete_booking_with_http_info(id, **kwargs) return data def delete_booking_with_http_info(self, id, **kwargs): """ Delete a booking by ID This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_booking_with_http_info(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: (required) :return: InlineResponse2003 If the method is called asynchronously, returns the request thread. 
""" all_params = ['id'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_booking" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'id' is set if ('id' not in params) or (params['id'] is None): raise ValueError("Missing the required parameter `id` when calling `delete_booking`") resource_path = '/booking/delete'.replace('{format}', 'json') path_params = {} query_params = {} if 'id' in params: query_params['id'] = params['id'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='InlineResponse2003', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def dettach_accommodation(self, booking_id, accommodation_id, customer_id, **kwargs): """ Dettach an accommodation booking to a booking This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.dettach_accommodation(booking_id, accommodation_id, customer_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int booking_id: (required) :param int accommodation_id: (required) :param int customer_id: (required) :param date start: :return: InlineResponse20017 If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.dettach_accommodation_with_http_info(booking_id, accommodation_id, customer_id, **kwargs) else: (data) = self.dettach_accommodation_with_http_info(booking_id, accommodation_id, customer_id, **kwargs) return data def dettach_accommodation_with_http_info(self, booking_id, accommodation_id, customer_id, **kwargs): """ Dettach an accommodation booking to a booking This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.dettach_accommodation_with_http_info(booking_id, accommodation_id, customer_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int booking_id: (required) :param int accommodation_id: (required) :param int customer_id: (required) :param date start: :return: InlineResponse20017 If the method is called asynchronously, returns the request thread. 
""" all_params = ['booking_id', 'accommodation_id', 'customer_id', 'start'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method dettach_accommodation" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'booking_id' is set if ('booking_id' not in params) or (params['booking_id'] is None): raise ValueError("Missing the required parameter `booking_id` when calling `dettach_accommodation`") # verify the required parameter 'accommodation_id' is set if ('accommodation_id' not in params) or (params['accommodation_id'] is None): raise ValueError("Missing the required parameter `accommodation_id` when calling `dettach_accommodation`") # verify the required parameter 'customer_id' is set if ('customer_id' not in params) or (params['customer_id'] is None): raise ValueError("Missing the required parameter `customer_id` when calling `dettach_accommodation`") resource_path = '/booking/remove-accommodation'.replace('{format}', 'json') path_params = {} query_params = {} if 'booking_id' in params: query_params['booking_id'] = params['booking_id'] if 'accommodation_id' in params: query_params['accommodation_id'] = params['accommodation_id'] if 'customer_id' in params: query_params['customer_id'] = params['customer_id'] if 'start' in params: query_params['start'] = params['start'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='InlineResponse20017', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def dettach_addon(self, booking_id, bookingdetail_id, addon_id, **kwargs): """ Dettach an addon to a trip of a booking This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.dettach_addon(booking_id, bookingdetail_id, addon_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int booking_id: (required) :param int bookingdetail_id: (required) :param int addon_id: (required) :param int packagefacade_id: :return: InlineResponse20017 If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.dettach_addon_with_http_info(booking_id, bookingdetail_id, addon_id, **kwargs) else: (data) = self.dettach_addon_with_http_info(booking_id, bookingdetail_id, addon_id, **kwargs) return data def dettach_addon_with_http_info(self, booking_id, bookingdetail_id, addon_id, **kwargs): """ Dettach an addon to a trip of a booking This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.dettach_addon_with_http_info(booking_id, bookingdetail_id, addon_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int booking_id: (required) :param int bookingdetail_id: (required) :param int addon_id: (required) :param int packagefacade_id: :return: InlineResponse20017 If the method is called asynchronously, returns the request thread. """ all_params = ['booking_id', 'bookingdetail_id', 'addon_id', 'packagefacade_id'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method dettach_addon" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'booking_id' is set if ('booking_id' not in params) or (params['booking_id'] is None): raise ValueError("Missing the required parameter `booking_id` when calling `dettach_addon`") # verify the required parameter 'bookingdetail_id' is set if ('bookingdetail_id' not in params) or (params['bookingdetail_id'] is None): raise ValueError("Missing the required parameter `bookingdetail_id` when calling `dettach_addon`") # verify the required parameter 'addon_id' is set if ('addon_id' not in params) or (params['addon_id'] is None): raise ValueError("Missing the required parameter `addon_id` when calling `dettach_addon`") resource_path = '/booking/remove-addon'.replace('{format}', 'json') path_params = {} query_params = {} if 'booking_id' in params: query_params['booking_id'] = params['booking_id'] if 'bookingdetail_id' in params: query_params['bookingdetail_id'] = params['bookingdetail_id'] if 'addon_id' in params: query_params['addon_id'] = params['addon_id'] if 'packagefacade_id' in params: query_params['packagefacade_id'] = params['packagefacade_id'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='InlineResponse20017', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def dettach_pickup(self, booking_id, **kwargs): """ Dettach a pickup location for a booking This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.dettach_pickup(booking_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int booking_id: (required) :param int id: :return: InlineResponse2003 If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.dettach_pickup_with_http_info(booking_id, **kwargs) else:
# PEDA - Python Exploit Development Assistance for GDB # # Copyright (C) 2012 <NAME> <longld at vnsecurity.net> # # License: see LICENSE file for details # from __future__ import absolute_import from __future__ import division from __future__ import print_function import re import os import sys import shlex import string import time import signal import traceback import codecs # point to absolute path of peda.py PEDAFILE = os.path.abspath(os.path.expanduser(__file__)) if os.path.islink(PEDAFILE): PEDAFILE = os.readlink(PEDAFILE) sys.path.insert(0, os.path.dirname(PEDAFILE) + "/lib/") # Use six library to provide Python 2/3 compatibility import six from six.moves import range from six.moves import input try: import six.moves.cPickle as pickle except ImportError: import pickle from skeleton import * from shellcode import * from utils import * import config from nasm import * if sys.version_info.major == 3: from urllib.request import urlopen from urllib.parse import urlencode pyversion = 3 else: from urllib import urlopen from urllib import urlencode pyversion = 2 REGISTERS = { 8 : ["al", "ah", "bl", "bh", "cl", "ch", "dl", "dh"], 16: ["ax", "bx", "cx", "dx"], 32: ["eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp", "eip"], 64: ["rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp", "rip", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"] } ########################################################################### class PEDA(object): """ Class for actual functions of PEDA commands """ def __init__(self): self.SAVED_COMMANDS = {} # saved GDB user's commands #################################### # GDB Interaction / Misc Utils # #################################### def execute(self, gdb_command): """ Wrapper for gdb.execute, catch the exception so it will not stop python script Args: - gdb_command (String) Returns: - True if execution succeed (Bool) """ try: gdb.execute(gdb_command) return True except Exception as e: if config.Option.get("debug") == "on": msg('Exception (%s): %s' % (gdb_command, e), "red") traceback.print_exc() return False def execute_redirect(self, gdb_command, silent=False): """ Execute a gdb command and capture its output Args: - gdb_command (String) - silent: discard command's output, redirect to /dev/null (Bool) Returns: - output of command (String) """ result = None #init redirection if silent: logfd = open(os.path.devnull, "r+") else: logfd = tmpfile() logname = logfd.name gdb.execute('set logging off') # prevent nested call gdb.execute('set height 0') # disable paging gdb.execute('set logging file %s' % logname) gdb.execute('set logging overwrite on') gdb.execute('set logging redirect on') gdb.execute('set logging on') try: gdb.execute(gdb_command) gdb.flush() gdb.execute('set logging off') if not silent: logfd.flush() result = logfd.read() logfd.close() except Exception as e: gdb.execute('set logging off') #to be sure if config.Option.get("debug") == "on": msg('Exception (%s): %s' % (gdb_command, e), "red") traceback.print_exc() logfd.close() if config.Option.get("verbose") == "on": msg(result) return result def parse_and_eval(self, exp): """ Work around implementation for gdb.parse_and_eval with enhancements Args: - exp: expression to evaluate (String) Returns: - value of expression """ regs = sum(REGISTERS.values(), []) for r in regs: if "$"+r not in exp and "e"+r not in exp and "r"+r not in exp: exp = exp.replace(r, "$%s" % r) p = re.compile("(.*)\[(.*)\]") # DWORD PTR [esi+eax*1] matches = p.search(exp) if not matches: p = re.compile("(.*).s:(0x.*)") # 
DWORD PTR ds:0xdeadbeef matches = p.search(exp) if matches: mod = "w" if "BYTE" in matches.group(1): mod = "b" elif "QWORD" in matches.group(1): mod = "g" elif "DWORD" in matches.group(1): mod = "w" elif "WORD" in matches.group(1): mod = "h" out = self.execute_redirect("x/%sx %s" % (mod, matches.group(2))) if not out: return None else: return out.split(":\t")[-1].strip() else: out = self.execute_redirect("print %s" % exp) if not out: return None else: out = gdb.history(0).__str__() out = out.encode('ascii', 'ignore') out = decode_string_escape(out) return out.strip() def string_to_argv(self, str): """ Convert a string to argv list, pre-processing register and variable values Args: - str: input string (String) Returns: - argv list (List) """ try: str = str.encode('ascii', 'ignore') except: pass args = list(map(lambda x: decode_string_escape(x), shlex.split(str.decode()))) # need more processing here for idx, a in enumerate(args): a = a.strip(",") if a.startswith("$"): # try to get register/variable value v = self.parse_and_eval(a) if v != None and v != "void": if v.startswith("0x"): # int args[idx] = v.split()[0] # workaround for 0xdeadbeef <symbol+x> else: # string, complex data args[idx] = v elif a.startswith("+"): # relative value to prev arg adder = to_int(self.parse_and_eval(a[1:])) if adder is not None: args[idx] = "%s" % to_hex(to_int(args[idx-1]) + adder) elif is_math_exp(a): try: v = eval("%s" % a) # XXX hack to avoid builtin functions/types if not isinstance(v, six.string_types + six.integer_types): continue args[idx] = "%s" % (to_hex(v) if to_int(v) != None else v) except: pass if config.Option.get("verbose") == "on": msg(args) return args ################################ # GDB User-Defined Helpers # ################################ def save_user_command(self, cmd): """ Save user-defined command and deactivate it Args: - cmd: user-defined command (String) Returns: - True if success to save (Bool) """ commands = self.execute_redirect("show user %s" % cmd) if not commands: return False commands = "\n".join(commands.splitlines()[1:]) commands = "define %s\n" % cmd + commands + "end\n" self.SAVED_COMMANDS[cmd] = commands tmp = tmpfile() tmp.write("define %s\nend\n" % cmd) tmp.flush() result = self.execute("source %s" % tmp.name) tmp.close() return result def define_user_command(self, cmd, code): """ Define a user-defined command, overwrite the old content Args: - cmd: user-defined command (String) - code: gdb script code to append (String) Returns: - True if success to define (Bool) """ commands = "define %s\n" % cmd + code + "\nend\n" tmp = tmpfile(is_binary_file=False) tmp.write(commands) tmp.flush() result = self.execute("source %s" % tmp.name) tmp.close() return result def append_user_command(self, cmd, code): """ Append code to a user-defined command, define new command if not exist Args: - cmd: user-defined command (String) - code: gdb script code to append (String) Returns: - True if success to append (Bool) """ commands = self.execute_redirect("show user %s" % cmd) if not commands: return self.define_user_command(cmd, code) # else commands = "\n".join(commands.splitlines()[1:]) if code in commands: return True commands = "define %s\n" % cmd + commands + code + "\nend\n" tmp = tmpfile() tmp.write(commands) tmp.flush() result = self.execute("source %s" % tmp.name) tmp.close() return result def restore_user_command(self, cmd): """ Restore saved user-defined command Args: - cmd: user-defined command (String) Returns: - True if success to restore (Bool) """ if cmd == "all": 
commands = "\n".join(self.SAVED_COMMANDS.values()) self.SAVED_COMMANDS = {} else: if cmd not in self.SAVED_COMMANDS: return False else: commands = self.SAVED_COMMANDS[cmd] self.SAVED_COMMANDS.pop(cmd) tmp = tmpfile() tmp.write(commands) tmp.flush() result = self.execute("source %s" % tmp.name) tmp.close() return result def run_gdbscript_code(self, code): """ Run basic gdbscript code as it is typed in interactively Args: - code: gdbscript code, lines are splitted by "\n" or ";" (String) Returns: - True if success to run (Bool) """ tmp = tmpfile() tmp.write(code.replace(";", "\n")) tmp.flush() result = self.execute("source %s" % tmp.name) tmp.close() return result ######################### # Debugging Helpers # ######################### @memoized def is_target_remote(self): """ Check if current target is remote Returns: - True if target is remote (Bool) """ out = self.execute_redirect("info program") if out and "serial line" in out: # remote target return True return False @memoized def getfile(self): """ Get exec file of debugged program Returns: - full path to executable file (String) """ result = None out = self.execute_redirect('info files') if out and '"' in out: p = re.compile(".*exec file:\s*`(.*)'") m = p.search(out) if m: result = m.group(1) else: # stripped file, get symbol file p = re.compile("Symbols from \"([^\"]*)") m = p.search(out) if m: result = m.group(1) return result def get_status(self): """ Get execution status of debugged program Returns: - current status of program (String) STOPPED - not being run BREAKPOINT - breakpoint hit SIGXXX - stopped by signal XXX UNKNOWN - unknown, not implemented """ status = "UNKNOWN" out = self.execute_redirect("info program") for line in out.splitlines(): if line.startswith("It stopped"): if "signal" in line: # stopped by signal status = line.split("signal")[1].split(",")[0].strip() break if "breakpoint" in line: # breakpoint hit status = "BREAKPOINT" break if "not being run" in line: status = "STOPPED" break return status @memoized def getpid(self): """ Get PID of the debugged process Returns: - pid (Int) """ out = None status = self.get_status() if not status or status == "STOPPED": return None pid = gdb.selected_inferior().pid return int(pid) if pid else None def getos(self): """ Get running OS info Returns: - os version (String) """ # TODO: get remote os by calling uname() return os.uname()[0] @memoized def getarch(self): """ Get architecture of debugged program Returns: - tuple of architecture
name is not None and value is not None: named_values[name] = value return named_values def _load_line(self, line: str) -> Tuple[str, str]: """Attempts to split the name and value for dictionary loading. Args: line: a record in a .value file Returns: The name/value pair that will be loaded into a dictionary. """ name = None value = None try: name, value = line.split(self.delimiter) except ValueError: LOG.exception( f"Failed to load value file {self.file_path} " f"record containing {line}" ) return name, value class ListFile(DialogFile): pass class TemplateFile(DialogFile): pass class RegexFile(ResourceFile): def load(self): regex_patterns = [] if self.file_path: regex_patterns = [line for line in self._read()] return regex_patterns class WordFile(ResourceFile): """Defines a word file, which defines a word in the configured language.""" def load(self) -> Optional[str]: """Load and lines from a file and populate the variables. Returns: The word contained in the file """ word = None if self.file_path is not None: for line in self._read(): word = line break return word class SkillResources: def __init__(self, skill_directory, language, dialog_renderer=None, skill_id=None): self.skill_directory = skill_directory self.language = language self.skill_id = skill_id self.types = self._define_resource_types() self._dialog_renderer = dialog_renderer self.static = dict() @property def dialog_renderer(self): if not self._dialog_renderer: self._load_dialog_renderer() return self._dialog_renderer @dialog_renderer.setter def dialog_renderer(self, val): self._dialog_renderer = val def _load_dialog_renderer(self): base_dirs = locate_lang_directories(self.language, self.skill_directory, "dialog") for directory in base_dirs: if directory.exists(): dialog_dir = str(directory) self._dialog_renderer = load_dialogs(dialog_dir) return LOG.debug(f'No dialog loaded for {self.language}') def _define_resource_types(self) -> SkillResourceTypes: """Defines all known types of skill resource files. A resource file contains information the skill needs to function. Examples include dialog files to be spoken and vocab files for intent matching. """ resource_types = dict( dialog=ResourceType("dialog", ".dialog", self.language), entity=ResourceType("entity", ".entity", self.language), intent=ResourceType("intent", ".intent", self.language), list=ResourceType("list", ".list", self.language), named_value=ResourceType("named_value", ".value", self.language), regex=ResourceType("regex", ".rx", self.language), template=ResourceType("template", ".template", self.language), vocabulary=ResourceType("vocab", ".voc", self.language), word=ResourceType("word", ".word", self.language), qml=ResourceType("qml", ".qml") ) for resource_type in resource_types.values(): if self.skill_id: resource_type.locate_user_directory(self.skill_id) resource_type.locate_base_directory(self.skill_directory) return SkillResourceTypes(**resource_types) def load_dialog_file(self, name, data=None) -> List[str]: """Loads the contents of a dialog file into memory. Named variables in the dialog are populated with values found in the data dictionary. 
Args: name: name of the dialog file (no extension needed) data: keyword arguments used to populate variables Returns: A list of phrases with variables resolved """ dialog_file = DialogFile(self.types.dialog, name) dialog_file.data = data return dialog_file.load() def locate_qml_file(self, name): qml_file = QmlFile(self.types.qml, name) return qml_file.load() def load_list_file(self, name, data=None) -> List[str]: """Load a file containing a list of words or phrases Named variables in the dialog are populated with values found in the data dictionary. Args: name: name of the list file (no extension needed) data: keyword arguments used to populate variables Returns: List of words or phrases read from the list file. """ list_file = ListFile(self.types.list, name) list_file.data = data return list_file.load() def load_named_value_file(self, name, delimiter=None) -> dict: """Load file containing a set names and values. Loads a simple delimited file of name/value pairs. The name is the first item, the value is the second. Args: name: name of the .value file, no extension needed delimiter: delimiter character used Returns: File contents represented as a dictionary """ if name in self.static: named_values = self.static[name] else: named_value_file = NamedValueFile(self.types.named_value, name) if delimiter is not None: named_value_file.delimiter = delimiter named_values = named_value_file.load() self.static[name] = named_values return named_values def load_regex_file(self, name) -> List[str]: """Loads a file containing regular expression patterns. The regular expression patterns are generally used to find a value in a user utterance the skill needs to properly perform the requested function. Args: name: name of the regular expression file, no extension needed Returns: List representation of the regular expression file. """ regex_file = RegexFile(self.types.regex, name) return regex_file.load() def load_template_file(self, name, data=None) -> List[str]: """Loads the contents of a dialog file into memory. Named variables in the dialog are populated with values found in the data dictionary. Args: name: name of the dialog file (no extension needed) data: keyword arguments used to populate variables Returns: A list of phrases with variables resolved """ template_file = TemplateFile(self.types.template, name) template_file.data = data return template_file.load() def load_vocabulary_file(self, name) -> List[List[str]]: """Loads a file containing variations of words meaning the same thing. A vocabulary file defines words a skill uses for intent matching. It can also be used to match words in an utterance after intent intent matching is complete. Args: name: name of the regular expression file, no extension needed Returns: List representation of the regular expression file. """ vocabulary_file = VocabularyFile(self.types.vocabulary, name) return vocabulary_file.load() def load_word_file(self, name) -> Optional[str]: """Loads a file containing a word. Args: name: name of the regular expression file, no extension needed Returns: List representation of the regular expression file. """ word_file = WordFile(self.types.word, name) return word_file.load() def render_dialog(self, name, data=None) -> str: """Selects a record from a dialog file at random for TTS purposes. Args: name: name of the list file (no extension needed) data: keyword arguments used to populate variables Returns: Random record from the file with variables resolved. 
""" resource_file = DialogFile(self.types.dialog, name) resource_file.data = data return resource_file.render(self.dialog_renderer) def load_skill_vocabulary(self, alphanumeric_skill_id: str) -> dict: skill_vocabulary = {} base_directory = self.types.vocabulary.base_directory for directory, _, files in walk(base_directory): vocabulary_files = [ file_name for file_name in files if file_name.endswith(".voc") ] for file_name in vocabulary_files: vocab_type = alphanumeric_skill_id + file_name[:-4].title() vocabulary = self.load_vocabulary_file(file_name) if vocabulary: skill_vocabulary[vocab_type] = vocabulary return skill_vocabulary def load_skill_regex(self, alphanumeric_skill_id: str) -> List[str]: skill_regexes = [] base_directory = self.types.regex.base_directory for directory, _, files in walk(base_directory): regex_files = [ file_name for file_name in files if file_name.endswith(".rx") ] for file_name in regex_files: skill_regexes.extend(self.load_regex_file(file_name)) skill_regexes = self._make_unique_regex_group( skill_regexes, alphanumeric_skill_id ) return skill_regexes @staticmethod def _make_unique_regex_group( regexes: List[str], alphanumeric_skill_id: str ) -> List[str]: """Adds skill ID to group ID in a regular expression for uniqueness. Args: regexes: regex string alphanumeric_skill_id: skill identifier Returns: regular expressions with uniquely named group IDs Raises: re.error if the regex does not compile """ modified_regexes = [] for regex in regexes: base = "(?P<" + alphanumeric_skill_id modified_regex = base.join(regex.split("(?P<")) re.compile(modified_regex) modified_regexes.append(modified_regex) return modified_regexes class CoreResources(SkillResources): def __init__(self, language): directory = f"{dirname(dirname(__file__))}/res" super().__init__(directory, language) class UserResources(SkillResources): def __init__(self, language, skill_id): directory = f"{get_xdg_data_save_path()}/resources/{skill_id}" super().__init__(directory, language) # TODO move this class to ovos_utils/workshop to # allow it to be used by skills with mycroft-core dev branch class RegexExtractor: """Extracts data from an utterance using regular expressions. Attributes: group_name: regex_patterns: regular expressions read from a .rx file """ def __init__(self, group_name, regex_patterns): self.group_name = group_name self.regex_patterns = regex_patterns def extract(self, utterance) -> Optional[str]: """Attempt to find a value in a user request. Args: utterance: request spoken by the user Returns: The value extracted from the utterance, if found """ extract = None pattern_match = self._match_utterance_to_patterns(utterance) if pattern_match is not None: extract = self._extract_group_from_match(pattern_match) self._log_extraction_result(extract) return extract def _match_utterance_to_patterns(self, utterance: str): """Match regular expressions to user request. Args: utterance: request spoken by the user Returns: a regular expression match object if a match is found """ pattern_match = None for pattern in self.regex_patterns: pattern_match = re.search(pattern, utterance) if pattern_match: break return pattern_match def _extract_group_from_match(self, pattern_match): """Extract the alarm name from the utterance. 
Args: pattern_match: a regular expression match object """ extract = None try: extract = pattern_match.group(self.group_name).strip() except IndexError: pass else: if not extract: extract = None return extract def _log_extraction_result(self, extract: str): """Log the results of the matching. Args: extract: the value extracted from the user utterance """ if extract is None: LOG.info(f"No {self.group_name.lower()} extracted from utterance") else: LOG.info(f"{self.group_name} extracted from utterance: " + extract) def find_resource(res_name, root_dir, res_dirname, lang=None): """Find a resource file. Searches for the given filename using this scheme: 1. Search the resource lang directory: <skill>/<res_dirname>/<lang>/<res_name> 2. Search the resource directory: <skill>/<res_dirname>/<res_name> 3. Search the locale lang directory or other subdirectory: <skill>/locale/<lang>/<res_name> or <skill>/locale/<lang>/.../<res_name> Args: res_name (string): The resource name to be found root_dir (string): A skill root directory res_dirname (string): A skill sub directory lang (string): language folder to be used Returns: string: The full path to the resource file or None if not
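# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal example of the RegexExtractor defined above. The pattern and the
# utterance are made-up assumptions for illustration; real patterns would
# normally be loaded with SkillResources.load_regex_file().
example_patterns = [r"(?:named|called) (?P<AlarmName>.*)"]
name_extractor = RegexExtractor("AlarmName", example_patterns)
extracted_name = name_extractor.extract("set an alarm named wake up")
# extracted_name == "wake up"; extract() returns None when nothing matches.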
import numpy as np from .. import util from ..element import Element from ..ndmapping import NdMapping, item_check, sorted_context from .dictionary import DictInterface from .interface import Interface, DataError class MultiInterface(Interface): """ MultiInterface allows wrapping around a list of tabular datasets including dataframes, the columnar dictionary format or 2D tabular NumPy arrays. Using the split method the list of tabular data can be split into individual datasets. The interface makes the data appear a list of tabular datasets as a single dataset. The interface may be used to represent geometries so the behavior depends on the type of geometry being represented. """ types = () datatype = 'multitabular' subtypes = ['dictionary', 'dataframe', 'array', 'dask'] geom_types = ['Polygon', 'Ring', 'Line', 'Point'] multi = True @classmethod def init(cls, eltype, data, kdims, vdims): from ...element import Polygons, Path new_data = [] dims = {'kdims': eltype.kdims, 'vdims': eltype.vdims} if kdims is not None: dims['kdims'] = kdims if vdims is not None: dims['vdims'] = vdims if (isinstance(data, list) and len(data) and all(isinstance(d, tuple) and all(util.isscalar(v) for v in d) for d in data)): data = [data] elif not isinstance(data, list): interface = [Interface.interfaces.get(st).applies(data) for st in cls.subtypes if st in Interface.interfaces] if (interface or isinstance(data, tuple)) and issubclass(eltype, Path): data = [data] else: raise ValueError('MultiInterface data must be a list of tabular data types.') prev_interface, prev_dims = None, None for d in data: datatype = cls.subtypes if isinstance(d, dict): if Polygons._hole_key in d: datatype = [dt for dt in datatype if hasattr(Interface.interfaces.get(dt), 'has_holes')] geom_type = d.get('geom_type') if geom_type is not None and geom_type not in cls.geom_types: raise DataError("Geometry type '%s' not recognized, " "must be one of %s." 
% (geom_type, cls.geom_types)) else: datatype = [dt for dt in datatype if hasattr(Interface.interfaces.get(dt), 'geom_type')] d, interface, dims, _ = Interface.initialize(eltype, d, kdims, vdims, datatype=datatype) if prev_interface: if prev_interface != interface: raise DataError('MultiInterface subpaths must all have matching datatype.', cls) if dims['kdims'] != prev_dims['kdims']: raise DataError('MultiInterface subpaths must all have matching kdims.', cls) if dims['vdims'] != prev_dims['vdims']: raise DataError('MultiInterface subpaths must all have matching vdims.', cls) new_data.append(d) prev_interface, prev_dims = interface, dims return new_data, dims, {} @classmethod def validate(cls, dataset, vdims=True): if not dataset.data: return from holoviews.element import Polygons ds = cls._inner_dataset_template(dataset, validate_vdims=vdims) for d in dataset.data: ds.data = d ds.interface.validate(ds, vdims) if isinstance(dataset, Polygons) and ds.interface is DictInterface: holes = ds.interface.holes(ds) if not isinstance(holes, list): raise DataError('Polygons holes must be declared as a list-of-lists.', cls) subholes = holes[0] coords = ds.data[ds.kdims[0].name] splits = np.isnan(coords.astype('float')).sum() if len(subholes) != (splits+1): raise DataError('Polygons with holes containing multi-geometries ' 'must declare a list of holes for each geometry.', cls) @classmethod def geom_type(cls, dataset): from holoviews.element import Polygons, Path, Points if isinstance(dataset, type): eltype = dataset else: eltype = type(dataset) if isinstance(dataset.data, list): ds = cls._inner_dataset_template(dataset) if hasattr(ds.interface, 'geom_type'): geom_type = ds.interface.geom_type(ds) if geom_type is not None: return geom_type if issubclass(eltype, Polygons): return 'Polygon' elif issubclass(eltype, Path): return 'Line' elif issubclass(eltype, Points): return 'Point' @classmethod def _inner_dataset_template(cls, dataset, validate_vdims=True): """ Returns a Dataset template used as a wrapper around the data contained within the multi-interface dataset. """ from . 
import Dataset vdims = dataset.vdims if getattr(dataset, 'level', None) is None else [] return Dataset(dataset.data[0], datatype=cls.subtypes, kdims=dataset.kdims, vdims=vdims, _validate_vdims=validate_vdims) @classmethod def assign(cls, dataset, new_data): ds = cls._inner_dataset_template(dataset) assigned = [] for i, d in enumerate(dataset.data): ds.data = d new = ds.interface.assign(ds, {k: v[i:i+1] for k, v in new_data.items()}) assigned.append(new) return assigned @classmethod def dimension_type(cls, dataset, dim): if not dataset.data: # Note: Required to make empty datasets work at all (should fix) # Other interfaces declare equivalent of empty array # which defaults to float type return float ds = cls._inner_dataset_template(dataset) return ds.interface.dimension_type(ds, dim) @classmethod def range(cls, dataset, dim): if not dataset.data: return (None, None) ranges = [] ds = cls._inner_dataset_template(dataset) # Backward compatibility for Contours/Polygons level level = getattr(dataset, 'level', None) dim = dataset.get_dimension(dim) if level is not None and dim is dataset.vdims[0]: return (level, level) for d in dataset.data: ds.data = d ranges.append(ds.interface.range(ds, dim)) return util.max_range(ranges) @classmethod def has_holes(cls, dataset): if not dataset.data: return False ds = cls._inner_dataset_template(dataset) for d in dataset.data: ds.data = d if ds.interface.has_holes(ds): return True return False @classmethod def holes(cls, dataset): holes = [] if not dataset.data: return holes ds = cls._inner_dataset_template(dataset) for d in dataset.data: ds.data = d holes += ds.interface.holes(ds) return holes @classmethod def isscalar(cls, dataset, dim, per_geom=False): """ Tests if dimension is scalar in each subpath. """ if not dataset.data: return True geom_type = cls.geom_type(dataset) ds = cls._inner_dataset_template(dataset) combined = [] for d in dataset.data: ds.data = d values = ds.interface.values(ds, dim, expanded=False) unique = list(util.unique_iterator(values)) if len(unique) > 1: return False elif per_geom and geom_type != 'Point': continue unique = unique[0] if unique not in combined: if combined: return False combined.append(unique) return True @classmethod def select(cls, dataset, selection_mask=None, **selection): """ Applies selectiong on all the subpaths. """ from ...element import Polygons if not dataset.data: return dataset.data elif selection_mask is not None: return [d for b, d in zip(selection_mask, dataset.data) if b] ds = cls._inner_dataset_template(dataset) skipped = (Polygons._hole_key,) if hasattr(ds.interface, 'geo_column'): skipped += (ds.interface.geo_column(ds),) data = [] for d in dataset.data: ds.data = d selection_mask = ds.interface.select_mask(ds, selection) sel = ds.interface.select(ds, selection_mask) is_dict = isinstance(sel, dict) if ((not len(sel) and not is_dict) or (is_dict and any(False if util.isscalar(v) else len(v) == 0 for k, v in sel.items() if k not in skipped))): continue data.append(sel) return data @classmethod def select_paths(cls, dataset, index): """ Allows selecting paths with usual NumPy slicing index. 
""" selection = np.array([{0: p} for p in dataset.data])[index] if isinstance(selection, dict): return [selection[0]] return [s[0] for s in selection] @classmethod def aggregate(cls, dataset, dimensions, function, **kwargs): raise NotImplementedError('Aggregation currently not implemented') @classmethod def groupby(cls, dataset, dimensions, container_type, group_type, **kwargs): # Get dimensions information dimensions = [dataset.get_dimension(d) for d in dimensions] kdims = [kdim for kdim in dataset.kdims if kdim not in dimensions] # Update the kwargs appropriately for Element group types group_kwargs = {} group_type = list if group_type == 'raw' else group_type if issubclass(group_type, Element): group_kwargs.update(util.get_param_values(dataset)) group_kwargs['kdims'] = kdims group_kwargs.update(kwargs) # Find all the keys along supplied dimensions values = [] for d in dimensions: if not cls.isscalar(dataset, d, True): raise ValueError('MultiInterface can only apply groupby ' 'on scalar dimensions, %s dimension ' 'is not scalar' % d) vals = cls.values(dataset, d, False, True) values.append(vals) values = tuple(values) # Iterate over the unique entries applying selection masks from . import Dataset ds = Dataset(values, dimensions) keys = (tuple(vals[i] for vals in values) for i in range(len(vals))) grouped_data = [] for unique_key in util.unique_iterator(keys): mask = ds.interface.select_mask(ds, dict(zip(dimensions, unique_key))) selection = [data for data, m in zip(dataset.data, mask) if m] group_data = group_type(selection, **group_kwargs) grouped_data.append((unique_key, group_data)) if issubclass(container_type, NdMapping): with item_check(False), sorted_context(False): return container_type(grouped_data, kdims=dimensions) else: return container_type(grouped_data) @classmethod def sample(cls, dataset, samples=[]): raise NotImplementedError('Sampling operation on subpaths not supported') @classmethod def shape(cls, dataset): """ Returns the shape of all subpaths, making it appear like a single array of concatenated subpaths separated by NaN values. """ if not dataset.data: return (0, len(dataset.dimensions())) elif cls.geom_type(dataset) != 'Point': return (len(dataset.data), len(dataset.dimensions())) rows, cols = 0, 0 ds = cls._inner_dataset_template(dataset) for d in dataset.data: ds.data = d r, cols = ds.interface.shape(ds) rows += r return rows, cols @classmethod def length(cls, dataset): """ Returns the length of the multi-tabular dataset making it appear like a single array of concatenated subpaths separated by NaN values. """ if not dataset.data: return 0 elif cls.geom_type(dataset) != 'Point': return len(dataset.data) length = 0 ds = cls._inner_dataset_template(dataset) for d in dataset.data: ds.data = d length += ds.interface.length(ds) return length @classmethod def dtype(cls, dataset, dimension): if not dataset.data: return np.dtype('float') ds = cls._inner_dataset_template(dataset) return ds.interface.dtype(ds, dimension) @classmethod def sort(cls, dataset, by=[], reverse=False): by = [dataset.get_dimension(d).name for d in by] if len(by) == 1: sorting = cls.values(dataset, by[0], False).argsort() else: arrays = [dataset.dimension_values(d, False) for d in by] sorting = util.arglexsort(arrays) return [dataset.data[s]
Cross-Lingual Transfer and Beyond}, author={<NAME> and <NAME>,}, journal={arXiv:1812.10464v2}, year={2018} }""" ), "bucc18": textwrap.dedent(""""""), "udpos": textwrap.dedent(""""""), "SQuAD": textwrap.dedent( """\ @article{2016arXiv160605250R, author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev}, Konstantin and {Liang}, Percy}, title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}", journal = {arXiv e-prints}, year = 2016, eid = {arXiv:1606.05250}, pages = {arXiv:1606.05250}, archivePrefix = {arXiv}, eprint = {1606.05250}, }""" ), "PAN-X": textwrap.dedent( """\ @article{pan-x, title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond}, author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>}, volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers} year={2017} }""" ), } _TEXT_FEATURES = { "XNLI": {"language": "language", "sentence1": "sentence1", "sentence2": "sentence2"}, "tydiqa": {"id": "id", "title": "title", "context": "context", "question": "question", "answers": "answers"}, "XQuAD": {"id": "id", "context": "context", "question": "question", "answers": "answers"}, "MLQA": {"id": "id", "title": "title", "context": "context", "question": "question", "answers": "answers"}, "tatoeba": {"source_sentence": "", "target_sentence": "", "source_lang": "", "target_lang": ""}, "bucc18": {"source_sentence": "", "target_sentence": "", "source_lang": "", "target_lang": ""}, "PAWS-X": {"sentence1": "sentence1", "sentence2": "sentence2"}, "udpos": {"word": "", "pos_tag": ""}, "SQuAD": {"id": "id", "title": "title", "context": "context", "question": "question", "answers": "answers"}, "PAN-X": {"word": "", "ner_tag": "", "lang": ""}, } _DATA_URLS = { "tydiqa": "https://storage.googleapis.com/tydiqa/", "XNLI": "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip", "XQuAD": "https://github.com/deepmind/xquad/raw/master/", "MLQA": "https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip", "PAWS-X": "https://storage.googleapis.com/paws/pawsx/x-final.tar.gz", "bucc18": "https://comparable.limsi.fr/bucc2018/", "tatoeba": "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1", "udpos": "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz", "SQuAD": "https://rajpurkar.github.io/SQuAD-explorer/dataset/", "PAN-X": "", } _URLS = { "tydiqa": "https://github.com/google-research-datasets/tydiqa", "XQuAD": "https://github.com/deepmind/xquad", "XNLI": "https://www.nyu.edu/projects/bowman/xnli/", "MLQA": "https://github.com/facebookresearch/MLQA", "PAWS-X": "https://github.com/google-research-datasets/paws/tree/master/pawsx", "bucc18": "https://comparable.limsi.fr/bucc2018/", "tatoeba": "https://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md", "udpos": "https://universaldependencies.org/", "SQuAD": "https://rajpurkar.github.io/SQuAD-explorer/", "PAN-X": "", } class XtremeConfig(nlp.BuilderConfig): """BuilderConfig for Break""" def __init__(self, data_url, citation, url, text_features, **kwargs): """ Args: text_features: `dict[string, string]`, map from the name of the feature dict for each text field to the name of the column in the tsv file label_column: label_classes **kwargs: keyword arguments forwarded to super. 
""" super(XtremeConfig, self).__init__( version=nlp.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs ) self.text_features = text_features self.data_url = data_url self.citation = citation self.url = url class Xtreme(nlp.GeneratorBasedBuilder): """TODO(xtreme): Short description of my dataset.""" # TODO(xtreme): Set up version. VERSION = nlp.Version("0.1.0") BUILDER_CONFIGS = [ XtremeConfig( name=name, description=_DESCRIPTIONS[name.split(".")[0]], citation=_CITATIONS[name.split(".")[0]], text_features=_TEXT_FEATURES[name.split(".")[0]], data_url=_DATA_URLS[name.split(".")[0]], url=_URLS[name.split(".")[0]], ) for name in _NAMES ] @property def manual_download_instructions(self): if self.config.name.startswith("PAN-X"): return """\ You need to manually download the AmazonPhotos.zip file on Amazon Cloud Drive (https://www.amazon.com/clouddrive/share/d3KGCRCIYwhKJF0H3eWA26hjg2ZCRhjpEQtDL70FSBN). The folder containing the saved file can be used to load the dataset via `nlp.load_dataset("xtreme", data_dir="<path/to/folder>"). """ return None def _info(self): # TODO(xtreme): Specifies the nlp.DatasetInfo object features = {text_feature: nlp.Value("string") for text_feature in six.iterkeys(self.config.text_features)} if "answers" in features.keys(): features["answers"] = nlp.features.Sequence( {"answer_start": nlp.Value("int32"), "text": nlp.Value("string")} ) if self.config.name.startswith("PAWS-X"): features["label"] = nlp.Value("string") if self.config.name == "XNLI": features["gold_label"] = nlp.Value("string") return nlp.DatasetInfo( # This is the description that will appear on the datasets page. description=self.config.description + "\n" + _DESCRIPTION, # nlp.features.FeatureConnectors features=nlp.Features( features # These are the features of your dataset like images, labels ... ), # If there's a common (input, target) tuple from the features, # specify them here. They'll be used if as_supervised=True in # builder.as_dataset. 
supervised_keys=None, # Homepage of the dataset for documentation homepage="https://github.com/google-research/xtreme" + "\t" + self.config.url, citation=self.config.citation + "\n" + _CITATION, ) def _split_generators(self, dl_manager): """Returns SplitGenerators.""" # TODO(xtreme): Downloads the data and defines the splits # dl_manager is a nlp.download.DownloadManager that can be used to # download and extract URLs if self.config.name == "tydiqa": train_url = "v1.1/tydiqa-goldp-v1.1-train.json" dev_url = "v1.1/tydiqa-goldp-v1.1-dev.json" urls_to_download = { "train": os.path.join(self.config.data_url, train_url), "dev": os.path.join(self.config.data_url, dev_url), } dl_dir = dl_manager.download_and_extract(urls_to_download) return [ nlp.SplitGenerator( name=nlp.Split.TRAIN, # These kwargs will be passed to _generate_examples gen_kwargs={"filepath": dl_dir["train"]}, ), nlp.SplitGenerator( name=nlp.Split.VALIDATION, # These kwargs will be passed to _generate_examples gen_kwargs={"filepath": dl_dir["dev"]}, ), ] if self.config.name == "XNLI": dl_dir = dl_manager.download_and_extract(self.config.data_url) data_dir = os.path.join(dl_dir, "XNLI-1.0") return [ nlp.SplitGenerator( name=nlp.Split.TEST, gen_kwargs={"filepath": os.path.join(data_dir, "xnli.test.tsv")} ), nlp.SplitGenerator( name=nlp.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(data_dir, "xnli.dev.tsv")} ), ] if self.config.name.startswith("MLQA"): mlqa_downloaded_files = dl_manager.download_and_extract(self.config.data_url) l1 = self.config.name.split(".")[1] l2 = self.config.name.split(".")[2] return [ nlp.SplitGenerator( name=nlp.Split.TEST, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": os.path.join( os.path.join(mlqa_downloaded_files, "MLQA_V1/test"), "test-context-{}-question-{}.json".format(l1, l2), ) }, ), nlp.SplitGenerator( name=nlp.Split.VALIDATION, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": os.path.join( os.path.join(mlqa_downloaded_files, "MLQA_V1/dev"), "dev-context-{}-question-{}.json".format(l1, l2), ) }, ), ] if self.config.name.startswith("XQuAD"): lang = self.config.name.split(".")[1] xquad_downloaded_file = dl_manager.download_and_extract( os.path.join(self.config.data_url, "xquad.{}.json".format(lang)) ) return [ nlp.SplitGenerator( name=nlp.Split.VALIDATION, # These kwargs will be passed to _generate_examples gen_kwargs={"filepath": xquad_downloaded_file}, ), ] if self.config.name.startswith("PAWS-X"): lang = self.config.name.split(".")[1] paws_x_dir = dl_manager.download_and_extract(self.config.data_url) data_dir = os.path.join(paws_x_dir, "x-final", lang) return [ nlp.SplitGenerator( name=nlp.Split.VALIDATION, # These kwargs will be passed to _generate_examples gen_kwargs={"filepath": os.path.join(data_dir, "dev_2k.tsv")}, ), nlp.SplitGenerator( name=nlp.Split.TEST, # These kwargs will be passed to _generate_examples gen_kwargs={"filepath": os.path.join(data_dir, "test_2k.tsv")}, ), nlp.SplitGenerator( name=nlp.Split.TRAIN, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": os.path.join(data_dir, "translated_train.tsv") if lang != "en" else os.path.join(data_dir, "train.tsv") }, ), ] elif self.config.name.startswith("tatoeba"): lang = self.config.name.split(".")[1] tatoeba_source_data = dl_manager.download_and_extract( os.path.join(self.config.data_url, "tatoeba.{}-eng.{}".format(lang, lang)) ) tatoeba_eng_data = dl_manager.download_and_extract( os.path.join(self.config.data_url, 
"tatoeba.{}-eng.eng".format(lang)) ) return [ nlp.SplitGenerator( name=nlp.Split.VALIDATION, # These kwargs will be passed to _generate_examples gen_kwargs={"filepath": (tatoeba_source_data, tatoeba_eng_data)}, ), ] if self.config.name.startswith("bucc18"): lang = self.config.name.split(".")[1] bucc18_dl_test_dir = dl_manager.download_and_extract( os.path.join(self.config.data_url, "bucc2018-{}-en.training-gold.tar.bz2".format(lang)) ) bucc18_dl_dev_dir = dl_manager.download_and_extract( os.path.join(self.config.data_url, "bucc2018-{}-en.sample-gold.tar.bz2".format(lang)) ) return [ nlp.SplitGenerator( name=nlp.Split.VALIDATION, # These kwargs will be passed to _generate_examples gen_kwargs={"filepath": os.path.join(bucc18_dl_dev_dir, "bucc2018", lang + "-en")}, ), nlp.SplitGenerator( name=nlp.Split.TEST, # These kwargs will be passed to _generate_examples gen_kwargs={"filepath": os.path.join(bucc18_dl_test_dir, "bucc2018", lang + "-en")}, ), ] if self.config.name.startswith("udpos"): udpos_downloaded_files = dl_manager.download_and_extract(self.config.data_url) data_dir = os.path.join(udpos_downloaded_files, "ud-treebanks-v2.5") lang = self.config.name.split(".")[1] data_dir = os.path.join(data_dir, "*_" + lang + "*") folders = sorted(glob.glob(data_dir)) if lang == "Kazakh": return [ nlp.SplitGenerator( name=nlp.Split.TEST, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": [ os.path.join(folder, file) for folder in folders for file in sorted(os.listdir(folder)) if "test" in file and file.endswith(".conllu") ] }, ), nlp.SplitGenerator( name=nlp.Split.TRAIN, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": [ os.path.join(folder, file) for folder in folders for file in sorted(os.listdir(folder)) if "train" in file and file.endswith(".conllu") ] }, ), ] elif lang == "Tagalog" or lang == "Thai" or lang == "Yoruba": return [ nlp.SplitGenerator( name=nlp.Split.TEST, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": [ os.path.join(folder, file) for folder in folders for file in sorted(os.listdir(folder)) if "test" in file and file.endswith(".conllu") ] }, ) ] else: return [ nlp.SplitGenerator( name=nlp.Split.VALIDATION, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": [ os.path.join(folder, file) for folder in folders for file in sorted(os.listdir(folder)) if "NYUAD" not in folder and "dev" in file and file.endswith(".conllu") ] # we exclude Arabic NYUAD which deos not contains any word, only _ }, ), nlp.SplitGenerator( name=nlp.Split.TEST, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": [ os.path.join(folder, file) for folder in folders for file in sorted(os.listdir(folder)) if "NYUAD" not in folder and "test" in file and file.endswith(".conllu") ] }, ), nlp.SplitGenerator( name=nlp.Split.TRAIN, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": [ os.path.join(folder, file) for folder in folders for file in sorted(os.listdir(folder)) if "NYUAD" not in folder and "train" in file and file.endswith(".conllu") ] }, ), ] if self.config.name == "SQuAD": urls_to_download = { "train": os.path.join(self.config.data_url, "train-v1.1.json"), "dev": os.path.join(self.config.data_url, "dev-v1.1.json"), } downloaded_files = dl_manager.download_and_extract(urls_to_download) return [ nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}), nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={"filepath": 
downloaded_files["dev"]}), ] if self.config.name.startswith("PAN-X"): path_to_manual_folder = os.path.abspath(os.path.expanduser(dl_manager.manual_dir)) panx_path = os.path.join(path_to_manual_folder, _PAN_X_FOLDER) if not os.path.exists(panx_path): raise FileNotFoundError( "{} does not exist. Make sure you insert a manual dir via `nlp.load_dataset('xtreme', data_dir=...)` that includes {}. Manual download instructions: {}".format( panx_path, _PAN_X_FOLDER, self.manual_download_instructions ) ) panx_dl_dir = dl_manager.extract(panx_path) lang = self.config.name.split(".")[1] lang_folder = dl_manager.extract(os.path.join(panx_dl_dir, lang + ".tar.gz")) return [ nlp.SplitGenerator( name=nlp.Split.VALIDATION, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": os.path.join(lang_folder, "dev") # we exclude Arabic NYUAD which deos not contains any word, only _ }, ), nlp.SplitGenerator( name=nlp.Split.TEST, # These kwargs will be passed to _generate_examples gen_kwargs={"filepath": os.path.join(lang_folder, "test")}, ), nlp.SplitGenerator( name=nlp.Split.TRAIN, # These kwargs will be passed to _generate_examples gen_kwargs={"filepath": os.path.join(lang_folder, "train")}, ), ] def _generate_examples(self, filepath): """Yields examples.""" # TODO(xtreme): Yields (key, example) tuples from the dataset if self.config.name == "tydiqa" or self.config.name.startswith("MLQA") or self.config.name == "SQuAD": with open(filepath) as f: data = json.load(f) for article in data["data"]: title =
<gh_stars>0 # -*- coding: utf-8 -*- """ (c) 2014-2016 - Copyright Red Hat Inc Authors: <NAME> <<EMAIL>> """ # pylint: disable=too-few-public-methods # pylint: disable=no-init # pylint: disable=super-on-old-class from __future__ import unicode_literals, absolute_import import datetime import re import flask import flask_wtf as wtf try: from flask_wtf import FlaskForm except ImportError: from flask_wtf import Form as FlaskForm import six import wtforms import pagure.lib.query import pagure.validators from pagure.config import config as pagure_config from pagure.utils import urlpattern, is_admin STRICT_REGEX = "^[a-zA-Z0-9-_]+$" # This regex is used when creating tags, there we do not want to allow ',' # as otherwise it breaks the UI. TAGS_REGEX = "^[a-zA-Z0-9][a-zA-Z0-9-_ .:]+$" TAGS_REGEX_RE = re.compile(TAGS_REGEX) # In the issue page tags are sent as a comma-separated list, so in order to # allow having multiple tags in an issue, we need to allow ',' in them. TAGS_REGEX_MULTI = "^[a-zA-Z0-9][a-zA-Z0-9-_, .:]+$" FALSE_VALUES = ("false", "", False, "False", 0, "0") WTF_VERSION = tuple() if hasattr(wtf, "__version__"): WTF_VERSION = tuple(int(v) for v in wtf.__version__.split(".")) class PagureForm(FlaskForm): """ Local form allowing us to form set the time limit. """ def __init__(self, *args, **kwargs): delta = pagure_config.get("WTF_CSRF_TIME_LIMIT", 3600) if delta and WTF_VERSION < (0, 10, 0): self.TIME_LIMIT = datetime.timedelta(seconds=delta) else: self.TIME_LIMIT = delta if "csrf_enabled" in kwargs and kwargs["csrf_enabled"] is False: kwargs["meta"] = {"csrf": False} if WTF_VERSION >= (0, 14, 0): kwargs.pop("csrf_enabled") super(PagureForm, self).__init__(*args, **kwargs) def convert_value(val): """ Convert the provided values to strings when possible. """ if val: if not isinstance(val, (list, tuple, six.text_type)): return val.decode("utf-8") elif isinstance(val, six.string_types): return val class MultipleEmail(wtforms.validators.Email): """Split the value by comma and run them through the email validator of wtforms. 
""" def __call__(self, form, field): message = field.gettext("One or more invalid email address.") for data in field.data.split(","): data = data.strip() if not self.regex.match(data or ""): raise wtforms.validators.ValidationError(message) def user_namespace_if_private(form, field): """Check if the data in the field is the same as in the password field.""" if form.private.data: field.data = flask.g.fas_user.username def file_virus_validator(form, field): """Checks for virus in the file from flask request object, raises wtf.ValidationError if virus is found else None.""" if not pagure_config["VIRUS_SCAN_ATTACHMENTS"]: return from pyclamd import ClamdUnixSocket if ( field.name not in flask.request.files or flask.request.files[field.name].filename == "" ): # If no file was uploaded, this field is correct return uploaded = flask.request.files[field.name] clam = ClamdUnixSocket() if not clam.ping(): raise wtforms.ValidationError( "Unable to communicate with virus scanner" ) results = clam.scan_stream(uploaded.stream.read()) if results is None: uploaded.stream.seek(0) return else: result = results.values() res_type, res_msg = result if res_type == "FOUND": raise wtforms.ValidationError("Virus found: %s" % res_msg) else: raise wtforms.ValidationError("Error scanning uploaded file") def ssh_key_validator(form, field): """ Form for ssh key validation """ if not pagure.lib.query.are_valid_ssh_keys(field.data): raise wtforms.ValidationError("Invalid SSH keys") class ProjectFormSimplified(PagureForm): """ Form to edit the description of a project. """ description = wtforms.StringField( "Description", [wtforms.validators.DataRequired()], ) url = wtforms.StringField( "URL", [ wtforms.validators.optional(), wtforms.validators.Regexp(urlpattern, flags=re.IGNORECASE), ], ) avatar_email = wtforms.StringField( "Avatar email", [ pagure.validators.EmailValidator("avatar_email must be an email"), wtforms.validators.optional(), ], ) tags = wtforms.StringField( "Project tags", [wtforms.validators.optional(), wtforms.validators.Length(max=255)], ) private = wtforms.BooleanField( "Private", [wtforms.validators.Optional()], false_values=FALSE_VALUES ) mirrored_from = wtforms.StringField( "Mirrored from", [wtforms.validators.optional(), wtforms.validators.Length(max=255)], ) class ProjectForm(ProjectFormSimplified): """ Form to create or edit project. """ name = wtforms.StringField("Project name") mirrored_from = wtforms.StringField( "Mirror from URL", [ wtforms.validators.optional(), wtforms.validators.Regexp(urlpattern, flags=re.IGNORECASE), ], ) create_readme = wtforms.BooleanField( "Create README", [wtforms.validators.optional()], false_values=FALSE_VALUES, ) namespace = wtforms.SelectField( "Project Namespace", [user_namespace_if_private, wtforms.validators.optional()], choices=[], coerce=convert_value, ) ignore_existing_repos = wtforms.BooleanField( "Ignore existing repositories", [wtforms.validators.optional()], false_values=FALSE_VALUES, ) repospanner_region = wtforms.SelectField( "repoSpanner Region", [wtforms.validators.optional()], choices=( [("none", "Disabled")] + [ (region, region) for region in pagure_config["REPOSPANNER_REGIONS"].keys() ] ), coerce=convert_value, default=pagure_config["REPOSPANNER_NEW_REPO"], ) default_branch = wtforms.StringField( "Default branch", [wtforms.validators.optional()], ) def __init__(self, *args, **kwargs): """Calls the default constructor with the normal argument but uses the list of collection provided to fill the choices of the drop-down list. 
""" super(ProjectForm, self).__init__(*args, **kwargs) # set the name validator regex = pagure_config.get( "PROJECT_NAME_REGEX", "^[a-zA-z0-9_][a-zA-Z0-9-_.+]*$" ) self.name.validators = [ wtforms.validators.DataRequired(), wtforms.validators.Regexp(regex, flags=re.IGNORECASE), ] # Set the list of namespace if "namespaces" in kwargs: self.namespace.choices = [ (namespace, namespace) for namespace in kwargs["namespaces"] ] if not pagure_config.get("USER_NAMESPACE", False): self.namespace.choices.insert(0, ("", "")) if not ( is_admin() and pagure_config.get("ALLOW_ADMIN_IGNORE_EXISTING_REPOS") ) and ( flask.g.fas_user.username not in pagure_config["USERS_IGNORE_EXISTING_REPOS"] ): self.ignore_existing_repos = None if not ( is_admin() and pagure_config.get("REPOSPANNER_NEW_REPO_ADMIN_OVERRIDE") ): self.repospanner_region = None class IssueFormSimplied(PagureForm): """ Form to create or edit an issue. """ title = wtforms.StringField( "Title", [wtforms.validators.DataRequired()], ) issue_content = wtforms.TextAreaField( "Content", [wtforms.validators.DataRequired()], ) private = wtforms.BooleanField( "Private", [wtforms.validators.optional()], false_values=FALSE_VALUES ) milestone = wtforms.SelectField( "Milestone", [wtforms.validators.Optional()], choices=[], coerce=convert_value, ) priority = wtforms.SelectField( "Priority", [wtforms.validators.Optional()], choices=[], coerce=convert_value, ) def __init__(self, *args, **kwargs): """Calls the default constructor with the normal argument but uses the list of collection provided to fill the choices of the drop-down list. """ super(IssueFormSimplied, self).__init__(*args, **kwargs) self.priority.choices = [] if "priorities" in kwargs: for key in sorted(kwargs["priorities"]): self.priority.choices.append((key, kwargs["priorities"][key])) self.milestone.choices = [] if "milestones" in kwargs and kwargs["milestones"]: for key in kwargs["milestones"]: self.milestone.choices.append((key, key)) self.milestone.choices.insert(0, ("", "")) class IssueForm(IssueFormSimplied): """ Form to create or edit an issue. """ status = wtforms.SelectField( "Status", [wtforms.validators.DataRequired()], choices=[] ) def __init__(self, *args, **kwargs): """Calls the default constructor with the normal argument but uses the list of collection provided to fill the choices of the drop-down list. """ super(IssueForm, self).__init__(*args, **kwargs) if "status" in kwargs: self.status.choices = [ (status, status) for status in kwargs["status"] ] class RequestPullForm(PagureForm): """ Form to create a pull request. """ title = wtforms.StringField( "Title", [wtforms.validators.DataRequired()], ) initial_comment = wtforms.TextAreaField( "Initial Comment", [wtforms.validators.Optional()] ) allow_rebase = wtforms.BooleanField( "Allow rebasing", [wtforms.validators.Optional()], false_values=FALSE_VALUES, ) class RequestPullEditForm(RequestPullForm): """ Form to edit a pull request. """ branch_to = wtforms.SelectField( "Target branch", [wtforms.validators.Required()], choices=[], coerce=convert_value, ) def __init__(self, *args, **kwargs): """Calls the default constructor with the normal argument but uses the list of collection provided to fill the choices of the drop-down list. """ super(RequestPullEditForm, self).__init__(*args, **kwargs) if "branches" in kwargs: self.branch_to.choices = [ (branch, branch) for branch in kwargs["branches"] ] class RemoteRequestPullForm(RequestPullForm): """ Form to create a remote pull request. 
""" git_repo = wtforms.StringField( "Git repo address", [ wtforms.validators.DataRequired(), wtforms.validators.Regexp(urlpattern, flags=re.IGNORECASE), ], ) branch_from = wtforms.StringField( "Git branch", [wtforms.validators.DataRequired()], ) branch_to = wtforms.StringField( "Git branch to merge in", [wtforms.validators.DataRequired()], ) class DeleteIssueTagForm(PagureForm): """ Form to remove a tag to from a project. """ tag = wtforms.StringField( "Tag", [ wtforms.validators.Optional(), wtforms.validators.Regexp(TAGS_REGEX, flags=re.IGNORECASE), wtforms.validators.Length(max=255), ], ) class AddIssueTagForm(DeleteIssueTagForm): """ Form to add a tag to a project. """ tag_description = wtforms.StringField( "Tag Description", [wtforms.validators.Optional()] ) tag_color = wtforms.StringField( "Tag Color", [wtforms.validators.DataRequired()] ) class ApiAddIssueTagForm(PagureForm): """ Form to add a tag to a project from the API endpoint """ tag = wtforms.StringField( "Tag", [ wtforms.validators.DataRequired(), wtforms.validators.Regexp(TAGS_REGEX, flags=re.IGNORECASE), wtforms.validators.Length(max=255), ], ) tag_description = wtforms.StringField( "Tag Description", [wtforms.validators.Optional()] ) tag_color = wtforms.StringField( "Tag Color", [wtforms.validators.DataRequired()] ) class StatusForm(PagureForm): """ Form to add/change the status of an issue. """ status = wtforms.SelectField( "Status", [wtforms.validators.DataRequired()], choices=[] ) close_status = wtforms.SelectField( "Closed as", [wtforms.validators.Optional()], choices=[] ) def __init__(self, *args, **kwargs): """Calls the default constructor with the normal argument but uses the list of collection provided to fill the choices of the drop-down list. """ super(StatusForm, self).__init__(*args, **kwargs) if "status" in kwargs: self.status.choices = [ (status, status) for status in kwargs["status"] ] self.close_status.choices = [] if "close_status" in kwargs: for key in sorted(kwargs["close_status"]): self.close_status.choices.append((key, key)) self.close_status.choices.insert(0, ("", "")) class MilestoneForm(PagureForm): """ Form to change the milestone of an issue. """ milestone = wtforms.SelectField( "Milestone", [wtforms.validators.Optional()], choices=[], coerce=convert_value, ) def __init__(self, *args, **kwargs): """Calls the default constructor with the normal argument but uses the list of collection provided to fill the choices of the drop-down list. """ super(MilestoneForm, self).__init__(*args, **kwargs) self.milestone.choices = [] if "milestones" in kwargs and kwargs["milestones"]: for key in kwargs["milestones"]: self.milestone.choices.append((key, key)) self.milestone.choices.insert(0, ("", "")) class NewTokenForm(PagureForm): """ Form to add a new token. """ description = wtforms.StringField( "description", [wtforms.validators.Optional()] ) expiration_date = wtforms.DateField( "expiration date", [wtforms.validators.DataRequired()], default=datetime.date.today() + datetime.timedelta(days=(30 * 6)), ) acls = wtforms.SelectMultipleField( "ACLs", [wtforms.validators.DataRequired()], choices=[] ) def __init__(self, *args, **kwargs): """Calls the default constructor with the normal argument but uses the list of collection provided to fill the choices of the drop-down list. """ super(NewTokenForm, self).__init__(*args, **kwargs) if "acls" in kwargs: self.acls.choices = [ (acl.name, acl.name) for acl in kwargs["acls"] ] if "sacls" in kwargs: self.acls.choices = [(acl, acl) for acl in
<gh_stars>1-10 #!/usr/bin/env python """ @package ion_functions.data.ph_functions @file ion_functions/data/ph_functions.py @author <NAME> @brief Module containing pH family instrument related functions """ # imports import numpy as np import numexpr as ne import scipy as sp # functions to extract L0 parameters from SAMI-II pH instruments (PHSEN) def ph_434_intensity(light): """ Function to extract the signal intensity at 434 nm (PH434SI_L0) from the pH instrument light measurements. Coded to accept either a single record or an array of records. """ light = np.atleast_3d(light).astype(np.float) new = np.reshape(light, (-1, 23, 4)) si434 = new[:, :, 1] return si434 # signal intensity, 434 nm (PH434SI_L0) def ph_578_intensity(light): """ Function to extract the signal intensity at 578 nm (PH578SI_L0) from the pH instrument light measurements. Coded to accept either a single record or an array of records. """ light = np.atleast_3d(light).astype(np.float) new = np.reshape(light, (-1, 23, 4)) si578 = new[:, :, 3] return si578 # signal intensity, 578 nm (PH578SI_L0) # functions to convert thermistor and battery measurements from counts to # applicable engineering units def ph_thermistor(traw): """ Function to convert the thermistor data (ABSTHRM_L0) from counts to degrees Centigrade for the pH instrument. """ # convert raw thermistor readings from counts to degrees Centigrade Rt = ne.evaluate('(traw / (4096.0 - traw)) * 17400.0') lRt = np.log(Rt) InvT = ne.evaluate('0.0010183 + 0.000241 * lRt + 0.00000015 * lRt**3') therm = ne.evaluate('(1.0 / InvT) - 273.15') return therm def ph_battery(braw): """ Function to convert the battery voltage from counts to Volts from the pH instrument. """ # convert raw battery readings from counts to Volts volts = ne.evaluate('braw * 15. / 4096.') return volts # function to calculate the PHWATER_L2 data product def ph_calc_phwater(ref, light, therm, ea434, eb434, ea578, eb578, ind_slp, ind_off, psal=35.0): """ Description: OOI Level 2 pH of seawater core data product, which is calculated using data from the Sunburst SAMI-II pH instrument (PHSEN). This document is intended to be used by OOI programmers to construct appropriate processes to create the L2 pH of seawater core data product. Implemented by: 2013-04-19: <NAME>. Initial code. Usage: ph = ph_calc_phwater(ref, light, therm, ea434, eb434, ea578, eb578, psal=35.0, ind=1) where ph = measured pH of seawater [unitless] ref = raw signal and reference measurements during blank cycle [counts] light = raw signal and reference measurements during measurement cycle [counts] therm = thermistor reading at end of measurement cycle [deg_C] ea434 = mCP molar absorptivities provided by vendor, specific to a reagent bag with a defined shelflife. eb434 = mCP molar absorptivities as above ea578 = mCP molar absorptivities as above eb578 = mCP molar absorptivities as above ind_slp = indicator impurity slope correction factor [unitless] ind_off = indicator impurity offset correction factor [unitless] psal = practical salinity estimate used in calculcations from a co-located CTD, default is 35.0 if CTD data is unavailable [unitless] References: OOI (2014). Data Product Specification for pH of Seawater. Document Control Number 1341-00510. https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >> Controlled >> 1000 System Level >> 1341-00510_Data_Product_SPEC_PHWATER_OOI.pdf) """ # reformat all input values to arrays of the correct dimensions, shape, and # type, recording the number of input records. 
ref = (np.atleast_2d(ref)).astype(np.float) nRec = ref.shape[0] light = np.atleast_3d(light).astype(np.float) light = np.reshape(light, (nRec, 23, 4)) therm = np.reshape(therm, (nRec, 1)).astype(np.float) ea434 = np.reshape(ea434, (nRec, 1)).astype(np.float) eb434 = np.reshape(eb434, (nRec, 1)).astype(np.float) ea578 = np.reshape(ea578, (nRec, 1)).astype(np.float) eb578 = np.reshape(eb578, (nRec, 1)).astype(np.float) if np.isscalar(ind_slp) is True: ind_slp = np.tile(ind_slp, (nRec, 1)).astype(np.float) else: ind_slp = np.reshape(ind_slp, (nRec, 1)).astype(np.float) if np.isscalar(ind_off) is True: ind_off = np.tile(ind_off, (nRec, 1)).astype(np.float) else: ind_off = np.reshape(ind_off, (nRec, 1)).astype(np.float) if np.isscalar(psal) is True: psal = np.tile(psal, (nRec, 1)).astype(np.float) else: psal = np.reshape(psal, (nRec, 1)).astype(np.float) # Calculate blanks from the 16 sets of reference light measurements arr434 = np.array([ (ref[:, 1] / ref[:, 0]), (ref[:, 5] / ref[:, 4]), (ref[:, 9] / ref[:, 8]), (ref[:, 13] / ref[:, 12]), ]) blank434 = np.reshape(np.mean(arr434, axis=0), (nRec, 1)) arr578 = np.array([ (ref[:, 3] / ref[:, 2]), (ref[:, 7] / ref[:, 6]), (ref[:, 11] / ref[:, 10]), (ref[:, 15] / ref[:, 14]), ]) blank578 = np.reshape(np.mean(arr578, axis=0), (nRec, 1)) # Extract 23 sets of 4 light measurements into arrays corresponding to the # raw reference and signal measurements at 434 and 578 nm. Input is an # array of length 92 (23 sets * 4 measurements per set). Can reshape and # slice to extract the parameters. ref434 = light[:, :, 0] # reference signal, 434 nm int434 = light[:, :, 1] # signal intensity, 434 nm (PH434SI_L0) ref578 = light[:, :, 2] # reference signal, 578 nm int578 = light[:, :, 3] # signal intensity, 578 nm (PH578SI_L0) # Absorbance A434 = -sp.log10(int434 / ref434) A434blank = -sp.log10(blank434) abs434 = A434 - A434blank A578 = -sp.log10(int578 / ref578) A578blank = -sp.log10(blank578) abs578 = A578 - A578blank R = abs578 / abs434 # pka from Clayton and Byrne, 1993 pKa = (1245.69 / (therm + 273.15)) + 3.8275 + (0.0021 * (35. - psal)) pKa = np.reshape(pKa, (-1, 1)) # Molar absorptivities Ea434 = ea434 - (26. * (therm - 24.788)) Ea578 = ea578 + (therm - 24.788) Eb434 = eb434 + (12. * (therm - 24.788)) Eb578 = eb578 - (71. * (therm - 24.788)) e1 = Ea578 / Ea434 e2 = Eb578 / Ea434 e3 = Eb434 / Ea434 V1 = R - e1 V2 = e2 - R * e3 # indicator concentration calculations HI = (abs434 * Eb578 - abs578 * Eb434) / (Ea434 * Eb578 - Eb434 * Ea578) I = (abs578 * Ea434 - abs434 * Ea578) / (Ea434 * Eb578 - Eb434 * Ea578) IndConc = HI + I pointph = np.real(pKa + sp.log10(V1 / V2)) # ************************ Initial pH Calcs ************************ # determine the most linear region of points for pH of seawater # calculation, skipping the first 5 points. IndConca = IndConc[:, 5:] Y = pointph[:, 5:] X = np.linspace(1, 18, 18) # create arrays for vectorized computations used in sum of squares below. # reflows 1D and 2D arrays into 2D and 3D arrays, respectively, shifting # each "row" of the arrays by one value, allowing the sum of square # calculations to be computed in a vectorized fashion, replacing the for # loop that had the computations running on 1:8, 2:9, ... 11:18. 
step = 7 # number of points to use count = step + 1 nPts = np.size(X) - step x = np.zeros((nPts, count)) y = np.zeros((nRec, nPts, count)) for i in range(nPts): x[i, :] = X[i:i+count] for j in range(nRec): y[j, i, :] = Y[j, i:i+count] # compute the range of best fitting points, using array multiplications to # determine the best fit via the correlation coefficient. sumx = np.sum(x, axis=1) sumy = np.sum(y, axis=2) sumxy = np.sum(x * y, axis=2) sumx2 = np.sum(x**2, axis=1) sumy2 = np.sum(y**2, axis=2) sumxx = sumx * sumx sumyy = sumy * sumy ssxy = sumxy - (sumx * sumy) / count ssx = sumx2 - (sumxx / count) ssy = sumy2 - (sumyy / count) r2 = ssxy**2 / (ssx * ssy) # Range of seawater points to use cutoff1 = np.argmax(r2, axis=1) # Find the first, best R-squared value cutoff2 = cutoff1 + count # Indicator and pH range limited to best points IndConcS = np.zeros((nRec, count)) pointphS = np.zeros((nRec, count)) for i in range(nRec): IndConcS[i, :] = IndConca[i, cutoff1[i]:cutoff2[i]] pointphS[i, :] = Y[i, cutoff1[i]:cutoff2[i]] # ************************* Final pH Calcs ************************* sumx = np.sum(IndConcS, axis=1) sumy = np.sum(pointphS, axis=1) sumxy = np.sum(pointphS * IndConcS, axis=1) sumx2 = np.sum(IndConcS**2, axis=1) sumy2 = np.sum(pointphS**2, axis=1) xbar = np.mean(IndConcS, axis=1) ybar = np.mean(pointphS, axis=1) sumxx = sumx * sumx sumyy = sumy * sumy ssxy = sumxy - (sumx * sumy) / count ssx = sumx2 - (sumxx / count) ssy = sumy2 - (sumyy / count) slope = ssxy / ssx ph = ybar - slope *
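# --- Illustrative usage sketch (added; not part of the original module) ---
# The count-to-engineering-unit helpers defined above, applied to made-up raw
# counts (illustrative inputs only, not real PHSEN data).
import numpy as np

example_traw = np.array([2048., 2200.])
print(ph_thermistor(example_traw))      # thermistor temperatures in deg_C
print(ph_battery(np.array([3000.])))    # 3000 * 15 / 4096, roughly 10.99 V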
* cos(q2) * N.y + sin(q2) * N.z ) assert express(B.z, N) == ( sin(q1) * sin(q2) * N.x - sin(q2) * cos(q1) * N.y + cos(q2) * N.z ) assert express(B.x, A) == A.x assert express(B.y, A) == (cos(q2) * A.y + sin(q2) * A.z) assert express(B.z, A) == (-sin(q2) * A.y + cos(q2) * A.z) assert express(B.x, B) == B.x assert express(B.y, B) == B.y assert express(B.z, B) == B.z assert express(B.x, C) == (cos(q3) * C.x + sin(q3) * C.z) assert express(B.y, C) == C.y assert express(B.z, C) == (-sin(q3) * C.x + cos(q3) * C.z) assert express(C.x, N) == ( (cos(q1) * cos(q3) - sin(q1) * sin(q2) * sin(q3)) * N.x + (sin(q1) * cos(q3) + sin(q2) * sin(q3) * cos(q1)) * N.y - sin(q3) * cos(q2) * N.z ) assert express(C.y, N) == ( -sin(q1) * cos(q2) * N.x + cos(q1) * cos(q2) * N.y + sin(q2) * N.z ) assert express(C.z, N) == ( (sin(q3) * cos(q1) + sin(q1) * sin(q2) * cos(q3)) * N.x + (sin(q1) * sin(q3) - sin(q2) * cos(q1) * cos(q3)) * N.y + cos(q2) * cos(q3) * N.z ) assert express(C.x, A) == ( cos(q3) * A.x + sin(q2) * sin(q3) * A.y - sin(q3) * cos(q2) * A.z ) assert express(C.y, A) == (cos(q2) * A.y + sin(q2) * A.z) assert express(C.z, A) == ( sin(q3) * A.x - sin(q2) * cos(q3) * A.y + cos(q2) * cos(q3) * A.z ) assert express(C.x, B) == (cos(q3) * B.x - sin(q3) * B.z) assert express(C.y, B) == B.y assert express(C.z, B) == (sin(q3) * B.x + cos(q3) * B.z) assert express(C.x, C) == C.x assert express(C.y, C) == C.y assert express(C.z, C) == C.z == (C.z) # Check to make sure Vectors get converted back to UnitVectors assert N.x == express((cos(q1) * A.x - sin(q1) * A.y), N) assert N.y == express((sin(q1) * A.x + cos(q1) * A.y), N) assert N.x == express( (cos(q1) * B.x - sin(q1) * cos(q2) * B.y + sin(q1) * sin(q2) * B.z), N ) assert N.y == express( (sin(q1) * B.x + cos(q1) * cos(q2) * B.y - sin(q2) * cos(q1) * B.z), N ) assert N.z == express((sin(q2) * B.y + cos(q2) * B.z), N) """ These don't really test our code, they instead test the auto simplification (or lack thereof) of SymPy. assert N.x == express(( (cos(q1)*cos(q3)-sin(q1)*sin(q2)*sin(q3))*C.x - sin(q1)*cos(q2)*C.y + (sin(q3)*cos(q1)+sin(q1)*sin(q2)*cos(q3))*C.z), N) assert N.y == express(( (sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1))*C.x + cos(q1)*cos(q2)*C.y + (sin(q1)*sin(q3) - sin(q2)*cos(q1)*cos(q3))*C.z), N) assert N.z == express((-sin(q3)*cos(q2)*C.x + sin(q2)*C.y + cos(q2)*cos(q3)*C.z), N) """ assert A.x == express((cos(q1) * N.x + sin(q1) * N.y), A) assert A.y == express((-sin(q1) * N.x + cos(q1) * N.y), A) assert A.y == express((cos(q2) * B.y - sin(q2) * B.z), A) assert A.z == express((sin(q2) * B.y + cos(q2) * B.z), A) assert A.x == express((cos(q3) * C.x + sin(q3) * C.z), A) # Tripsimp messes up here too. 
# print express((sin(q2)*sin(q3)*C.x + cos(q2)*C.y - # sin(q2)*cos(q3)*C.z), A) assert A.y == express( (sin(q2) * sin(q3) * C.x + cos(q2) * C.y - sin(q2) * cos(q3) * C.z), A ) assert A.z == express( (-sin(q3) * cos(q2) * C.x + sin(q2) * C.y + cos(q2) * cos(q3) * C.z), A ) assert B.x == express((cos(q1) * N.x + sin(q1) * N.y), B) assert B.y == express( (-sin(q1) * cos(q2) * N.x + cos(q1) * cos(q2) * N.y + sin(q2) * N.z), B ) assert B.z == express( (sin(q1) * sin(q2) * N.x - sin(q2) * cos(q1) * N.y + cos(q2) * N.z), B ) assert B.y == express((cos(q2) * A.y + sin(q2) * A.z), B) assert B.z == express((-sin(q2) * A.y + cos(q2) * A.z), B) assert B.x == express((cos(q3) * C.x + sin(q3) * C.z), B) assert B.z == express((-sin(q3) * C.x + cos(q3) * C.z), B) """ assert C.x == express(( (cos(q1)*cos(q3)-sin(q1)*sin(q2)*sin(q3))*N.x + (sin(q1)*cos(q3)+sin(q2)*sin(q3)*cos(q1))*N.y - sin(q3)*cos(q2)*N.z), C) assert C.y == express(( -sin(q1)*cos(q2)*N.x + cos(q1)*cos(q2)*N.y + sin(q2)*N.z), C) assert C.z == express(( (sin(q3)*cos(q1)+sin(q1)*sin(q2)*cos(q3))*N.x + (sin(q1)*sin(q3)-sin(q2)*cos(q1)*cos(q3))*N.y + cos(q2)*cos(q3)*N.z), C) """ assert C.x == express( (cos(q3) * A.x + sin(q2) * sin(q3) * A.y - sin(q3) * cos(q2) * A.z), C ) assert C.y == express((cos(q2) * A.y + sin(q2) * A.z), C) assert C.z == express( (sin(q3) * A.x - sin(q2) * cos(q3) * A.y + cos(q2) * cos(q3) * A.z), C ) assert C.x == express((cos(q3) * B.x - sin(q3) * B.z), C) assert C.z == express((sin(q3) * B.x + cos(q3) * B.z), C) def test_time_derivative(): # The use of time_derivative for calculations pertaining to scalar # fields has been tested in test_coordinate_vars in test_essential.py A = ReferenceFrame("A") q = dynamicsymbols("q") qd = dynamicsymbols("q", 1) B = A.orientnew("B", "Axis", [q, A.z]) d = A.x | A.x assert time_derivative(d, B) == (-qd) * (A.y | A.x) + (-qd) * (A.x | A.y) d1 = A.x | B.y assert time_derivative(d1, A) == -qd * (A.x | B.x) assert time_derivative(d1, B) == -qd * (A.y | B.y) d2 = A.x | B.x assert time_derivative(d2, A) == qd * (A.x | B.y) assert time_derivative(d2, B) == -qd * (A.y | B.x) d3 = A.x | B.z assert time_derivative(d3, A) == 0 assert time_derivative(d3, B) == -qd * (A.y | B.z) q1, q2, q3, q4 = dynamicsymbols("q1 q2 q3 q4") q1d, q2d, q3d, q4d = dynamicsymbols("q1 q2 q3 q4", 1) q1dd, q2dd, q3dd, q4dd = dynamicsymbols("q1 q2 q3 q4", 2) C = B.orientnew("C", "Axis", [q4, B.x]) v1 = q1 * A.z v2 = q2 * A.x + q3 * B.y v3 = q1 * A.x + q2 * A.y + q3 * A.z assert time_derivative(B.x, C) == 0 assert time_derivative(B.y, C) == -q4d * B.z assert time_derivative(B.z, C) == q4d * B.y assert time_derivative(v1, B) == q1d * A.z assert ( time_derivative(v1, C) == -q1 * sin(q) * q4d * A.x + q1 * cos(q) * q4d * A.y + q1d * A.z ) assert time_derivative(v2, A) == q2d * A.x - q3 * qd * B.x + q3d * B.y assert ( time_derivative(v2, C) == q2d * A.x - q2 * qd * A.y + q2 * sin(q) * q4d * A.z + q3d * B.y - q3 * q4d * B.z ) assert ( time_derivative(v3, B) == (q2 * qd + q1d) * A.x + (-q1 * qd + q2d) * A.y + q3d * A.z ) assert time_derivative(d, C) == -qd * (A.y | A.x) + sin(q) * q4d * ( A.z | A.x ) - qd * (A.x | A.y) + sin(q) * q4d * (A.x | A.z) raises(ValueError, lambda: time_derivative(B.x, C, order=0.5)) raises(ValueError, lambda: time_derivative(B.x, C, order=-1)) def test_get_motion_methods(): # Initialization t = dynamicsymbols._t s1, s2, s3 = symbols("s1 s2 s3") S1, S2, S3 = symbols("S1 S2 S3") S4, S5, S6 = symbols("S4 S5 S6") t1, t2 = symbols("t1 t2") a, b, c = dynamicsymbols("a b c") ad, bd, cd = dynamicsymbols("a b 
c", 1) a2d, b2d, c2d = dynamicsymbols("a b c", 2) v0 = S1 * N.x + S2 * N.y + S3 * N.z v01 = S4 * N.x + S5 * N.y + S6 * N.z v1 = s1 * N.x + s2 * N.y + s3 * N.z v2 = a * N.x + b * N.y + c * N.z v2d = ad * N.x + bd * N.y + cd * N.z v2dd = a2d * N.x + b2d * N.y + c2d * N.z # Test position parameter assert get_motion_params(frame=N) == (0, 0, 0) assert get_motion_params(N, position=v1) == (0, 0, v1) assert get_motion_params(N, position=v2) == (v2dd, v2d, v2) # Test velocity parameter assert get_motion_params(N, velocity=v1) == (0, v1, v1 * t) assert get_motion_params(N, velocity=v1, position=v0, timevalue1=t1) == ( 0, v1, v0 + v1 * (t - t1), )
None self.resourceUuid = None self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIDELETEALIYUNSNAPSHOTFROMLOCALMSG_FULL_NAME = 'org.zstack.header.aliyun.storage.snapshot.APIDeleteAliyunSnapshotFromLocalMsg' class APIDeleteAliyunSnapshotFromLocalMsg(object): FULL_NAME='org.zstack.header.aliyun.storage.snapshot.APIDeleteAliyunSnapshotFromLocalMsg' def __init__(self): #mandatory field self.uuid = NotNoneField() self.deleteMode = None self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIDELETEALIYUNSNAPSHOTFROMREMOTEMSG_FULL_NAME = 'org.zstack.header.aliyun.storage.snapshot.APIDeleteAliyunSnapshotFromRemoteMsg' class APIDeleteAliyunSnapshotFromRemoteMsg(object): FULL_NAME='org.zstack.header.aliyun.storage.snapshot.APIDeleteAliyunSnapshotFromRemoteMsg' def __init__(self): #mandatory field self.uuid = NotNoneField() self.deleteMode = None self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIGCALIYUNSNAPSHOTREMOTEMSG_FULL_NAME = 'org.zstack.header.aliyun.storage.snapshot.APIGCAliyunSnapshotRemoteMsg' class APIGCAliyunSnapshotRemoteMsg(object): FULL_NAME='org.zstack.header.aliyun.storage.snapshot.APIGCAliyunSnapshotRemoteMsg' def __init__(self): #mandatory field self.dataCenterUuid = NotNoneField() self.deleteMode = None self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIQUERYALIYUNSNAPSHOTFROMLOCALMSG_FULL_NAME = 'org.zstack.header.aliyun.storage.snapshot.APIQueryAliyunSnapshotFromLocalMsg' class APIQueryAliyunSnapshotFromLocalMsg(object): FULL_NAME='org.zstack.header.aliyun.storage.snapshot.APIQueryAliyunSnapshotFromLocalMsg' def __init__(self): #mandatory field self.conditions = NotNoneList() self.limit = None self.start = None self.count = None self.groupBy = None self.replyWithCount = None self.sortBy = None #valid values: [asc, desc] self.sortDirection = None self.fields = OptionalList() self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIQUERYALIYUNSNAPSHOTFROMLOCALREPLY_FULL_NAME = 'org.zstack.header.aliyun.storage.snapshot.APIQueryAliyunSnapshotFromLocalReply' class APIQueryAliyunSnapshotFromLocalReply(object): FULL_NAME='org.zstack.header.aliyun.storage.snapshot.APIQueryAliyunSnapshotFromLocalReply' def __init__(self): self.inventories = OptionalList() self.total = None self.success = None self.error = None APISYNCALIYUNSNAPSHOTREMOTEMSG_FULL_NAME = 'org.zstack.header.aliyun.storage.snapshot.APISyncAliyunSnapshotRemoteMsg' class APISyncAliyunSnapshotRemoteMsg(object): FULL_NAME='org.zstack.header.aliyun.storage.snapshot.APISyncAliyunSnapshotRemoteMsg' def __init__(self): #mandatory field self.dataCenterUuid = NotNoneField() self.snapshotId = None self.resourceUuid = None self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIUPDATEALIYUNSNAPSHOTMSG_FULL_NAME = 'org.zstack.header.aliyun.storage.snapshot.APIUpdateAliyunSnapshotMsg' class APIUpdateAliyunSnapshotMsg(object): FULL_NAME='org.zstack.header.aliyun.storage.snapshot.APIUpdateAliyunSnapshotMsg' def __init__(self): #mandatory field self.uuid = NotNoneField() #valid regex values: ^[\u4e00-\u9fa5a-zA-Z][\u4e00-\u9fa5_a-zA-Z0-9.-]+$ self.name = None self.description = None self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIGETCPUMEMORYCAPACITYMSG_FULL_NAME = 
'org.zstack.header.allocator.APIGetCpuMemoryCapacityMsg' class APIGetCpuMemoryCapacityMsg(object): FULL_NAME='org.zstack.header.allocator.APIGetCpuMemoryCapacityMsg' def __init__(self): self.zoneUuids = OptionalList() self.clusterUuids = OptionalList() self.hostUuids = OptionalList() self.all = None self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIGETCPUMEMORYCAPACITYREPLY_FULL_NAME = 'org.zstack.header.allocator.APIGetCpuMemoryCapacityReply' class APIGetCpuMemoryCapacityReply(object): FULL_NAME='org.zstack.header.allocator.APIGetCpuMemoryCapacityReply' def __init__(self): self.totalCpu = None self.availableCpu = None self.totalMemory = None self.availableMemory = None self.success = None self.error = None APIGETHOSTALLOCATORSTRATEGIESMSG_FULL_NAME = 'org.zstack.header.allocator.APIGetHostAllocatorStrategiesMsg' class APIGetHostAllocatorStrategiesMsg(object): FULL_NAME='org.zstack.header.allocator.APIGetHostAllocatorStrategiesMsg' def __init__(self): self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIGETHOSTALLOCATORSTRATEGIESREPLY_FULL_NAME = 'org.zstack.header.allocator.APIGetHostAllocatorStrategiesReply' class APIGetHostAllocatorStrategiesReply(object): FULL_NAME='org.zstack.header.allocator.APIGetHostAllocatorStrategiesReply' def __init__(self): self.hostAllocatorStrategies = OptionalList() self.success = None self.error = None APIISREADYTOGOMSG_FULL_NAME = 'org.zstack.header.apimediator.APIIsReadyToGoMsg' class APIIsReadyToGoMsg(object): FULL_NAME='org.zstack.header.apimediator.APIIsReadyToGoMsg' def __init__(self): self.managementNodeId = None self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIISREADYTOGOREPLY_FULL_NAME = 'org.zstack.header.apimediator.APIIsReadyToGoReply' class APIIsReadyToGoReply(object): FULL_NAME='org.zstack.header.apimediator.APIIsReadyToGoReply' def __init__(self): self.managementNodeId = None self.success = None self.error = None APIREQUESTBAREMETALCONSOLEACCESSMSG_FULL_NAME = 'org.zstack.header.baremetal.console.APIRequestBaremetalConsoleAccessMsg' class APIRequestBaremetalConsoleAccessMsg(object): FULL_NAME='org.zstack.header.baremetal.console.APIRequestBaremetalConsoleAccessMsg' def __init__(self): #mandatory field self.chassisUuid = NotNoneField() self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIQUERYBAREMETALHARDWAREINFOMSG_FULL_NAME = 'org.zstack.header.baremetal.hardwareinfo.APIQueryBaremetalHardwareInfoMsg' class APIQueryBaremetalHardwareInfoMsg(object): FULL_NAME='org.zstack.header.baremetal.hardwareinfo.APIQueryBaremetalHardwareInfoMsg' def __init__(self): #mandatory field self.conditions = NotNoneList() self.limit = None self.start = None self.count = None self.groupBy = None self.replyWithCount = None self.sortBy = None #valid values: [asc, desc] self.sortDirection = None self.fields = OptionalList() self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIQUERYBAREMETALHARDWAREINFOREPLY_FULL_NAME = 'org.zstack.header.baremetal.hardwareinfo.APIQueryBaremetalHardwareInfoReply' class APIQueryBaremetalHardwareInfoReply(object): FULL_NAME='org.zstack.header.baremetal.hardwareinfo.APIQueryBaremetalHardwareInfoReply' def __init__(self): self.inventories = OptionalList() self.total = None self.success = None self.error = None APICREATEBAREMETALHOSTCFGMSG_FULL_NAME = 
'org.zstack.header.baremetal.host.APICreateBaremetalHostCfgMsg' class APICreateBaremetalHostCfgMsg(object): FULL_NAME='org.zstack.header.baremetal.host.APICreateBaremetalHostCfgMsg' def __init__(self): #mandatory field self.chassisUuid = NotNoneField() self.password = <PASSWORD> self.vnc = None self.unattended = None self.cloneIso = None #mandatory field self.cfgItems = NotNoneMap() self.resourceUuid = None self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIDELETEBAREMETALHOSTCFGMSG_FULL_NAME = 'org.zstack.header.baremetal.host.APIDeleteBaremetalHostCfgMsg' class APIDeleteBaremetalHostCfgMsg(object): FULL_NAME='org.zstack.header.baremetal.host.APIDeleteBaremetalHostCfgMsg' def __init__(self): #mandatory field self.uuid = NotNoneField() self.deleteMode = None self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIQUERYBAREMETALHOSTCFGMSG_FULL_NAME = 'org.zstack.header.baremetal.host.APIQueryBaremetalHostCfgMsg' class APIQueryBaremetalHostCfgMsg(object): FULL_NAME='org.zstack.header.baremetal.host.APIQueryBaremetalHostCfgMsg' def __init__(self): #mandatory field self.conditions = NotNoneList() self.limit = None self.start = None self.count = None self.groupBy = None self.replyWithCount = None self.sortBy = None #valid values: [asc, desc] self.sortDirection = None self.fields = OptionalList() self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIQUERYBAREMETALHOSTCFGREPLY_FULL_NAME = 'org.zstack.header.baremetal.host.APIQueryBaremetalHostCfgReply' class APIQueryBaremetalHostCfgReply(object): FULL_NAME='org.zstack.header.baremetal.host.APIQueryBaremetalHostCfgReply' def __init__(self): self.inventories = OptionalList() self.total = None self.success = None self.error = None APICREATEBAREMETALCHASSISMSG_FULL_NAME = 'org.zstack.header.baremetal.power.APICreateBaremetalChassisMsg' class APICreateBaremetalChassisMsg(object): FULL_NAME='org.zstack.header.baremetal.power.APICreateBaremetalChassisMsg' def __init__(self): #mandatory field self.name = NotNoneField() self.description = None #mandatory field self.ipmiAddress = NotNoneField() self.ipmiPort = None #mandatory field self.ipmiUsername = NotNoneField() #mandatory field self.ipmiPassword = NotNoneField() self.resourceUuid = None self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIDELETEBAREMETALCHASSISMSG_FULL_NAME = 'org.zstack.header.baremetal.power.APIDeleteBaremetalChassisMsg' class APIDeleteBaremetalChassisMsg(object): FULL_NAME='org.zstack.header.baremetal.power.APIDeleteBaremetalChassisMsg' def __init__(self): #mandatory field self.uuid = NotNoneField() self.deleteMode = None self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIPOWEROFFBAREMETALHOSTMSG_FULL_NAME = 'org.zstack.header.baremetal.power.APIPowerOffBaremetalHostMsg' class APIPowerOffBaremetalHostMsg(object): FULL_NAME='org.zstack.header.baremetal.power.APIPowerOffBaremetalHostMsg' def __init__(self): #mandatory field self.chassisUuid = NotNoneField() self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIPOWERONBAREMETALHOSTMSG_FULL_NAME = 'org.zstack.header.baremetal.power.APIPowerOnBaremetalHostMsg' class APIPowerOnBaremetalHostMsg(object): FULL_NAME='org.zstack.header.baremetal.power.APIPowerOnBaremetalHostMsg' def __init__(self): #mandatory field 
self.chassisUuid = NotNoneField() self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIPOWERRESETBAREMETALHOSTMSG_FULL_NAME = 'org.zstack.header.baremetal.power.APIPowerResetBaremetalHostMsg' class APIPowerResetBaremetalHostMsg(object): FULL_NAME='org.zstack.header.baremetal.power.APIPowerResetBaremetalHostMsg' def __init__(self): #mandatory field self.chassisUuid = NotNoneField() self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIPOWERSTATUSBAREMETALHOSTMSG_FULL_NAME = 'org.zstack.header.baremetal.power.APIPowerStatusBaremetalHostMsg' class APIPowerStatusBaremetalHostMsg(object): FULL_NAME='org.zstack.header.baremetal.power.APIPowerStatusBaremetalHostMsg' def __init__(self): #mandatory field self.chassisUuid = NotNoneField() self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIPROVISIONBAREMETALHOSTMSG_FULL_NAME = 'org.zstack.header.baremetal.power.APIProvisionBaremetalHostMsg' class APIProvisionBaremetalHostMsg(object): FULL_NAME='org.zstack.header.baremetal.power.APIProvisionBaremetalHostMsg' def __init__(self): #mandatory field self.chassisUuid = NotNoneField() self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIQUERYBAREMETALCHASSISMSG_FULL_NAME = 'org.zstack.header.baremetal.power.APIQueryBaremetalChassisMsg' class APIQueryBaremetalChassisMsg(object): FULL_NAME='org.zstack.header.baremetal.power.APIQueryBaremetalChassisMsg' def __init__(self): #mandatory field self.conditions = NotNoneList() self.limit = None self.start = None self.count = None self.groupBy = None self.replyWithCount = None self.sortBy = None #valid values: [asc, desc] self.sortDirection = None self.fields = OptionalList() self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIQUERYBAREMETALCHASSISREPLY_FULL_NAME = 'org.zstack.header.baremetal.power.APIQueryBaremetalChassisReply' class APIQueryBaremetalChassisReply(object): FULL_NAME='org.zstack.header.baremetal.power.APIQueryBaremetalChassisReply' def __init__(self): self.inventories = OptionalList() self.total = None self.success = None self.error = None APIUPDATEBAREMETALCHASSISMSG_FULL_NAME = 'org.zstack.header.baremetal.power.APIUpdateBaremetalChassisMsg' class APIUpdateBaremetalChassisMsg(object): FULL_NAME='org.zstack.header.baremetal.power.APIUpdateBaremetalChassisMsg' def __init__(self): #mandatory field self.uuid = NotNoneField() self.name = None self.description = None self.ipmiAddress = None self.ipmiPort = None self.ipmiUsername = None self.ipmiPassword = None #valid values: [Unprovisioned, Provisioning, Provisioned] self.status = None self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APICREATEBAREMETALPXESERVERMSG_FULL_NAME = 'org.zstack.header.baremetal.pxeserver.APICreateBaremetalPxeServerMsg' class APICreateBaremetalPxeServerMsg(object): FULL_NAME='org.zstack.header.baremetal.pxeserver.APICreateBaremetalPxeServerMsg' def __init__(self): #mandatory field self.name = NotNoneField() self.description = None #mandatory field self.dhcpInterface = NotNoneField() self.dhcpRangeBegin = None self.dhcpRangeEnd = None self.dhcpRangeNetmask = None self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIDELETEBAREMETALPXESERVERMSG_FULL_NAME = 
'org.zstack.header.baremetal.pxeserver.APIDeleteBaremetalPxeServerMsg' class APIDeleteBaremetalPxeServerMsg(object): FULL_NAME='org.zstack.header.baremetal.pxeserver.APIDeleteBaremetalPxeServerMsg' def __init__(self): #mandatory field self.uuid = NotNoneField() self.deleteMode = None self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIQUERYBAREMETALPXESERVERMSG_FULL_NAME = 'org.zstack.header.baremetal.pxeserver.APIQueryBaremetalPxeServerMsg' class APIQueryBaremetalPxeServerMsg(object): FULL_NAME='org.zstack.header.baremetal.pxeserver.APIQueryBaremetalPxeServerMsg' def __init__(self): #mandatory field self.conditions = NotNoneList() self.limit = None self.start = None self.count = None self.groupBy = None self.replyWithCount = None self.sortBy = None #valid values: [asc, desc] self.sortDirection = None self.fields = OptionalList() self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIQUERYBAREMETALPXESERVERREPLY_FULL_NAME = 'org.zstack.header.baremetal.pxeserver.APIQueryBaremetalPxeServerReply' class APIQueryBaremetalPxeServerReply(object): FULL_NAME='org.zstack.header.baremetal.pxeserver.APIQueryBaremetalPxeServerReply' def __init__(self): self.inventories = OptionalList() self.total = None self.success = None self.error = None APISTARTBAREMETALPXESERVERMSG_FULL_NAME = 'org.zstack.header.baremetal.pxeserver.APIStartBaremetalPxeServerMsg' class APIStartBaremetalPxeServerMsg(object): FULL_NAME='org.zstack.header.baremetal.pxeserver.APIStartBaremetalPxeServerMsg' def __init__(self): #mandatory field self.uuid = NotNoneField() self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APISTOPBAREMETALPXESERVERMSG_FULL_NAME = 'org.zstack.header.baremetal.pxeserver.APIStopBaremetalPxeServerMsg' class APIStopBaremetalPxeServerMsg(object): FULL_NAME='org.zstack.header.baremetal.pxeserver.APIStopBaremetalPxeServerMsg' def __init__(self): #mandatory field self.uuid = NotNoneField() self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIUPDATEBAREMETALPXESERVERMSG_FULL_NAME = 'org.zstack.header.baremetal.pxeserver.APIUpdateBaremetalPxeServerMsg' class APIUpdateBaremetalPxeServerMsg(object): FULL_NAME='org.zstack.header.baremetal.pxeserver.APIUpdateBaremetalPxeServerMsg' def __init__(self): #mandatory field self.uuid = NotNoneField() self.name = None self.description = None self.dhcpRangeBegin = None self.dhcpRangeEnd = None self.dhcpRangeNetmask = None self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APICHANGECLUSTERSTATEMSG_FULL_NAME = 'org.zstack.header.cluster.APIChangeClusterStateMsg' class APIChangeClusterStateMsg(object): FULL_NAME='org.zstack.header.cluster.APIChangeClusterStateMsg' def __init__(self): #mandatory field self.uuid = NotNoneField() #mandatory field #valid values: [enable, disable] self.stateEvent = NotNoneField() self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APICREATECLUSTERMSG_FULL_NAME = 'org.zstack.header.cluster.APICreateClusterMsg' class APICreateClusterMsg(object): FULL_NAME='org.zstack.header.cluster.APICreateClusterMsg' def __init__(self): #mandatory field self.zoneUuid = NotNoneField() #mandatory field self.name = NotNoneField() self.description = None #mandatory field #valid values: [KVM, Simulator] self.hypervisorType = NotNoneField() #valid values: 
[zstack] self.type = None self.resourceUuid = None self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIDELETECLUSTERMSG_FULL_NAME = 'org.zstack.header.cluster.APIDeleteClusterMsg' class APIDeleteClusterMsg(object): FULL_NAME='org.zstack.header.cluster.APIDeleteClusterMsg' def __init__(self): #mandatory field self.uuid = NotNoneField() self.deleteMode = None self.session = None self.timeout = None self.systemTags = OptionalList() self.userTags = OptionalList() APIGETCLUSTERREPLY_FULL_NAME = 'org.zstack.header.cluster.APIGetClusterReply' class APIGetClusterReply(object): FULL_NAME='org.zstack.header.cluster.APIGetClusterReply' def __init__(self): self.inventory = None self.success = None self.error = None APILISTCLUSTERREPLY_FULL_NAME = 'org.zstack.header.cluster.APIListClusterReply' class APIListClusterReply(object): FULL_NAME='org.zstack.header.cluster.APIListClusterReply' def __init__(self): self.inventories = OptionalList() self.success = None self.error = None APIQUERYCLUSTERMSG_FULL_NAME = 'org.zstack.header.cluster.APIQueryClusterMsg' class APIQueryClusterMsg(object): FULL_NAME='org.zstack.header.cluster.APIQueryClusterMsg' def __init__(self): #mandatory field self.conditions = NotNoneList() self.limit = None self.start = None self.count = None self.groupBy = None self.replyWithCount = None self.sortBy = None #valid values: [asc, desc] self.sortDirection = None self.fields = OptionalList() self.session
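# Hypothetical helper, not part of the generated message classes above (the
# NotNone*/OptionalList names are assumed to be the sentinel classes defined
# elsewhere in this SDK module): since mandatory attributes are initialised to
# NotNoneField()/NotNoneList()/NotNoneMap() sentinels, a caller can list the
# required fields it has not yet filled in before sending a message.
def unset_mandatory_fields(msg):
    """Return attribute names of `msg` that still hold a NotNone* sentinel."""
    sentinels = (NotNoneField, NotNoneList, NotNoneMap)
    return [name for name, value in vars(msg).items()
            if isinstance(value, sentinels)]

# e.g. unset_mandatory_fields(APIDeleteClusterMsg()) would report ['uuid'].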
Juai" def __unicode__(self): return "%s" % (self.id_atl) class SKPDAsalATLDisdikSMPN1Juai(SKPDAsalATL): class Meta: proxy = True verbose_name = "07 SKPD Asal ATL Disdik SMPN 1 Juai" verbose_name_plural = "07 SKPD Asal ATL Disdik SMPN 1 Juai" def __unicode__(self): return "%s" % (self.id) class FotoATLDisdikSMPN1Juai(FotoATL): class Meta: proxy = True verbose_name = "07 Foto ATL Disdik SMPN 1 Juai" verbose_name_plural = "07 Foto ATL Disdik SMPN 1 Juai" def __unicode__(self): return "%s" % (self.id_atl) #Disdik SMPN 1 Lampihong class ATLDisdikSMPN1Lampihong(ATL): class Meta: proxy = True verbose_name = "07 ATL Disdik SMPN 1 Lampihong" verbose_name_plural = "07 ATL Disdik SMPN 1 Lampihong" def __unicode__(self): return self.nama_barang class HargaATLDisdikSMPN1Lampihong(HargaATL): class Meta: proxy = True verbose_name = "07 Harga ATL Disdik SMPN 1 Lampihong" verbose_name_plural = "07 Harga ATL Disdik SMPN 1 Lampihong" def __unicode__(self): return "%s" % (self.id_atl) class SKPDAsalATLDisdikSMPN1Lampihong(SKPDAsalATL): class Meta: proxy = True verbose_name = "07 SKPD Asal ATL Disdik SMPN 1 Lampihong" verbose_name_plural = "07 SKPD Asal ATL Disdik SMPN 1 Lampihong" def __unicode__(self): return "%s" % (self.id) class FotoATLDisdikSMPN1Lampihong(FotoATL): class Meta: proxy = True verbose_name = "07 Foto ATL Disdik SMPN 1 Lampihong" verbose_name_plural = "07 Foto ATL Disdik SMPN 1 Lampihong" def __unicode__(self): return "%s" % (self.id_atl) #Disdik SMPN 1 Paringin class ATLDisdikSMPN1Paringin(ATL): class Meta: proxy = True verbose_name = "07 ATL Disdik SMPN 1 Paringin" verbose_name_plural = "07 ATL Disdik SMPN 1 Paringin" def __unicode__(self): return self.nama_barang class HargaATLDisdikSMPN1Paringin(HargaATL): class Meta: proxy = True verbose_name = "07 Harga ATL Disdik SMPN 1 Paringin" verbose_name_plural = "07 Harga ATL Disdik SMPN 1 Paringin" def __unicode__(self): return "%s" % (self.id_atl) class SKPDAsalATLDisdikSMPN1Paringin(SKPDAsalATL): class Meta: proxy = True verbose_name = "07 SKPD Asal ATL Disdik SMPN 1 Paringin" verbose_name_plural = "07 SKPD Asal ATL Disdik SMPN 1 Paringin" def __unicode__(self): return "%s" % (self.id) class FotoATLDisdikSMPN1Paringin(FotoATL): class Meta: proxy = True verbose_name = "07 Foto ATL Disdik SMPN 1 Paringin" verbose_name_plural = "07 Foto ATL Disdik SMPN 1 Paringin" def __unicode__(self): return "%s" % (self.id_atl) #Disdik SMPN 2 Awayan class ATLDisdikSMPN2Awayan(ATL): class Meta: proxy = True verbose_name = "07 ATL Disdik SMPN 2 Awayan" verbose_name_plural = "07 ATL Disdik SMPN 2 Awayan" def __unicode__(self): return self.nama_barang class HargaATLDisdikSMPN2Awayan(HargaATL): class Meta: proxy = True verbose_name = "07 Harga ATL Disdik SMPN 2 Awayan" verbose_name_plural = "07 Harga ATL Disdik SMPN 2 Awayan" def __unicode__(self): return "%s" % (self.id_atl) class SKPDAsalATLDisdikSMPN2Awayan(SKPDAsalATL): class Meta: proxy = True verbose_name = "07 SKPD Asal ATL Disdik SMPN 2 Awayan" verbose_name_plural = "07 SKPD Asal ATL Disdik SMPN 2 Awayan" def __unicode__(self): return "%s" % (self.id) class FotoATLDisdikSMPN2Awayan(FotoATL): class Meta: proxy = True verbose_name = "07 Foto ATL Disdik SMPN 2 Awayan" verbose_name_plural = "07 Foto ATL Disdik SMPN 2 Awayan" def __unicode__(self): return "%s" % (self.id_atl) #Disdik SMPN 2 Batumandi class ATLDisdikSMPN2Batumandi(ATL): class Meta: proxy = True verbose_name = "07 ATL Disdik SMPN 2 Batumandi" verbose_name_plural = "07 ATL Disdik SMPN 2 Batumandi" def __unicode__(self): return self.nama_barang 
class HargaATLDisdikSMPN2Batumandi(HargaATL): class Meta: proxy = True verbose_name = "07 Harga ATL Disdik SMPN 2 Batumandi" verbose_name_plural = "07 Harga ATL Disdik SMPN 2 Batumandi" def __unicode__(self): return "%s" % (self.id_atl) class SKPDAsalATLDisdikSMPN2Batumandi(SKPDAsalATL): class Meta: proxy = True verbose_name = "07 SKPD Asal ATL Disdik SMPN 2 Batumandi" verbose_name_plural = "07 SKPD Asal ATL Disdik SMPN 2 Batumandi" def __unicode__(self): return "%s" % (self.id) class FotoATLDisdikSMPN2Batumandi(FotoATL): class Meta: proxy = True verbose_name = "07 Foto ATL Disdik SMPN 2 Batumandi" verbose_name_plural = "07 Foto ATL Disdik SMPN 2 Batumandi" def __unicode__(self): return "%s" % (self.id_atl) #Disdik SMPN 2 Halong class ATLDisdikSMPN2Halong(ATL): class Meta: proxy = True verbose_name = "07 ATL Disdik SMPN 2 Halong" verbose_name_plural = "07 ATL Disdik SMPN 2 Halong" def __unicode__(self): return self.nama_barang class HargaATLDisdikSMPN2Halong(HargaATL): class Meta: proxy = True verbose_name = "07 Harga ATL Disdik SMPN 2 Halong" verbose_name_plural = "07 Harga ATL Disdik SMPN 2 Halong" def __unicode__(self): return "%s" % (self.id_atl) class SKPDAsalATLDisdikSMPN2Halong(SKPDAsalATL): class Meta: proxy = True verbose_name = "07 SKPD Asal ATL Disdik SMPN 2 Halong" verbose_name_plural = "07 SKPD Asal ATL Disdik SMPN 2 Halong" def __unicode__(self): return "%s" % (self.id) class FotoATLDisdikSMPN2Halong(FotoATL): class Meta: proxy = True verbose_name = "07 Foto ATL Disdik SMPN 2 Halong" verbose_name_plural = "07 Foto ATL Disdik SMPN 2 Halong" def __unicode__(self): return "%s" % (self.id_atl) #Disdik SMPN 2 Juai class ATLDisdikSMPN2Juai(ATL): class Meta: proxy = True verbose_name = "07 ATL Disdik SMPN 2 Juai" verbose_name_plural = "07 ATL Disdik SMPN 2 Juai" def __unicode__(self): return self.nama_barang class HargaATLDisdikSMPN2Juai(HargaATL): class Meta: proxy = True verbose_name = "07 Harga ATL Disdik SMPN 2 Juai" verbose_name_plural = "07 Harga ATL Disdik SMPN 2 Juai" def __unicode__(self): return "%s" % (self.id_atl) class SKPDAsalATLDisdikSMPN2Juai(SKPDAsalATL): class Meta: proxy = True verbose_name = "07 SKPD Asal ATL Disdik SMPN 2 Juai" verbose_name_plural = "07 SKPD Asal ATL Disdik SMPN 2 Juai" def __unicode__(self): return "%s" % (self.id) class FotoATLDisdikSMPN2Juai(FotoATL): class Meta: proxy = True verbose_name = "07 Foto ATL Disdik SMPN 2 Juai" verbose_name_plural = "07 Foto ATL Disdik SMPN 2 Juai" def __unicode__(self): return "%s" % (self.id_atl) #Disdik SMPN 2 Lampihong class ATLDisdikSMPN2Lampihong(ATL): class Meta: proxy = True verbose_name = "07 ATL Disdik SMPN 2 Lampihong" verbose_name_plural = "07 ATL Disdik SMPN 2 Lampihong" def __unicode__(self): return self.nama_barang class HargaATLDisdikSMPN2Lampihong(HargaATL): class Meta: proxy = True verbose_name = "07 Harga ATL Disdik SMPN 2 Lampihong" verbose_name_plural = "07 Harga ATL Disdik SMPN 2 Lampihong" def __unicode__(self): return "%s" % (self.id_atl) class SKPDAsalATLDisdikSMPN2Lampihong(SKPDAsalATL): class Meta: proxy = True verbose_name = "07 SKPD Asal ATL Disdik SMPN 2 Lampihong" verbose_name_plural = "07 SKPD Asal ATL Disdik SMPN 2 Lampihong" def __unicode__(self): return "%s" % (self.id) class FotoATLDisdikSMPN2Lampihong(FotoATL): class Meta: proxy = True verbose_name = "07 Foto ATL Disdik SMPN 2 Lampihong" verbose_name_plural = "07 Foto ATL Disdik SMPN 2 Lampihong" def __unicode__(self): return "%s" % (self.id_atl) #Disdik SMPN 2 Paringin class ATLDisdikSMPN2Paringin(ATL): class Meta: proxy = 
True verbose_name = "07 ATL Disdik SMPN 2 Paringin" verbose_name_plural = "07 ATL Disdik SMPN 2 Paringin" def __unicode__(self): return self.nama_barang class HargaATLDisdikSMPN2Paringin(HargaATL): class Meta: proxy = True verbose_name = "07 Harga ATL Disdik SMPN 2 Paringin" verbose_name_plural = "07 Harga ATL Disdik SMPN 2 Paringin" def __unicode__(self): return "%s" % (self.id_atl) class SKPDAsalATLDisdikSMPN2Paringin(SKPDAsalATL): class Meta: proxy = True verbose_name = "07 SKPD Asal ATL Disdik SMPN 2 Paringin" verbose_name_plural = "07 SKPD Asal ATL Disdik SMPN 2 Paringin" def __unicode__(self): return "%s" % (self.id) class FotoATLDisdikSMPN2Paringin(FotoATL): class Meta: proxy = True verbose_name = "07 Foto ATL Disdik SMPN 2 Paringin" verbose_name_plural = "07 Foto ATL Disdik SMPN 2 Paringin" def __unicode__(self): return "%s" % (self.id_atl) #Disdik SMPN 3 Awayan class ATLDisdikSMPN3Awayan(ATL): class Meta: proxy = True verbose_name = "07 ATL Disdik SMPN 3 Awayan" verbose_name_plural = "07 ATL Disdik SMPN 3 Awayan" def __unicode__(self): return self.nama_barang class HargaATLDisdikSMPN3Awayan(HargaATL): class Meta: proxy = True verbose_name = "07 Harga ATL Disdik SMPN 3 Awayan" verbose_name_plural = "07 Harga ATL Disdik SMPN 3 Awayan" def __unicode__(self): return "%s" % (self.id_atl) class SKPDAsalATLDisdikSMPN3Awayan(SKPDAsalATL): class Meta: proxy = True verbose_name = "07 SKPD Asal ATL Disdik SMPN 3 Awayan" verbose_name_plural = "07 SKPD Asal ATL Disdik SMPN 3 Awayan" def __unicode__(self): return "%s" % (self.id) class FotoATLDisdikSMPN3Awayan(FotoATL): class Meta: proxy = True verbose_name = "07 Foto ATL Disdik SMPN 3 Awayan" verbose_name_plural = "07 Foto ATL Disdik SMPN 3 Awayan" def __unicode__(self): return "%s" % (self.id_atl) #Disdik SMPN 3 Batumandi class ATLDisdikSMPN3Batumandi(ATL): class Meta: proxy = True verbose_name = "07 ATL Disdik SMPN 3 Batumandi" verbose_name_plural = "07 ATL Disdik SMPN 3 Batumandi" def __unicode__(self): return self.nama_barang class HargaATLDisdikSMPN3Batumandi(HargaATL): class Meta: proxy = True verbose_name = "07 Harga ATL Disdik SMPN 3 Batumandi" verbose_name_plural = "07 Harga ATL Disdik SMPN 3 Batumandi" def __unicode__(self): return "%s" % (self.id_atl) class SKPDAsalATLDisdikSMPN3Batumandi(SKPDAsalATL): class Meta: proxy = True verbose_name = "07 SKPD Asal ATL Disdik SMPN 3 Batumandi" verbose_name_plural = "07 SKPD Asal ATL Disdik SMPN 3 Batumandi" def __unicode__(self): return "%s" % (self.id) class FotoATLDisdikSMPN3Batumandi(FotoATL): class Meta: proxy = True verbose_name = "07 Foto ATL Disdik SMPN 3 Batumandi" verbose_name_plural = "07 Foto ATL Disdik SMPN 3 Batumandi" def __unicode__(self): return "%s" % (self.id_atl) #Disdik SMPN
= redoTimeoutMax or self.RedoTimeoutMax self.redoTimeoutMin = redoTimeoutMin or self.RedoTimeoutMin self.redoTimer = StoreTimer(self.stack.store, duration=self.redoTimeoutMin) self.burst = max(0, int(burst)) # BurstSize self.misseds = oset() # ordered set of currently missed segments self.acked = False # Have received at least one ack self.sid = self.remote.sid self.tid = self.remote.nextTid() self.prep() # prepare .txData self.tray = packeting.TxTray(stack=self.stack) def transmit(self, packet): ''' Augment transmit with restart of redo timer ''' super(Messenger, self).transmit(packet) self.redoTimer.restart() def receive(self, packet): """ Process received packet belonging to this transaction """ super(Messenger, self).receive(packet) if packet.data['tk'] == TrnsKind.message: if packet.data['pk'] == PcktKind.ack: # more self.acked = True self.another() # continue message elif packet.data['pk'] == PcktKind.resend: # resend self.acked = True self.resend() # resend missed segments elif packet.data['pk'] == PcktKind.done: # completed self.acked = True self.complete() elif packet.data['pk'] == PcktKind.nack: # rejected self.reject() def process(self): ''' Perform time based processing of transaction ''' if self.timeout > 0.0 and self.timer.expired: self.remove() console.concise("Messenger {0}. Timed out with {1} in {2} at {3}\n".format( self.stack.name, self.remote.name, self.tid, self.stack.store.stamp)) return # keep sending message until completed or timed out if self.redoTimer.expired: duration = min( max(self.redoTimeoutMin, self.redoTimer.duration * 2.0), self.redoTimeoutMax) self.redoTimer.restart(duration=duration) if self.txPacket: if self.txPacket.data['pk'] in [PcktKind.message]: if self.acked and not self.txPacket.data['af']: # turn on AgnFlag if not set self.txPacket.data.update(af=True) self.txPacket.repack() self.transmit(self.txPacket) # redo console.concise("Messenger {0}. Redo Segment {1} with " "{2} in {3} at {4}\n".format( self.stack.name, self.txPacket.data['sn'], self.remote.name, self.tid, self.stack.store.stamp)) self.stack.incStat('redo_segment') def prep(self): ''' Prepare .txData ''' self.txData.update( #sh=self.stack.local.ha[0], #sp=self.stack.local.ha[1], dh=self.remote.ha[0], # maybe needed for index dp=self.remote.ha[1], # maybe needed for index se=self.remote.nuid, de=self.remote.fuid, tk=self.kind, cf=self.rmt, bf=self.bcst, si=self.sid, ti=self.tid,) def message(self, body=None): ''' Send message or part of message. So repeatedly called until complete ''' if not self.remote.allowed: emsg = "Messenger {0}. Must be allowed with {1} first\n".format( self.stack.name, self.remote.name) console.terse(emsg) self.stack.incStat('unallowed_remote') self.remove() return if not self.tray.packets: try: self.tray.pack(data=self.txData, body=body) except raeting.PacketError as ex: console.terse(str(ex) + '\n') self.stack.incStat("packing_error") self.remove() return if self.tray.current >= len(self.tray.packets): emsg = "Messenger {0}. Current packet {1} greater than num packets {2}\n".format( self.stack.name, self.tray.current, len(self.tray.packets)) console.terse(emsg) self.remove() return if self.index not in self.remote.transactions: self.add() elif self.remote.transactions[self.index] != self: emsg = "Messenger {0}. 
Remote {1} Index collision of {2} in {3} at {4}\n".format( self.stack.name, self.remote.name, self.index, self.tid, self.stack.store.stamp) console.terse(emsg) self.incStat('message_index_collision') self.remove() return burst = (min(self.burst, (len(self.tray.packets) - self.tray.current)) if self.burst else (len(self.tray.packets) - self.tray.current)) packets = self.tray.packets[self.tray.current:self.tray.current + burst] if packets: last = packets[-1] last.data.update(wf=True) # set wait flag on last packet in burst last.repack() for packet in packets: self.transmit(packet) self.tray.last = self.tray.current self.tray.current += 1 self.stack.incStat("message_segment_tx") console.concise("Messenger {0}. Do Message Segment {1} with {2} in {3} at {4}\n".format( self.stack.name, self.tray.last, self.remote.name, self.tid, self.stack.store.stamp)) def another(self): ''' Process ack packet and continue sending ''' if not self.stack.parseInner(self.rxPacket): return self.remote.refresh(alived=True) self.stack.incStat("message_ack_rx") if self.misseds: self.sendMisseds() else: current = self.rxPacket.data['sn'] + 1 if self.tray.current > current: console.concise("Messenger {0}. Current {1} is ahead of requested {2}. Adjust.\n".format( self.stack.name, self.tray.current, current)) self.tray.current = current self.tray.last = current - 1 if self.tray.current < len(self.tray.packets): self.message() # continue message def resend(self): ''' Process resend packet and update .misseds list of missing packets Then send misseds ''' if not self.stack.parseInner(self.rxPacket): return self.remote.refresh(alived=True) self.stack.incStat('message_resend_rx') data = self.rxPacket.data body = self.rxPacket.body.data misseds = body.get('misseds') # indexes of missed segments if misseds: if not self.tray.packets: emsg = "Invalid resend request '{0}'\n".format(misseds) console.terse(emsg) self.stack.incStat('invalid_resend') return for m in misseds: try: packet = self.tray.packets[m] except IndexError as ex: #console.terse(str(ex) + '\n') console.terse("Invalid misseds segment number {0}\n".format(m)) self.stack.incStat("invalid_misseds") return self.misseds.add(packet) # add segment, set only adds if unique self.sendMisseds() def sendMisseds(self): ''' Send a burst of missed packets ''' if self.misseds: burst = (min(self.burst, (len(self.misseds))) if self.burst else len(self.misseds)) # make list of first burst number of packets misseds = [missed for missed in self.misseds][:burst] for packet in misseds[:-1]: repack = False if not packet.data['af']: # turn on again flag if not set packet.data.update(af=True) repack = True if packet.data['wf']: # turn off wait flag if set packet.data.update(wf=False) repack = True if repack: packet.repack() for packet in misseds[-1:]: # last packet repack = False if not packet.data['af']: # turn on again flag is not set packet.data.update(af=True) repack = True if not packet.data['wf']: # turn on wait flag if not set packet.data.update(wf=True) repack = True if repack: packet.repack() for packet in misseds: self.transmit(packet) self.stack.incStat("message_segment_tx") console.concise("Messenger {0}. 
Do Resend Message Segment " "{1} with {2} in {3} at {4}\n".format( self.stack.name, packet.data['sn'], self.remote.name, self.tid, self.stack.store.stamp)) self.misseds.discard(packet) # remove from self.misseds def complete(self): ''' Process Done Ack Complete transaction and remove ''' if not self.stack.parseInner(self.rxPacket): return self.remote.refresh(alived=True) self.stack.incStat('message_complete_rx') self.remove() console.concise("Messenger {0}. Done with {1} in {2} at {3}\n".format( self.stack.name, self.remote.name, self.tid, self.stack.store.stamp)) self.stack.incStat("message_initiate_complete") def reject(self): ''' Process nack packet terminate in response to nack ''' if not self.stack.parseInner(self.rxPacket): return self.remote.refresh(alived=True) self.stack.incStat('message_reject_rx') self.remove() console.concise("Messenger {0}. Rejected by {1} in {2} at {3}\n".format( self.stack.name, self.remote.name, self.tid, self.stack.store.stamp)) self.stack.incStat(self.statKey()) def nack(self): ''' Send nack to terminate transaction ''' body = odict() packet = packeting.TxPacket(stack=self.stack, kind=PcktKind.nack.value, embody=body, data=self.txData) try: packet.pack() except raeting.PacketError as ex: console.terse(str(ex) + '\n') self.stack.incStat("packing_error") self.remove() return self.transmit(packet) self.stack.incStat('message_nack_tx') self.remove() console.concise("Messenger {0}. Do Nack Reject of {1} in {2} at {3}\n".format( self.stack.name, self.remote.name, self.tid, self.stack.store.stamp)) self.stack.incStat(self.statKey()) class Messengent(Correspondent): ''' RAET protocol Messengent Correspondent class Dual of Messenger Generic Messages ''' Timeout = 0.0 RedoTimeoutMin = 0.2 # initial timeout RedoTimeoutMax = 0.5 # max timeout def __init__(self, redoTimeoutMin=None, redoTimeoutMax=None, **kwa): ''' Setup instance ''' kwa['kind'] = TrnsKind.message.value super(Messengent, self).__init__(**kwa) self.redoTimeoutMax = redoTimeoutMax or self.RedoTimeoutMax self.redoTimeoutMin = redoTimeoutMin or self.RedoTimeoutMin self.redoTimer = StoreTimer(self.stack.store, duration=self.redoTimeoutMin) self.wait = False # wf wait flag self.lowest = None self.prep() # prepare .txData self.tray = packeting.RxTray(stack=self.stack) def transmit(self, packet): ''' Augment transmit with restart of redo timer ''' super(Messengent, self).transmit(packet) self.redoTimer.restart() def receive(self, packet): """ Process received packet belonging to this transaction """ super(Messengent, self).receive(packet) # resent message if packet.data['tk'] == TrnsKind.message: if packet.data['pk'] == PcktKind.message: self.message() elif packet.data['pk'] == PcktKind.nack: # rejected self.reject() def process(self): ''' Perform time based processing of transaction ''' if self.timeout > 0.0 and self.timer.expired: self.nack() console.concise("Messengent {0}. 
Timed out with {1} in {2} at {3}\n".format( self.stack.name, self.remote.name, self.tid, self.stack.store.stamp)) return if self.redoTimer.expired: duration = min( max(self.redoTimeoutMin, self.redoTimer.duration * 2.0), self.redoTimeoutMax) self.redoTimer.restart(duration=duration) if self.tray.complete: self.complete() else: misseds = self.tray.missing(begin=self.lowest) if misseds: # resent missed segments self.lowest = misseds[0] self.resend(misseds) else: # always ask for more here self.ack() def prep(self): ''' Prepare .txData ''' self.txData.update( #sh=self.stack.local.ha[0], #sp=self.stack.local.ha[1], dh=self.remote.ha[0], # maybe needed for index dp=self.remote.ha[1], # maybe needed for index se=self.remote.nuid, de=self.remote.fuid, tk=self.kind, cf=self.rmt, bf=self.bcst, wf=self.rxPacket.data['wf'], # was self.wait si=self.sid, ti=self.tid, ck=self.rxPacket.data['ck'], # so acks use same coat kind encrypted fk=self.rxPacket.data['fk'], # so acks use same foot kind signed ) def message(self): ''' Process message packet. Called repeatedly for each packet in message ''' if not self.remote.allowed: emsg = "Messengent {0}. Must be allowed with {1} first\n".format( self.stack.name, self.remote.name) console.terse(emsg) self.stack.incStat('unallowed_message_attempt') self.nack() return try: body = self.tray.parse(self.rxPacket) except raeting.PacketError as ex: console.terse(str(ex) + '\n') self.incStat('parsing_message_error') self.nack() return if self.index not in self.remote.transactions: self.add() elif self.remote.transactions[self.index] != self: emsg = "Messengent {0}. Remote {1} Index collision of {2} in {3} at {4}\n".format( self.stack.name, self.remote.name, self.index, self.tid, self.stack.store.stamp) console.terse(emsg) self.incStat('message_index_collision') self.nack() return self.remote.refresh(alived=True) self.stack.incStat("message_segment_rx") self.wait = self.rxPacket.data['wf'] # sender is waiting for ack if self.tray.complete: self.complete() elif self.wait: # ask for more if sender waiting for ack misseds = self.tray.missing(begin=self.lowest) if misseds: # resent missed segments self.lowest = misseds[0] self.resend(misseds) else: self.ack() def ack(self): ''' Send ack to message ''' body = odict() packet = packeting.TxPacket(stack=self.stack, kind=PcktKind.ack.value, embody=body, data=self.txData) packet.data['sn'] = self.tray.highest try: packet.pack() except raeting.PacketError as ex: console.terse(str(ex) + '\n') self.stack.incStat("packing_error") self.remove() return self.transmit(packet) self.stack.incStat("message_more_ack") console.concise("Messengent {0}. Do Ack More from {1} on Segment {2} with {3} in {4} at {5}\n".format( self.stack.name, self.tray.highest + 1, self.rxPacket.data['sn'], self.remote.name, self.tid, self.stack.store.stamp)) def resend(self, misseds): ''' Send resend request(s) for missing packets ''' while misseds: if len(misseds) > raeting.MAX_MISSEDS_RESEND: # was 64 remainders = misseds[raeting.MAX_MISSEDS_RESEND:] # only do at most 64 at a time misseds = misseds[:raeting.MAX_MISSEDS_RESEND] else: remainders = [] body = odict(misseds=misseds)
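# Standalone sketch of the splitting rule used in Messengent.resend() above: a
# long list of missed segment numbers is sent as several resend requests, each
# carrying at most MAX_MISSEDS_RESEND (64, per the comments above) entries.
def chunk_misseds(misseds, max_per_request=64):
    """Yield successive slices of `misseds`, each at most `max_per_request` long."""
    while misseds:
        yield misseds[:max_per_request]
        misseds = misseds[max_per_request:]

# e.g. list(chunk_misseds(list(range(150)))) gives chunks of 64, 64 and 22.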
""" Transformer model helper methods """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import os import re import string import unicodedata import numpy as np import tensorflow as tf import tensorflow.contrib.slim as slim _NEG_INF = -1e9 def _set_up_dirs(args): if args.use_colab is None: EvalResultsFile = 'eval_results.txt' TestResults = 'test_results.txt' OUTPUT_DIR = 'ckpts/' + args.lang log_dir = 'data/logs' log_file = log_dir + args.lang + '_' + args.enc_type + '_' + str(args.emb_dim) + '.log' if not os.path.isdir(OUTPUT_DIR): os.makedirs(OUTPUT_DIR) else: from google.colab import drive drive.mount('/content/gdrive') OUTPUT_DIR = '/content/gdrive/My Drive/ckpts/' + args.lang EvalResultsFile = OUTPUT_DIR + '/eval_results.txt' TestResults = OUTPUT_DIR + '/test_results.txt' log_dir = OUTPUT_DIR + '/logs' log_file = log_dir + args.lang + '_' + args.enc_type + '_' + str(args.emb_dim) + '.txt' if not os.path.isdir(OUTPUT_DIR): os.makedirs(OUTPUT_DIR) return OUTPUT_DIR, EvalResultsFile, TestResults, log_file, log_dir def process_results(results_file): def unicode_to_ascii(s): return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn') def PreProcessSentence(w): regex = re.compile('[%s]' % re.escape(string.punctuation)) w = regex.sub('', w) w = unicode_to_ascii(w.lower().strip()) w = w.rstrip().strip() return w with open(results_file) as f: content = f.readlines() content = [PreProcessSentence(x) for x in content] with open(results_file, 'w+') as f: f.write('\n'.join(content)) def _tensorize(vocab, text): """ Function to convert texts into number sequences first, and then add padding. Basically, tensorising them. :param vocab: The vocab which is used to lookup ids :type vocab: tf.tokenizer obj :param text: A list of sentences or a text file :type text: list :return: tensorised text data :rtype: tf.tensor """ tensor = vocab.texts_to_sequences(text) tensor = tf.keras.preprocessing.sequence.pad_sequences(tensor, padding='post') return tensor def read_sentencepiece_vocab(filepath): voc = [] with open(filepath, encoding='utf-8') as fi: for line in fi: voc.append(line.split("\t")[0]) # skip the first <unk> token voc = voc[1:] return voc def parse_sentencepiece_token(token): if token.startswith("▁"): return token[1:] else: return "##" + token def max_length(tensor): return max(len(t) for t in tensor) def convert(lang, tensor): for t in tensor: if t != 0: print("%d ----> %s" % (t, lang.index_word[t.numpy()])) def unicode_to_ascii(s): return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn') def PreProcessSentence(w, sentencepiece, lang): """ Preprocess a sentence by cleaning it, making everything lower case etc. If sentencepiece is being used then don't make spaces between punctuations and words, as it has it's own tokenizer. :param lang: Language of sentence :type lang: str :param w: Sentence to be preprocessed :type w: str :param sentencepiece: Is sentencepiece being used ? :type sentencepiece: str :return:Preprocessed sentence :rtype:str """ regex = re.compile('[%s]' % re.escape(string.punctuation)) w = regex.sub('', w) w = unicode_to_ascii(w.lower().strip()) if sentencepiece == 'False': # creating a space between a word and the punctuation following it # eg: "he is a boy." => "he is a boy ." 
# Reference:- https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation w = re.sub(r"([?.!,¿])", r" \1 ", w) w = re.sub(r'[" "]+', " ", w) # replacing everything with space except (a-z, A-Z, ".", "?", "!", ",") if lang == 'eng': w = re.sub(r"[^a-z0-9A-Z?.!,¿]+", " ", w) w = w.rstrip().strip() # adding a start and an end token to the sentence # so that the model know when to start and stop predicting. w = '<start> ' + w + ' <end>' return w def PreprocessSeqSource(w, sentencepiece, lang): """ Preprocess a sentence by cleaning it, making everything lower case etc. If sentencepiece is being used then don't make spaces between punctuations and words, as it has it's own tokenizer. :param lang: Language of sentence :type lang: str :param w: Sentence to be preprocessed :type w: str :param sentencepiece: Is sentencepiece being used ? :type sentencepiece: str :return:Preprocessed sentence :rtype:str """ regex = re.compile('[%s]' % re.escape(string.punctuation)) w = regex.sub('', w) w = unicode_to_ascii(w.lower().strip()) if sentencepiece == 'False': # creating a space between a word and the punctuation following it # eg: "he is a boy." => "he is a boy ." # Reference:- https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation w = re.sub(r"([?.!,¿])", r" \1 ", w) w = re.sub(r'[" "]+', " ", w) # replacing everything with space except (a-z, A-Z, ".", "?", "!", ",") if lang == 'eng': w = re.sub(r"[^a-z0-9A-Z?.!,¿]+", " ", w) w = w.rstrip().strip() # adding a start and an end token to the sentence # so that the model know when to start and stop predicting. w = '<start> <{}> {} <end>'.format(lang, w) return w def model_summary(model): """ Gives summary of model and its params :param model: the model :type model: tf.keras.model object :return: summary text :rtype: write obj """ model_vars = model.trainable_variables slim.model_analyzer.analyze_vars(model_vars, print_info=True) def get_position_encoding(length, hidden_size, min_timescale=1.0, max_timescale=1.0e4): """ Function to get the positional encoding for sequences to impart structural information :param length: sequence length :type length: int :param hidden_size: size of hidden state :type hidden_size: Tensor :param min_timescale: Minimum scale that will be applied at each position :type min_timescale: float :param max_timescale: Maximum scale that will be applied at each position :type max_timescale: float :return: Tensor with shape [length, hidden_size] :rtype: tf.Tensor """ position = tf.cast(tf.range(length), tf.float32) num_timescales = hidden_size // 2 log_timescale_increment = ( math.log(float(max_timescale) / float(min_timescale)) / (tf.cast(num_timescales, tf.float32) - 1)) inv_timescales = min_timescale * tf.exp( tf.cast(tf.range(num_timescales), tf.float32) * -log_timescale_increment) scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0) signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) return signal def get_decoder_self_attention_bias(length): """ Calculate bias for decoder that maintains model's autoregressive property. Creates a tensor that masks out locations that correspond to illegal connections, so prediction at position i cannot draw information from future positions. 
:param length: length of sequences :type length:int :return: float tensor of shape [1, 1, length, length] :rtype: tf.Tensor """ with tf.name_scope("decoder-self_attention_bias"): valid_locs = tf.linalg.band_part(tf.ones([length, length]), -1, 0) valid_locs = tf.reshape(valid_locs, [1, 1, length, length]) decoder_bias = _NEG_INF * (1.0 - valid_locs) return decoder_bias def get_padding(x, padding_values=0): """ Returns float tensor representing the padding values :param x: input tensor :type x: tf.Tensor :param padding_values: padding_value :type padding_values: int :return: tensor with same shape as x containing values 0 or 1. :rtype: tf.tensor """ with tf.name_scope("padding"): return tf.cast(tf.equal(x, padding_values), tf.float32) def get_padding_bias(x): """ Calculate bias tensor from padding values in tensor :param x: input tensor :type x: tensor :return: attention bias :rtype: tensor [batch_size, 1, 1, length] """ with tf.name_scope("attention_bias"): padding = get_padding(x) attention_bias = padding * _NEG_INF attention_bias = tf.expand_dims( tf.expand_dims(attention_bias, axis=1), axis=1) return attention_bias def loss_function(real, pred, loss_object): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return tf.reduce_mean(loss_) def scaled_dot_product_attention(q, k, v, mask): """Calculate the attention weights. q, k, v must have matching leading dimensions. k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v. The mask has different shapes depending on its type(padding or look ahead) but it must be broadcastable for addition. Args: q: query shape == (..., seq_len_q, depth) k: key shape == (..., seq_len_k, depth) v: value shape == (..., seq_len_v, depth_v) mask: Float tensor with shape broadcastable to (..., seq_len_q, seq_len_k). Defaults to None. Returns: output, attention_weights """ matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k) # scale matmul_qk dk = tf.cast(tf.shape(k)[-1], tf.float32) scaled_attention_logits = matmul_qk / tf.math.sqrt(dk) # add the mask to the scaled tensor. if mask is not None: scaled_attention_logits += (mask * -1e9) # softmax is normalized on the last axis (seq_len_k) so that the scores # add up to 1. attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k) output = tf.matmul(attention_weights, v) # (..., seq_len_v, depth_v) return output, attention_weights def get_angles(pos, i, d_model): angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model)) return pos * angle_rates def positional_encoding(position, d_model): angle_rads = get_angles(np.arange(position)[:, np.newaxis], np.arange(d_model)[np.newaxis, :], d_model) # apply sin to even indices in the array; 2i sines = np.sin(angle_rads[:, 0::2]) # apply cos to odd indices in the array; 2i+1 cosines = np.cos(angle_rads[:, 1::2]) pos_encoding = np.concatenate([sines, cosines], axis=-1) pos_encoding = pos_encoding[np.newaxis, ...] return tf.cast(pos_encoding, dtype=tf.float32) def create_padding_mask(seq): seq = tf.cast(tf.math.equal(seq, 0), tf.float32) # add extra dimensions so that we can add the padding # to the attention logits. 
return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len) def print_out(q, k, v): temp_out, temp_attn = scaled_dot_product_attention( q, k, v, None) print('Attention weights are:') print(temp_attn) print('Output is:') print(temp_out) def point_wise_feed_forward_network(d_model, dff): return tf.keras.Sequential([ tf.keras.layers.Dense(dff, activation='relu'), # (batch_size,
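# Toy check of the attention helper defined above (assuming eager execution):
# a query aligned with the second key should put nearly all attention weight on
# the second value row.
import tensorflow as tf

temp_k = tf.constant([[10, 0, 0],
                      [0, 10, 0],
                      [0, 0, 10],
                      [0, 0, 10]], dtype=tf.float32)  # (seq_len_k=4, depth=3)
temp_v = tf.constant([[1, 0],
                      [10, 0],
                      [100, 5],
                      [1000, 6]], dtype=tf.float32)   # (seq_len_v=4, depth_v=2)
temp_q = tf.constant([[0, 10, 0]], dtype=tf.float32)  # (seq_len_q=1, depth=3)
out, weights = scaled_dot_product_attention(temp_q, temp_k, temp_v, mask=None)
# weights is approximately [[0, 1, 0, 0]] and out approximately [[10, 0]].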
""" Define the actor process for exploration of the environment in reinforcement learning. """ import json import os from copy import deepcopy from time import time from collections import namedtuple import logging import numpy as np # pylint: disable=not-callable import torch from torch.utils.tensorboard import SummaryWriter from torch.nn.utils import vector_to_parameters from distributed.environment_set import EnvironmentSet from distributed.model_util import choose_model, extend_model_config, load_model from distributed.util import ( COORDINATE_SHIFTS, LOCAL_DELTAS, anneal_factor, compute_priorities, format_torch, independent_step, multiple_independent_steps, select_actions, select_actions_value_network, time_tb, ) from surface_rl_decoder.surface_code_util import SOLVED_EPISODE_REWARD from surface_rl_decoder.surface_code import SurfaceCode from surface_rl_decoder.syndrome_masks import get_plaquette_mask, get_vertex_mask # pylint: disable=too-many-statements,too-many-locals,too-many-branches Transition = namedtuple( "Transition", ["state", "action", "reward", "next_state", "terminal"] ) VTransition = namedtuple( "VTransition", [ "state", "action", "reward", "next_state", "terminal", "optimal_action", "optimal_reward", "optimal_next_state", "optimal_terminal", ], ) def actor(args): """ Define the actor function to be run by a mp process. The actor defines multiple environments which are in differing states and can perform steps independent of each other. After a certain number of steps, the used policy network is updated with new parameters from the learner process. Parameters ========== args: dictionary containing actor configuration "actor_io_queue": mp.Queue object for communication between actor and replay memory "learner_actor_queue": mp.Queue object for communication between actor and learner process "num_environments": (int) number of independent environments to perform steps in "size_action_history": (int) maximum size of the action history of the environment, trying to execute more actions than this in one environment causes the environment to terminate and start again with a new syndrome. "size_local_memory_buffer": (int) maximum number of objects in the local memory store for transitions, actions, q values, rewards "num_actions_per_qubit": (int) number of possible operators on a qubit, default should be 3, for Pauli-X, -Y, -Z "verbosity": verbosity level "epsilon": (float) probability to choose a random action "model_name": (str) specifier for the model "model_config": (dict) configuration for network architecture. 
May change with different architectures "benchmarking": whether certain performance time measurements should be performed "summary_path": (str), base path for tensorboard "summary_date": (str), target path for tensorboard for current run "load_model": toggle whether to load a pretrained model "old_model_path" if 'load_model' is activated, this is the location from which the old model is loaded "discount_factor": gamma factor in reinforcement learning "discount_intermediate_reward": the discount factor dictating how strongly lower layers should be discounted when calculating the reward for creating/destroying syndromes "min_value_factor_intermediate_reward": minimum value that the effect of the intermediate reward should be annealed to "decay_factor_intermediate_reward": how strongly the intermediate reward should decay over time during a training run "decay_factor_epsilon": how strongly the exploration factor ε should decay over time during a training run "min_value_factor_epsilon": minimum value that the exploration factor ε should be annealed to """ num_environments = args["num_environments"] actor_id = args["id"] size_action_history = args["size_action_history"] device = args["device"] verbosity = args["verbosity"] benchmarking = args["benchmarking"] num_actions_per_qubit = args["num_actions_per_qubit"] epsilon = args["epsilon"] load_model_flag = args["load_model"] old_model_path = args["old_model_path"] discount_factor = args["discount_factor"] discount_factor_anneal = args["discount_factor_anneal"] discount_factor_start = args["discount_factor_start"] rl_type = args["rl_type"] p_error = args["p_error"] p_msmt = args["p_msmt"] p_error_start = args["p_error_start"] p_msmt_start = args["p_msmt_start"] p_error_anneal = args["p_error_anneal"] p_msmt_anneal = args["p_msmt_anneal"] discount_intermediate_reward = float(args.get("discount_intermediate_reward", 0.75)) min_value_factor_intermediate_reward = float( args.get("min_value_intermediate_reward", 0.0) ) decay_factor_intermediate_reward = float( args.get("decay_factor_intermediate_reward", 1.0) ) decay_factor_epsilon = float(args.get("decay_factor_epsilon", 1.0)) min_value_factor_epsilon = float(args.get("min_value_factor_epsilon", 0.0)) logging.basicConfig(level=logging.INFO) logger = logging.getLogger(f"actor_{actor_id}") if verbosity >= 4: logger.setLevel(logging.DEBUG) else: logger.setLevel(logging.INFO) logger.info("Fire up all the environments!") seed = int(args.get("seed", 0)) if seed != 0: np.random.seed(seed + actor_id) torch.manual_seed(seed + actor_id) torch.cuda.manual_seed(seed + actor_id) torch.cuda.manual_seed_all(seed + actor_id) env = SurfaceCode() state_size = env.syndrome_size code_size = state_size - 1 stack_depth = env.stack_depth # create a collection of independent environments environments = EnvironmentSet(env, num_environments) if rl_type == "v": transition_type = np.dtype( [ ("state", (np.uint8, (stack_depth, state_size, state_size))), ("action", (np.uint8, 3)), ("reward", float), ("next_state", (np.uint8, (stack_depth, state_size, state_size))), ("terminal", bool), ("optimal_action", (np.uint8, 3)), ("optimal_reward", float), ( "optimal_next_state", (np.uint8, (stack_depth, state_size, state_size)), ), ("optimal_terminal", bool), ] ) else: transition_type = np.dtype( [ ("state", (np.uint8, (stack_depth, state_size, state_size))), ("action", (np.uint8, 3)), ("reward", float), ("next_state", (np.uint8, (stack_depth, state_size, state_size))), ("terminal", bool), ] ) # initialize all states p_error_start_list = 
np.repeat(p_error_start, num_environments) p_msmt_start_list = np.repeat(p_msmt_start, num_environments) states = environments.reset_all( p_error=p_error_start_list, p_msmt=p_msmt_start_list ) steps_per_episode = np.zeros(num_environments) # initialize local memory buffers size_local_memory_buffer = args["size_local_memory_buffer"] + 1 local_buffer_transitions = np.empty( (num_environments, size_local_memory_buffer), dtype=transition_type ) local_buffer_actions = np.empty( (num_environments, size_local_memory_buffer, 3), dtype=np.uint8 ) if rl_type == "q": local_buffer_qvalues = np.empty( (num_environments, size_local_memory_buffer), dtype=(float, num_actions_per_qubit * code_size * code_size + 1), ) elif rl_type == "v": local_buffer_qvalues = np.empty( (num_environments, size_local_memory_buffer), dtype=float, ) local_buffer_rewards = np.empty( (num_environments, size_local_memory_buffer), dtype=float ) buffer_idx = 0 # load communication queues actor_io_queue = args["actor_io_queue"] learner_actor_queue = args["learner_actor_queue"] # initialize the policy agent model_name = args["model_name"] model_config = args["model_config"] model_config = extend_model_config( model_config, state_size, stack_depth, device=device ) base_model_config_path = args["base_model_config_path"] base_model_path = args["base_model_path"] use_transfer_learning = args["use_transfer_learning"] if rl_type == "v": vertex_mask = get_vertex_mask(code_size) plaquette_mask = get_plaquette_mask(code_size) combined_mask = np.logical_or(vertex_mask, plaquette_mask, dtype=np.int8) combined_mask = format_torch(combined_mask, device=device, dtype=torch.int8) # prepare Transfer learning, if enabled if use_transfer_learning: logger.info(f"Prepare transfer learning for d={code_size}.") with open(base_model_config_path, "r") as json_file: base_model_config = json.load(json_file)["simple_conv"] base_model_config = extend_model_config( base_model_config, state_size, stack_depth, device=device ) else: base_model_config = None model = choose_model( model_name, model_config, model_path_base=base_model_path, model_config_base=base_model_config, transfer_learning=use_transfer_learning, rl_type=rl_type, ) if load_model_flag: model, _, _ = load_model(model, old_model_path) logger.info(f"Loaded actor model from {old_model_path}") model.to(device) performance_start = time() heart = time() heartbeat_interval = 60 # seconds logger.info(f"Actor {actor_id} starting loop on device {device}") sent_data_chunks = 0 # initialize tensorboard for monitoring/logging summary_path = args["summary_path"] summary_date = args["summary_date"] tensorboard = SummaryWriter( os.path.join(summary_path, str(code_size), summary_date, "actor") ) tensorboard_step = 0 steps_to_benchmark = 0 benchmark_frequency = 1000 # pylint: disable=too-many-nested-blocks # start the main exploration loop while True: steps_per_episode += 1 steps_to_benchmark += 1 # select actions based on the chosen model and latest states _states = torch.tensor(states, dtype=torch.float32, device=device) select_action_start = time() current_time_tb = time_tb() delta_t = select_action_start - performance_start annealed_epsilon = anneal_factor( delta_t, decay_factor=decay_factor_epsilon, min_value=min_value_factor_epsilon, base_factor=epsilon, ) current_p_error = anneal_factor( delta_t, decay_factor=p_error_anneal, min_value=p_error_start, max_value=p_error, base_factor=p_error_start, ) current_p_msmt = anneal_factor( delta_t, decay_factor=p_msmt_anneal, min_value=p_msmt_start, max_value=p_msmt, 
base_factor=p_msmt_start, ) current_discount_factor = anneal_factor( delta_t, decay_factor=discount_factor_anneal, min_value=discount_factor_start, max_value=discount_factor, base_factor=discount_factor_start, ) if rl_type == "q": actions, q_values = select_actions( _states, model, state_size - 1, epsilon=annealed_epsilon ) elif rl_type == "v": # call values here the same as q values, although they are actually # plain values ( actions, q_values, optimal_actions, optimal_q_values, ) = select_actions_value_network( _states, model, code_size, stack_depth, combined_mask, COORDINATE_SHIFTS, LOCAL_DELTAS, device=device, epsilon=epsilon, ) q_values = np.squeeze(q_values) optimal_q_values = np.squeeze(optimal_q_values) if benchmarking and steps_to_benchmark % benchmark_frequency == 0: select_action_stop = time() logger.info( f"time for select action: {select_action_stop - select_action_start}" ) if verbosity >= 2: tensorboard.add_scalars( "actor/parameters", { "annealed_epsilon": annealed_epsilon, "annealed_p_error": current_p_error, "annealed_p_msmt": current_p_msmt, "annealed_discount_factor": current_discount_factor, }, delta_t, walltime=current_time_tb, ) # perform the chosen actions steps_start = time() annealing_intermediate_reward = anneal_factor( delta_t, decay_factor=decay_factor_intermediate_reward, min_value=min_value_factor_intermediate_reward, ) if rl_type == "v": qubits = [ deepcopy(environments.environments[i].qubits) for i in range(num_environments) ] syndrome_errors = [ deepcopy(environments.environments[i].syndrome_errors) for i in range(num_environments) ] actual_errors = [ deepcopy(environments.environments[i].actual_errors) for i in range(num_environments) ] action_histories = [ deepcopy(environments.environments[i].actions) for i in range(num_environments) ] ( optimal_next_states, optimal_rewards, optimal_terminals, _, ) = multiple_independent_steps( states, qubits, optimal_actions, vertex_mask, plaquette_mask, syndrome_errors, actual_errors, action_histories, discount_intermediate_reward=discount_intermediate_reward, annealing_intermediate_reward=annealing_intermediate_reward, punish_repeating_actions=0, ) next_states, rewards, terminals, _ = environments.step( actions, discount_intermediate_reward=discount_intermediate_reward, annealing_intermediate_reward=annealing_intermediate_reward, punish_repeating_actions=0, ) if benchmarking and steps_to_benchmark % benchmark_frequency == 0: steps_stop = time() logger.info( f"time to step through environments: {steps_stop - steps_start}" ) if verbosity >= 2: current_time_tb = time_tb() tensorboard.add_scalars( "actor/effect_intermediate_reward", {"anneal_factor": annealing_intermediate_reward}, delta_t, walltime=current_time_tb, ) # save transitions to local buffer if rl_type == "v": transitions = np.asarray( [ VTransition( states[i], actions[i], rewards[i], next_states[i], terminals[i], optimal_actions[i], optimal_rewards[i], optimal_next_states[i], optimal_terminals[i], ) for i in range(num_environments) ], dtype=transition_type, ) else: transitions = np.asarray( [ Transition( states[i], actions[i], rewards[i], next_states[i], terminals[i] ) for i in range(num_environments) ], dtype=transition_type, ) local_buffer_transitions[:, buffer_idx] = transitions local_buffer_actions[:, buffer_idx] = actions local_buffer_qvalues[:, buffer_idx] = q_values local_buffer_rewards[:, buffer_idx] = rewards buffer_idx += 1 # prepare to send local transitions to replay memory if buffer_idx >= size_local_memory_buffer: # get new weights for the 
policy model here if (learner_qsize := learner_actor_queue.qsize()) > 0: # discard stale parameter updates, keeping only the newest one for _ in range(learner_qsize - 1): learner_actor_queue.get() msg, network_params = learner_actor_queue.get() assert msg is not None
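# --- Illustrative sketch (not part of the original actor module) -------------
# The docstring at the top of this actor function describes the configuration
# keys it expects. The dictionary below is a minimal, made-up example of such a
# configuration: only the key names are taken from the argument unpacking
# above, every concrete value is an assumption, and the queues would normally
# be created by the process that spawns the actors.
import multiprocessing as mp

example_actor_args = {
    "num_environments": 16,            # parallel surface-code environments
    "id": 0,                           # index of this actor process
    "size_action_history": 32,
    "size_local_memory_buffer": 64,    # transitions collected before sending
    "num_actions_per_qubit": 3,
    "device": "cpu",
    "verbosity": 2,
    "benchmarking": False,
    "epsilon": 0.1,                    # initial exploration rate
    "decay_factor_epsilon": 0.9999,    # optional keys, read via args.get(...)
    "min_value_factor_epsilon": 0.02,
    "discount_intermediate_reward": 0.75,
    "load_model": False,
    "old_model_path": "",
    "rl_type": "q",                    # "q" (Q-network) or "v" (value network)
    "discount_factor": 0.95,
    "discount_factor_start": 0.75,
    "discount_factor_anneal": 1.0,
    "p_error": 0.01, "p_error_start": 0.001, "p_error_anneal": 1.0,
    "p_msmt": 0.01, "p_msmt_start": 0.001, "p_msmt_anneal": 1.0,
    "actor_io_queue": mp.Queue(),      # actor -> replay memory
    "learner_actor_queue": mp.Queue(), # learner -> actor (parameter updates)
    "model_name": "example_conv",      # hypothetical model identifier
    "model_config": {},
    "base_model_config_path": "",
    "base_model_path": "",
    "use_transfer_learning": False,
    "summary_path": "runs",            # tensorboard base path
    "summary_date": "2021-01-01_00-00",
    "seed": 42,
}
# ------------------------------------------------------------------------------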
b = bias.broadcast(y.shape, [0,2,3]) y = y + b return y else: N,C,H,W = x.shape Kh, Kw = weight.shape[-2:] G = groups CpG = C // G # channels per group oc = out_channels oh = (H+padding[0]*2-Kh*dilation[0]+dilation[0]-1)//stride[0]+1 ow = (W+padding[1]*2-Kw*dilation[1]+dilation[1]-1)//stride[1]+1 xx = x.reindex([N,G,oc//G,CpG,oh,ow,Kh,Kw], [ 'i0', # Nid f'i1*{CpG}+i3', # Gid f'i4*{stride[0]}-{padding[0]}+i6*{dilation[0]}', # Hid+Khid f'i5*{stride[1]}-{padding[1]}+i7*{dilation[1]}', # Wid+KWid ]) xx.compile_options = {"G":G} # w: [oc, CpG, Kh, Kw] ww = weight.reindex([N, G, oc//G, CpG, oh, ow, Kh, Kw], [ f'i1*{oc//G}+i2', 'i3', 'i6', 'i7' ]) yy = xx*ww y = yy.reindex_reduce('add', [N, oc, oh, ow], [ 'i0', f'i1*{oc//G}+i2', 'i4', 'i5' ]) if bias is not None: b = bias.broadcast(y.shape, [0,2,3]) y = y + b return y def conv3d(x, weight, bias=None, stride=1, padding=0, dilation=1, groups=1): ''' Applies a 3D convolution over an input signal composed of several input planes. :param x: the input volume :type x: jt.Var :param weight: the convolution kernel :type weight: jt.Var :param bias: the bias after convolution :type bias: jt,Var, optional :param stride: Stride of the convolution. Default: 1 :type stride: int or tuple, optional :param padding: Padding added to all four sides of the input. Default: 0 :type padding: int or tuple, optional :param dilation: Spacing between kernel elements. Default: 1 :type dilation: int or tuple, optional :param groups: Number of blocked connections from input channels to output channels. Default: 1 :type groups: int, optional Example: >>> x = jt.randn(4, 24, 50, 50, 50) >>> w = jt.randn(32, 24, 3, 3, 3) >>> y = nn.conv2d(x, w) ''' padding = _triple(padding) stride = _triple(stride) dilation = _triple(dilation) out_channels = weight.shape[0] if jt.flags.use_cuda and jt.cudnn: y = jt.cudnn.ops.cudnn_conv3d(x, weight, *stride, *padding, *dilation, groups) elif groups == 1: N,C,D,H,W = x.shape Kd, Kh, Kw = weight.shape[-3:] od = (D+padding[0]*2-Kd*dilation[0]+dilation[0]-1)//stride[0]+1 oh = (H+padding[1]*2-Kh*dilation[1]+dilation[1]-1)//stride[1]+1 ow = (W+padding[2]*2-Kw*dilation[2]+dilation[2]-1)//stride[2]+1 xx = x.reindex([N,out_channels,C,od,oh,ow,Kd,Kh,Kw], [ 'i0', # Nid 'i2', # Cid f'i3*{stride[0]}-{padding[0]}+i6*{dilation[0]}', # Hid+Khid f'i4*{stride[1]}-{padding[1]}+i7*{dilation[1]}', # Wid+KWid f'i5*{stride[2]}-{padding[2]}+i8*{dilation[2]}', # Did+KDid ]) ww = weight.broadcast(xx.shape, [0,3,4,5]) yy = xx*ww y = yy.sum([2,6,7,8]) # Kc, Kh, Kw,Kd else: N,C,D,H,W = x.shape Kd, Kh, Kw = weight.shape[-3:] G = groups CpG = C // G # channels per group oc = out_channels od = (D+padding[0]*2-Kd*dilation[0]+dilation[0]-1)//stride[0]+1 oh = (H+padding[1]*2-Kh*dilation[1]+dilation[1]-1)//stride[1]+1 ow = (W+padding[2]*2-Kw*dilation[2]+dilation[2]-1)//stride[2]+1 xx = x.reindex([N,G,oc//G,CpG,od,oh,ow,Kd,Kh,Kw], [ 'i0', # Nid f'i1*{CpG}+i3', # Gid f'i4*{stride[0]}-{padding[0]}+i7*{dilation[0]}', # Hid+Khid f'i5*{stride[1]}-{padding[1]}+i8*{dilation[1]}', # Wid+KWid f'i6*{stride[2]}-{padding[2]}+i9*{dilation[2]}', # Did+KDid ]) xx.compile_options = {"G":G} # w: [oc, CpG, Kh, Kw, Kd] ww = weight.reindex([N, G, oc//G, CpG, oh, ow, od, Kh, Kw, Kd], [ f'i1*{oc//G}+i2', 'i3', 'i7', 'i8', 'i9' ]) yy = xx*ww y = yy.reindex_reduce('add', [N, oc, od, oh, ow], [ 'i0', f'i1*{oc//G}+i2', 'i4', 'i5', 'i6' ]) if bias is not None: b = bias.broadcast(y.shape, [0,2,3,4]) y = y + b return y class ConvTranspose(Module): def __init__(self, in_channels, out_channels, kernel_size, 
stride=1, \ padding=0, output_padding=0, groups=1, bias=True, dilation=1): self.in_channels = in_channels self.out_channels = out_channels # added self.dilation = dilation self.group = groups assert groups==1, "Group conv not supported yet." self.kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size, kernel_size) self.stride = stride if isinstance(stride, tuple) else (stride, stride) self.dilation = dilation if isinstance(dilation, tuple) else (dilation, dilation) # added self.padding = padding if isinstance(padding, tuple) else (padding, padding) self.real_padding = (self.dilation[0] * (self.kernel_size[0] - 1) - self.padding[0], self.dilation[1] * (self.kernel_size[1] - 1) - self.padding[1]) self.output_padding = output_padding if isinstance (output_padding, tuple) else (output_padding, output_padding) assert self.output_padding[0] < max(self.stride[0], self.dilation[0]) and \ self.output_padding[1] < max(self.stride[1], self.dilation[1]), \ "output padding must be smaller than max(stride, dilation)" self.weight = init.invariant_uniform((in_channels, out_channels) + self.kernel_size, dtype="float") if bias: fan=1 for i in self.weight.shape[1:]: fan *= i bound = 1 / math.sqrt(fan) self.bias = init.uniform([out_channels], dtype="float", low=-bound, high=bound) else: self.bias = None def execute(self, x): N,C,H,W = x.shape i,o,h,w = self.weight.shape assert C==i stride_h, stride_w = self.stride padding_h, padding_w = self.padding dilation_h, dilation_w = self.dilation h_out = (H-1) * stride_h + self.output_padding[0] - 2*padding_h + 1 + (h-1)*dilation_h w_out = (W-1) * stride_w + self.output_padding[1] - 2*padding_w + 1 + (w-1)*dilation_w out_shape = (N, o, h_out, w_out) shape = (N, i, o, H, W, h, w) xx = x.broadcast(shape, (2, 5, 6)) # i,h,w ww = self.weight.broadcast(shape, (0, 3, 4)) # N,H,W y = (ww*xx).reindex_reduce("add", out_shape, [ 'i0', # N 'i2', # o f'i3*{stride_h}-{padding_h}+i5*{dilation_h}', # Hid+Khid f'i4*{stride_w}-{padding_w}+i6*{dilation_w}', # Wid+KWid ]) if self.bias is not None: b = self.bias.broadcast(y.shape, [0,2,3]) y = y + b return y class ConvTranspose3d(Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, \ padding=0, output_padding=0, groups=1, bias=True, dilation=1): self.in_channels = in_channels self.out_channels = out_channels # added self.dilation = dilation self.group = groups assert groups==1, "Group conv not supported yet." 
self.kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size, kernel_size, kernel_size) self.stride = stride if isinstance(stride, tuple) else (stride, stride, stride) self.dilation = dilation if isinstance(dilation, tuple) else (dilation, dilation, dilation) # added self.padding = padding if isinstance(padding, tuple) else (padding, padding, padding) self.real_padding = ( self.dilation[0] * (self.kernel_size[0] - 1) - self.padding[0], self.dilation[1] * (self.kernel_size[1] - 1) - self.padding[1], self.dilation[2] * (self.kernel_size[2] - 1) - self.padding[2]) self.output_padding = output_padding if isinstance (output_padding, tuple) else (output_padding, output_padding, output_padding) assert self.output_padding[0] < max(self.stride[0], self.dilation[0]) and \ self.output_padding[1] < max(self.stride[1], self.dilation[1]) and \ self.output_padding[2] < max(self.stride[2], self.dilation[2]), \ "output padding must be smaller than max(stride, dilation)" self.weight = init.invariant_uniform((in_channels, out_channels) + self.kernel_size, dtype="float") if bias: fan=1 for i in self.weight.shape[1:]: fan *= i bound = 1 / math.sqrt(fan) self.bias = init.uniform([out_channels], dtype="float", low=-bound, high=bound) else: self.bias = None def execute(self, x): return conv_transpose3d(x, self.weight, self.bias, self.stride, self.padding, self.output_padding, self.group, self.dilation) def conv_transpose(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1): x = input N,C,H,W = x.shape i,o,h,w = weight.shape assert C==i assert groups==1, "Group conv not supported yet." stride = stride if isinstance(stride, tuple) else (stride, stride) dilation = dilation if isinstance(dilation, tuple) else (dilation, dilation) # added padding = padding if isinstance(padding, tuple) else (padding, padding) output_padding = output_padding if isinstance (output_padding, tuple) else (output_padding, output_padding) assert output_padding[0] < max(stride[0], dilation[0]) and \ output_padding[1] < max(stride[1], dilation[1]), \ "output padding must be smaller than max(stride, dilation)" stride_h, stride_w = stride padding_h, padding_w = padding dilation_h, dilation_w = dilation h_out = (H-1) * stride_h + output_padding[0] - 2*padding_h + 1 + (h-1)*dilation_h w_out = (W-1) * stride_w + output_padding[1] - 2*padding_w + 1 + (w-1)*dilation_w out_shape = (N, o, h_out, w_out) shape = (N, i, o, H, W, h, w) xx = x.broadcast(shape, (2, 5, 6)) # i,h,w ww = weight.broadcast(shape, (0, 3, 4)) # N,H,W y = (ww*xx).reindex_reduce("add", out_shape, [ 'i0', # N 'i2', # o f'i3*{stride_h}-{padding_h}+i5*{dilation_h}', # Hid+Khid f'i4*{stride_w}-{padding_w}+i6*{dilation_w}', # Wid+KWid ]) if isinstance(bias, jt.Var): b = bias.broadcast(y.shape, [0,2,3]) y = y + b else: assert not bias, "Bias should be none or jittor var" return y def conv_transpose3d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1): x = input N,C,D,H,W = x.shape i,o,d,h,w = weight.shape assert C==i assert groups==1, "Group conv not supported yet." 
stride = stride if isinstance(stride, tuple) else (stride, stride, stride) dilation = dilation if isinstance(dilation, tuple) else (dilation, dilation, dilation) # added padding = padding if isinstance(padding, tuple) else (padding, padding, padding) output_padding = output_padding if isinstance (output_padding, tuple) else (output_padding, output_padding, output_padding) assert output_padding[0] < max(stride[0], dilation[0]) and \ output_padding[1] < max(stride[1], dilation[1]) and \ output_padding[2] < max(stride[2], dilation[2]), \ "output padding must be smaller than max(stride, dilation)" stride_d, stride_h, stride_w = stride padding_d, padding_h, padding_w = padding dilation_d, dilation_h, dilation_w = dilation d_out = (D-1) * stride_d + output_padding[0] - 2*padding_d + 1 + (d-1)*dilation_d h_out = (H-1) * stride_h + output_padding[1] - 2*padding_h + 1 + (h-1)*dilation_h w_out = (W-1) * stride_w + output_padding[2] - 2*padding_w + 1 + (w-1)*dilation_w out_shape = (N, o, d_out, h_out, w_out) if jt.flags.use_cuda and jt.cudnn: return jt.cudnn.ops.cudnn_conv3d_backward_x(weight, x, *out_shape[2:], *stride, *padding, *dilation, groups) shape = (N, i, o, D, H, W, d, h, w) xx = x.broadcast(shape, (2, 6, 7, 8)) # i,h,w ww = weight.broadcast(shape, (0, 3, 4,
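# --- Illustrative sketch (not part of the original module) -------------------
# The convolution and transposed-convolution code above repeatedly uses the
# same output-size arithmetic, e.g.
#   oh    = (H + 2*padding - dilation*(Kh - 1) - 1) // stride + 1
# for the forward direction and
#   h_out = (H - 1)*stride - 2*padding + dilation*(h - 1) + 1 + output_padding
# for the transposed direction. The snippet below is a dependency-free check of
# that arithmetic; the concrete sizes are assumptions chosen for the example.

def conv_out_size(size, kernel, stride=1, padding=0, dilation=1):
    """Spatial output size of a standard convolution (same formula as above)."""
    return (size + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1

def conv_transpose_out_size(size, kernel, stride=1, padding=0,
                            dilation=1, output_padding=0):
    """Spatial output size of a transposed convolution (same formula as above)."""
    return (size - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1 + output_padding

# A 50x50 input, 3x3 kernel, stride 2, padding 1 -> 25x25 output.
assert conv_out_size(50, 3, stride=2, padding=1) == 25
# Transposing the same configuration maps 25 back to 49; output_padding=1
# recovers exactly 50.
assert conv_transpose_out_size(25, 3, stride=2, padding=1) == 49
assert conv_transpose_out_size(25, 3, stride=2, padding=1, output_padding=1) == 50
# ------------------------------------------------------------------------------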
import math import numpy as np import tensorflow as tf from tensorflow.keras import layers as tfkl from tensorflow_probability import distributions as tfd from tensorflow.keras.mixed_precision import experimental as prec import common class RSSM(common.Module): def __init__( self, stoch=30, deter=200, hidden=200, discrete=False, act=tf.nn.elu, std_act='softplus', min_std=0.1, warm_up=1, num_prototypes=2500, proto=30, temperature=0.1, sinkhorn_eps=0.05, sinkhorn_iters=3): super().__init__() self._stoch = stoch self._deter = deter self._hidden = hidden self._discrete = discrete self._act = getattr(tf.nn, act) if isinstance(act, str) else act self._std_act = std_act self._min_std = min_std ################################################################################ self._warm_up = warm_up self._num_prototypes = num_prototypes self._proto = proto self._temperature = temperature self._sinkhorn_eps = sinkhorn_eps self._sinkhorn_iters = sinkhorn_iters initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.) self._prototypes = tf.Variable( initializer(shape=(num_prototypes, proto), dtype=tf.float32)) ################################################################################ self._cell = GRUCell(self._deter, norm=True) self._cast = lambda x: tf.cast(x, prec.global_policy().compute_dtype) def initial(self, batch_size): dtype = prec.global_policy().compute_dtype if self._discrete: state = dict( logit=tf.zeros([batch_size, self._stoch, self._discrete], dtype), stoch=tf.zeros([batch_size, self._stoch, self._discrete], dtype), deter=self._cell.get_initial_state(None, batch_size, dtype)) else: state = dict( mean=tf.zeros([batch_size, self._stoch], dtype), std=tf.zeros([batch_size, self._stoch], dtype), stoch=tf.zeros([batch_size, self._stoch], dtype), deter=self._cell.get_initial_state(None, batch_size, dtype)) return state @tf.function def observe(self, embed, action, state=None): swap = lambda x: tf.transpose(x, [1, 0] + list(range(2, len(x.shape)))) if state is None: state = self.initial(tf.shape(action)[0]) embed, action = swap(embed), swap(action) post, prior = common.static_scan( lambda prev, inputs: self.obs_step(prev[0], *inputs), (action, embed), (state, state)) post = {k: swap(v) for k, v in post.items()} prior = {k: swap(v) for k, v in prior.items()} return post, prior @tf.function def imagine(self, action, state=None): swap = lambda x: tf.transpose(x, [1, 0] + list(range(2, len(x.shape)))) if state is None: state = self.initial(tf.shape(action)[0]) assert isinstance(state, dict), state action = swap(action) prior = common.static_scan(self.img_step, action, state) prior = {k: swap(v) for k, v in prior.items()} return prior def get_feat(self, state): stoch = self._cast(state['stoch']) if self._discrete: shape = stoch.shape[:-2] + [self._stoch * self._discrete] stoch = tf.reshape(stoch, shape) return tf.concat([stoch, state['deter']], -1) def get_dist(self, state): if self._discrete: logit = state['logit'] logit = tf.cast(logit, tf.float32) dist = tfd.Independent(common.OneHotDist(logit), 1) else: mean, std = state['mean'], state['std'] mean = tf.cast(mean, tf.float32) std = tf.cast(std, tf.float32) dist = tfd.MultivariateNormalDiag(mean, std) return dist @tf.function def obs_step(self, prev_state, prev_action, embed, sample=True): prior = self.img_step(prev_state, prev_action, sample) x = tf.concat([prior['deter'], embed], -1) x = self.get('obs_out', tfkl.Dense, self._hidden, self._act)(x) stats = self._suff_stats_layer('obs_dist', x) dist = self.get_dist(stats) stoch = 
dist.sample() if sample else dist.mode() post = {'stoch': stoch, 'deter': prior['deter'], **stats} return post, prior @tf.function def img_step(self, prev_state, prev_action, sample=True): prev_stoch = self._cast(prev_state['stoch']) prev_action = self._cast(prev_action) if self._discrete: shape = prev_stoch.shape[:-2] + [self._stoch * self._discrete] prev_stoch = tf.reshape(prev_stoch, shape) x = tf.concat([prev_stoch, prev_action], -1) x = self.get('img_in', tfkl.Dense, self._hidden, self._act)(x) deter = prev_state['deter'] x, deter = self._cell(x, [deter]) deter = deter[0] # Keras wraps the state in a list. x = self.get('img_out', tfkl.Dense, self._hidden, self._act)(x) stats = self._suff_stats_layer('img_dist', x) dist = self.get_dist(stats) stoch = dist.sample() if sample else dist.mode() prior = {'stoch': stoch, 'deter': deter, **stats} return prior def _suff_stats_layer(self, name, x): if self._discrete: x = self.get(name, tfkl.Dense, self._stoch * self._discrete, None)(x) logit = tf.reshape(x, x.shape[:-1] + [self._stoch, self._discrete]) return {'logit': logit} else: x = self.get(name, tfkl.Dense, 2 * self._stoch, None)(x) mean, std = tf.split(x, 2, -1) std = { 'softplus': lambda: tf.nn.softplus(std), 'sigmoid': lambda: tf.nn.sigmoid(std), 'sigmoid2': lambda: 2 * tf.nn.sigmoid(std / 2), }[self._std_act]() std = std + self._min_std return {'mean': mean, 'std': std} ################################################################################ # Sinkhorn-Knopp @tf.function def sinkhorn(self, scores): shape = scores.shape K = shape[0] scores = tf.reshape(scores, [-1]) log_Q = tf.nn.log_softmax(scores / self._sinkhorn_eps, axis=0) log_Q = tf.reshape(log_Q, [K, -1]) N = log_Q.shape[1] for _ in range(self._sinkhorn_iters): log_row_sums = tf.math.reduce_logsumexp(log_Q, axis=1, keepdims=True) log_Q = log_Q - log_row_sums - math.log(K) log_col_sums = tf.math.reduce_logsumexp(log_Q, axis=0, keepdims=True) log_Q = log_Q - log_col_sums - math.log(N) log_Q = log_Q + math.log(N) Q = tf.math.exp(log_Q) return tf.reshape(Q, shape) @tf.function def proto_loss(self, post, embed, ema_proj): prototypes = tf.math.l2_normalize(self._prototypes, axis=-1) self._prototypes.assign(prototypes) obs_proj = self.get('obs_proj', tfkl.Dense, self._proto, None)(embed) obs_proj = tf.cast(obs_proj, tf.float32) obs_norm = tf.norm(obs_proj, axis=-1) obs_proj = tf.math.l2_normalize(obs_proj, axis=-1) B, T = obs_proj.shape[:2] obs_proj = tf.reshape(obs_proj, [B*T, self._proto]) obs_scores = tf.linalg.matmul(self._prototypes, obs_proj, transpose_b=True) obs_scores = tf.reshape(obs_scores, [self._num_prototypes, B, T]) obs_scores = obs_scores[:, :, self._warm_up:] obs_logits = tf.nn.log_softmax(obs_scores / self._temperature, axis=0) obs_logits_1, obs_logits_2 = tf.split(obs_logits, 2, axis=1) ema_proj = tf.reshape(ema_proj, [B*T, self._proto]) ema_scores = tf.linalg.matmul(self._prototypes, ema_proj, transpose_b=True) ema_scores = tf.reshape(ema_scores, [self._num_prototypes, B, T]) ema_scores = ema_scores[:, :, self._warm_up:] ema_scores_1, ema_scores_2 = tf.split(ema_scores, 2, axis=1) ema_targets_1 = tf.stop_gradient(self.sinkhorn(ema_scores_1)) ema_targets_2 = tf.stop_gradient(self.sinkhorn(ema_scores_2)) ema_targets = tf.concat([ema_targets_1, ema_targets_2], axis=1) feat = self.get_feat(post) feat_proj = self.get('feat_proj', tfkl.Dense, self._proto, None)(feat) feat_proj = tf.cast(feat_proj, tf.float32) feat_norm = tf.norm(feat_proj, axis=-1) feat_proj = tf.math.l2_normalize(feat_proj, axis=-1) feat_proj = 
tf.reshape(feat_proj, [B*T, self._proto]) feat_scores = tf.linalg.matmul(self._prototypes, feat_proj, transpose_b=True) feat_scores = tf.reshape(feat_scores, [self._num_prototypes, B, T]) feat_scores = feat_scores[:, :, self._warm_up:] feat_logits = tf.nn.log_softmax(feat_scores / self._temperature, axis=0) swav_loss = ( -0.5 * tf.math.reduce_mean( tf.math.reduce_sum(ema_targets_2 * obs_logits_1, axis=0)) -0.5 * tf.math.reduce_mean( tf.math.reduce_sum(ema_targets_1 * obs_logits_2, axis=0))) temp_loss = ( -tf.math.reduce_mean( tf.math.reduce_sum(ema_targets * feat_logits, axis=0))) norm_loss = ( +1.0 * tf.math.reduce_mean( tf.math.square(obs_norm - 1)) +1.0 * tf.math.reduce_mean( tf.math.square(feat_norm - 1))) losses = { 'swav': swav_loss, 'temp': temp_loss, 'norm': norm_loss, } return losses ################################################################################ def kl_loss(self, post, prior, forward, balance, free, free_avg): kld = tfd.kl_divergence sg = lambda x: tf.nest.map_structure(tf.stop_gradient, x) lhs, rhs = (prior, post) if forward else (post, prior) mix = balance if forward else (1 - balance) if balance == 0.5: value = kld(self.get_dist(lhs), self.get_dist(rhs)) loss = tf.maximum(value, free).mean() else: value_lhs = value = kld(self.get_dist(lhs), self.get_dist(sg(rhs))) value_rhs = kld(self.get_dist(sg(lhs)), self.get_dist(rhs)) if free_avg: loss_lhs = tf.maximum(value_lhs.mean(), free) loss_rhs = tf.maximum(value_rhs.mean(), free) else: loss_lhs = tf.maximum(value_lhs, free).mean() loss_rhs = tf.maximum(value_rhs, free).mean() loss = mix * loss_lhs + (1 - mix) * loss_rhs return loss, value class ConvEncoder(common.Module): def __init__( self, depth=32, act=tf.nn.elu, kernels=(4, 4, 4, 4), keys=['image']): self._act = getattr(tf.nn, act) if isinstance(act, str) else act self._depth = depth self._kernels = kernels self._keys = keys @tf.function def __call__(self, obs): if tuple(self._keys) == ('image',): x = tf.reshape(obs['image'], (-1,) + tuple(obs['image'].shape[-3:])) for i, kernel in enumerate(self._kernels): depth = 2 ** i * self._depth x = self._act(self.get(f'h{i}', tfkl.Conv2D, depth, kernel, 2)(x)) x = tf.reshape(x, [x.shape[0], np.prod(x.shape[1:])]) shape = tf.concat([tf.shape(obs['image'])[:-3], [x.shape[-1]]], 0) return tf.reshape(x, shape) else: dtype = prec.global_policy().compute_dtype features = [] for key in self._keys: value = tf.convert_to_tensor(obs[key]) if value.dtype.is_integer: value = tf.cast(value, dtype) semilog = tf.sign(value) * tf.math.log(1 + tf.abs(value)) features.append(semilog[..., None]) elif len(obs[key].shape) >= 4: x = tf.reshape(obs['image'], (-1,) + tuple(obs['image'].shape[-3:])) for i, kernel in enumerate(self._kernels): depth = 2 ** i * self._depth x = self._act(self.get(f'h{i}', tfkl.Conv2D, depth, kernel, 2)(x)) x = tf.reshape(x, [x.shape[0], np.prod(x.shape[1:])]) shape = tf.concat([tf.shape(obs['image'])[:-3], [x.shape[-1]]], 0) features.append(tf.reshape(x, shape)) else: raise NotImplementedError((key, value.dtype, value.shape)) return tf.concat(features, -1) class MLP(common.Module): def __init__(self, shape, layers, units, act=tf.nn.elu, **out): self._shape = (shape,) if isinstance(shape, int) else shape self._layers = layers self._units = units self._act = getattr(tf.nn, act) if isinstance(act, str) else act self._out = out def __call__(self, features): x = tf.cast(features, prec.global_policy().compute_dtype) for index in range(self._layers): x = self.get(f'h{index}', tfkl.Dense, self._units, self._act)(x) return 
self.get('out', DistLayer, self._shape, **self._out)(x) class GRUCell(tf.keras.layers.AbstractRNNCell): def __init__(self, size, norm=False, act=tf.tanh, update_bias=-1, **kwargs): super().__init__() self._size = size self._act = getattr(tf.nn, act) if isinstance(act, str) else act self._norm = norm self._update_bias = update_bias self._layer = tfkl.Dense(3 * size, use_bias=norm is not None, **kwargs) if norm: self._norm = tfkl.LayerNormalization(dtype=tf.float32) @property def state_size(self): return self._size @tf.function def call(self, inputs, state): state = state[0] # Keras wraps the state in a list. parts = self._layer(tf.concat([inputs, state], -1)) if self._norm: dtype = parts.dtype parts = tf.cast(parts, tf.float32) parts = self._norm(parts) parts = tf.cast(parts, dtype) reset, cand, update = tf.split(parts, 3, -1) reset = tf.nn.sigmoid(reset) cand = self._act(reset * cand) update = tf.nn.sigmoid(update + self._update_bias) output = update * cand + (1 - update) * state return output, [output] class DistLayer(common.Module): def __init__(self, shape, dist='mse', min_std=0.1, init_std=0.0): self._shape = shape self._dist = dist self._min_std = min_std self._init_std = init_std def __call__(self, inputs): out = self.get('out', tfkl.Dense, np.prod(self._shape))(inputs) out = tf.reshape(out, tf.concat([tf.shape(inputs)[:-1], self._shape], 0)) out = tf.cast(out, tf.float32) if self._dist in ('normal', 'tanh_normal', 'trunc_normal'): std = self.get('std', tfkl.Dense, np.prod(self._shape))(inputs) std = tf.reshape(std, tf.concat([tf.shape(inputs)[:-1], self._shape], 0)) std = tf.cast(std, tf.float32) if self._dist == 'mse': dist = tfd.Normal(out, 1.0) return tfd.Independent(dist, len(self._shape)) if self._dist == 'normal': dist
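# --- Illustrative sketch (not part of the original model) --------------------
# The sinkhorn() method above turns a [num_prototypes, num_samples] score
# matrix into a soft assignment whose columns each sum to one while prototype
# usage stays roughly balanced across rows -- the Sinkhorn-Knopp iteration used
# in SwAV-style losses. The original works in log space for numerical
# stability; below is the same computation in plain NumPy with made-up sizes,
# so the effect of the row/column normalisation is easy to inspect.
import numpy as np

def sinkhorn_numpy(scores, eps=0.05, iters=3):
    K, N = scores.shape
    Q = np.exp(scores / eps)
    Q /= Q.sum()                              # softmax over all entries
    for _ in range(iters):
        Q /= Q.sum(axis=1, keepdims=True) * K  # rows sum to 1/K
        Q /= Q.sum(axis=0, keepdims=True) * N  # columns sum to 1/N
    return Q * N                               # columns now sum to 1

rng = np.random.default_rng(0)
scores = rng.normal(size=(5, 8))               # 5 prototypes, 8 samples (assumed)
Q = sinkhorn_numpy(scores)
print(Q.sum(axis=0))   # 1.0 for every sample (assignment distribution)
print(Q.sum(axis=1))   # roughly N/K = 1.6 per prototype, i.e. balanced usage
# ------------------------------------------------------------------------------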
#!/usr/bin/python # coding=utf-8 # 公众号:testerzhang __author__ = 'testerzhang' import os import time import traceback from appium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.common.exceptions import NoSuchElementException, TimeoutException, WebDriverException from selenium.webdriver.support import expected_conditions as EC from tqdm import tqdm import parse from loguru import logger import jdconfig as config logger.add(config.APPIUM_LOG) def wait_time_bar(wait_sec): logger.debug(f"等待{wait_sec}秒") wait_value = 10 * wait_sec for i in tqdm(range(wait_value)): time.sleep(0.1) # logger.debug("") class JD(object): def __init__(self): device_port = config.DEVICE_PORT desired_caps = config.DESIRED_CAPS self.skip_list = config.SKIP_LIST url = "http://localhost:{}/wd/hub".format(device_port) try: self.driver = webdriver.Remote(url, desired_caps) except WebDriverException: raise Exception("请手机连接到电脑哦!") except: logger.error(f"异常={traceback.format_exc()}") raise Exception("连接手机出了问题,请检查下") self.wait = WebDriverWait(self.driver, config.TIME_OUT) self.game_over = False self.windows_xpath = config.WINDOWS_XPATH self.windows_xpath2 = config.WINDOWS_XPATH2 self.except_html = "./except" if not os.path.exists(self.except_html): os.makedirs(self.except_html) self.finish_task_skip = [] logger.debug("1.打开京东") wait_time_bar(4) # 点击中间区域位置 def click_screen_middle(self): screen_size = self.driver.get_window_size() logger.debug(f"手机屏幕大小={screen_size}") # 屏幕的宽度 width x = screen_size['width'] # 屏幕的高度 height y = screen_size['height'] start_x = x / 2 start_y = y / 2 positions = [(start_x, start_y), (start_x, start_y)] logger.debug(f"点击屏幕位置={positions}") self.driver.tap(positions, 100) # 关闭 def close(self): wait_time_bar(5) logger.debug("6.关闭app") self.driver.quit() # 检测是否在当前自动化的app def detect_app(self): if self.driver.current_package != "com.jingdong.app.mall": self.driver.back() # 判断某些任务是不是直接跳过 def continue_task(self, content): is_continue = True for skip in self.skip_list: if skip in content: logger.warning(f"任务=[{content}]暂时不做") is_continue = False break return is_continue # 首页查找入口 def active_page(self): search_result = False logger.debug(f"2.查找活动入口") try: # 搜索框 search_div = '//android.widget.TextView[contains(@content-desc,"搜索")]' search_elm = self.wait.until(EC.presence_of_element_located((By.XPATH, search_div))) search_elm.click() wait_time_bar(2) # 换个思路,拿到动态的resource-id my_regx = '''{temp}content-desc="搜索框,{temp2}resource-id="{search_id}"{temp3}''' regx_result = parse.parse(my_regx, self.driver.page_source) # logger.debug(f"regx_result={regx_result}") if regx_result is None: logger.warning("获取搜索框ID正则匹配失败,退出") raise Exception("获取搜索框ID正则匹配失败,退出") search_text_id = regx_result['search_id'] # 输入搜索文本,这里目前只能是用ID,xpath解析异常 # search_text_id = 'com.jd.lib.search.feature:id/a54' box = self.wait.until(EC.presence_of_element_located((By.ID, search_text_id))) box.set_text("炸年兽") # 点击搜索按钮 logger.debug(f"点击搜索按钮") search_btn_xpath = '//android.widget.TextView[@content-desc="搜索,按钮"]' button = self.wait.until(EC.presence_of_element_located((By.XPATH, search_btn_xpath))) button.click() wait_time_bar(3) # 废弃寻找元素 # door_xpath = '//androidx.recyclerview.widget.RecyclerView/android.widget.RelativeLayout[@index="2"]' # door_button = self.wait.until(EC.presence_of_element_located((By.XPATH, door_xpath))) # door_button.click() # 屏幕点击位置进入活动 self.click_screen_middle() # 加载新页面时间 wait_time_bar(5) logger.debug("进入活动入口") except NoSuchElementException: raise 
Exception("找不到活动入口") filename = f"{self.except_html}/search.html" self.write_html(filename) except: raise Exception("元素定位了,但是找不到活动入口") filename = f"{self.except_html}/search-except.html" self.write_html(filename) return True def close_windows(self): try: count_div = f'//*[@text="累计任务奖励"]/../../android.view.View[1]' count_elm = self.driver.find_element(By.XPATH, count_div) logger.debug(f"点击关闭按钮") count_elm.click() except: logger.warning(f"点击关闭异常") # logger.debug(f"【{task}】点击异常={traceback.format_exc()}") # task必须是副标题的内容 def print_task_detail(self, task): continue_flag = True task_title_xpath = "" task_second_title_xpath = "" task_title_text = "" task_second_title_text = "" try: logger.debug(f"检查任务:【{task}】是否存在") task_second_title_xpath = f'//*[contains(@text, "{task}")]' task_second_title = self.driver.find_element(By.XPATH, task_second_title_xpath) task_second_title_text = task_second_title.text logger.debug(f"任务副标题={task_second_title_text}") except: logger.warning(f"该任务:【{task}】不执行") continue_flag = False return continue_flag, task_title_xpath, task_second_title_xpath, task_title_text, task_second_title_text try: task_title_xpath = f'{task_second_title_xpath}//preceding-sibling::android.view.View[1]' task_title_elm = self.driver.find_element(By.XPATH, task_title_xpath) # 获取标题 task_title_text = task_title_elm.text logger.debug(f"任务标题={task_title_text}") except NoSuchElementException: continue_flag = False filename = f"{self.except_html}/获取任务主标题-不存在.html" self.write_html(filename) return continue_flag, task_title_xpath, task_second_title_xpath, task_title_text, task_second_title_text except: logger.warning(f"该任务:【{task}】获取任务标题异常,不执行") continue_flag = False return continue_flag, task_title_xpath, task_second_title_xpath, task_title_text, task_second_title_text # 判断是否任务跳过 is_continue = self.continue_task(task_title_text) if not is_continue: logger.warning(f"满足跳过任务关键字,退出2") continue_flag = False return continue_flag, task_title_xpath, task_second_title_xpath, task_title_text, task_second_title_text return continue_flag, task_title_xpath, task_second_title_xpath, task_title_text, task_second_title_text # task必须是主标题的内容 def print_task_detail2(self, task): continue_flag = True task_title_xpath = "" task_second_title_xpath = "" task_title_text = "" task_second_title_text = "" try: logger.debug(f"检查任务:【{task}】是否存在") task_title_xpath = f'//*[contains(@text, "{task}")]' task_title = self.driver.find_element(By.XPATH, task_title_xpath) task_title_text = task_title.text logger.debug(f"任务主标题={task_title_text}") except NoSuchElementException: pass except: logger.warning(f"该任务:【{task}】不执行") continue_flag = False return continue_flag, task_title_xpath, task_second_title_xpath, task_title_text, task_second_title_text try: task_second_title_xpath = f'{task_title_xpath}//following-sibling::android.view.View[1]' task_second_title_elm = self.driver.find_element(By.XPATH, task_second_title_xpath) # 获取标题 task_second_title_text = task_second_title_elm.text logger.debug(f"任务副标题={task_second_title_text}") except NoSuchElementException: continue_flag = False filename = f"{self.except_html}/获取任务副标题-不存在.html" self.write_html(filename) return continue_flag, task_title_xpath, task_second_title_xpath, task_title_text, task_second_title_text except: logger.warning(f"该任务:【{task}】获取任务副标题异常,不执行") continue_flag = False return continue_flag, task_title_xpath, task_second_title_xpath, task_title_text, task_second_title_text # 判断是否任务跳过 is_continue = self.continue_task(task_title_text) if not is_continue: 
logger.warning(f"满足跳过任务关键字,退出2") continue_flag = False return continue_flag, task_title_xpath, task_second_title_xpath, task_title_text, task_second_title_text return continue_flag, task_title_xpath, task_second_title_xpath, task_title_text, task_second_title_text # 种草城 def grass_task(self, task): init_loop = 0 max_loop = 1 jump_loop_flag = 0 while init_loop < max_loop: init_loop = init_loop + 1 if jump_loop_flag == 1: logger.debug(f"超过循环次数,退出该类任务。") break continue_flag, task_title_xpath, task_second_title_xpath, task_title_text, task_second_title_text = self.print_task_detail2( task) if not continue_flag: break # 开始点击 result = parse.parse("{temp}({now_times}/{total_times})", f"{task_title_text}") now_times = int(result['now_times']) total_times = 3 logger.debug(f"now_times={now_times},total_times={total_times}") if now_times == total_times and total_times > 0: continue else: while now_times < total_times: logger.debug(f"开始【{task}】任务now_times={now_times}点击") # todo:检测页面是否已经完成任务了 try: task_button_do_xpath = f'{task_second_title_xpath}/following-sibling::android.view.View[1]' task_button_do_elm = self.driver.find_element(By.XPATH, task_button_do_xpath) task_button_do_elm.click() except NoSuchElementException: filename = f"{self.except_html}/互动种草城-点击-{now_times}-no-found.html" self.write_html(filename) break except: logger.warning(f"该任务:【{task}】获取任务按钮异常,不执行") break wait_time_bar(3) # 检测页面是否含有"当前页点击浏览4个商品领爆竹"文字 logger.debug(f"检测页面是否有关键字") source = self.driver.page_source find_flag = source.find("互动种草城") if find_flag == -1: logger.warning(f"没找到【互动种草城】关键字,退出任务") break # 执行4次 shop_success = True for i in range(1, 4): try: logger.debug(f"开始第{i}次访问店铺") to_finish_xpath = f'//android.view.View[contains(@text, "去完成去完成")]' to_finish_elm = self.driver.find_element(By.XPATH, to_finish_xpath) to_finish_elm.click() except NoSuchElementException: shop_success = False filename = f"{self.except_html}/互动种草城-店铺-{i}.html" self.write_html(filename) break except: shop_success = False logger.warning(f"点击店铺异常={traceback.format_exc()}") break wait_time_bar(1) logger.debug("从详情页返回") self.driver.back() wait_time_bar(2) if shop_success: logger.debug("返回任务列表") self.driver.back() now_times = now_times + 1 # gzh:testerzhang 做任务列表,还不能做全部,后续再看看。 def do_task(self, detect=False): if detect: enter_success = self.detect_enter_task_lists() if not enter_success: logger.warning(f"没有进入任务列表,退出") return # 配置文件配置需要执行的任务清单 task_list = config.TASK_LIST for task in task_list: if self.game_over: break while True: # 开始做任务 logger.debug(f"开始真正做任务列表:【{task}】") if task in ["去领取"]: try: progress_div = f'//*[@text="累计任务奖励"]/../android.view.View[3]/android.view.View/android.view.View' progress_elm_lists = self.driver.find_elements(By.XPATH, progress_div) logger.debug(f"找到[去领取]区域长度={len(progress_elm_lists)}") for i, progress_elm in enumerate(progress_elm_lists, 0): if i == 0: continue logger.debug(f"尝试点击第{i}个[去领取]") progress_elm.click() wait_time_bar(2) close_tip_div = f'//android.view.View[contains(@text, "+")]' close_tip_lists = self.driver.find_elements(By.XPATH, close_tip_div) if len(close_tip_lists) > 0: close_tip_elm = close_tip_lists[0] tips = close_tip_elm.text logger.debug(f"tips={tips}") if '爆竹' in tips: logger.debug(f"关闭弹窗") self.close_windows() wait_time_bar(2) except NoSuchElementException: filename = f"{self.except_html}/lingqu.html" self.write_html(filename) except: logger.warning(f"[去领取]异常={traceback.format_exc()}") else: wait_time_bar(5) break elif task in ["关闭"]: self.close_windows() break elif task in ["去组队可得", "玩AR游戏"]: 
continue_flag, task_title_xpath, task_second_title_xpath, task_title_text, task_second_title_text = self.print_task_detail( task) if not continue_flag: break try: logger.debug(f"开始【{task}】任务点击") task_button_do_xpath = f'{task_title_xpath}/following-sibling::android.view.View[2]' task_button_do_elm = self.driver.find_element(By.XPATH, task_button_do_xpath) task_button_do_elm.click() if task in ["玩AR游戏"]: wait_time_bar(4) self.driver.back() else: wait_time_bar(2) except NoSuchElementException: filename = f"{self.except_html}/join_group_or_ar_no.html" self.write_html(filename) except: logger.warning(f"该任务:【{task}】获取任务按钮异常,不执行") break break elif task in ["去种草城"]: # todo: 只有一次种草城 self.grass_task(task) break elif '底部跳转app' == task: try: logger.debug(f"开始点击任务列表底部的横幅") task_button_do_xpath = f'''//android.view.View[@resource-id="taskPanelBanner"]''' task_button_do_elm = self.driver.find_element(By.XPATH, task_button_do_xpath) task_button_do_elm.click() self.do_other_app() except NoSuchElementException: filename = f"{self.except_html}/底部跳转app-no-found.html" self.write_html(filename) except: logger.warning(f"该任务:【{task}】获取任务按钮异常,不执行") logger.warning("做【其他任务】完成,直接退出吧") self.game_over = True ## 不管做啥,都退出 break elif '累计浏览' == task: init_loop = 0 max_loop = 3 jump_loop_flag = 0 while init_loop < max_loop: init_loop = init_loop + 1 if jump_loop_flag == 1: logger.debug(f"超过循环次数,退出该类任务。") break continue_flag, task_title_xpath, task_second_title_xpath, task_title_text, task_second_title_text = self.print_task_detail( task) if not continue_flag: break # 开始点击 result = parse.parse("{temp}({now_times}/{total_times})", f"{task_title_text}") now_times = int(result['now_times']) total_times = int(result['total_times']) logger.debug(f"now_times={now_times},total_times={total_times}") if now_times == total_times and total_times > 0: continue else: while now_times < total_times: try: logger.debug(f"开始【{task}】任务now_times={now_times}点击") task_button_do_xpath = f'{task_second_title_xpath}/following-sibling::android.view.View[1]' task_button_do_elm = self.driver.find_element(By.XPATH, task_button_do_xpath) task_button_do_elm.click() except NoSuchElementException: filename = f"{self.except_html}/累计浏览-点击浏览-{now_times}-no-found.html" self.write_html(filename) break except: logger.warning(f"该任务:【{task}】获取任务按钮异常,不执行") break wait_time_bar(3) # 检测页面是否含有"当前页点击浏览4个商品领爆竹"文字 logger.debug(f"检测页面是否有关键字") source = self.driver.page_source find_flag = source.find("当前页点击浏览4个商品领爆竹") if find_flag == -1: logger.warning(f"没找到【当前页点击浏览4个商品领爆竹】关键字,退出任务") break # 执行4次 browse_success = True for i in range(1, 5): try: logger.debug(f"开始第{i}次浏览商品") goods_views_xpath = f'//android.view.View[@resource-id="root"]/android.view.View[2]/android.view.View[{i}]' # logger.debug(f"goods_views_xpath={goods_views_xpath}") goods_views_elm = self.driver.find_element(By.XPATH, goods_views_xpath) goods_views_elm.click() except NoSuchElementException: browse_success = False filename = f"{self.except_html}/累计浏览-商品-{i}.html" self.write_html(filename) break except: browse_success = False logger.warning(f"点击商品异常={traceback.format_exc()}") break wait_time_bar(1) logger.debug("从商品详情页返回") self.driver.back() wait_time_bar(2) if browse_success: logger.debug("返回任务列表") self.driver.back() now_times = now_times + 1 break elif '浏览3个品牌墙' == task: init_loop = 0 max_loop = 3 jump_loop_flag = 0 while init_loop < max_loop: init_loop = init_loop + 1 if jump_loop_flag == 1: logger.debug(f"超过循环次数,退出该类任务。") break continue_flag, task_title_xpath, task_second_title_xpath, task_title_text, 
task_second_title_text = self.print_task_detail( task) if not continue_flag: break # 开始点击 result = parse.parse("{temp}({now_times}/{total_times})", f"{task_title_text}") now_times = int(result['now_times']) total_times = int(result['total_times']) logger.debug(f"now_times={now_times},total_times={total_times}") if now_times == total_times and total_times > 0: continue else: while now_times < total_times: try: logger.debug(f"开始【{task}】任务now_times={now_times}点击") task_button_do_xpath = f'{task_second_title_xpath}/following-sibling::android.view.View[1]' task_button_do_elm = self.driver.find_element(By.XPATH, task_button_do_xpath) task_button_do_elm.click() except NoSuchElementException: filename = f"{self.except_html}/浏览3个品牌墙-点击浏览-{now_times}-no-found.html" self.write_html(filename) break except: logger.warning(f"该任务:【{task}】获取任务按钮异常,不执行") break wait_time_bar(3) # 检测页面是否含有"当前页点击浏览4个商品领爆竹"文字 logger.debug(f"检测页面是否有关键字") source = self.driver.page_source find_flag = source.find("feedBottom") if find_flag == -1: logger.warning(f"没找到【feedBottom】关键字,退出任务") break # 执行4次 browse_success = True for i in range(1, 4): try: logger.debug(f"开始第{i}次浏览品牌墙") goods_views_xpath = f'//android.view.View[@resource-id="feedBottom"]/android.view.View/android.view.View[2]/android.view.View[{i}]' # logger.debug(f"goods_views_xpath={goods_views_xpath}") goods_views_elm = self.driver.find_element(By.XPATH, goods_views_xpath) goods_views_elm.click() except NoSuchElementException: browse_success = False filename = f"{self.except_html}/浏览3个品牌墙-品牌浏览-{i}.html" self.write_html(filename) break except: browse_success = False logger.warning(f"点击浏览品牌墙异常={traceback.format_exc()}") break wait_time_bar(1) logger.debug("从品牌墙详情页返回") self.driver.back() wait_time_bar(2) if browse_success: logger.debug("返回任务列表") self.driver.back() # 屏幕点击位置进入活动 self.click_screen_middle() # 加载新页面时间 wait_time_bar(5) button_name = "重新进入:做任务,集爆竹" enter_success = self.find_task_list_entrance(button_name) if not enter_success: logger.error(f"重新进入活动,依然没找到任务列表入口") else: wait_time_bar(5) self.do_task(detect=True) now_times = now_times + 1 break elif '浏览' in task: init_loop = 0 max_loop = 3 jump_loop_flag = 0 while init_loop < max_loop: init_loop = init_loop + 1 if jump_loop_flag == 1: logger.debug(f"超过循环次数,退出该类任务。") break continue_flag, task_title_xpath, task_second_title_xpath, task_title_text, task_second_title_text = self.print_task_detail( task) if not continue_flag: break if '浏览并加购' in task_second_title_text: logger.warning(f"浏览并加购任务不做") break # elif '成功入会并浏览可得' in task_second_title_text: # logger.warning(f"成功入会任务不做") # break elif '去财富岛' in task_second_title_text: logger.debug(f"财富岛任务不做") break elif '去小程序' in task_second_title_text: logger.debug(f"去小程序任务不做") break # 开始点击 result = parse.parse("{temp}({now_times}/{total_times})", f"{task_title_text}") now_times = int(result['now_times']) total_times = int(result['total_times']) logger.debug(f"now_times={now_times},total_times={total_times}") if now_times == total_times and total_times > 0:
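# --- Illustrative sketch (not part of the original script) -------------------
# The task handlers above repeatedly extract the progress counter from a task
# title with parse.parse("{temp}({now_times}/{total_times})", title). The
# helper below shows that pattern in isolation; the title strings are made-up
# examples, not actual text from the app.
import parse

def read_progress(title):
    result = parse.parse("{temp}({now_times}/{total_times})", title)
    if result is None:          # parse.parse returns None when the title has no counter
        return None
    return int(result["now_times"]), int(result["total_times"])

print(read_progress("浏览商品得爆竹(2/5)"))  # example title with a counter -> (2, 5)
print(read_progress("关闭"))                 # no counter in the title -> None
# ------------------------------------------------------------------------------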
# working again. self.client_communicator.LoadServerCertificate( server_certificate=server_certificate, ca_certificate=config.CONFIG["CA.certificate"]) self.assertLen(list(self.ClientServerCommunicate()), 10) class HTTPClientTests(client_action_test_lib.WithAllClientActionsMixin, test_lib.GRRBaseTest): """Test the http communicator.""" def setUp(self): """Set up communicator tests.""" super().setUp() # These tests change the config so we preserve state. config_stubber = test_lib.PreserveConfig() config_stubber.Start() self.addCleanup(config_stubber.Stop) self.server_private_key = config.CONFIG["PrivateKeys.server_key"] self.server_certificate = config.CONFIG["Frontend.certificate"] # Make a new client self.CreateNewClientObject() # And cache it in the server self.CreateNewServerCommunicator() requests_stubber = utils.Stubber(requests, "request", self.UrlMock) requests_stubber.Start() self.addCleanup(requests_stubber.Stop) sleep_stubber = utils.Stubber(time, "sleep", lambda x: None) sleep_stubber.Start() self.addCleanup(sleep_stubber.Stop) self.messages = [] ca_enroller.enrolment_cache.Flush() # Response to send back to clients. self.server_response = dict( session_id="aff4:/W:session", name="Echo", response_id=2) def _MakeClient(self): self.client_certificate = self.ClientCertFromPrivateKey( config.CONFIG["Client.private_key"]) self.client_cn = self.client_certificate.GetCN() self.client_id = self.client_cn[len("aff4:/"):] data_store.REL_DB.WriteClientMetadata( self.client_id, certificate=self.client_certificate, fleetspeak_enabled=False) def _ClearClient(self): del data_store.REL_DB.delegate.metadatas[self.client_id] def CreateNewServerCommunicator(self): self._MakeClient() self.server_communicator = frontend_lib.ServerCommunicator( certificate=self.server_certificate, private_key=self.server_private_key) def CreateClientCommunicator(self): self.client_communicator = comms.GRRHTTPClient( ca_cert=config.CONFIG["CA.certificate"], worker_cls=worker_mocks.DisabledNannyClientWorker) def CreateNewClientObject(self): self.CreateClientCommunicator() # Disable stats collection for tests. 
self.client_communicator.client_worker.last_stats_sent_time = ( time.time() + 3600) # Build a client context with preloaded server certificates self.client_communicator.communicator.LoadServerCertificate( self.server_certificate, config.CONFIG["CA.certificate"]) self.client_communicator.http_manager.retry_error_limit = 5 def UrlMock(self, num_messages=10, url=None, data=None, **kwargs): """A mock for url handler processing from the server's POV.""" if "server.pem" in url: cert = str(config.CONFIG["Frontend.certificate"]).encode("ascii") return MakeResponse(200, cert) _ = kwargs try: comms_cls = rdf_flows.ClientCommunication self.client_communication = comms_cls.FromSerializedBytes(data) # Decrypt incoming messages self.messages, source, ts = self.server_communicator.DecodeMessages( self.client_communication) # Make sure the messages are correct self.assertEqual(source, self.client_cn) messages = sorted( [m for m in self.messages if m.session_id == "aff4:/W:session"], key=lambda m: m.response_id) self.assertEqual([m.response_id for m in messages], list(range(len(messages)))) self.assertEqual([m.request_id for m in messages], [1] * len(messages)) # Now prepare a response response_comms = rdf_flows.ClientCommunication() message_list = rdf_flows.MessageList() for i in range(0, num_messages): message_list.job.Append(request_id=i, **self.server_response) # Preserve the timestamp as a nonce self.server_communicator.EncodeMessages( message_list, response_comms, destination=source, timestamp=ts, api_version=self.client_communication.api_version) return MakeResponse(200, response_comms.SerializeToBytes()) except communicator.UnknownClientCertError: raise MakeHTTPException(406) except Exception as e: logging.info("Exception in mock urllib.request.Open: %s.", e) self.last_urlmock_error = e if flags.FLAGS.pdb_post_mortem: pdb.post_mortem() raise MakeHTTPException(500) def CheckClientQueue(self): """Checks that the client context received all server messages.""" # Check the incoming messages self.assertEqual(self.client_communicator.client_worker.InQueueSize(), 10) for i, message in enumerate( self.client_communicator.client_worker._in_queue.queue): # This is the common name embedded in the certificate. self.assertEqual(message.source, "aff4:/GRR Test Server") self.assertEqual(message.response_id, 2) self.assertEqual(message.request_id, i) self.assertEqual(message.session_id, "aff4:/W:session") self.assertEqual(message.auth_state, rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED) # Clear the queue self.client_communicator.client_worker._in_queue.queue.clear() def SendToServer(self): """Schedule some packets from client to server.""" # Generate some client traffic for i in range(0, 10): self.client_communicator.client_worker.SendReply( rdf_flows.GrrStatus(), session_id=rdfvalue.SessionID("W:session"), response_id=i, request_id=1) def testInitialEnrollment(self): """If the client has no certificate initially it should enroll.""" # Clear the certificate so we can generate a new one. with test_lib.ConfigOverrider({ "Client.private_key": "", }): self.CreateNewClientObject() # Client should get a new Common Name. self.assertNotEqual(self.client_cn, self.client_communicator.communicator.common_name) self.client_cn = self.client_communicator.communicator.common_name # The client will sleep and re-attempt to connect multiple times. status = self.client_communicator.RunOnce() self.assertEqual(status.code, 406) # The client should now send an enrollment request. 
self.client_communicator.RunOnce() # Client should generate enrollment message by itself. self.assertLen(self.messages, 1) self.assertEqual(self.messages[0].session_id.Basename(), "E:%s" % ca_enroller.EnrolmentHandler.handler_name) def testEnrollment(self): """Test the http response to unknown clients.""" self._ClearClient() # Now communicate with the server. self.SendToServer() status = self.client_communicator.RunOnce() # We expect to receive a 406 and all client messages will be tagged as # UNAUTHENTICATED. self.assertEqual(status.code, 406) self.assertLen(self.messages, 10) self.assertEqual(self.messages[0].auth_state, rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED) # The next request should be an enrolling request. self.client_communicator.RunOnce() self.assertLen(self.messages, 11) enrolment_messages = [] expected_id = "E:%s" % ca_enroller.EnrolmentHandler.handler_name for m in self.messages: if m.session_id.Basename() == expected_id: enrolment_messages.append(m) self.assertLen(enrolment_messages, 1) # Now we manually run the enroll well known flow with the enrollment # request. This will start a new flow for enrolling the client, sign the # cert and add it to the data store. handler = ca_enroller.EnrolmentHandler() req = rdf_objects.MessageHandlerRequest( client_id=self.client_id, request=enrolment_messages[0].payload) handler.ProcessMessages([req]) # The next client communication should be enrolled now. status = self.client_communicator.RunOnce() self.assertEqual(status.code, 200) # There should be a cert for the client right now. md = data_store.REL_DB.ReadClientMetadata(self.client_id) self.assertTrue(md.certificate) # Now communicate with the server once again. self.SendToServer() status = self.client_communicator.RunOnce() self.assertEqual(status.code, 200) def testEnrollmentHandler(self): self._ClearClient() # First 406 queues an EnrolmentRequest. status = self.client_communicator.RunOnce() self.assertEqual(status.code, 406) # Send it to the server. status = self.client_communicator.RunOnce() self.assertEqual(status.code, 406) self.assertLen(self.messages, 1) self.assertEqual(self.messages[0].session_id.Basename(), "E:%s" % ca_enroller.EnrolmentHandler.handler_name) request = rdf_objects.MessageHandlerRequest( client_id=self.messages[0].source.Basename(), handler_name="Enrol", request_id=12345, request=self.messages[0].payload) handler = ca_enroller.EnrolmentHandler() handler.ProcessMessages([request]) # The next client communication should give a 200. 
status = self.client_communicator.RunOnce() self.assertEqual(status.code, 200) def testReboots(self): """Test the http communication with reboots.""" # Now we add the new client record to the server cache self.SendToServer() self.client_communicator.RunOnce() self.CheckClientQueue() # Simulate the client rebooted self.CreateNewClientObject() self.SendToServer() self.client_communicator.RunOnce() self.CheckClientQueue() # Simulate the server rebooting self.CreateNewServerCommunicator() self.SendToServer() self.client_communicator.RunOnce() self.CheckClientQueue() def _CheckFastPoll(self, require_fastpoll, expected_sleeptime): self.server_response = dict( session_id="aff4:/W:session", name="Echo", response_id=2, require_fastpoll=require_fastpoll) # Make sure we don't have any output messages that might override the # fastpoll setting from the input messages we send self.assertEqual(self.client_communicator.client_worker.OutQueueSize(), 0) self.client_communicator.RunOnce() # Make sure the timer is set to the correct value. self.assertEqual(self.client_communicator.timer.sleep_time, expected_sleeptime) self.CheckClientQueue() def testNoFastPoll(self): """Test that the fast poll False is respected on input messages. Also make sure we wait the correct amount of time before next poll. """ self._CheckFastPoll(False, config.CONFIG["Client.poll_max"]) def testFastPoll(self): """Test that the fast poll True is respected on input messages. Also make sure we wait the correct amount of time before next poll. """ self._CheckFastPoll(True, config.CONFIG["Client.poll_min"]) def testCorruption(self): """Simulate corruption of the http payload.""" self.corruptor_field = None def Corruptor(url="", data=None, **kwargs): """Futz with some of the fields.""" comm_cls = rdf_flows.ClientCommunication if data is not None: self.client_communication = comm_cls.FromSerializedBytes(data) else: self.client_communication = comm_cls(None) if self.corruptor_field and "server.pem" not in url: orig_str_repr = self.client_communication.SerializeToBytes() field_data = getattr(self.client_communication, self.corruptor_field) if hasattr(field_data, "SerializeToBytes"): # This converts encryption keys to a string so we can corrupt them. field_data = field_data.SerializeToBytes() # TODO: We use `bytes` from the `future` package here to # have Python 3 iteration behaviour. This call should be a noop in # Python 3 and should be safe to remove on support for Python 2 is # dropped. field_data = bytes(field_data) # TODO: On Python 2.7.6 and lower `array.array` accepts # only byte strings as argument so the call below is necessary. Once # support for old Python versions is dropped, this call should be # removed. modified_data = array.array(compatibility.NativeStr("B"), field_data) offset = len(field_data) // 2 char = field_data[offset] modified_data[offset] = char % 250 + 1 setattr(self.client_communication, self.corruptor_field, modified_data.tostring()) # Make sure we actually changed the data. 
self.assertNotEqual(field_data, modified_data) mod_str_repr = self.client_communication.SerializeToBytes() self.assertLen(orig_str_repr, len(mod_str_repr)) differences = [ True for x, y in zip(orig_str_repr, mod_str_repr) if x != y ] self.assertLen(differences, 1) data = self.client_communication.SerializeToBytes() return self.UrlMock(url=url, data=data, **kwargs) with utils.Stubber(requests, "request", Corruptor): self.SendToServer() status = self.client_communicator.RunOnce() self.assertEqual(status.code, 200) for field in ["packet_iv", "encrypted"]: # Corrupting each field should result in HMAC verification errors. self.corruptor_field = field self.SendToServer() status = self.client_communicator.RunOnce() self.assertEqual(status.code, 500) self.assertIn("HMAC verification failed", str(self.last_urlmock_error)) # Corruption of these fields will likely result in RSA errors, since we do # the RSA operations before the HMAC verification (in order to recover the # hmac key): for field in ["encrypted_cipher", "encrypted_cipher_metadata"]: # Corrupting each field should result in HMAC verification errors. self.corruptor_field = field self.SendToServer() status = self.client_communicator.RunOnce() self.assertEqual(status.code, 500) def testClientRetransmission(self): """Test that client retransmits failed messages.""" fail = True num_messages = 10 def FlakyServer(url=None, **kwargs): if not fail or "server.pem" in url: return self.UrlMock(num_messages=num_messages, url=url, **kwargs) raise MakeHTTPException(500) with utils.Stubber(requests, "request", FlakyServer): self.SendToServer() status = self.client_communicator.RunOnce() self.assertEqual(status.code, 500) # Server should not receive anything. self.assertEmpty(self.messages) # Try to send these messages again. fail = False self.assertEqual(self.client_communicator.client_worker.InQueueSize(), 0) status = self.client_communicator.RunOnce() self.assertEqual(status.code, 200) # We have received 10 client messages. self.assertEqual(self.client_communicator.client_worker.InQueueSize(), 10) self.CheckClientQueue() # Server should have received 10 messages this time. self.assertLen(self.messages, 10) # TODO(hanuszczak): We have a separate test suite for the stat collector. # Most of these test methods are no longer required, especially that now they # need to use implementation-specific methods instead of the public API. def testClientStatsCollection(self): """Tests that the client stats are collected automatically.""" now = 1000000 # Pretend we have already sent stats. self.client_communicator.client_worker.stats_collector._last_send_time = ( rdfvalue.RDFDatetime.FromSecondsSinceEpoch(now)) with test_lib.FakeTime(now): self.client_communicator.client_worker.stats_collector._Send() runs = [] with utils.Stubber(admin.GetClientStatsAuto, "Run", lambda cls, _: runs.append(1)): #
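# --- Illustrative sketch (not part of the original test suite) ---------------
# testCorruption above flips a single byte inside one field of the serialized
# ClientCommunication and then expects the server to reject the payload (HMAC
# failures for packet_iv/encrypted, RSA errors for the cipher fields). The
# helper below demonstrates the same single-byte corruption trick on an
# arbitrary bytes payload; it is a standalone sketch, not GRR code.
def flip_middle_byte(payload: bytes) -> bytes:
    """Return a copy of `payload` with its middle byte deterministically changed."""
    data = bytearray(payload)
    offset = len(data) // 2
    data[offset] = data[offset] % 250 + 1   # same transformation as the Corruptor above
    return bytes(data)

original = b"example encrypted blob"
corrupted = flip_middle_byte(original)
assert corrupted != original
assert len(corrupted) == len(original)
assert sum(a != b for a, b in zip(original, corrupted)) == 1  # exactly one byte differs
# ------------------------------------------------------------------------------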
<reponame>Gael-de-Sailly/flopy<filename>flopy/mf6/modflow/mfgwfmaw.py # DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY # mf6/utils/createpackages.py # FILE created on March 19, 2021 03:08:37 UTC from .. import mfpackage from ..data.mfdatautil import ListTemplateGenerator class ModflowGwfmaw(mfpackage.MFPackage): """ ModflowGwfmaw defines a maw package within a gwf6 model. Parameters ---------- model : MFModel Model that this package is a part of. Package is automatically added to model when it is initialized. loading_package : bool Do not set this parameter. It is intended for debugging and internal processing purposes only. auxiliary : [string] * auxiliary (string) defines an array of one or more auxiliary variable names. There is no limit on the number of auxiliary variables that can be provided on this line; however, lists of information provided in subsequent blocks must have a column of data for each auxiliary variable name defined here. The number of auxiliary variables detected on this line determines the value for naux. Comments cannot be provided anywhere on this line as they will be interpreted as auxiliary variable names. Auxiliary variables may not be used by the package, but they will be available for use by other parts of the program. The program will terminate with an error if auxiliary variables are specified on more than one line in the options block. boundnames : boolean * boundnames (boolean) keyword to indicate that boundary names may be provided with the list of multi-aquifer well cells. print_input : boolean * print_input (boolean) keyword to indicate that the list of multi- aquifer well information will be written to the listing file immediately after it is read. print_head : boolean * print_head (boolean) keyword to indicate that the list of multi- aquifer well heads will be printed to the listing file for every stress period in which "HEAD PRINT" is specified in Output Control. If there is no Output Control option and PRINT_HEAD is specified, then heads are printed for the last time step of each stress period. print_flows : boolean * print_flows (boolean) keyword to indicate that the list of multi- aquifer well flow rates will be printed to the listing file for every stress period time step in which "BUDGET PRINT" is specified in Output Control. If there is no Output Control option and "PRINT_FLOWS" is specified, then flow rates are printed for the last time step of each stress period. save_flows : boolean * save_flows (boolean) keyword to indicate that multi-aquifer well flow terms will be written to the file specified with "BUDGET FILEOUT" in Output Control. head_filerecord : [headfile] * headfile (string) name of the binary output file to write head information. budget_filerecord : [budgetfile] * budgetfile (string) name of the binary output file to write budget information. no_well_storage : boolean * no_well_storage (boolean) keyword that deactivates inclusion of well storage contributions to the multi-aquifer well package continuity equation. flow_correction : boolean * flow_correction (boolean) keyword that activates flow corrections in cases where the head in a multi-aquifer well is below the bottom of the screen for a connection or the head in a convertible cell connected to a multi-aquifer well is below the cell bottom. When flow corrections are activated, unit head gradients are used to calculate the flow between a multi-aquifer well and a connected GWF cell. By default, flow corrections are not made. 
flowing_wells : boolean * flowing_wells (boolean) keyword that activates the flowing wells option for the multi-aquifer well package. shutdown_theta : double * shutdown_theta (double) value that defines the weight applied to discharge rate for wells that limit the water level in a discharging well (defined using the HEAD_LIMIT keyword in the stress period data). SHUTDOWN_THETA is used to control discharge rate oscillations when the flow rate from the aquifer is less than the specified flow rate from the aquifer to the well. Values range between 0.0 and 1.0, and larger values increase the weight (decrease under-relaxation) applied to the well discharge rate. The HEAD_LIMIT option has been included to facilitate backward compatibility with previous versions of MODFLOW but use of the RATE_SCALING option instead of the HEAD_LIMIT option is recommended. By default, SHUTDOWN_THETA is 0.7. shutdown_kappa : double * shutdown_kappa (double) value that defines the weight applied to discharge rate for wells that limit the water level in a discharging well (defined using the HEAD_LIMIT keyword in the stress period data). SHUTDOWN_KAPPA is used to control discharge rate oscillations when the flow rate from the aquifer is less than the specified flow rate from the aquifer to the well. Values range between 0.0 and 1.0, and larger values increase the weight applied to the well discharge rate. The HEAD_LIMIT option has been included to facilitate backward compatibility with previous versions of MODFLOW but use of the RATE_SCALING option instead of the HEAD_LIMIT option is recommended. By default, SHUTDOWN_KAPPA is 0.0001. timeseries : {varname:data} or timeseries data * Contains data for the ts package. Data can be stored in a dictionary containing data for the ts package with variable names as keys and package data as values. Data just for the timeseries variable is also acceptable. See ts package documentation for more information. observations : {varname:data} or continuous data * Contains data for the obs package. Data can be stored in a dictionary containing data for the obs package with variable names as keys and package data as values. Data just for the observations variable is also acceptable. See obs package documentation for more information. mover : boolean * mover (boolean) keyword to indicate that this instance of the MAW Package can be used with the Water Mover (MVR) Package. When the MOVER option is specified, additional memory is allocated within the package to store the available, provided, and received water. nmawwells : integer * nmawwells (integer) integer value specifying the number of multi- aquifer wells that will be simulated for all stress periods. packagedata : [wellno, radius, bottom, strt, condeqn, ngwfnodes, aux, boundname] * wellno (integer) integer value that defines the well number associated with the specified PACKAGEDATA data on the line. WELLNO must be greater than zero and less than or equal to NMAWWELLS. Multi- aquifer well information must be specified for every multi-aquifer well or the program will terminate with an error. The program will also terminate with an error if information for a multi-aquifer well is specified more than once. This argument is an index variable, which means that it should be treated as zero-based when working with FloPy and Python. Flopy will automatically subtract one when loading index variables and add one when writing index variables. * radius (double) radius for the multi-aquifer well. 
The program will terminate with an error if the radius is less than or equal to zero. * bottom (double) bottom elevation of the multi-aquifer well. If CONDEQN is SPECIFIED, THIEM, SKIN, or COMPOSITE, BOTTOM is set to the cell bottom in the lowermost GWF cell connection in cases where the specified well bottom is above the bottom of this GWF cell. If CONDEQN is MEAN, BOTTOM is set to the lowermost GWF cell connection screen bottom in cases where the specified well bottom is above this value. The bottom elevation defines the lowest well head that will be simulated when the NEWTON UNDER_RELAXATION option is specified in the GWF model name file. The bottom elevation is also used to calculate volumetric storage in the well. * strt (double) starting head for the multi-aquifer well. The program will terminate with an error if the starting head is less than the specified well bottom. * condeqn (string) character string that defines the conductance equation that is used to calculate the saturated conductance for the multi-aquifer well. Possible multi-aquifer well CONDEQN strings include: SPECIFIED--character keyword to indicate the multi-aquifer well saturated conductance
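
# ---------------------------------------------------------------------------
# Hedged usage sketch for the ModflowGwfmaw package documented above. It
# builds the smallest set of companion MODFLOW 6 packages needed to write
# input files; the connectiondata and perioddata record layouts are not part
# of the docstring excerpt above and follow the general MODFLOW 6 MAW input
# structure, so treat them as illustrative assumptions. No boundary
# conditions are defined, so this only shows constructing and writing the
# package, not a meaningful simulation.
# ---------------------------------------------------------------------------
import flopy

sim = flopy.mf6.MFSimulation(sim_name="maw_demo", sim_ws=".")
flopy.mf6.ModflowTdis(sim, nper=1)
flopy.mf6.ModflowIms(sim)
gwf = flopy.mf6.ModflowGwf(sim, modelname="maw_demo")
flopy.mf6.ModflowGwfdis(gwf, nlay=2, nrow=5, ncol=5, top=0.0,
                        botm=[-25.0, -50.0])
flopy.mf6.ModflowGwfic(gwf, strt=0.0)
flopy.mf6.ModflowGwfnpf(gwf, k=1.0)

maw = flopy.mf6.ModflowGwfmaw(
    gwf,
    print_input=True,
    print_head=True,
    save_flows=True,
    nmawwells=1,
    # [wellno, radius, bottom, strt, condeqn, ngwfnodes]; wellno is
    # zero-based in FloPy, as the docstring above notes.
    packagedata=[(0, 0.15, -50.0, 0.0, "THIEM", 2)],
    # Assumed layout: [wellno, icon, cellid, scrn_top, scrn_bot,
    #                  hk_skin, radius_skin]
    connectiondata=[
        (0, 0, (0, 2, 2), 0.0, -25.0, 0.0, 0.0),
        (0, 1, (1, 2, 2), -25.0, -50.0, 0.0, 0.0),
    ],
    # One well pumping at a constant rate during stress period 1.
    perioddata={0: [(0, "rate", -100.0)]},
)

sim.write_simulation()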
it is known updating a ``Catalog`` will result in a ``PermissionDenied``. This is intended as a hint to an application that may not wish to offer update operations to unauthorized users. :return: ``false`` if ``Catalog`` modification is not authorized, ``true`` otherwise :rtype: ``boolean`` *compliance: mandatory -- This method must be implemented.* """ return # boolean @abc.abstractmethod def get_catalog_form_for_update(self, catalog_id): """Gets the catalog form for updating an existing catalog. A new catalog form should be requested for each update transaction. :param catalog_id: the ``Id`` of the ``Catalog`` :type catalog_id: ``osid.id.Id`` :return: the catalog form :rtype: ``osid.cataloging.CatalogForm`` :raise: ``NotFound`` -- ``catalog_id`` is not found :raise: ``NullArgument`` -- ``catalog_id`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``PermissionDenied`` -- authorization failure *compliance: mandatory -- This method must be implemented.* """ return # osid.cataloging.CatalogForm @abc.abstractmethod def update_catalog(self, catalog_form): """Updates an existing catalog. :param catalog_form: the form containing the elements to be updated :type catalog_form: ``osid.cataloging.CatalogForm`` :raise: ``IllegalState`` -- ``catalog_form`` already used in an update transaction :raise: ``InvalidArgument`` -- the form contains an invalid value :raise: ``NullArgument`` -- ``catalog_form`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``PermissionDenied`` -- authorization failure :raise: ``Unsupported`` -- ``catalog_form`` did not originate from ``get_catalog_form_for_update()`` *compliance: mandatory -- This method must be implemented.* """ pass @abc.abstractmethod def can_delete_catalogs(self): """Tests if this user can delete ``Catalogs``. A return of true does not guarantee successful authorization. A return of false indicates that it is known deleting a ``Catalog`` will result in a ``PermissionDenied``. This is intended as a hint to an application that may not wish to offer delete operations to unauthorized users. :return: ``false`` if ``Catalog`` deletion is not authorized, ``true`` otherwise :rtype: ``boolean`` *compliance: mandatory -- This method must be implemented.* """ return # boolean @abc.abstractmethod def delete_catalog(self, catalog_id): """Deletes a ``Catalog``. :param catalog_id: the ``Id`` of the ``Catalog`` to remove :type catalog_id: ``osid.id.Id`` :raise: ``NotFound`` -- ``catalog_id`` not found :raise: ``NullArgument`` -- ``catalog_id`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``PermissionDenied`` -- authorization failure *compliance: mandatory -- This method must be implemented.* """ pass @abc.abstractmethod def can_manage_catalog_aliases(self): """Tests if this user can manage ``Id`` aliases for ``Catalogs``. A return of true does not guarantee successful authorization. A return of false indicates that it is known changing an alias will result in a ``PermissionDenied``. This is intended as a hint to an application that may opt not to offer alias operations to an unauthorized user. :return: ``false`` if ``Catalog`` aliasing is not authorized, ``true`` otherwise :rtype: ``boolean`` *compliance: mandatory -- This method must be implemented.* """ return # boolean @abc.abstractmethod def alias_catalog(self, catalog_id, alias_id): """Adds an ``Id`` to a ``Catalog`` for the purpose of creating compatibility. 
The primary ``Id`` of the ``Catalog`` is determined by the provider. The new ``Id`` performs as an alias to the primary ``Id``. If the alias is a pointer to another catalog, it is reassigned to the given catalog ``Id``. :param catalog_id: the ``Id`` of a ``Catalog`` :type catalog_id: ``osid.id.Id`` :param alias_id: the alias ``Id`` :type alias_id: ``osid.id.Id`` :raise: ``AlreadyExists`` -- ``alias_id`` is already assigned :raise: ``NotFound`` -- ``catalog_id`` not found :raise: ``NullArgument`` -- ``catalog_id`` or ``alias_id`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``PermissionDenied`` -- authorization failure *compliance: mandatory -- This method must be implemented.* """ pass class CatalogNotificationSession: """This session defines methods to receive notifications on adds/changes to ``Catalog`` objects. This session is intended for consumers needing to synchronize their state with this service without the use of polling. Notifications are cancelled when this session is closed. Notifications are triggered with changes to the ``Catalog`` object itself. Adding and removing ``Ids`` result in notifications available from the notification session for catalog entries. """ __metaclass__ = abc.ABCMeta @abc.abstractmethod def can_register_for_catalog_notifications(self): """Tests if this user can register for ``Catalog`` notifications. A return of true does not guarantee successful authorization. A return of false indicates that it is known all methods in this session will result in a ``PermissionDenied``. This is intended as a hint to an application that may opt not to offer notification operations. :return: ``false`` if notification methods are not authorized, ``true`` otherwise :rtype: ``boolean`` *compliance: mandatory -- This method must be implemented.* """ return # boolean @abc.abstractmethod def reliable_catalog_notifications(self): """Reliable notifications are desired. In reliable mode, notifications are to be acknowledged using ``acknowledge_catalog_notification()`` . *compliance: mandatory -- This method is must be implemented.* """ pass @abc.abstractmethod def unreliable_catalog_notifications(self): """Unreliable notifications are desired. In unreliable mode, notifications do not need to be acknowledged. *compliance: mandatory -- This method is must be implemented.* """ pass @abc.abstractmethod def acknowledge_catalog_notification(self, notification_id): """Acknowledge a catalog notification. :param notification_id: the ``Id`` of the notification :type notification_id: ``osid.id.Id`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``PermissionDenied`` -- authorization failure *compliance: mandatory -- This method must be implemented.* """ pass @abc.abstractmethod def register_for_new_catalogs(self): """Register for notifications of new catalogs. ``CatalogReceiver.newCatalogs()`` is invoked when a new ``Catalog`` is created. :raise: ``OperationFailed`` -- unable to complete request :raise: ``PermissionDenied`` -- authorization failure *compliance: mandatory -- This method must be implemented.* """ pass @abc.abstractmethod def register_for_changed_catalogs(self): """Registers for notification of updated catalogs. ``CatalogReceiver.changedCatalogs()`` is invoked when a catalog is changed. 
:raise: ``OperationFailed`` -- unable to complete request :raise: ``PermissionDenied`` -- authorization failure *compliance: mandatory -- This method must be implemented.* """ pass @abc.abstractmethod def register_for_changed_catalog(self, catalog_id): """Registers for notification of an updated catalog. ``CatalogReceiver.changedCatalogs()`` is invoked when the specified catalog is changed. :param catalog_id: the ``Id`` of the ``Catalog`` to monitor :type catalog_id: ``osid.id.Id`` :raise: ``NullArgument`` -- ``catalog_id`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``PermissionDenied`` -- authorization failure *compliance: mandatory -- This method must be implemented.* """ pass @abc.abstractmethod def register_for_deleted_catalogs(self): """Registers for notification of deleted catalogs. ``CatalogReceiver.deletedCatalogs()`` is invoked when a catalog is deleted. :raise: ``OperationFailed`` -- unable to complete request :raise: ``PermissionDenied`` -- authorization failure *compliance: mandatory -- This method must be implemented.* """ pass @abc.abstractmethod def register_for_deleted_catalog(self, catalog_id): """Registers for notification of a deleted catalog. ``CatalogReceiver.deletedCatalogs()`` is invoked when the specified catalog is deleted. :param catalog_id: the ``Id`` of the ``Catalog`` to monitor :type catalog_id: ``osid.id.Id`` :raise: ``NullArgument`` -- ``catalog_id`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``PermissionDenied`` -- authorization failure *compliance: mandatory -- This method must be implemented.* """ pass @abc.abstractmethod def register_for_changed_catalog_hierarchy(self): """Registers for notification of an updated catalog hierarchy structure. ``CatalogReceiver.changedChildOfCatalogs()`` is invoked when a node experiences a change in its children. :raise: ``OperationFailed`` -- unable to complete request :raise: ``PermissionDenied`` -- authorization failure *compliance: mandatory -- This method must be implemented.* """ pass @abc.abstractmethod def register_for_changed_catalog_hierarchy_for_ancestors(self, catalog_id): """Catalog ``Receiver. changedChildOfCatalogs()`` is invoked when the specified node or any of its ancestors experiences a change in its children. :param catalog_id: the ``Id`` of the ``Catalog`` node to monitor :type catalog_id: ``osid.id.Id`` :raise: ``NullArgument`` -- ``catalog_id`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``PermissionDenied`` -- authorization failure *compliance: mandatory -- This method must be implemented.* """ pass @abc.abstractmethod def register_for_changed_catalog_hierarchy_for_descendants(self, catalog_id): """Registers for notification of an updated catalog hierarchy structure. ``CatalogReceiver.changedChildOfCatalogs()`` is invoked when the specified node or any of its descendants experiences a change in its children. :param catalog_id: the ``Id`` of the ``catalog`` node to monitor :type catalog_id: ``osid.id.Id`` :raise: ``NullArgument`` -- ``catalog_id`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``PermissionDenied`` -- authorization failure *compliance: mandatory -- This method must be implemented.* """ pass @abc.abstractmethod def reliable_catalog_notifications(self): """Reliable notifications are desired. In reliable mode, notifications are to be acknowledged using ``acknowledge_item_notification()`` . 
*compliance: mandatory -- This method is must be implemented.* """ pass @abc.abstractmethod def unreliable_catalog_notifications(self): """Unreliable notifications are desired. In unreliable mode, notifications do not need to be acknowledged. *compliance: mandatory -- This method is must be implemented.* """ pass @abc.abstractmethod def acknowledge_catalog_notification(self, notification_id): """Acknowledge an catalog notification. :param notification_id: the ``Id``
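
# ---------------------------------------------------------------------------
# Hedged sketch, not part of the OSID abstract specification above: one
# minimal way a provider might satisfy a few of the
# CatalogNotificationSession registration methods with an in-memory
# registry. The remaining abstract methods (hierarchy registrations and the
# rest) would also need implementations in a complete provider, and actual
# delivery of notifications to a receiver is out of scope here.
# ---------------------------------------------------------------------------
class InMemoryCatalogNotificationSession(CatalogNotificationSession):
    """Toy provider that only records which notifications were requested."""

    def __init__(self):
        self._registrations = set()
        self._reliable = False

    def can_register_for_catalog_notifications(self):
        # A real provider would consult its authorization service here.
        return True

    def reliable_catalog_notifications(self):
        self._reliable = True

    def unreliable_catalog_notifications(self):
        self._reliable = False

    def acknowledge_catalog_notification(self, notification_id):
        # Nothing is delivered by this toy provider, so there is nothing to
        # acknowledge; a real session would mark `notification_id` as seen.
        pass

    def register_for_new_catalogs(self):
        self._registrations.add("new")

    def register_for_changed_catalogs(self):
        self._registrations.add("changed")

    def register_for_changed_catalog(self, catalog_id):
        self._registrations.add(("changed", str(catalog_id)))

    def register_for_deleted_catalogs(self):
        self._registrations.add("deleted")

    def register_for_deleted_catalog(self, catalog_id):
        self._registrations.add(("deleted", str(catalog_id)))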
# -*- coding: latin-1 -*- # ----------------------------------------------------------------------------- # Copyright 2014, 2017 <NAME> <<EMAIL>> # # Licensed under the EUPL, Version 1.1 or - as soon they # will be approved by the European Commission - subsequent # versions of the EUPL (the "Licence"); # You may not use this work except in compliance with the # Licence. # You may obtain a copy of the Licence at: # # https://joinup.ec.europa.eu/software/page/eupl # # Unless required by applicable law or agreed to in # writing, software distributed under the Licence is # distributed on an "AS IS" basis, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. # See the Licence for the specific language governing # permissions and limitations under the Licence. # ----------------------------------------------------------------------------- import nfc.tag from . import tt3 import os from binascii import hexlify from pyDes import triple_des, CBC from struct import pack, unpack import itertools import logging log = logging.getLogger(__name__) def activate(clf, target): # http://www.sony.net/Products/felica/business/tech-support/list.html ic_code = target.sensf_res[10] if ic_code in FelicaLite.IC_CODE_MAP.keys(): return FelicaLite(clf, target) if ic_code in FelicaLiteS.IC_CODE_MAP.keys(): return FelicaLiteS(clf, target) if ic_code in FelicaStandard.IC_CODE_MAP.keys(): return FelicaStandard(clf, target) if ic_code in FelicaMobile.IC_CODE_MAP.keys(): return FelicaMobile(clf, target) if ic_code in FelicaPlug.IC_CODE_MAP.keys(): return FelicaPlug(clf, target) return None class FelicaStandard(tt3.Type3Tag): """Standard FeliCa is a range of FeliCa OS based card products with a flexible file system that supports multiple applications and services on the same card. Services can individually be protected with a card key and all communication with protected services is encrypted. """ IC_CODE_MAP = { # IC IC-NAME NBR NBW 0x00: ("RC-S830", 8, 8), # RC-S831/833 0x01: ("RC-S915", 12, 8), # RC-S860/862/863/864/891 0x02: ("RC-S919", 1, 1), # RC-S890 0x08: ("RC-S952", 12, 8), 0x09: ("RC-S953", 12, 8), 0x0B: ("RC-S???", 1, 1), # new suica 0x0C: ("RC-S954", 12, 8), 0x0D: ("RC-S960", 12, 10), # RC-S880/889 0x20: ("RC-S962", 12, 10), # RC-S885/888/892/893 0x32: ("RC-SA00/1", 1, 1), # AES chip 0x35: ("RC-SA00/2", 1, 1), } def __init__(self, clf, target): super(FelicaStandard, self).__init__(clf, target) self._product = "FeliCa Standard ({0})".format( self.IC_CODE_MAP[self.pmm[1]][0]) def _is_present(self): # Perform a presence check. Modern FeliCa cards implement the # RequestResponse command, so we'll try that first. If it # fails we resort the generic way that works for all type 3 # tags (but resets the card operating mode to zero). try: return self.request_response() in (0, 1, 2, 3) except tt3.Type3TagCommandError: return super(FelicaStandard, self)._is_present() def dump(self): # Dump the content of a FeliCa card as good as possible. This # is unfortunately rather complex because we want to reflect # the area structure with indentation and summarize overlapped # services under a single item. def print_system(system_code): # Print system information system_code_map = { 0x0000: "SDK Sample", 0x0003: "Suica", 0x12FC: "NDEF", 0x811D: "Edy", 0x8620: "Blackboard", 0xFE00: "Common Area", } return ["System {0:04X} ({1})".format( system_code, system_code_map.get(system_code, 'unknown'))] def print_area(area_from, area_last, depth): # Prints area information with indentation. 
return ["{indent}Area {0:04X}--{1:04X}".format( area_from, area_last, indent=depth*' ')] def print_service(services, depth): # This function processes a list of overlapped services # and reads all block data if there is one service that # does not require a key. First we figure out the common # service type and which access modes are available. if services[0] >> 2 & 0b1111 == 0b0010: service_type = "Random" access_types = " & ".join([( "write with key", "write w/o key", "read with key", "read w/o key")[x & 3] for x in services]) if services[0] >> 2 & 0b1111 == 0b0011: service_type = "Cyclic" access_types = " & ".join([( "write with key", "write w/o key", "read with key", "read w/o key")[x & 3] for x in services]) if services[0] >> 2 & 0b1110 == 0b0100: service_type = "Purse" access_types = " & ".join([( "direct with key", "direct w/o key", "cashback with key", "cashback w/o key", "decrement with key", "decrement w/o key", "read with key", "read w/o key")[x & 7] for x in services]) # Now we print one line to verbosely describe the service # and list the service codes. service_codes = " ".join(["0x{0:04X}".format(x) for x in services]) lines = [ "{indent}{type} Service {number}: {access} ({0})".format( service_codes, indent=depth*' ', type=service_type, number=services[0] >> 6, access=access_types)] # The final piece is to see if any of the services allows # us to read block data without a key. Services w/o key # have the last bit set to 1, so we generate a list of # only those services and iterate over the slice from the # last item to end (that's one or zero services). for service in [sc for sc in services if sc & 1][-1:]: sc = tt3.ServiceCode(service >> 6, service & 0b111111) for line in self.dump_service(sc): lines.append(depth*' ' + ' ' + line) return lines # Unfortunately there are some older cards with reduced # command support. If request_system_code() is not supported # we can only see if the current system code is NDEF and try # to dup that, otherwise it is the end. try: card_system_codes = self.request_system_code() except nfc.tag.TagCommandError: if self.sys == 0x12FC: return super(FelicaStandard, self).dump() else: return ["unable to create a memory dump"] # A FeliCa card has one or more systems, each system has one # or more areas which may be nested, and an area may have zero # to many services. The outer loop iterates over all system # codes that are present on the card. The inner loop iterates # by index over all area and service definitions. lines = [] for system_code in card_system_codes: # A system must be activated first, this is what the # polling() command does. idm, pmm = self.polling(system_code) self.idm = idm self.pmm = pmm self.sys = system_code lines.extend(print_system(system_code)) area_stack = [] overlap_services = [] # Walk through the list of services by index. The first # index for which there is no service returns None and # terminate the loop. for service_index in itertools.count(): # pragma: no branch assert service_index < 0x10000 depth = len(area_stack) area_or_service = self.search_service_code(service_index) if area_or_service is None: # Went beyond the service index. Print overlap # services if any and exit loop. if len(overlap_services) > 0: lines.extend(print_service(overlap_services, depth)) overlap_services = [] break elif len(area_or_service) == 1: # Found a service definition. Add as overlap # service if it is either the first or same type # (Random, Cyclic, Purse) as the previous one. 
If # it is different then print the current overlap # services and remember this for the next round. service = area_or_service[0] end_overlap_services = False if len(overlap_services) == 0: overlap_services.append(service) elif service >> 4 == overlap_services[-1] >> 4: if service >> 4 & 1: # purse overlap_services.append(service) elif service >> 2 == overlap_services[-1] >> 2: overlap_services.append(service) else: end_overlap_services = True else: end_overlap_services = True if end_overlap_services: lines.extend(print_service(overlap_services, depth)) overlap_services = [service] elif len(area_or_service) == 2: # Found an area definition. Print any services # that we might so far have assembled, then # process the area information. if len(overlap_services) > 0: lines.extend(print_service(overlap_services, depth)) overlap_services = [] area_from, area_last = area_or_service if len(area_stack) > 0 and area_from > area_stack[-1][1]: area_stack.pop() lines.extend(print_area(area_from, area_last, depth)) area_stack.append((area_from, area_last)) return lines def request_service(self, service_list): """Verify existence of a service (or area) and get the key version. Each service (or area) to verify must be given as a :class:`~nfc.tag.tt3.ServiceCode` in the iterable *service_list*. The key versions are returned as a list of 16-bit integers, in the order requested. If a specified service (or area) does not exist, the key version will be 0xFFFF. Command execution errors raise :exc:`~nfc.tag.TagCommandError`. """ a, b, e = self.pmm[2] & 7, self.pmm[2] >> 3 & 7, self.pmm[2] >> 6 timeout = 302E-6 * ((b + 1) * len(service_list) + a + 1) * 4**e pack = lambda x: x.pack() # noqa: E731 data = chr(len(service_list)) + ''.join(map(pack, service_list)) data = self.send_cmd_recv_rsp(0x02, data, timeout, check_status=False) if len(data) != 1 + len(service_list) * 2: log.debug("insufficient data received from tag") raise tt3.Type3TagCommandError(tt3.DATA_SIZE_ERROR) return [unpack("<H", data[i:i+2])[0] for i in range(1, len(data), 2)] def request_response(self): """Verify that a card is still present and
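
# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the tag module above: exercising the
# activate()/dump() path through nfcpy's contactless frontend. The "usb"
# device path and the assumption that a FeliCa card is presented to the
# reader are illustrative only.
# ---------------------------------------------------------------------------
import nfc


def _print_felica_dump():
    def on_connect(tag):
        # `tag` is whatever activate() above returned, e.g. a FelicaStandard.
        print("\n".join(tag.dump()))
        return True  # hold the connection until the card is removed

    with nfc.ContactlessFrontend("usb") as clf:
        clf.connect(rdwr={"on-connect": on_connect})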
<filename>xoa_driver/internals/core/commands/pr_commands.py #: L23 Port RX Statistics Commands from dataclasses import dataclass import typing from ..protocol.command_builders import ( build_get_request, build_set_request ) from .. import interfaces from ..transporter.token import Token from ..protocol.fields.data_types import * from ..protocol.fields.field import XmpField from ..registry import register_command from .enums import * @register_command @dataclass class PR_TPLDJITTER: """ Obtains statistics concerning the jitter experienced by the packets with a particular test payload id received on a port. The values are the difference in packet-to-packet latency, and the minimum will usually be zero.A special value of -1 is returned if jitter numbers are not applicable. They are only available for TID values 0..31. """ code: typing.ClassVar[int] = 239 pushed: typing.ClassVar[bool] = False _connection: "interfaces.IConnection" _module: int _port: int _test_payload_xindex: int @dataclass(frozen=True) class GetDataAttr: min_val: XmpField[XmpLong] = XmpField(XmpLong) # long integer, nanoseconds, minimum jitter for test payload stream avg_val: XmpField[XmpLong] = XmpField(XmpLong) # long integer, nanoseconds, average jitter for test payload stream max_val: XmpField[XmpLong] = XmpField(XmpLong) # long integer, nanoseconds, maximum jitter for test payload stream avg_last_sec: XmpField[XmpLong] = XmpField(XmpLong) # long integer, nanoseconds, average jitter over last 1-second period min_last_sec: XmpField[XmpLong] = XmpField(XmpLong) # long integer, nanoseconds, minimum jitter during last 1-second period max_last_sec: XmpField[XmpLong] = XmpField(XmpLong) # long integer, nanoseconds, maximum jitter during last 1-second period def get(self) -> "Token[GetDataAttr]": """Get statistics concerning the jitter experienced by the packets with a particular test payload id received on a port. :return: minimum|average|maximum jitter (nanoseconds), average|average|maximum jitter over last 1-second period (nanoseconds) :rtype: PR_TPLDJITTER.GetDataAttr """ return Token(self._connection, build_get_request(self, module=self._module, port=self._port, indices=[self._test_payload_xindex])) @register_command @dataclass class PR_TOTAL: """ Obtains statistics concerning all the packets received on a port. """ code: typing.ClassVar[int] = 240 pushed: typing.ClassVar[bool] = False _connection: "interfaces.IConnection" _module: int _port: int @dataclass(frozen=True) class GetDataAttr: bit_count_last_sec: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of bits received in the last second. packet_count_last_sec: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of packets received in the last second. byte_count_since_cleared: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of bytes received since statistics were cleared. packet_count_since_cleared: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of packets received since statistics were cleared. def get(self) -> "Token[GetDataAttr]": """Get statistics concerning all the packets received on a port. :return: number of bits received in the last second, number of packets received in the last second, number of bytes received since statistics were cleared, and number of packets received since statistics were cleared. 
:rtype: PR_TOTAL.GetDataAttr """ return Token(self._connection, build_get_request(self, module=self._module, port=self._port)) @register_command @dataclass class PR_NOTPLD: """ Obtains statistics concerning the packets without a test payload received on a port. """ code: typing.ClassVar[int] = 241 pushed: typing.ClassVar[bool] = False _connection: "interfaces.IConnection" _module: int _port: int @dataclass(frozen=True) class GetDataAttr: bit_count_last_sec: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of bits received in the last second. packet_count_last_sec: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of packets received in the last second. byte_count_since_cleared: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of bytes received since statistics were cleared. packet_count_since_cleared: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of packets received since statistics were cleared. def get(self) -> "Token[GetDataAttr]": """Get statistics concerning the packets without a test payload received on a port. :return: number of bits received in the last second, number of packets received in the last second, number of bytes received since statistics were cleared, and number of packets received since statistics were cleared. :rtype: PR_NOTPLD.GetDataAttr """ return Token(self._connection, build_get_request(self, module=self._module, port=self._port)) @register_command @dataclass class PR_EXTRA: """ Obtains statistics concerning special errors received on a port since received statistics were cleared. """ code: typing.ClassVar[int] = 242 pushed: typing.ClassVar[bool] = False _connection: "interfaces.IConnection" _module: int _port: int @dataclass(frozen=True) class GetDataAttr: fcs_error_count: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of packets with frame checksum errors. pause_frame_count: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of Ethernet pause frames. rx_arp_request_count: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of ARP request packets received. rx_arp_reply_count: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of ARP reply packets received. rx_ping_request_count: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of PING request packets received. rx_ping_reply_count: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of PING reply packets received. gap_count: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of gap-monitored gaps encountered. gap_duration: XmpField[XmpLong] = XmpField(XmpLong) # long integer, combined duration of gap-monitored gaps encountered, microseconds. def get(self) -> "Token[GetDataAttr]": """Get statistics concerning special packets received on a port since statistics were cleared. :return: number of packets with fcs error frames, pause frames, arp rxreq, arp rxrsp, ping rxreq, ping rxrsp, gap events, and gap microseconds. :rtype: PR_EXTRA.GetDataAttr """ return Token(self._connection, build_get_request(self, module=self._module, port=self._port)) @register_command @dataclass class PR_TPLDS: """ Obtain the set of test payload IDs observed among the received packets since receive statistics were cleared. Traffic statistics for these test payload streams will have non-zero byte and packet count. 
""" code: typing.ClassVar[int] = 243 pushed: typing.ClassVar[bool] = False _connection: "interfaces.IConnection" _module: int _port: int @dataclass(frozen=True) class GetDataAttr: test_payload_identifiers: XmpField[XmpIntList] = XmpField(XmpIntList) # list of integers, the identifiers of the test payload. def get(self) -> "Token[GetDataAttr]": """Get the set of test payload IDs observed among the received packets since receive statistics were cleared. Traffic statistics for these test payload streams will have non-zero byte and packet count. :return: the identifiers of the test payload :rtype: PR_TPLDS.GetDataAttr """ return Token(self._connection, build_get_request(self, module=self._module, port=self._port)) @register_command @dataclass class PR_TPLDTRAFFIC: """ Obtains traffic statistics concerning the packets with a particular test payload identifier received on a port. """ code: typing.ClassVar[int] = 244 pushed: typing.ClassVar[bool] = False _connection: "interfaces.IConnection" _module: int _port: int _test_payload_xindex: int @dataclass(frozen=True) class GetDataAttr: bit_count_last_sec: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of bits received in the last second. packet_count_last_sec: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of packets received in the last second. byte_count_since_cleared: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of bytes received since statistics were cleared. packet_count_since_cleared: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of packets received since statistics were cleared. def get(self) -> "Token[GetDataAttr]": """Get traffic statistics concerning the packets with a particular test payload identifier received on a port. :return: number of bits received in the last second, number of packets received in the last second, number of bytes received since statistics were cleared, number of packets received since statistics were cleared :rtype: PR_TPLDTRAFFIC.GetDataAttr """ return Token(self._connection, build_get_request(self, module=self._module, port=self._port, indices=[self._test_payload_xindex])) @register_command @dataclass class PR_TPLDERRORS: """ Obtains statistics concerning errors in the packets with a particular test payload id received on a port. The error information is derived from analysing the various fields contained in the embedded test payloads of the received packets, independent of which chassis and port may have originated the packets. Note that packet-lost statistics involve both a transmitting port and a receiving port, and in particular knowing which port originated the packets with a particular test payload identifier. This information requires knowledge of the global test environment, and is not supported at the port-level. """ code: typing.ClassVar[int] = 245 pushed: typing.ClassVar[bool] = False _connection: "interfaces.IConnection" _module: int _port: int _test_payload_xindex: int @dataclass(frozen=True) class GetDataAttr: dummy: XmpField[XmpLong] = XmpField(XmpLong) # long integer, not in use. non_incre_seq_event_count: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of non-incrementing-sequence-number events. swapped_seq_misorder_event_count: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of swapped-sequence-number misorder events. non_incre_payload_packet_count: XmpField[XmpLong] = XmpField(XmpLong) # long integer, number of packets with non-incrementing payload content. 
def get(self) -> "Token[GetDataAttr]": """Get statistics concerning errors in the packets with a particular test payload id received on a port. :return: number of non-incrementing-sequence-number events, number of swapped-sequence-number misorder events, number of packets with non-incrementing payload content :rtype: PR_TPLDERRORS.GetDataAttr """ return Token(self._connection, build_get_request(self, module=self._module, port=self._port, indices=[self._test_payload_xindex])) @register_command @dataclass class PR_TPLDLATENCY: """ Obtains statistics concerning the latency experienced by the packets with a particular test payload id received on a port. The values are adjusted by the port-level P_LATENCYOFFSET value. A special value of -1 is returned if latency numbers are not applicable. Latency is only meaningful when the clocks of the transmitter and receiver are synchronized. This requires the two ports to be on the same test module, and it
height=59, bordermode='ignore') self.mult_strike_scale.configure(activebackground="#86bad8") self.mult_strike_scale.configure(background="#86bad8") self.mult_strike_scale.configure(font="TkTextFont") self.mult_strike_scale.configure(foreground="#000000") self.mult_strike_scale.configure(highlightbackground="#86bad8") self.mult_strike_scale.configure(highlightcolor="black") self.mult_strike_scale.configure(label="Strike Marker Size") self.mult_strike_scale.configure(orient="horizontal") self.mult_strike_scale.configure(takefocus="0") self.mult_strike_scale.configure(troughcolor="#f2f2f2") ############################################################################### # Added slider for multinomial point size scale on point plot ############################################################################## self.mult_point_scale = tk.Scale(self.TNotebook1_t2, from_=1, to=100, command=self.mult_point_size) self.mult_point_scale.place(relx=0.65, rely=0.08, relwidth=0.104, relheight=0.0 , height=59, bordermode='ignore') self.mult_point_scale.configure(activebackground="#86bad8") self.mult_point_scale.configure(background="#86bad8") self.mult_point_scale.configure(font="TkTextFont") self.mult_point_scale.configure(foreground="#000000") self.mult_point_scale.configure(highlightbackground="#86bad8") self.mult_point_scale.configure(highlightcolor="black") self.mult_point_scale.configure(label="point Marker Size") self.mult_point_scale.configure(orient="horizontal") self.mult_point_scale.configure(takefocus="0") self.mult_point_scale.configure(troughcolor="#f2f2f2") ############################################################################### # Added slider for betastrike size ############################################################################## self.beta_strike_scale = tk.Scale(self.TNotebook1_t1, from_=1, to=100, command=self.beta_strike_size) self.beta_strike_scale.place(relx=0.167, rely=0.08, relwidth=0.104, relheight=0.0 , height=59, bordermode='ignore') self.beta_strike_scale.configure(activebackground="#86bad8") self.beta_strike_scale.configure(background="#86bad8") self.beta_strike_scale.configure(font="TkTextFont") self.beta_strike_scale.configure(foreground="#000000") self.beta_strike_scale.configure(highlightbackground="#86bad8") self.beta_strike_scale.configure(highlightcolor="black") self.beta_strike_scale.configure(label="Strike Marker Size") self.beta_strike_scale.configure(orient="horizontal") self.beta_strike_scale.configure(takefocus="0") self.beta_strike_scale.configure(troughcolor="#f2f2f2") ############################################################################### # 7/10/20 Added slider for betastrike size ############################################################################## self.beta_strike_scale = tk.Scale(self.TNotebook1_t2, from_=1, to=100, command=self.beta_point_size) self.beta_strike_scale.place(relx=0.167, rely=0.08, relwidth=0.104, relheight=0.0 , height=59, bordermode='ignore') self.beta_strike_scale.configure(activebackground="#86bad8") self.beta_strike_scale.configure(background="#86bad8") self.beta_strike_scale.configure(font="TkTextFont") self.beta_strike_scale.configure(foreground="#000000") self.beta_strike_scale.configure(highlightbackground="#86bad8") self.beta_strike_scale.configure(highlightcolor="black") self.beta_strike_scale.configure(label="Strike Marker Size") self.beta_strike_scale.configure(orient="horizontal") self.beta_strike_scale.configure(takefocus="0") self.beta_strike_scale.configure(troughcolor="#f2f2f2") 
############################################################################## ############################################################################## self.Label2_3 = tk.Label(self.TNotebook1_t1) self.Label2_3.place(relx=0.289, rely=0.11, height=21, width=64) self.Label2_3.configure(activebackground="#f9f9f9") self.Label2_3.configure(activeforeground="black") self.Label2_3.configure(background="#86bad8") self.Label2_3.configure(disabledforeground="#a3a3a3") self.Label2_3.configure(foreground="#000000") self.Label2_3.configure(highlightbackground="#d9d9d9") self.Label2_3.configure(highlightcolor="black") self.Label2_3.configure(text='''Chart Title''') self.beta_chart_title = tk.Entry(self.TNotebook1_t1) self.beta_chart_title.place(relx=0.289, rely=0.14, height=20 , relwidth=0.112) self.beta_chart_title.configure(background="white") self.beta_chart_title.configure(disabledforeground="#a3a3a3") self.beta_chart_title.configure(font=font10) self.beta_chart_title.configure(foreground="#000000") self.beta_chart_title.configure(highlightbackground="#d9d9d9") self.beta_chart_title.configure(highlightcolor="black") self.beta_chart_title.configure(insertbackground="black") self.beta_chart_title.configure(selectbackground="#c4c4c4") self.beta_chart_title.configure(selectforeground="black") self.beta_chart_title.configure(takefocus="0") self.label2_4 = tk.Label(self.TNotebook1_t1) self.label2_4.place(relx=0.167, rely=0.19, height=21, width=44) self.label2_4.configure(activebackground="#f9f9f9") self.label2_4.configure(activeforeground="black") self.label2_4.configure(background="#86bad8") self.label2_4.configure(borderwidth="0") self.label2_4.configure(disabledforeground="#a3a3a3") self.label2_4.configure(foreground="#000000") self.label2_4.configure(highlightbackground="#d9d9d9") self.label2_4.configure(highlightcolor="black") self.label2_4.configure(text='''X Label''') self.beta_chart_x_name = tk.Entry(self.TNotebook1_t1) self.beta_chart_x_name.place(relx=0.162, rely=0.23, height=20 , relwidth=0.112) self.beta_chart_x_name.configure(background="white") self.beta_chart_x_name.configure(disabledforeground="#a3a3a3") self.beta_chart_x_name.configure(font=font10) self.beta_chart_x_name.configure(foreground="#000000") self.beta_chart_x_name.configure(highlightbackground="#d9d9d9") self.beta_chart_x_name.configure(highlightcolor="black") self.beta_chart_x_name.configure(insertbackground="black") self.beta_chart_x_name.configure(selectbackground="#c4c4c4") self.beta_chart_x_name.configure(selectforeground="black") self.beta_chart_x_name.configure(takefocus="0") # self.beta_chart_x_name = tk.Entry(self.TNotebook1_t1) # self.beta_chart_x_name.place(relx=0.289, rely=0.192, height=20 # , relwidth=0.112) # self.beta_chart_x_name.configure(background="white") # self.beta_chart_x_name.configure(disabledforeground="#a3a3a3") # self.beta_chart_x_name.configure(font=font10) # self.beta_chart_x_name.configure(foreground="#000000") # self.beta_chart_x_name.configure(highlightbackground="#d9d9d9") # self.beta_chart_x_name.configure(highlightcolor="black") # self.beta_chart_x_name.configure(insertbackground="black") # self.beta_chart_x_name.configure(selectbackground="#c4c4c4") # self.beta_chart_x_name.configure(selectforeground="black") # self.beta_chart_x_name.configure(takefocus="0") self.beta_chart_y_label = tk.Entry(self.TNotebook1_t1) self.beta_chart_y_label.place(relx=0.289, rely=0.23, height=20 , relwidth=0.112) self.beta_chart_y_label.configure(background="white") 
self.beta_chart_y_label.configure(disabledforeground="#a3a3a3") self.beta_chart_y_label.configure(font=font10) self.beta_chart_y_label.configure(foreground="#000000") self.beta_chart_y_label.configure(highlightbackground="#d9d9d9") self.beta_chart_y_label.configure(highlightcolor="black") self.beta_chart_y_label.configure(insertbackground="black") self.beta_chart_y_label.configure(selectbackground="#c4c4c4") self.beta_chart_y_label.configure(selectforeground="black") self.beta_chart_y_label.configure(takefocus="0") self.label2_5 = tk.Label(self.TNotebook1_t1) self.label2_5.place(relx=0.294, rely=0.19, height=21, width=44) self.label2_5.configure(activebackground="#f9f9f9") self.label2_5.configure(activeforeground="black") self.label2_5.configure(background="#86bad8") self.label2_5.configure(disabledforeground="#a3a3a3") self.label2_5.configure(foreground="#000000") self.label2_5.configure(highlightbackground="#d9d9d9") self.label2_5.configure(highlightcolor="black") self.label2_5.configure(text='''Y Label''') self.beta_chart_axis_label_val=IntVar(self.TNotebook1_t1) self.beta_chart_axis_label_val.set(1) self.beta_chart_axis_labels = tk.Checkbutton(self.TNotebook1_t1) self.beta_chart_axis_labels.place(relx=0.02, rely=0.233, relheight=0.042 , relwidth=0.083) self.beta_chart_axis_labels.configure(activebackground="#ececec") self.beta_chart_axis_labels.configure(activeforeground="#000000") self.beta_chart_axis_labels.configure(background="#86bad8") self.beta_chart_axis_labels.configure(disabledforeground="#a3a3a3") self.beta_chart_axis_labels.configure(foreground="#000000") self.beta_chart_axis_labels.configure(highlightbackground="#d9d9d9") self.beta_chart_axis_labels.configure(highlightcolor="black") self.beta_chart_axis_labels.configure(justify='left') self.beta_chart_axis_labels.configure(takefocus="0") self.beta_chart_axis_labels.configure(text='''Axis Labels''') self.beta_chart_axis_labels.configure(variable=self.beta_chart_axis_label_val) self.label2_9 = tk.Label(self.TNotebook1_t1) self.label2_9.place(relx=0.775, rely=0.1, height=22, width=64) self.label2_9.configure(activebackground="#f9f9f9") self.label2_9.configure(activeforeground="black") self.label2_9.configure(background="#86bad8") self.label2_9.configure(disabledforeground="#a3a3a3") self.label2_9.configure(foreground="#000000") self.label2_9.configure(highlightbackground="#d9d9d9") self.label2_9.configure(highlightcolor="black") self.label2_9.configure(text='''Chart Title''') self.mult_cont_title = tk.Entry(self.TNotebook1_t1) self.mult_cont_title.place(relx=0.779, rely=0.14, height=20 , relwidth=0.112) self.mult_cont_title.configure(background="white") self.mult_cont_title.configure(disabledforeground="#a3a3a3") self.mult_cont_title.configure(font=font10) self.mult_cont_title.configure(foreground="#000000") self.mult_cont_title.configure(highlightbackground="#d9d9d9") self.mult_cont_title.configure(highlightcolor="black") self.mult_cont_title.configure(insertbackground="black") self.mult_cont_title.configure(selectbackground="#c4c4c4") self.mult_cont_title.configure(selectforeground="black") self.mult_cont_title.configure(takefocus="0") self.mult_cont_y_label = tk.Entry(self.TNotebook1_t1) self.mult_cont_y_label.place(relx=0.779, rely=0.225, height=20 , relwidth=0.112) self.mult_cont_y_label.configure(background="white") self.mult_cont_y_label.configure(disabledforeground="#a3a3a3") self.mult_cont_y_label.configure(font=font10) self.mult_cont_y_label.configure(foreground="#000000") 
self.mult_cont_y_label.configure(highlightbackground="#d9d9d9") self.mult_cont_y_label.configure(highlightcolor="black") self.mult_cont_y_label.configure(insertbackground="black") self.mult_cont_y_label.configure(selectbackground="#c4c4c4") self.mult_cont_y_label.configure(selectforeground="black") self.mult_cont_y_label.configure(takefocus="0") self.cont_max = tk.Entry(self.TNotebook1_t1) self.cont_max.place(relx=0.95, rely=0.19, height=20 , relwidth=0.04) self.cont_max.configure(background="white") self.cont_max.configure(disabledforeground="#a3a3a3") self.cont_max.configure(font=font10) self.cont_max.configure(foreground="#000000") self.cont_max.configure(highlightbackground="#d9d9d9") self.cont_max.configure(highlightcolor="black") self.cont_max.configure(insertbackground="black") self.cont_max.configure(selectbackground="#c4c4c4") self.cont_max.configure(selectforeground="black") self.cont_max.configure(takefocus="0") self.cont_num = tk.Entry(self.TNotebook1_t1) self.cont_num.place(relx=0.95, rely=0.23, height=20 , relwidth=0.04) self.cont_num.configure(background="white") self.cont_num.configure(disabledforeground="#a3a3a3") self.cont_num.configure(font=font10) self.cont_num.configure(foreground="#000000") self.cont_num.configure(highlightbackground="#d9d9d9") self.cont_num.configure(highlightcolor="black") self.cont_num.configure(insertbackground="black") self.cont_num.configure(selectbackground="#c4c4c4") self.cont_num.configure(selectforeground="black") self.cont_num.configure(takefocus="0") self.cont_max_label = tk.Label(self.TNotebook1_t1,anchor='w') self.cont_max_label.place(relx=0.91, rely=0.1875, height=21, width=30) self.cont_max_label.configure(activebackground="#f9f9f9") self.cont_max_label.configure(activeforeground="black") self.cont_max_label.configure(background="#86bad8") self.cont_max_label.configure(disabledforeground="#a3a3a3") self.cont_max_label.configure(foreground="#000000") self.cont_max_label.configure(highlightbackground="#d9d9d9") self.cont_max_label.configure(highlightcolor="black") self.cont_max_label.configure(text='''Max''') self.cont_num_label = tk.Label(self.TNotebook1_t1,anchor='w') self.cont_num_label.place(relx=0.91, rely=0.2275, height=21, width=30) self.cont_num_label.configure(activebackground="#f9f9f9") self.cont_num_label.configure(activeforeground="black") self.cont_num_label.configure(background="#86bad8") self.cont_num_label.configure(disabledforeground="#a3a3a3") self.cont_num_label.configure(foreground="#000000") self.cont_num_label.configure(highlightbackground="#d9d9d9") self.cont_num_label.configure(highlightcolor="black") self.cont_num_label.configure(text='''Num''') self.mult_cont_x_name = tk.Entry(self.TNotebook1_t1) self.mult_cont_x_name.place(relx=0.637, rely=0.225, height=20 , relwidth=0.112) self.mult_cont_x_name.configure(background="white") self.mult_cont_x_name.configure(disabledforeground="#a3a3a3") self.mult_cont_x_name.configure(font=font10) self.mult_cont_x_name.configure(foreground="#000000") self.mult_cont_x_name.configure(highlightbackground="#d9d9d9") self.mult_cont_x_name.configure(highlightcolor="black") self.mult_cont_x_name.configure(insertbackground="black") self.mult_cont_x_name.configure(selectbackground="#c4c4c4") self.mult_cont_x_name.configure(selectforeground="black") self.mult_cont_x_name.configure(takefocus="0") self.label2_10 = tk.Label(self.TNotebook1_t1) self.label2_10.place(relx=0.784, rely=0.19, height=21, width=44) self.label2_10.configure(activebackground="#f9f9f9") 
self.label2_10.configure(activeforeground="black") self.label2_10.configure(background="#86bad8") self.label2_10.configure(disabledforeground="#a3a3a3") self.label2_10.configure(foreground="#000000") self.label2_10.configure(highlightbackground="#d9d9d9") self.label2_10.configure(highlightcolor="black") self.label2_10.configure(text='''Y Label''') self.label2_7 = tk.Label(self.TNotebook1_t1) self.label2_7.place(relx=0.647, rely=0.19, height=21, width=44) self.label2_7.configure(activebackground="#f9f9f9") self.label2_7.configure(activeforeground="black") self.label2_7.configure(background="#86bad8") self.label2_7.configure(disabledforeground="#a3a3a3") self.label2_7.configure(foreground="#000000") self.label2_7.configure(highlightbackground="#d9d9d9") self.label2_7.configure(highlightcolor="black") self.label2_7.configure(text='''X Label''') # self.mult_plot_rng_scale = tk.Checkbutton(self.TNotebook1_t1) # self.mult_plot_rng_scale.place(relx=0.25, rely=0.242, relheight=0.042 # , relwidth=0.089) # self.mult_plot_rng_scale.configure(activebackground="#ececec") # self.mult_plot_rng_scale.configure(activeforeground="#000000") # self.mult_plot_rng_scale.configure(background="#86bad8") # self.mult_plot_rng_scale.configure(disabledforeground="#a3a3a3") # self.mult_plot_rng_scale.configure(foreground="#000000") # self.mult_plot_rng_scale.configure(highlightbackground="#d9d9d9") # self.mult_plot_rng_scale.configure(highlightcolor="black") # self.mult_plot_rng_scale.configure(justify='left') # self.mult_plot_rng_scale.configure(text='''Range Scale''') # self.mult_plot_rng_scale.configure(variable=GUI_support.che49) self.mult_cont_rng_scale_val=IntVar(self.TNotebook1_t1) self.mult_cont_rng_scale_val.set(1) self.mult_cont_rng_scale = tk.Radiobutton(self.TNotebook1_t1,value=0,variable=self.mult_cont_rng_scale_val,anchor='w') self.mult_cont_rng_scale.place(relx=0.9, rely=0.08, relheight=0.042 , relwidth=0.089) self.mult_cont_rng_scale.configure(activebackground="#86bad8") self.mult_cont_rng_scale.configure(activeforeground="#000000") self.mult_cont_rng_scale.configure(background="#86bad8") self.mult_cont_rng_scale.configure(disabledforeground="#a3a3a3") self.mult_cont_rng_scale.configure(foreground="#000000") self.mult_cont_rng_scale.configure(highlightbackground="#d9d9d9") self.mult_cont_rng_scale.configure(highlightcolor="black") self.mult_cont_rng_scale.configure(justify='left') self.mult_cont_rng_scale.configure(text='''Max''') self.mult_cont_rng_scale1 = tk.Radiobutton(self.TNotebook1_t1,value=1,variable=self.mult_cont_rng_scale_val,anchor='w') self.mult_cont_rng_scale1.place(relx=0.9, rely=0.11, relheight=0.042 , relwidth=0.089) self.mult_cont_rng_scale1.configure(activebackground="#86bad8") self.mult_cont_rng_scale1.configure(activeforeground="#000000") self.mult_cont_rng_scale1.configure(background="#86bad8") self.mult_cont_rng_scale1.configure(disabledforeground="#a3a3a3") self.mult_cont_rng_scale1.configure(foreground="#000000") self.mult_cont_rng_scale1.configure(highlightbackground="#d9d9d9") self.mult_cont_rng_scale1.configure(highlightcolor="black") self.mult_cont_rng_scale1.configure(justify='left') self.mult_cont_rng_scale1.configure(text='''Range''') self.mult_cont_rng_scale2 = tk.Radiobutton(self.TNotebook1_t1,value=2,variable=self.mult_cont_rng_scale_val,anchor='w') self.mult_cont_rng_scale2.place(relx=0.9, rely=0.14, relheight=0.042 , relwidth=0.089) self.mult_cont_rng_scale2.configure(activebackground="#86bad8") self.mult_cont_rng_scale2.configure(activeforeground="#000000") 
self.mult_cont_rng_scale2.configure(background="#86bad8") self.mult_cont_rng_scale2.configure(disabledforeground="#a3a3a3") self.mult_cont_rng_scale2.configure(foreground="#000000") self.mult_cont_rng_scale2.configure(highlightbackground="#d9d9d9") self.mult_cont_rng_scale2.configure(highlightcolor="black") self.mult_cont_rng_scale2.configure(justify='left') self.mult_cont_rng_scale2.configure(text='''Custom''') self.mult_cont_axis_label_val=IntVar(self.TNotebook1_t1) self.mult_cont_axis_label_val.set(1) self.mult_cont_axis_labels = tk.Checkbutton(self.TNotebook1_t1, anchor='w') self.mult_cont_axis_labels.place(relx=0.52, rely=0.225, height=24, width=100) self.mult_cont_axis_labels.configure(activebackground="#ececec") self.mult_cont_axis_labels.configure(activeforeground="#000000") self.mult_cont_axis_labels.configure(background="#86bad8") self.mult_cont_axis_labels.configure(disabledforeground="#a3a3a3") self.mult_cont_axis_labels.configure(foreground="#000000") self.mult_cont_axis_labels.configure(highlightbackground="#d9d9d9") self.mult_cont_axis_labels.configure(highlightcolor="black") self.mult_cont_axis_labels.configure(justify='left') self.mult_cont_axis_labels.configure(takefocus="0") self.mult_cont_axis_labels.configure(text='''Axis Labels''') self.mult_cont_axis_labels.configure(variable=self.mult_cont_axis_label_val) self.Beta_Points = tk.Canvas(self.TNotebook1_t2) self.Beta_Points.place(relx=0.02, rely=0.283, relheight=0.65 , relwidth=0.454) self.Beta_Points.configure(background="#d9d9d9") self.Beta_Points.configure(borderwidth="2") self.Beta_Points.configure(highlightbackground="#d9d9d9") self.Beta_Points.configure(highlightcolor="black") self.Beta_Points.configure(insertbackground="black") self.Beta_Points.configure(relief='ridge') self.Beta_Points.configure(selectbackground="#c4c4c4") self.Beta_Points.configure(selectforeground="black") self.Beta_Points.configure(takefocus="0") self.Beta_Points.configure(width=463) self.Mult_Points = tk.Canvas(self.TNotebook1_t2) self.Mult_Points.place(relx=0.52, rely=0.283, relheight=0.65, relwidth=0.454) self.Mult_Points.configure(background="#d9d9d9") self.Mult_Points.configure(borderwidth="2") self.Mult_Points.configure(highlightbackground="#d9d9d9") self.Mult_Points.configure(highlightcolor="black") self.Mult_Points.configure(insertbackground="black") self.Mult_Points.configure(relief='ridge') self.Mult_Points.configure(selectbackground="#c4c4c4") self.Mult_Points.configure(selectforeground="black") self.Mult_Points.configure(takefocus="0") self.Mult_Points.configure(width=463) self.TSeparator3 = ttk.Separator(self.TNotebook1_t2) self.TSeparator3.place(relx=0.495, rely=-0.017, relheight=1.0) self.TSeparator3.configure(orient="vertical") self.TSeparator3.configure(takefocus="0") self.Label3betaplot = tk.Label(self.TNotebook1_t2) self.Label3betaplot.place(relx=0.01, rely=0.017, height=31, width=156) self.Label3betaplot.configure(activebackground="#f9f9f9") self.Label3betaplot.configure(activeforeground="black") self.Label3betaplot.configure(background="#86bad8") self.Label3betaplot.configure(disabledforeground="#a3a3a3") self.Label3betaplot.configure(font=font13) self.Label3betaplot.configure(foreground="#000000") self.Label3betaplot.configure(highlightbackground="#d9d9d9") self.Label3betaplot.configure(highlightcolor="black") self.Label3betaplot.configure(text='''Beta Plot Settings''') self.Label3betacont = tk.Label(self.TNotebook1_t2) self.Label3betacont.place(relx=0.51, rely=0.017, height=31, width=206) 
self.Label3betacont.configure(activebackground="#f9f9f9") self.Label3betacont.configure(activeforeground="black") self.Label3betacont.configure(background="#86bad8") self.Label3betacont.configure(disabledforeground="#a3a3a3") self.Label3betacont.configure(font=font13) self.Label3betacont.configure(foreground="#000000") self.Label3betacont.configure(highlightbackground="#d9d9d9") self.Label3betacont.configure(highlightcolor="black") self.Label3betacont.configure(text='''Beta Contour Settings''') self.TSeparator1 = ttk.Separator(self.TNotebook1_t2) self.TSeparator1.place(relx=0.01, rely=0.067, relwidth=0.196) self.TSeparator1.configure(takefocus="0") self.TSeparator1 = ttk.Separator(self.TNotebook1_t2) self.TSeparator1.place(relx=0.52, rely=0.067, relwidth=0.196) self.TSeparator1.configure(takefocus="0") # self.Label32 = tk.Label(self.TNotebook1_t2) # self.Label32.place(relx=0.167, rely=0.083, height=21, width=64) # self.Label32.configure(activebackground="#f9f9f9") # self.Label32.configure(activeforeground="black") # self.Label32.configure(background="#86bad8") # self.Label32.configure(disabledforeground="#a3a3a3") # self.Label32.configure(foreground="#000000") # self.Label32.configure(highlightbackground="#d9d9d9") # self.Label32.configure(highlightcolor="black") # self.Label32.configure(text='''Marker Size''') self.Label31 = tk.Label(self.TNotebook1_t2) self.Label31.place(relx=0.02, rely=0.083, height=21, width=64) self.Label31.configure(activebackground="#f9f9f9") self.Label31.configure(activeforeground="black") self.Label31.configure(background="#86bad8") self.Label31.configure(disabledforeground="#a3a3a3") self.Label31.configure(foreground="#000000") self.Label31.configure(highlightbackground="#d9d9d9") self.Label31.configure(highlightcolor="black") self.Label31.configure(text='''Color Ramp''') self.Label33 = tk.Label(self.TNotebook1_t2) self.Label33.place(relx=0.304, rely=0.083, height=21, width=64) self.Label33.configure(activebackground="#f9f9f9") self.Label33.configure(activeforeground="black") self.Label33.configure(background="#86bad8") self.Label33.configure(disabledforeground="#a3a3a3") self.Label33.configure(foreground="#000000") self.Label33.configure(highlightbackground="#d9d9d9") self.Label33.configure(highlightcolor="black") self.Label33.configure(text='''Chart Title''') self.Label35 = tk.Label(self.TNotebook1_t2) self.Label35.place(relx=0.304, rely=0.2, height=21, width=64) self.Label35.configure(activebackground="#f9f9f9") self.Label35.configure(activeforeground="black") self.Label35.configure(background="#86bad8") self.Label35.configure(disabledforeground="#a3a3a3") self.Label35.configure(foreground="#000000") self.Label35.configure(highlightbackground="#d9d9d9") self.Label35.configure(highlightcolor="black") self.Label35.configure(text='''Y Label''') self.Label36 = tk.Label(self.TNotebook1_t2) self.Label36.place(relx=0.525, rely=0.075, height=21, width=64) self.Label36.configure(activebackground="#f9f9f9") self.Label36.configure(activeforeground="black") self.Label36.configure(background="#86bad8") self.Label36.configure(disabledforeground="#a3a3a3") self.Label36.configure(foreground="#000000") self.Label36.configure(highlightbackground="#d9d9d9") self.Label36.configure(highlightcolor="black") self.Label36.configure(text='''Color Ramp''') self.Label38 = tk.Label(self.TNotebook1_t2) self.Label38.place(relx=0.814, rely=0.075, height=21, width=64) self.Label38.configure(activebackground="#f9f9f9") self.Label38.configure(activeforeground="black") 
self.Label38.configure(background="#86bad8") self.Label38.configure(disabledforeground="#a3a3a3") self.Label38.configure(foreground="#000000") self.Label38.configure(highlightbackground="#d9d9d9") self.Label38.configure(highlightcolor="black") self.Label38.configure(text='''Chart Title''') self.Label37 = tk.Label(self.TNotebook1_t2) self.Label37.place(relx=0.667, rely=0.2, height=21, width=64) self.Label37.configure(activebackground="#f9f9f9") self.Label37.configure(activeforeground="black") self.Label37.configure(background="#86bad8") self.Label37.configure(disabledforeground="#a3a3a3") self.Label37.configure(foreground="#000000") self.Label37.configure(highlightbackground="#d9d9d9") self.Label37.configure(highlightcolor="black") self.Label37.configure(text='''X Label''') self.Label39 = tk.Label(self.TNotebook1_t2) self.Label39.place(relx=0.809, rely=0.2, height=21, width=64) self.Label39.configure(activebackground="#f9f9f9") self.Label39.configure(activeforeground="black") self.Label39.configure(background="#86bad8") self.Label39.configure(disabledforeground="#a3a3a3") self.Label39.configure(foreground="#000000") self.Label39.configure(highlightbackground="#d9d9d9") self.Label39.configure(highlightcolor="black") self.Label39.configure(text='''Y Label''') self.beta_point_plot_axis_check = tk.Checkbutton(self.TNotebook1_t2) self.beta_point_plot_axis_check.place(relx=0.01, rely=0.233, relheight=0.042 , relwidth=0.083) self.beta_point_plot_axis_check.configure(activebackground="#ececec") self.beta_point_plot_axis_check.configure(activeforeground="#000000") self.beta_point_plot_axis_check.configure(background="#86bad8") self.beta_point_plot_axis_check.configure(disabledforeground="#a3a3a3") self.beta_point_plot_axis_check.configure(foreground="#000000") self.beta_point_plot_axis_check.configure(highlightbackground="#d9d9d9") self.beta_point_plot_axis_check.configure(highlightcolor="black") self.beta_point_plot_axis_check.configure(justify='left') self.beta_point_plot_axis_check.configure(takefocus="0") self.beta_point_plot_axis_check.configure(text='''Axis Labels''') self.beta_point_plot_axis_check.configure(variable=GUI_support.che63) self.mult_point_axis_check = tk.Checkbutton(self.TNotebook1_t2) self.mult_point_axis_check.place(relx=0.51, rely=0.225, relheight=0.042 , relwidth=0.083) self.mult_point_axis_check.configure(activebackground="#ececec") self.mult_point_axis_check.configure(activeforeground="#000000") self.mult_point_axis_check.configure(background="#86bad8") self.mult_point_axis_check.configure(disabledforeground="#a3a3a3") self.mult_point_axis_check.configure(foreground="#000000") self.mult_point_axis_check.configure(highlightbackground="#d9d9d9") self.mult_point_axis_check.configure(highlightcolor="black") self.mult_point_axis_check.configure(justify='left') self.mult_point_axis_check.configure(takefocus="0") self.mult_point_axis_check.configure(text='''Axis Labels''') self.mult_point_axis_check.configure(variable=GUI_support.che64) # self.mult_plot_rng_scale = tk.Checkbutton(self.TNotebook1_t2) # self.mult_plot_rng_scale.place(relx=0.255, rely=0.233, relheight=0.042 # , relwidth=0.089) # self.mult_plot_rng_scale.configure(activebackground="#ececec") # self.mult_plot_rng_scale.configure(activeforeground="#000000") # self.mult_plot_rng_scale.configure(background="#86bad8") # self.mult_plot_rng_scale.configure(disabledforeground="#a3a3a3") # self.mult_plot_rng_scale.configure(foreground="#000000") # self.mult_plot_rng_scale.configure(highlightbackground="#d9d9d9") # 
self.mult_plot_rng_scale.configure(highlightcolor="black") # self.mult_plot_rng_scale.configure(justify='left') # self.mult_plot_rng_scale.configure(text='''Range Scale''') # self.mult_plot_rng_scale.configure(variable=GUI_support.che65) # # self.mult_plot_rng_scale = tk.Checkbutton(self.TNotebook1_t2) # self.mult_plot_rng_scale.place(relx=0.775, rely=0.225, relheight=0.042 # , relwidth=0.089) # self.mult_plot_rng_scale.configure(activebackground="#ececec") # self.mult_plot_rng_scale.configure(activeforeground="#000000") # self.mult_plot_rng_scale.configure(background="#86bad8") # self.mult_plot_rng_scale.configure(disabledforeground="#a3a3a3") # self.mult_plot_rng_scale.configure(foreground="#000000") # self.mult_plot_rng_scale.configure(highlightbackground="#d9d9d9") # self.mult_plot_rng_scale.configure(highlightcolor="black") # self.mult_plot_rng_scale.configure(justify='left') # self.mult_plot_rng_scale.configure(text='''Range Scale''') # self.mult_plot_rng_scale.configure(variable=GUI_support.che66) # self.beta_plot_marker_size = tk.Entry(self.TNotebook1_t2) # self.beta_plot_marker_size.place(relx=0.167, rely=0.117, height=20 # , relwidth=0.112) # self.beta_plot_marker_size.configure(background="white") # self.beta_plot_marker_size.configure(disabledforeground="#a3a3a3") # self.beta_plot_marker_size.configure(font=font10) # self.beta_plot_marker_size.configure(foreground="#000000") # self.beta_plot_marker_size.configure(highlightbackground="#d9d9d9") # self.beta_plot_marker_size.configure(highlightcolor="black") # self.beta_plot_marker_size.configure(insertbackground="black") # self.beta_plot_marker_size.configure(selectbackground="#c4c4c4") # self.beta_plot_marker_size.configure(selectforeground="black") # self.beta_plot_marker_size.configure(takefocus="0") self.beta_plot_title = tk.Entry(self.TNotebook1_t2) self.beta_plot_title.place(relx=0.304, rely=0.117, height=20 , relwidth=0.112) self.beta_plot_title.configure(background="white") self.beta_plot_title.configure(disabledforeground="#a3a3a3") self.beta_plot_title.configure(font=font10) self.beta_plot_title.configure(foreground="#000000") self.beta_plot_title.configure(highlightbackground="#d9d9d9") self.beta_plot_title.configure(highlightcolor="black") self.beta_plot_title.configure(insertbackground="black") self.beta_plot_title.configure(selectbackground="#c4c4c4") self.beta_plot_title.configure(selectforeground="black") self.beta_plot_title.configure(takefocus="0") self.beta_plot_x_label = tk.Entry(self.TNotebook1_t2) self.beta_plot_x_label.place(relx=0.167, rely=0.238, height=20 , relwidth=0.112) self.beta_plot_x_label.configure(background="white") self.beta_plot_x_label.configure(disabledforeground="#a3a3a3") self.beta_plot_x_label.configure(font=font10) self.beta_plot_x_label.configure(foreground="#000000") self.beta_plot_x_label.configure(highlightbackground="#d9d9d9") self.beta_plot_x_label.configure(highlightcolor="black") self.beta_plot_x_label.configure(insertbackground="black") self.beta_plot_x_label.configure(selectbackground="#c4c4c4") self.beta_plot_x_label.configure(selectforeground="black") self.beta_plot_x_label.configure(takefocus="0") self.beta_plot_y_label = tk.Entry(self.TNotebook1_t2) self.beta_plot_y_label.place(relx=0.304, rely=0.238, height=20 , relwidth=0.112) self.beta_plot_y_label.configure(background="white") self.beta_plot_y_label.configure(disabledforeground="#a3a3a3") self.beta_plot_y_label.configure(font=font10) self.beta_plot_y_label.configure(foreground="#000000") 
self.beta_plot_y_label.configure(highlightbackground="#d9d9d9") self.beta_plot_y_label.configure(highlightcolor="black") self.beta_plot_y_label.configure(insertbackground="black") self.beta_plot_y_label.configure(selectbackground="#c4c4c4") self.beta_plot_y_label.configure(selectforeground="black") self.beta_plot_y_label.configure(takefocus="0") self.Label34 = tk.Label(self.TNotebook1_t2) self.Label34.place(relx=0.167, rely=0.2, height=21, width=64) self.Label34.configure(activebackground="#f9f9f9") self.Label34.configure(activeforeground="black") self.Label34.configure(background="#86bad8") self.Label34.configure(disabledforeground="#a3a3a3") self.Label34.configure(foreground="#000000") self.Label34.configure(highlightbackground="#d9d9d9") self.Label34.configure(highlightcolor="black") self.Label34.configure(text='''X Label''') self.Update_Plots_beta1 = tk.Button(self.TNotebook1_t2) self.Update_Plots_beta1.place(relx=0.054, rely=0.183, height=24 , width=49) self.Update_Plots_beta1.configure(activebackground="#ececec") self.Update_Plots_beta1.configure(activeforeground="#000000") self.Update_Plots_beta1.configure(background="#ffae21") self.Update_Plots_beta1.configure(disabledforeground="#a3a3a3") self.Update_Plots_beta1.configure(foreground="#000000") self.Update_Plots_beta1.configure(highlightbackground="#d9d9d9") self.Update_Plots_beta1.configure(highlightcolor="black") self.Update_Plots_beta1.configure(pady="0") self.Update_Plots_beta1.configure(takefocus="0") self.Update_Plots_beta1.configure(text='''Update''') self.Update_Plots_beta2 = tk.Button(self.TNotebook1_t2) self.Update_Plots_beta2.place(relx=0.569, rely=0.167, height=24 , width=49) self.Update_Plots_beta2.configure(activebackground="#ececec") self.Update_Plots_beta2.configure(activeforeground="#000000") self.Update_Plots_beta2.configure(background="#ffae21") self.Update_Plots_beta2.configure(disabledforeground="#a3a3a3") self.Update_Plots_beta2.configure(foreground="#000000") self.Update_Plots_beta2.configure(highlightbackground="#d9d9d9") self.Update_Plots_beta2.configure(highlightcolor="black") self.Update_Plots_beta2.configure(pady="0") self.Update_Plots_beta2.configure(takefocus="0") self.Update_Plots_beta2.configure(text='''Update''') # self.Export_b11 = tk.Button(self.TNotebook1_t2) # self.Export_b11.place(relx=0.054, rely=0.233, height=24, width=49) # self.Export_b11.configure(activebackground="#ececec") # self.Export_b11.configure(activeforeground="#000000") # self.Export_b11.configure(background="#ffae21") # self.Export_b11.configure(disabledforeground="#a3a3a3") # self.Export_b11.configure(foreground="#000000") # self.Export_b11.configure(highlightbackground="#d9d9d9") # self.Export_b11.configure(highlightcolor="black") # self.Export_b11.configure(pady="0") # self.Export_b11.configure(takefocus="0") # self.Export_b11.configure(text='''Export''') # self.Export_b2 = tk.Button(self.TNotebook1_t2) # self.Export_b2.place(relx=0.569, rely=0.217, height=24, width=49) # self.Export_b2.configure(activebackground="#ececec") # self.Export_b2.configure(activeforeground="#000000") # self.Export_b2.configure(background="#ffae21") # self.Export_b2.configure(disabledforeground="#a3a3a3") # self.Export_b2.configure(foreground="#000000") # self.Export_b2.configure(highlightbackground="#d9d9d9") # self.Export_b2.configure(highlightcolor="black") # self.Export_b2.configure(pady="0") # self.Export_b2.configure(takefocus="0") # self.Export_b2.configure(text='''Export''') self.user_beta_point_cmap=StringVar(self.TNotebook1_t2, 
value='gnuplot2') self.user_beta_point_cmap.trace('w',self.beta_point_cmap) self.beta_point_cmap = ttk.Combobox(self.TNotebook1_t2, textvariable=self.user_beta_point_cmap,values=self.cmaps) self.beta_point_cmap.place(relx=0.01, rely=0.117, relheight=0.035 , relwidth=0.111) #This is the dropdown menu for multinomial point plot cmap self.user_mult_point_cmap=StringVar(self.TNotebook1_t2, value='gnuplot2') self.user_mult_point_cmap.trace('w',self.mult_point_cmap) self.mult_point_cmap = ttk.Combobox(self.TNotebook1_t2, textvariable=self.user_mult_point_cmap,values=self.cmaps) self.mult_point_cmap.place(relx=0.52, rely=0.117, relheight=0.035 , relwidth=0.101) self.beta_cont_title = tk.Entry(self.TNotebook1_t2) self.beta_cont_title.place(relx=0.809, rely=0.117, height=20 , relwidth=0.112) self.beta_cont_title.configure(background="white") self.beta_cont_title.configure(disabledforeground="#a3a3a3") self.beta_cont_title.configure(font=font10) self.beta_cont_title.configure(foreground="#000000") self.beta_cont_title.configure(highlightbackground="#d9d9d9") self.beta_cont_title.configure(highlightcolor="black") self.beta_cont_title.configure(insertbackground="black") self.beta_cont_title.configure(selectbackground="#c4c4c4") self.beta_cont_title.configure(selectforeground="black") self.beta_cont_title.configure(takefocus="0") self.beta_cont_x_label = tk.Entry(self.TNotebook1_t2) self.beta_cont_x_label.place(relx=0.657, rely=0.233, height=20 , relwidth=0.112) self.beta_cont_x_label.configure(background="white") self.beta_cont_x_label.configure(disabledforeground="#a3a3a3") self.beta_cont_x_label.configure(font=font10) self.beta_cont_x_label.configure(foreground="#000000") self.beta_cont_x_label.configure(highlightbackground="#d9d9d9") self.beta_cont_x_label.configure(highlightcolor="black") self.beta_cont_x_label.configure(insertbackground="black") self.beta_cont_x_label.configure(selectbackground="#c4c4c4") self.beta_cont_x_label.configure(selectforeground="black") self.beta_cont_x_label.configure(takefocus="0") self.beta_cont_y_label = tk.Entry(self.TNotebook1_t2) self.beta_cont_y_label.place(relx=0.809, rely=0.233, height=20 , relwidth=0.112) self.beta_cont_y_label.configure(background="white") self.beta_cont_y_label.configure(disabledforeground="#a3a3a3") self.beta_cont_y_label.configure(font=font10) self.beta_cont_y_label.configure(foreground="#000000") self.beta_cont_y_label.configure(highlightbackground="#d9d9d9") self.beta_cont_y_label.configure(highlightcolor="black") self.beta_cont_y_label.configure(insertbackground="black") self.beta_cont_y_label.configure(selectbackground="#c4c4c4") self.beta_cont_y_label.configure(selectforeground="black") self.beta_cont_y_label.configure(takefocus="0") self.FractureDensityCanvas = tk.Canvas(self.TNotebook1_t3) self.FractureDensityCanvas.place(relx=0.01, rely=0.058, relheight=0.542 , relwidth=0.319) self.FractureDensityCanvas.configure(background="#d9d9d9") self.FractureDensityCanvas.configure(borderwidth="2") self.FractureDensityCanvas.configure(highlightbackground="#d9d9d9") self.FractureDensityCanvas.configure(highlightcolor="black") self.FractureDensityCanvas.configure(insertbackground="black") self.FractureDensityCanvas.configure(relief='ridge') self.FractureDensityCanvas.configure(selectbackground="#c4c4c4") self.FractureDensityCanvas.configure(selectforeground="black") self.FractureDensityCanvas.configure(takefocus="0") self.FractureDensityCanvas.configure(width=423) self.LogFracDensityCanvas = tk.Canvas(self.TNotebook1_t3) 
self.LogFracDensityCanvas.place(relx=0.353, rely=0.058, relheight=0.542 , relwidth=0.319) self.LogFracDensityCanvas.configure(background="#d9d9d9") self.LogFracDensityCanvas.configure(borderwidth="2") self.LogFracDensityCanvas.configure(highlightbackground="#d9d9d9") self.LogFracDensityCanvas.configure(highlightcolor="black") self.LogFracDensityCanvas.configure(insertbackground="black") self.LogFracDensityCanvas.configure(relief='ridge') self.LogFracDensityCanvas.configure(selectbackground="#c4c4c4") self.LogFracDensityCanvas.configure(selectforeground="black") self.LogFracDensityCanvas.configure(takefocus="0") self.LogFracDensityCanvas.configure(width=423) self.UniformStrikeBeta = tk.Canvas(self.TNotebook1_t3) self.UniformStrikeBeta.place(relx=0.706, rely=0.542, relheight=0.458 , relwidth=0.27) self.UniformStrikeBeta.configure(background="#d9d9d9") self.UniformStrikeBeta.configure(borderwidth="2") self.UniformStrikeBeta.configure(highlightbackground="#d9d9d9") self.UniformStrikeBeta.configure(highlightcolor="black") self.UniformStrikeBeta.configure(insertbackground="black") self.UniformStrikeBeta.configure(relief='ridge') self.UniformStrikeBeta.configure(selectbackground="#c4c4c4") self.UniformStrikeBeta.configure(selectforeground="black") self.UniformStrikeBeta.configure(takefocus="0") self.UniformStrikeBeta.configure(width=325) self.Label4_1 = tk.Label(self.TNotebook1_t3) self.Label4_1.place(relx=0.01, rely=0.017, height=21, width=91) self.Label4_1.configure(activebackground="#f9f9f9") self.Label4_1.configure(activeforeground="black") self.Label4_1.configure(background="#86bad8") self.Label4_1.configure(disabledforeground="#a3a3a3") self.Label4_1.configure(foreground="#000000") self.Label4_1.configure(highlightbackground="#d9d9d9") self.Label4_1.configure(highlightcolor="black") self.Label4_1.configure(text='''Fracture Density''') self.Label4_2 = tk.Label(self.TNotebook1_t3) self.Label4_2.place(relx=0.358, rely=0.017, height=21, width=111) self.Label4_2.configure(activebackground="#f9f9f9") self.Label4_2.configure(activeforeground="black") self.Label4_2.configure(background="#86bad8") self.Label4_2.configure(cursor="fleur") self.Label4_2.configure(disabledforeground="#a3a3a3") self.Label4_2.configure(foreground="#000000") self.Label4_2.configure(highlightbackground="#d9d9d9") self.Label4_2.configure(highlightcolor="black") self.Label4_2.configure(text='''Log Fracture Density''') self.Label4_3 = tk.Label(self.TNotebook1_t3) self.Label4_3.place(relx=0.701, rely=0.0, height=21, width=151) self.Label4_3.configure(activebackground="#f9f9f9") self.Label4_3.configure(activeforeground="black") self.Label4_3.configure(background="#86bad8") self.Label4_3.configure(disabledforeground="#a3a3a3") self.Label4_3.configure(foreground="#000000") self.Label4_3.configure(highlightbackground="#d9d9d9") self.Label4_3.configure(highlightcolor="black") self.Label4_3.configure(text='''Uniform Strike Multinomial''') self.UniformReroll = tk.Button(self.TNotebook1_t3, command=self.reroll_random) self.UniformReroll.place(relx=0.858, rely=0.0, height=24, width=115) self.UniformReroll.configure(activebackground="#ececec") self.UniformReroll.configure(activeforeground="#000000") self.UniformReroll.configure(background="#ffae21") self.UniformReroll.configure(disabledforeground="#a3a3a3") self.UniformReroll.configure(foreground="#000000") self.UniformReroll.configure(highlightbackground="#d9d9d9") self.UniformReroll.configure(highlightcolor="black") self.UniformReroll.configure(pady="0") 
self.UniformReroll.configure(text='''Reroll Uniform Data''') self.UniformStrikeMultinomial = tk.Canvas(self.TNotebook1_t3) self.UniformStrikeMultinomial.place(relx=0.706, rely=0.042 , relheight=0.458, relwidth=0.27) self.UniformStrikeMultinomial.configure(background="#d9d9d9") self.UniformStrikeMultinomial.configure(borderwidth="2") self.UniformStrikeMultinomial.configure(highlightbackground="#d9d9d9") self.UniformStrikeMultinomial.configure(highlightcolor="black") self.UniformStrikeMultinomial.configure(insertbackground="black") self.UniformStrikeMultinomial.configure(relief='ridge') self.UniformStrikeMultinomial.configure(selectbackground="#c4c4c4") self.UniformStrikeMultinomial.configure(selectforeground="black") self.UniformStrikeMultinomial.configure(takefocus="0") self.UniformStrikeMultinomial.configure(width=325) self.Label4_6 = tk.Label(self.TNotebook1_t3) self.Label4_6.place(relx=0.701, rely=0.5, height=21, width=121) self.Label4_6.configure(activebackground="#f9f9f9") self.Label4_6.configure(activeforeground="black") self.Label4_6.configure(background="#86bad8") self.Label4_6.configure(disabledforeground="#a3a3a3") self.Label4_6.configure(foreground="#000000") self.Label4_6.configure(highlightbackground="#d9d9d9") self.Label4_6.configure(highlightcolor="black") self.Label4_6.configure(text='''Uniform Strike Beta''') self.Label4_6.configure(width=121) self.FracDenseMultCanvas = tk.Canvas(self.TNotebook1_t3) self.FracDenseMultCanvas.place(relx=0.01, rely=0.667, relheight=0.32 , relwidth=0.196) self.FracDenseMultCanvas.configure(background="#d9d9d9") self.FracDenseMultCanvas.configure(borderwidth="2") self.FracDenseMultCanvas.configure(highlightbackground="#d9d9d9") self.FracDenseMultCanvas.configure(highlightcolor="black") self.FracDenseMultCanvas.configure(insertbackground="black") self.FracDenseMultCanvas.configure(relief='ridge') self.FracDenseMultCanvas.configure(selectbackground="#c4c4c4") self.FracDenseMultCanvas.configure(selectforeground="black") # self.FracDenseMultCanvas.configure(width=323) self.FracBetaCanvas = tk.Canvas(self.TNotebook1_t3) self.FracBetaCanvas.place(relx=0.25, rely=0.667, relheight=0.322 , relwidth=0.2) self.FracBetaCanvas.configure(background="#d9d9d9") self.FracBetaCanvas.configure(borderwidth="2") self.FracBetaCanvas.configure(highlightbackground="#d9d9d9") self.FracBetaCanvas.configure(highlightcolor="black") self.FracBetaCanvas.configure(insertbackground="black") self.FracBetaCanvas.configure(relief='ridge') self.FracBetaCanvas.configure(selectbackground="#c4c4c4") self.FracBetaCanvas.configure(selectforeground="black") self.Label64_5 = tk.Label(self.TNotebook1_t3)
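# --- Illustrative sketch (not part of the generated GUI above): the settings
# tabs above wire a group of Radiobuttons (Max/Range/Custom) to a shared
# IntVar, and a ttk.Combobox for the colour-map choice to a traced StringVar.
# The stand-alone example below shows that same pattern with minimal widgets;
# the names rng_scale_val, cmap_val and on_cmap_change are hypothetical, and
# trace_add('write', ...) is the modern spelling of the .trace('w', ...) call
# used in the generated code.
import tkinter as tk
import tkinter.ttk as ttk

root = tk.Tk()

# One IntVar shared by several Radiobuttons selects the range-scaling mode
# (0 = Max, 1 = Range, 2 = Custom, defaulting to Range as above).
rng_scale_val = tk.IntVar(root, value=1)
for value, text in enumerate(("Max", "Range", "Custom")):
    tk.Radiobutton(root, text=text, value=value,
                   variable=rng_scale_val, anchor='w').pack(fill='x')

# A traced StringVar fires a callback whenever the Combobox selection changes,
# e.g. to redraw a plot with the newly selected colour map.
cmap_val = tk.StringVar(root, value='gnuplot2')

def on_cmap_change(*_):
    print('selected colour map:', cmap_val.get())

cmap_val.trace_add('write', on_cmap_change)
ttk.Combobox(root, textvariable=cmap_val,
             values=('gnuplot2', 'viridis', 'plasma')).pack(fill='x')

root.mainloop()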
no conversion is possible, an exception will be raised. """ if (value not in State.values): if (not convert): raise ValueError ("{0} is not valid for State.".format (value)) if (value.upper () in {"UP", "ON", "PRESENT", "ENABLED", "OK", "POWER LIMIT ACTIVE"}): value = "Enabled" elif (value.upper () in {"DOWN", "OFF", "NO ACTIVE POWER LIMIT"}): value = "Disabled" elif (value.upper() in {"NA", "UNAVAILABLE"}): value = "" else: raise ValueError ("Unknown State conversion for {0}.".format (value)) self.value = value def __str__ (self): return self.value def __repr__ (self): return self.__str__ () class AddressOrigin: """ Define the enumeration for the AddressOrgin property. Valid values are: DHCP, Static """ DHCP = "DHCP" STATIC = "Static" values = [DHCP, STATIC] def __init__ (self, value, convert = False): """ Create a AddressOrigin enumeration instance. :param value: The value to assign to the enumeration. If the value is not valid, an exception will be raised. :param convert: A flag indicating if conversion to a valid enumeration value should be performed when given an invalid enumeration value. If no conversion is possible, an exception will be raised. """ if (value not in AddressOrigin.values): if (not convert): raise ValueError ("{0} is not valid for AddressOrigin.".format (value)) if ("DHCP" in value): value = AddressOrigin.DHCP else: raise ValueError ("Unknown AddressOrigin conversion for {0}.".format (value)) self.value = value def __str__ (self): return self.value def __repr__ (self): return self.__str__ () def __eq__ (self, other): if isinstance (other, AddressOrigin): return self.value == other.value elif isinstance (other, basestring): return self.value == other return NotImplemented def __ne__ (self, other): result = self.__eq__ (other) if result is NotImplemented: return result return not result class PowerState: """ Define the enumeration for the PowerState property. Valid values are: On Off """ values = ["On", "Off"] def __init__ (self, value, convert = False, to_lower = False): """ Create a PowerState enumeration instance. :param value: The value to assign to the enumeration. If the value is not valid, an exception will be raised. :param convert: A flag indicating if conversion to a valid enumeration value should be performed when given an invalid enumeration value. If no conversion is possible, an exception will be raised. :param to_lower: A flag indicating if the string representation should be in lower case letters. """ if (value not in PowerState.values): if (not convert): raise ValueError ("{0} is not valid for PowerState.".format (value)) if ("ON" == value.upper ()): value = "On" elif ("OFF" == value.upper ()): value = "Off" else: raise ValueError ("Unknown PowerState conversion for {0}.".format (value)) self.value = value self.to_lower = to_lower def __str__ (self): if (not self.to_lower): return self.value else: return self.value.lower () def __repr__ (self): return self.__str__ () class BootSourceOverrideEnabled: """ Define the enumeration for the BootSourceOverrideEnabled property. Valid values are: Disabled Once Continuous """ DISABLED = "Disabled" ONCE = "Once" CONTINUOUS = "Continuous" values = [DISABLED, ONCE, CONTINUOUS] def __init__ (self, value, convert = False): """ Create a BootSourceOverrideEnabled enumeration instance. :param value: The value to assign to the enumeration. If the value is not valid, an exception will be raised. 
:param convert: A flag indicating if conversion to a valid enumeration value should be performed when given an invalid enumeration value. If no conversion is possible, an exception will be raised. """ if (value not in BootSourceOverrideEnabled.values): if (not convert): raise ValueError ("{0} is not valid for BootSourceOverrideEnabled.".format (value)) if ("True" == value): value = BootSourceOverrideEnabled.ONCE elif ("False" == value): value = BootSourceOverrideEnabled.DISABLED elif ("Persistent" == value): value = BootSourceOverrideEnabled.CONTINUOUS else: raise ValueError ( "Unknown BootSourceOverrideEnabled conversion for {0}.".format (value)) self.value = value def __str__ (self): return self.value def __repr__ (self): return self.__str__ () class BootSourceOverrideTarget: """ Define the enumeration for the BootSourceOverrideTarget property. Valid values are: None Pxe Floppy Hdd BiosSetup """ NONE = "None" PXE = "Pxe" FLOPPY = "Floppy" HDD = "Hdd" BIOS_SETUP = "BiosSetup" values = { NONE : "none", PXE : "pxe", FLOPPY : "floppy", HDD : "disk", BIOS_SETUP : "bios" } def __init__ (self, value, convert = False, cmd_arg = False): """ Create a BootSourceOverrideTarget enumeration instance. :param value: The value to assign to the enumeration. If the value is not valid, an exception will be raised. :param convert: A flag indicating if conversion to a valid enumeration value should be performed when given an invalid enumeration value. If no conversion is possible, an exception will be raised. :param cmd_arg: A flag indicating the command argument mapping should be returned as the string representation of the enumeration. """ if (value not in BootSourceOverrideTarget.values): if (not convert): raise ValueError ("{0} is not valid for BootSourceOverrideTarget.".format (value)) if ("PXE" in value): value = BootSourceOverrideTarget.PXE elif ("Hard-Drive" in value): value = BootSourceOverrideTarget.HDD elif ("BIOS" in value): value = BootSourceOverrideTarget.BIOS_SETUP elif ("Floppy" in value): value = BootSourceOverrideTarget.FLOPPY elif ("No override" in value): value = BootSourceOverrideTarget.NONE else: raise ValueError ( "Unknown BootSourceOverrideTarget conversion for {0}.".format (value)) self.value = value self.cmd_arg = cmd_arg def __str__ (self): if (not self.cmd_arg): return self.value else: return BootSourceOverrideTarget.values[self.value] def __repr__ (self): return self.__str__ () def __eq__ (self, other): if isinstance (other, BootSourceOverrideTarget): return self.value == other.value elif isinstance (other, basestring): return self.value == other return NotImplemented def __ne__ (self, other): result = self.__eq__ (other) if result is NotImplemented: return result return not result class BootSourceOverrideMode: """ Define the enumeration for the BootSourceOverrideMode property. Valid values are: Legacy UEFI """ values = { "Legacy" : 0, "UEFI" : 1 } def __init__ (self, value, convert = False): """ Create a BootSourceOverrideMode enumeration instance. :param value: The value to assign to the enumeration. If the value is not valid, an exception will be raised. :param convert: A flag indicating if conversion to a valid enumeration value should be performed when given an invalid enumeration value. If no conversion is possible, an exception will be raised. 
""" if (value not in BootSourceOverrideMode.values): if (not convert): raise ValueError ("{0} is not valid for BootSourceOverrideMode.".format (value)) raise ValueError ( "Unknown BootSourceOverrideMode conversion for {0}.".format (value)) self.value = value def __str__ (self): return self.value def __repr__ (self): return self.__str__ () def __int__ (self): return BootSourceOverrideMode.values[self.value] class BypassMode: """ Define the enumeration for the BypassMode property. Valid values are: Enabled Disabled """ values = ["Enabled", "Disabled"] def __init__ (self, value, convert = False): """ Create a BypassMode enumeration instance. :param value: The value to assign to the enumeration. If the value is not valid, an exception will be raised. :param convert: A flag indicating if conversion to a valid enumeration value should be performed when given an invalid enumeration value. If no conversion is possible, an exception will be raised. """ if (value not in BypassMode.values): if (not convert): raise ValueError ("{0} is not valid for BypassMode.".format (value)) if ("On" == value): value = "Enabled" elif ("Off" == value): value = "Disabled" else: raise ValueError ("Unknown BypassMode conversion for {0}.".format (value)) self.value = value def __str__ (self): return self.value def __repr__ (self): return self.__str__ () class UserLogic: """ Define the enumeration for the UserLogic property. Valid values are: Enabled Disabled """ values = ["Enabled", "Disabled"] def __init__ (self, value, convert = False): """ Create a UserLogic enumeration instance. :param value: The value to assign to the enumeration. If the value is not valid, an exception will be raised. :param convert: A flag indicating if conversion to a valid enumeration value should be performed when given an invalid enumeration value. If no conversion is possible, an exception will be raised. """
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities __all__ = ['StorageDrsVmOverrideArgs', 'StorageDrsVmOverride'] @pulumi.input_type class StorageDrsVmOverrideArgs: def __init__(__self__, *, datastore_cluster_id: pulumi.Input[str], virtual_machine_id: pulumi.Input[str], sdrs_automation_level: Optional[pulumi.Input[str]] = None, sdrs_enabled: Optional[pulumi.Input[str]] = None, sdrs_intra_vm_affinity: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a StorageDrsVmOverride resource. :param pulumi.Input[str] datastore_cluster_id: The managed object reference ID of the datastore cluster to put the override in. Forces a new resource if changed. :param pulumi.Input[str] virtual_machine_id: The UUID of the virtual machine to create the override for. Forces a new resource if changed. :param pulumi.Input[str] sdrs_automation_level: Overrides any Storage DRS automation levels for this virtual machine. Can be one of `automated` or `manual`. When not specified, the datastore cluster's settings are used according to the specific SDRS subsystem. :param pulumi.Input[str] sdrs_enabled: Overrides the default Storage DRS setting for this virtual machine. When not specified, the datastore cluster setting is used. :param pulumi.Input[str] sdrs_intra_vm_affinity: Overrides the intra-VM affinity setting for this virtual machine. When `true`, all disks for this virtual machine will be kept on the same datastore. When `false`, Storage DRS may locate individual disks on different datastores if it helps satisfy cluster requirements. When not specified, the datastore cluster's settings are used. """ pulumi.set(__self__, "datastore_cluster_id", datastore_cluster_id) pulumi.set(__self__, "virtual_machine_id", virtual_machine_id) if sdrs_automation_level is not None: pulumi.set(__self__, "sdrs_automation_level", sdrs_automation_level) if sdrs_enabled is not None: pulumi.set(__self__, "sdrs_enabled", sdrs_enabled) if sdrs_intra_vm_affinity is not None: pulumi.set(__self__, "sdrs_intra_vm_affinity", sdrs_intra_vm_affinity) @property @pulumi.getter(name="datastoreClusterId") def datastore_cluster_id(self) -> pulumi.Input[str]: """ The managed object reference ID of the datastore cluster to put the override in. Forces a new resource if changed. """ return pulumi.get(self, "datastore_cluster_id") @datastore_cluster_id.setter def datastore_cluster_id(self, value: pulumi.Input[str]): pulumi.set(self, "datastore_cluster_id", value) @property @pulumi.getter(name="virtualMachineId") def virtual_machine_id(self) -> pulumi.Input[str]: """ The UUID of the virtual machine to create the override for. Forces a new resource if changed. """ return pulumi.get(self, "virtual_machine_id") @virtual_machine_id.setter def virtual_machine_id(self, value: pulumi.Input[str]): pulumi.set(self, "virtual_machine_id", value) @property @pulumi.getter(name="sdrsAutomationLevel") def sdrs_automation_level(self) -> Optional[pulumi.Input[str]]: """ Overrides any Storage DRS automation levels for this virtual machine. Can be one of `automated` or `manual`. When not specified, the datastore cluster's settings are used according to the specific SDRS subsystem. 
""" return pulumi.get(self, "sdrs_automation_level") @sdrs_automation_level.setter def sdrs_automation_level(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "sdrs_automation_level", value) @property @pulumi.getter(name="sdrsEnabled") def sdrs_enabled(self) -> Optional[pulumi.Input[str]]: """ Overrides the default Storage DRS setting for this virtual machine. When not specified, the datastore cluster setting is used. """ return pulumi.get(self, "sdrs_enabled") @sdrs_enabled.setter def sdrs_enabled(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "sdrs_enabled", value) @property @pulumi.getter(name="sdrsIntraVmAffinity") def sdrs_intra_vm_affinity(self) -> Optional[pulumi.Input[str]]: """ Overrides the intra-VM affinity setting for this virtual machine. When `true`, all disks for this virtual machine will be kept on the same datastore. When `false`, Storage DRS may locate individual disks on different datastores if it helps satisfy cluster requirements. When not specified, the datastore cluster's settings are used. """ return pulumi.get(self, "sdrs_intra_vm_affinity") @sdrs_intra_vm_affinity.setter def sdrs_intra_vm_affinity(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "sdrs_intra_vm_affinity", value) @pulumi.input_type class _StorageDrsVmOverrideState: def __init__(__self__, *, datastore_cluster_id: Optional[pulumi.Input[str]] = None, sdrs_automation_level: Optional[pulumi.Input[str]] = None, sdrs_enabled: Optional[pulumi.Input[str]] = None, sdrs_intra_vm_affinity: Optional[pulumi.Input[str]] = None, virtual_machine_id: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering StorageDrsVmOverride resources. :param pulumi.Input[str] datastore_cluster_id: The managed object reference ID of the datastore cluster to put the override in. Forces a new resource if changed. :param pulumi.Input[str] sdrs_automation_level: Overrides any Storage DRS automation levels for this virtual machine. Can be one of `automated` or `manual`. When not specified, the datastore cluster's settings are used according to the specific SDRS subsystem. :param pulumi.Input[str] sdrs_enabled: Overrides the default Storage DRS setting for this virtual machine. When not specified, the datastore cluster setting is used. :param pulumi.Input[str] sdrs_intra_vm_affinity: Overrides the intra-VM affinity setting for this virtual machine. When `true`, all disks for this virtual machine will be kept on the same datastore. When `false`, Storage DRS may locate individual disks on different datastores if it helps satisfy cluster requirements. When not specified, the datastore cluster's settings are used. :param pulumi.Input[str] virtual_machine_id: The UUID of the virtual machine to create the override for. Forces a new resource if changed. """ if datastore_cluster_id is not None: pulumi.set(__self__, "datastore_cluster_id", datastore_cluster_id) if sdrs_automation_level is not None: pulumi.set(__self__, "sdrs_automation_level", sdrs_automation_level) if sdrs_enabled is not None: pulumi.set(__self__, "sdrs_enabled", sdrs_enabled) if sdrs_intra_vm_affinity is not None: pulumi.set(__self__, "sdrs_intra_vm_affinity", sdrs_intra_vm_affinity) if virtual_machine_id is not None: pulumi.set(__self__, "virtual_machine_id", virtual_machine_id) @property @pulumi.getter(name="datastoreClusterId") def datastore_cluster_id(self) -> Optional[pulumi.Input[str]]: """ The managed object reference ID of the datastore cluster to put the override in. Forces a new resource if changed. 
""" return pulumi.get(self, "datastore_cluster_id") @datastore_cluster_id.setter def datastore_cluster_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "datastore_cluster_id", value) @property @pulumi.getter(name="sdrsAutomationLevel") def sdrs_automation_level(self) -> Optional[pulumi.Input[str]]: """ Overrides any Storage DRS automation levels for this virtual machine. Can be one of `automated` or `manual`. When not specified, the datastore cluster's settings are used according to the specific SDRS subsystem. """ return pulumi.get(self, "sdrs_automation_level") @sdrs_automation_level.setter def sdrs_automation_level(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "sdrs_automation_level", value) @property @pulumi.getter(name="sdrsEnabled") def sdrs_enabled(self) -> Optional[pulumi.Input[str]]: """ Overrides the default Storage DRS setting for this virtual machine. When not specified, the datastore cluster setting is used. """ return pulumi.get(self, "sdrs_enabled") @sdrs_enabled.setter def sdrs_enabled(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "sdrs_enabled", value) @property @pulumi.getter(name="sdrsIntraVmAffinity") def sdrs_intra_vm_affinity(self) -> Optional[pulumi.Input[str]]: """ Overrides the intra-VM affinity setting for this virtual machine. When `true`, all disks for this virtual machine will be kept on the same datastore. When `false`, Storage DRS may locate individual disks on different datastores if it helps satisfy cluster requirements. When not specified, the datastore cluster's settings are used. """ return pulumi.get(self, "sdrs_intra_vm_affinity") @sdrs_intra_vm_affinity.setter def sdrs_intra_vm_affinity(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "sdrs_intra_vm_affinity", value) @property @pulumi.getter(name="virtualMachineId") def virtual_machine_id(self) -> Optional[pulumi.Input[str]]: """ The UUID of the virtual machine to create the override for. Forces a new resource if changed. """ return pulumi.get(self, "virtual_machine_id") @virtual_machine_id.setter def virtual_machine_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "virtual_machine_id", value) class StorageDrsVmOverride(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, datastore_cluster_id: Optional[pulumi.Input[str]] = None, sdrs_automation_level: Optional[pulumi.Input[str]] = None, sdrs_enabled: Optional[pulumi.Input[str]] = None, sdrs_intra_vm_affinity: Optional[pulumi.Input[str]] = None, virtual_machine_id: Optional[pulumi.Input[str]] = None, __props__=None): """ Create a StorageDrsVmOverride resource with the given unique name, props, and options. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] datastore_cluster_id: The managed object reference ID of the datastore cluster to put the override in. Forces a new resource if changed. :param pulumi.Input[str] sdrs_automation_level: Overrides any Storage DRS automation levels for this virtual machine. Can be one of `automated` or `manual`. When not specified, the datastore cluster's settings are used according to the specific SDRS subsystem. :param pulumi.Input[str] sdrs_enabled: Overrides the default Storage DRS setting for this virtual machine. When not specified, the datastore cluster setting is used. :param pulumi.Input[str] sdrs_intra_vm_affinity: Overrides the intra-VM affinity setting for this virtual machine. 
When `true`, all disks for this virtual machine will be kept on the same datastore. When `false`, Storage DRS may locate individual disks on different datastores if it helps satisfy cluster requirements. When not specified, the datastore cluster's settings are used. :param pulumi.Input[str] virtual_machine_id: The UUID of the virtual machine to create the override for. Forces a new resource if changed. """ ... @overload def __init__(__self__, resource_name: str, args: StorageDrsVmOverrideArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Create a StorageDrsVmOverride resource with the given unique name, props, and options. :param str resource_name: The name of the resource. :param StorageDrsVmOverrideArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(StorageDrsVmOverrideArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, datastore_cluster_id: Optional[pulumi.Input[str]] = None, sdrs_automation_level: Optional[pulumi.Input[str]] = None, sdrs_enabled: Optional[pulumi.Input[str]] = None, sdrs_intra_vm_affinity: Optional[pulumi.Input[str]] = None, virtual_machine_id: Optional[pulumi.Input[str]] = None, __props__=None): if
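# --- Illustrative sketch: declaring the resource defined above from a Pulumi
# program, assuming this module ships as part of the pulumi_vsphere provider
# package.  The resource name, datastore-cluster ID and VM UUID below are
# placeholders, not values from any real environment.
import pulumi
import pulumi_vsphere as vsphere

override = vsphere.StorageDrsVmOverride(
    "drs-vm-override",
    datastore_cluster_id="group-p123",      # placeholder datastore cluster MoRef ID
    virtual_machine_id="00000000-0000-0000-0000-000000000000",  # placeholder VM UUID
    sdrs_enabled="false",                   # note: string-typed override, not a bool
    sdrs_automation_level="manual",
)

pulumi.export("override_id", override.id)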
[1]*(num_blocks-1) layers = nn.ModuleList() shortcuts = nn.ModuleList() cur_fig_size = int(fm * fm / self.down_rate) rnn_input_size = block.expansion * planes * cur_fig_size rnn_memory_size = int(self.args.rnn_ratio * block.expansion * planes * cur_fig_size) if self.memory_type == 'rnn': rnn = torch.nn.RNNCell(rnn_input_size, rnn_memory_size, bias=True, nonlinearity='tanh') elif self.memory_type == 'lstm': rnn = torch.nn.LSTMCell(rnn_input_size, rnn_memory_size, bias=True) elif self.memory_type == 'gru': rnn = torch.nn.GRUCell(rnn_input_size, rnn_memory_size, bias=True) else: rnn = None if self.rnn_ratio != 1: m_out_linear = nn.Linear(rnn_memory_size, rnn_input_size) else: m_out_linear = None if self.num_downs > 0: convs = nn.ModuleList() deconvs = nn.ModuleList() for j in range(self.num_downs): convs.append(nn.Conv2d(in_channels=block.expansion*planes, out_channels=block.expansion*planes, kernel_size=3, stride=2, padding=1)) deconvs.append(nn.ConvTranspose2d(block.expansion*planes, block.expansion*planes, kernel_size=3, stride=2, padding=1)) else: convs=None deconvs=None for i in range(num_blocks): # 对rnn来说,第一个残差连接虽然等维度,考虑到其他都是传h0,我就把当做和其他大块间残差的一样的 stride = strides[i] # 16*16, 16*16 .. # 16*32(stride=2) 32*32 .. # 32*64(stride=2),64*64 .. layers.append(block(self.in_planes, planes, stride)) if i == 0: if not self.pass_hidden: shortcut = nn.Sequential() if stride != 1 or self.in_planes != block.expansion * planes: shortcut = nn.Sequential( nn.Conv2d(self.in_planes, block.expansion * planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(block.expansion * planes) ) shortcuts.append(shortcut) else: # if self.keep_block_residual: # shortcut = nn.Sequential() # if stride != 1 or self.in_planes != block.expansion * planes: # shortcut = nn.Sequential( # nn.Conv2d(self.in_planes, block.expansion * planes, kernel_size=1, stride=stride, # bias=False), # nn.BatchNorm2d(block.expansion * planes) # ) # shortcuts.append(shortcut) memory_shortcut = nn.Sequential() if stride != 1 or self.in_planes != block.expansion * planes: memory_shortcut = nn.Sequential(nn.Linear(rnn_memory_size*2, rnn_memory_size), nn.BatchNorm2d(rnn_memory_size)) shortcuts.append(memory_shortcut) self.in_planes = planes * block.expansion return layers, shortcuts, rnn, m_out_linear, rnn_memory_size, convs, deconvs def set_m_rnn(self, x, rnn_memory_size): # origin_bsz, channel, height, width, = x.size() # bsz = height * width * origin_bsz bsz = x.size()[0] if self.memory_type in ['rnn', 'gru']: hx = torch.zeros(bsz, rnn_memory_size).cuda().type_as(x) return hx if self.memory_type == 'lstm': hx = torch.zeros(bsz, rnn_memory_size).cuda().type_as(x) cx = torch.zeros(bsz, rnn_memory_size).cuda().type_as(x) return (hx, cx) def m_rnn(self, x, cur_i, rnn_hidden): input_size_list = [] rnn = self.rnn_list[cur_i] if self.convs_list: # 可能四层dim变长的deconv更合理 convs = self.convs_list[cur_i] for j in range(self.num_downs): input_size_list.append(x.size()) x = convs[j](x) bsz, channel, new_height, new_width = x.size() x = x.permute([0, 2, 3, 1]).reshape(bsz, int(self.rnn_memory_size_list[cur_i]/ self.args.rnn_ratio)) # bsz, new_height * new_width * channel if self.memory_type in ['rnn', 'gru']: hx = rnn(x, rnn_hidden) m_output = hx # bsz, self.rnn_memory_size rnn_hidden = hx elif self.memory_type == 'lstm': hx, cx = rnn(x, rnn_hidden) m_output = hx # bsz, self.rnn_memory_size rnn_hidden = (hx, cx) if self.m_out_list[cur_i] is not None: m_output = self.m_out_list[cur_i](m_output) m_output = torch.reshape(m_output, (bsz, new_height, 
new_height, channel,)).permute((0, 3, 1, 2)) if self.deconvs_list: deconvs = self.deconvs_list[cur_i] for j in range(self.num_downs): m_output = deconvs[j](m_output, output_size=input_size_list[-j-1]) return m_output, rnn_hidden def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) # out size torch.Size([128, 16, 32, 32]) rnn_hidden = 0 # 0 to error for i in range(self.num_big_block): for j in range(self.num_blocks[i]): layer_i = self.layer_list[i] shortcut_i = self.shortcut_list[i] print('layer i=0,j=0', layer_i[j]) print('big_block%d| layer%d| rnn%d: %s|' % (i, j, i, str(self.rnn_list[i]))) print('out size', out.size()) if not self.pass_hidden or i == 0: if j == 0: res = shortcut_i[j](out) else: if j == 1: rnn_hidden = self.set_m_rnn(out, self.rnn_memory_size_list[i]) m_out, rnn_hidden = self.m_rnn(out, i, rnn_hidden) res = m_out if self.pass_hidden and i > 0: if j == 0: print('shortcut_i[j]', shortcut_i[j]) rnn_hidden = shortcut_i[j](rnn_hidden) m_out, rnn_hidden = self.m_rnn(out, i, rnn_hidden) res = m_out out = layer_i[j](out) # [bsz,dim,h,w] out += res out = F.relu(out) out = F.avg_pool2d(out, 8) out = out.view(out.size(0), -1) out = self.linear(out) return out def LmRnnKbSmall56CIFAR10(args): return LmRnnKbSmallCIFAR10(block=BaseBlock, num_blocks=[9, 9, 9], args=args) def LmRnnKbSmall110CIFAR10(args): return LmRnnKbSmallCIFAR10(block=BaseBlock, num_blocks=[18, 18, 18], args=args) class DepthTransposeCNN(nn.Module): def __init__(self,in_dim, out_dim, kernel_size=4, is_out=False): super(DepthTransposeCNN, self).__init__() self.nets = nn.ModuleList() self.is_out = is_out self.nets.extend([nn.ConvTranspose2d(in_channels=in_dim, out_channels=in_dim, kernel_size=kernel_size, stride=2, padding=1, groups=in_dim), nn.ConvTranspose2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, stride=1, padding=0, bias=False)]) if not is_out: self.nets.extend([nn.BatchNorm2d(in_dim), nn.ReLU(True), nn.BatchNorm2d(out_dim), nn.ReLU(True)]) def forward(self, x, output_size): bsz, dim, h, w = output_size if self.is_out: x = self.nets[0](x, output_size=[bsz, dim * 2, h, w]) x = self.nets[1](x, output_size=output_size) else: x = self.nets[0](x, output_size=[bsz, dim * 2, h, w]) x = self.nets[2](x) x = self.nets[3](x) x = self.nets[1](x, output_size=output_size) x = self.nets[4](x) x = self.nets[5](x) return x class TransposeCNN(nn.Module): def __init__(self, in_dim, out_dim, kernel_size=4, is_out=False): super(TransposeCNN, self).__init__() self.nets = nn.ModuleList() self.is_out = is_out self.nets.extend([nn.ConvTranspose2d(in_channels=in_dim, out_channels=out_dim, kernel_size=kernel_size, stride=2, padding=1), ]) if not is_out: self.nets.extend([nn.BatchNorm2d(out_dim), nn.ReLU(True)]) def forward(self, x, output_size): if self.is_out: x = self.nets[0](x, output_size=output_size) else: x = self.nets[0](x, output_size=output_size) x = self.nets[1](x) x = self.nets[2](x) return x class LmRnnConsistentSmallCIFAR10(nn.Module): # keep batch size same as origin, 32*32*16 ,16*16*32 8*8*64 as the input_size can pass hidden or not pass hidden def __init__(self, block, num_blocks, args, num_classes=10): super(LmRnnConsistentSmallCIFAR10, self).__init__() self.in_planes = 16 self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(16) self.num_blocks = num_blocks self.num_big_block = len(num_blocks) self.layer_list = nn.ModuleList() self.shortcut_list = nn.ModuleList() self.rnn_list = nn.ModuleList() self.m_out_list = nn.ModuleList() 
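        # Per-stage containers populated below: layer_list holds the residual
        # blocks of each big block, shortcut_list the projection shortcuts,
        # rnn_list / m_out_list the recurrent memory cells and their output
        # projections, and rnn_memory_size_list / convs_list / deconvs_list
        # the hidden sizes and the down-/up-sampling paths that carry feature
        # maps to and from the RNN input.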
self.rnn_memory_size_list = [] self.convs_list = nn.ModuleList() self.deconvs_list = nn.ModuleList() self.args = args self.memory_type = args.memory_type self.rnn_ratio = args.rnn_ratio self.conv_activate = args.conv_activate self.memory_before = args.memory_before self.depth_separate = args.depth_separate self.consistent_separate_rnn = args.consistent_separate_rnn self.dcgan_init = args.dcgan_init self.dcgan_kernel= args.dcgan_kernel self.dcgan_share_conv = args.dcgan_share_conv layer1, shortcut1, rnn1, m_out_linear1, rnn_memory_size1, convs1, deconvs1 = self._make_layer(block, 16, num_blocks[0], stride=1, fm=32,) layer2, shortcut2, rnn2, m_out_linear2, rnn_memory_size2, convs2, deconvs2 = self._make_layer(block, 32, num_blocks[1], stride=2, fm=16,) layer3, shortcut3, rnn3, m_out_linear3, rnn_memory_size3, convs3, deconvs3 = self._make_layer(block, 64, num_blocks[2], stride=2, fm=8,) self.layer_list.extend([layer1, layer2, layer3]) self.shortcut_list.extend([shortcut1, shortcut2, shortcut3]) if not self.consistent_separate_rnn: rnn2 = rnn1 rnn3 = rnn1 self.rnn_list.extend([rnn1, rnn2, rnn3]) if not self.consistent_separate_rnn: m_out_linear2 = m_out_linear1 m_out_linear3 = m_out_linear1 self.m_out_list.extend([m_out_linear1, m_out_linear2, m_out_linear3]) self.rnn_memory_size_list.extend([rnn_memory_size1, rnn_memory_size2, rnn_memory_size3]) if self.dcgan_share_conv: # 32*32*16, 16*16*32, 8*8*64, 4*4*128,2*2*256,1*1*512 # 1*1*512, 2*2*256, 4*4*128, 8*8*64, 16*16*32, 32*32*16, dim_list = [512, 256, 128, 64, 32, 16] convs2 = convs1[1:] convs3 = convs1[2:] deconvs2 = deconvs1[:-2].append(DepthTransposeCNN(in_dim=dim_list[-3], out_dim=dim_list[-2], kernel_size=self.dcgan_kernel, is_out=True) if self.depth_separate else TransposeCNN(in_dim=dim_list[-3], out_dim=dim_list[-2], kernel_size=self.dcgan_kernel, is_out=True)) deconvs3 = deconvs2[:-3].append(DepthTransposeCNN(in_dim=dim_list[-4], out_dim=dim_list[-3], kernel_size=self.dcgan_kernel, is_out=True) if self.depth_separate else TransposeCNN(in_dim=dim_list[-4], out_dim=dim_list[-3], kernel_size=self.dcgan_kernel, is_out=True)) self.convs_list.extend([convs1, convs2, convs3]) self.deconvs_list.extend([deconvs1, deconvs2, deconvs3]) if self.dcgan_init: self.deconvs_list.apply(self.weight_init) self.convs_list.apply(self.weight_init) self.linear = nn.Linear(64*block.expansion, num_classes) def _make_layer(self, block, planes, num_blocks, stride, fm, ): strides = [stride] + [1]*(num_blocks-1) layers = nn.ModuleList() shortcuts = nn.ModuleList() down_rate = fm num_downs = int(np.log(fm)/np.log(2)) cur_fig_size = int(fm * fm / down_rate) # build rnn rnn_input_size = block.expansion * planes * cur_fig_size rnn_memory_size = int(self.args.rnn_ratio * block.expansion * planes * cur_fig_size) assert rnn_memory_size == 512 * self.rnn_ratio if self.consistent_separate_rnn or fm ==32: if self.memory_type == 'rnn': rnn = torch.nn.RNNCell(rnn_input_size, rnn_memory_size, bias=True, nonlinearity='tanh') elif self.memory_type == 'lstm': rnn = torch.nn.LSTMCell(rnn_input_size, rnn_memory_size, bias=True) elif self.memory_type == 'gru': rnn = torch.nn.GRUCell(rnn_input_size, rnn_memory_size, bias=True) else: rnn = None # rnn out linear if self.rnn_ratio != 1: m_out_linear = nn.Linear(rnn_memory_size, rnn_input_size) else: m_out_linear = None else: rnn = None m_out_linear = None if self.conv_activate == 'lrelu': conv_activation = nn.LeakyReLU(True) elif self.conv_activate == 'relu': conv_activation = nn.ReLU(True) if num_downs > 0 or (self.dcgan_share_conv 
and fm != 32): dcgan_kernel=self.dcgan_kernel convs = nn.ModuleList() deconvs = nn.ModuleList() output_dim = block.expansion*planes for j in range(num_downs): output_dim = output_dim * 2 # print('output_dim:', output_dim) if j == num_downs-1: if self.depth_separate: cur_conv = nn.Sequential(nn.Conv2d(in_channels=int(output_dim / 2), out_channels=int(output_dim / 2), kernel_size=dcgan_kernel, stride=2, padding=1, groups=int(output_dim / 2)), nn.Conv2d(in_channels=int(output_dim / 2), out_channels=output_dim, kernel_size=1, stride=1, padding=0, bias=False)) else: cur_conv = nn.Sequential(nn.Conv2d(in_channels=int(output_dim/2), out_channels=output_dim, kernel_size=dcgan_kernel, stride=2, padding=1)) else: if self.depth_separate: cur_conv = nn.Sequential(nn.Conv2d(in_channels=int(output_dim / 2), out_channels=int(output_dim / 2), kernel_size=dcgan_kernel, stride=2, padding=1, groups=int(output_dim / 2)), nn.BatchNorm2d(int(output_dim / 2)), nn.ReLU(True), nn.Conv2d(in_channels=int(output_dim / 2), out_channels=output_dim, kernel_size=1, stride=1, padding=0, bias=False), nn.BatchNorm2d(output_dim), nn.ReLU(True)) else: cur_conv = nn.Sequential(nn.Conv2d(in_channels=int(output_dim/2), out_channels=output_dim, kernel_size=dcgan_kernel, stride=2, padding=1), nn.BatchNorm2d(output_dim), conv_activation) convs.append(cur_conv) for j in range(num_downs): output_dim = int(output_dim / 2) # print('output_dim:',output_dim) if j == num_downs-1: is_out = True else: is_out = False if self.depth_separate: cur_deconv = DepthTransposeCNN(in_dim=output_dim * 2, out_dim=output_dim, kernel_size=self.dcgan_kernel, is_out=is_out) else: cur_deconv = TransposeCNN(in_dim=output_dim * 2, out_dim=output_dim, kernel_size=self.dcgan_kernel, is_out=is_out) deconvs.append(cur_deconv) else: convs=None deconvs=None for i in range(num_blocks): stride = strides[i] # 16*16, 16*16 .. 16*32(stride=2) 32*32 .. 32*64(stride=2),64*64 .. layers.append(block(self.in_planes, planes, stride)) if i == 0: shortcut = nn.Sequential() if stride != 1 or self.in_planes != block.expansion * planes: shortcut = nn.Sequential( nn.Conv2d(self.in_planes, block.expansion * planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(block.expansion * planes) ) shortcuts.append(shortcut) self.in_planes = planes * block.expansion return layers, shortcuts, rnn, m_out_linear, rnn_memory_size, convs, deconvs def weight_init(self, m): classname = m.__class__.__name__ if classname.find('Conv') != -1: nn.init.normal_(m.weight.data, 0.0, 0.02) elif classname.find('BatchNorm') !=
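# --- Illustrative sketch of the memory path implemented by m_rnn above: a
# feature map is down-sampled, flattened into an RNNCell, and the hidden state
# is reshaped and up-sampled back to the feature-map shape so it can serve as
# the residual branch.  Layer sizes below are small toy values, not the ones
# used by the models above; note that the reshape in m_rnn uses new_height for
# both spatial dimensions, which appears to assume square feature maps.
import torch
import torch.nn as nn

bsz, channels, height, width = 4, 16, 8, 8
x = torch.randn(bsz, channels, height, width)

down = nn.Conv2d(channels, channels, kernel_size=3, stride=2, padding=1)
up = nn.ConvTranspose2d(channels, channels, kernel_size=3, stride=2, padding=1)

feat = down(x)                                   # (4, 16, 4, 4)
b, c, h, w = feat.shape
rnn_input = feat.permute(0, 2, 3, 1).reshape(b, h * w * c)

cell = nn.RNNCell(h * w * c, h * w * c, nonlinearity='tanh')
hidden = torch.zeros(b, h * w * c)               # set_m_rnn-style zero init
hidden = cell(rnn_input, hidden)                 # recurrent memory update

m_out = hidden.reshape(b, h, w, c).permute(0, 3, 1, 2)
m_out = up(m_out, output_size=x.shape)           # back to (4, 16, 8, 8)
print(m_out.shape)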
            self.read_row_group_file(rg, columns, categories, index,
                                     assign=views)
            yield df

    def _get_index(self, index=None):
        if index is None:
            index = [i for i in self.pandas_metadata.get('index_columns', [])
                     if isinstance(i, str)]
        if isinstance(index, str):
            index = [index]
        return index

    def to_pandas(self, columns=None, categories=None, filters=[],
                  index=None):
        """
        Read data from parquet into a Pandas dataframe.

        Parameters
        ----------
        columns: list of names or `None`
            Column to load (see `ParquetFile.columns`). Any columns in the
            data not in this list will be ignored. If `None`, read all
            columns.
        categories: list, dict or `None`
            If a column is encoded using dictionary encoding in every
            row-group and its name is also in this list, it will generate a
            Pandas Category-type column, potentially saving memory and time.
            If a dict {col: int}, the value indicates the number of
            categories, so that the optimal data-dtype can be allocated.
            If ``None``, will automatically set *if* the data was written
            from pandas.
        filters: list of tuples
            To filter out (i.e., not read) some of the row-groups.
            (This is not row-level filtering.)
            Filter syntax: [(column, op, val), ...],
            where op is [==, >, >=, <, <=, !=, in, not in]
        index: string or list of strings or False or None
            Column(s) to assign to the (multi-)index. If None, index is
            inferred from the metadata (if this was originally pandas data);
            if the metadata does not exist or index is False, index is
            simple sequential integers.

        Returns
        -------
        Pandas data-frame
        """
        rgs = self.filter_row_groups(filters)
        size = sum(rg.num_rows for rg in rgs)
        index = self._get_index(index)
        if columns is not None:
            columns = columns[:]
        else:
            columns = self.columns
        if index:
            columns += [i for i in index if i not in columns]
        check_column_names(self.columns + list(self.cats), columns,
                           categories)
        df, views = self.pre_allocate(size, columns, categories, index)
        start = 0
        if self.file_scheme == 'simple':
            with self.open(self.fn, 'rb') as f:
                for rg in rgs:
                    parts = {name: (v if name.endswith('-catdef')
                                    else v[start:start + rg.num_rows])
                             for (name, v) in views.items()}
                    self.read_row_group(rg, columns, categories, infile=f,
                                        index=index, assign=parts)
                    start += rg.num_rows
        else:
            for rg in rgs:
                parts = {name: (v if name.endswith('-catdef')
                                else v[start:start + rg.num_rows])
                         for (name, v) in views.items()}
                self.read_row_group_file(rg, columns, categories, index,
                                         assign=parts,
                                         partition_meta=self.partition_meta)
                start += rg.num_rows
        return df

    def pre_allocate(self, size, columns, categories, index):
        categories = self.check_categories(categories)
        df, arrs = _pre_allocate(size, columns, categories, index, self.cats,
                                 self._dtypes(categories), self.tz)
        i_no_name = re.compile(r"__index_level_\d+__")
        if self.has_pandas_metadata:
            md = self.pandas_metadata
            if md.get('column_indexes', False):
                names = [(c['name'] if isinstance(c, dict) else c)
                         for c in md['column_indexes']]
                names = [None if n is None or i_no_name.match(n) else n
                         for n in names]
                df.columns.names = names
            if md.get('index_columns', False) and not (index or
                                                       index is False):
                if len(md['index_columns']) == 1:
                    ic = md['index_columns'][0]
                    if isinstance(ic, dict) and ic['kind'] == 'range':
                        from pandas import RangeIndex
                        df.index = RangeIndex(
                            start=ic['start'],
                            stop=ic['start'] + size * ic['step'] + 1,
                            step=ic['step']
                        )[:size]
                names = [(c['name'] if isinstance(c, dict) else c)
                         for c in md['index_columns']]
                names = [None if n is None or i_no_name.match(n) else n
                         for n in names]
                df.index.names = names
        return df, arrs
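
    # Note on the assignment views used above: ``views`` maps each output
    # column name to a pre-allocated array backing the final dataframe.
    # Names ending in '-catdef' hold the category definitions for a
    # categorical column, so they are passed through whole rather than being
    # sliced to the current row-group's ``start:start + rg.num_rows`` window.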
""" return sum(rg.num_rows for rg in self.row_groups) @property def info(self): """ Some metadata details """ return {'name': self.fn, 'columns': self.columns, 'partitions': list(self.cats), 'rows': self.count} def check_categories(self, cats): categ = self.categories if not self.has_pandas_metadata: return cats or {} if cats is None: return categ or {} if any(c not in categ for c in cats): raise TypeError('Attempt to load column as categorical that was' ' not categorical in the original pandas data') return cats @property def has_pandas_metadata(self): if self.fmd.key_value_metadata is None: return False return bool(self.key_value_metadata.get('pandas', False)) @property def pandas_metadata(self): if self.has_pandas_metadata: return json.loads(self.key_value_metadata['pandas']) else: return {} @property def categories(self): if self.has_pandas_metadata: metadata = self.pandas_metadata cats = {m['name']: m['metadata']['num_categories'] for m in metadata['columns'] if m['pandas_type'] == 'categorical'} return cats # old track vals = self.key_value_metadata.get("fastparquet.cats", None) if vals: return json.loads(vals) else: return {} def _dtypes(self, categories=None): """ Implied types of the columns in the schema """ import pandas as pd if self.has_pandas_metadata: md = self.pandas_metadata['columns'] tz = {c['name']: c['metadata']['timezone'] for c in md if (c.get('metadata', {}) or {}).get('timezone', None)} else: tz = None self.tz = tz categories = self.check_categories(categories) dtype = OrderedDict((name, (converted_types.typemap(f) if f.num_children in [None, 0] else np.dtype("O"))) for name, f in self.schema.root.children.items() if getattr(f, 'isflat', False) is False) for i, (col, dt) in enumerate(dtype.copy().items()): if dt.kind in ['i', 'b', 'u']: # uint/int/bool columns that may have nulls become float columns num_nulls = 0 for rg in self.row_groups: chunk = rg.columns[i] if chunk.meta_data.statistics is None: num_nulls = True break if chunk.meta_data.statistics.null_count is None: num_nulls = True break if chunk.meta_data.statistics.null_count: num_nulls = True break if num_nulls: if dtype[col].itemsize == 1: dtype[col] = np.dtype('f2') elif dtype[col].itemsize == 2: dtype[col] = np.dtype('f4') else: dtype[col] = np.dtype('f8') elif dt.kind == "M": if tz is not None and tz.get(col, False): dtype[col] = pd.Series([], dtype='M8[ns]' ).dt.tz_localize(tz[col]).dtype elif dt == 'S12': dtype[col] = 'M8[ns]' for field in categories: dtype[field] = 'category' for cat in self.cats: dtype[cat] = "category" self.dtypes = dtype return dtype def __getstate__(self): return {"fn": self.fn, "open": self.open, "sep": self.sep, "fmd": self.fmd} def __setstate__(self, state): self.__dict__.update(state) self._set_attrs() def __str__(self): return "<Parquet File: %s>" % self.info __repr__ = __str__ def _pre_allocate(size, columns, categories, index, cs, dt, tz=None): index = [index] if isinstance(index, str) else (index or []) cols = [c for c in columns if c not in index] categories = categories or {} cats = cs.copy() if isinstance(categories, dict): cats.update(categories) def get_type(name): if name in categories: return 'category' return dt.get(name, None) dtypes = [get_type(c) for c in cols] index_types = [get_type(i) for i in index] cols.extend(cs) dtypes.extend(['category'] * len(cs)) df, views = dataframe.empty(dtypes, size, cols=cols, index_names=index, index_types=index_types, cats=cats, timezones=tz) return df, views def paths_to_cats(paths, file_scheme, partition_meta=None): """ 
def paths_to_cats(paths, file_scheme, partition_meta=None):
    """
    Extract categorical fields and labels from hive- or drill-style paths.

    Parameters
    ----------
    paths (Iterable[str]): file paths relative to root
    file_scheme (str):
    partition_meta (Dict[str, dict]):

    Returns
    -------
    cats (OrderedDict[str, List[Any]]): a dict of field names and their values
    """
    partition_meta = partition_meta or {}
    if file_scheme in ['simple', 'flat', 'other']:
        cats = {}
        return cats

    cats = OrderedDict()
    paths = set(path.rsplit("/", 1)[0] for path in paths)
    s = ex_from_sep('/')
    seen = set()
    if file_scheme == 'hive':
        for key, val in (
            (k, v) for path in paths for k, v in s.findall(path)
        ):
            if (key, val) in seen:
                continue
            seen.add((key, val))
            cats.setdefault(key, set()).add(
                val_to_num(val, partition_meta.get(key)))
    else:
        for i, val in (
            (i, val) for path in paths for i, val in enumerate(path.split('/'))
        ):
            if (i, val) in seen:
                continue
            seen.add((i, val))
            key = 'dir%i' % i
            cats.setdefault(key, set()).add(
                val_to_num(val, partition_meta.get(key)))

    cats = OrderedDict([(key, list(v)) for key, v in cats.items()])
    return cats


def filter_out_stats(rg, filters, schema):
    """
    According to the filters, should this row-group be excluded?

    Considers the statistics included in the metadata of this row-group.

    Parameters
    ----------
    rg: thrift RowGroup structure
    filters: list of 3-tuples
        Structure of each tuple: (column, op, value) where op is one of
        ['==', '!=', '<', '<=', '>', '>=', 'in', 'not in'] and value is
        appropriate for the column in question

    Returns
    -------
    True or False
    """
    if rg.num_rows == 0:
        # always ignore empty row-groups, don't bother loading
        return True
    if len(filters) == 0:
        return False
    for column in rg.columns:
        vmax, vmin = None, None
        name = ".".join(column.meta_data.path_in_schema)
        app_filters = [f[1:] for f in filters if f[0] == name]
        for op, val in app_filters:
            se = schema.schema_element(name)
            if column.meta_data.statistics is not None:
                s = column.meta_data.statistics
                if s.max is not None:
                    b = ensure_bytes(s.max)
                    vmax = encoding.read_plain(b, column.meta_data.type, 1)
                    if se.converted_type is not None:
                        vmax = converted_types.convert(vmax, se)
                if s.min is not None:
                    b = ensure_bytes(s.min)
                    vmin = encoding.read_plain(b, column.meta_data.type, 1)
                    if se.converted_type is not None:
                        vmin = converted_types.convert(vmin, se)
                if filter_val(op, val, vmin, vmax):
                    return True
    return False


def statistics(obj):
    """
    Return per-column statistics for a ParquetFile

    Parameters
    ----------
    obj: ParquetFile

    Returns
    -------
    dictionary mapping stats (min, max, distinct_count, null_count) to column
    names