<filename>daskms/reads.py
# -*- coding: utf-8 -*-
import logging
from pathlib import Path
import dask
import dask.array as da
import numpy as np
import pyrap.tables as pt
from daskms.columns import (column_metadata, ColumnMetadataError,
dim_extents_array, infer_dtype)
from daskms.constants import DASKMS_PARTITION_KEY
from daskms.ordering import (ordering_taql, row_ordering,
group_ordering_taql, group_row_ordering)
from daskms.optimisation import inlined_array
from daskms.dataset import Dataset
from daskms.table_executor import executor_key
from daskms.table import table_exists
from daskms.table_proxy import TableProxy, READLOCK
from daskms.table_schemas import lookup_table_schema
from daskms.utils import table_path_split
_DEFAULT_ROW_CHUNKS = 10000
log = logging.getLogger(__name__)
def ndarray_getcol(row_runs, table_future, column, result, dtype):
""" Get numpy array data """
table = table_future.result()
getcolnp = table.getcolnp
rr = 0
table.lock(write=False)
try:
for rs, rl in row_runs:
getcolnp(column, result[rr:rr + rl], startrow=rs, nrow=rl)
rr += rl
finally:
table.unlock()
return result
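# NOTE: row_runs above is a sequence of (start_row, run_length) pairs,
# e.g. [(0, 100), (250, 50)] reads rows 0-99 and then rows 250-299 into
# consecutive slices of the preallocated result array.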
def ndarray_getcolslice(row_runs, table_future, column, result,
blc, trc, dtype):
""" Get numpy array data """
table = table_future.result()
getcolslicenp = table.getcolslicenp
rr = 0
table.lock(write=False)
try:
for rs, rl in row_runs:
getcolslicenp(column, result[rr:rr + rl],
blc=blc, trc=trc,
startrow=rs, nrow=rl)
rr += rl
finally:
table.unlock()
return result
def object_getcol(row_runs, table_future, column, result, dtype):
""" Get object list data """
table = table_future.result()
getcol = table.getcol
rr = 0
table.lock(write=False)
try:
for rs, rl in row_runs:
data = getcol(column, rs, rl)
# Multi-dimensional string arrays are returned as a
# dict with 'array' and 'shape' keys. Massage the data.
if isinstance(data, dict):
data = (np.asarray(data['array'], dtype=dtype)
.reshape(data['shape']))
# NOTE(sjperkins)
# Dask wants ndarrays internally, so we convert the
# returned list of objects with np.asarray.
# See https://github.com/ska-sa/dask-ms/issues/42
result[rr:rr + rl] = np.asarray(data, dtype=dtype)
rr += rl
finally:
table.unlock()
return result
def object_getcolslice(row_runs, table_future, column, result,
blc, trc, dtype):
""" Get object list data """
table = table_future.result()
getcolslice = table.getcolslice
rr = 0
table.lock(write=False)
try:
for rs, rl in row_runs:
data = getcolslice(column, blc, trc, startrow=rs, nrow=rl)
# Multi-dimensional string arrays are returned as a
# dict with 'array' and 'shape' keys. Massage the data.
if isinstance(data, dict):
data = (np.asarray(data['array'], dtype=dtype)
.reshape(data['shape']))
# NOTE(sjperkins)
# Dask wants ndarrays internally, so we convert the
# returned list of objects with np.asarray.
# See https://github.com/ska-sa/dask-ms/issues/42
result[rr:rr + rl] = np.asarray(data, dtype=dtype)
rr += rl
finally:
table.unlock()
return result
def getter_wrapper(row_orders, *args):
"""
Wrapper running I/O operations
within the table_proxy's associated executor
"""
# Infer number of shape arguments
nextent_args = len(args) - 4
# Extract other arguments
table_proxy, column, col_shape, dtype = args[nextent_args:]
# Handle dask compute_meta gracefully
if len(row_orders) == 0:
return np.empty((0,)*(nextent_args+1), dtype=dtype)
row_runs, resort = row_orders
# In this case, we've been passed dimension extent arrays
# that define a slice of the column and we defer to getcolslice.
if nextent_args > 0:
blc, trc = zip(*args[:nextent_args])
shape = tuple(t - b + 1 for b, t in zip(blc, trc))
result = np.empty((np.sum(row_runs[:, 1]),) + shape, dtype=dtype)
io_fn = (object_getcolslice if dtype == object
else ndarray_getcolslice)
# Submit table I/O on executor
future = table_proxy._ex.submit(io_fn, row_runs,
table_proxy._table_future,
column, result,
blc, trc, dtype)
# In this case, the full resolution data
# for each row is requested, so we defer to getcol
else:
result = np.empty((np.sum(row_runs[:, 1]),) + col_shape, dtype=dtype)
io_fn = (object_getcol if dtype == object
else ndarray_getcol)
# Submit table I/O on executor
future = table_proxy._ex.submit(io_fn, row_runs,
table_proxy._table_future,
column, result, dtype)
# Resort result if necessary
if resort is not None:
return future.result()[resort]
return future.result()
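# Illustrative sketch of the expected call shapes (not part of the public
# API): without extent arrays getter_wrapper receives
#   (row_orders, table_proxy, column, col_shape, dtype)
# and with extent arrays it receives
#   (row_orders, (blc0, trc0), ..., table_proxy, column, col_shape, dtype),
# which is why nextent_args is inferred as len(args) - 4 above.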
def _dataset_variable_factory(table_proxy, table_schema, select_cols,
exemplar_row, orders, chunks, array_suffix):
"""
Returns a dictionary of dask arrays representing
a series of getcols on the appropriate table.
Produces variables for inclusion in a Dataset.
Parameters
----------
table_proxy : :class:`daskms.table_proxy.TableProxy`
Table proxy object
table_schema : dict
Table schema
select_cols : list of strings
List of columns to return
exemplar_row : int
row id used to possibly extract an exemplar array in
order to determine the column shape and dtype attributes
orders : tuple of :class:`dask.array.Array`
A (sorted_rows, row_runs) tuple, specifying the
appropriate rows to extract from the table.
chunks : dict
Chunking strategy for the dataset.
array_suffix : str
dask array name suffix
Returns
-------
dict
A dictionary looking like :code:`{column: (arrays, dims)}`.
"""
sorted_rows, row_runs = orders
dataset_vars = {"ROWID": (("row",), sorted_rows)}
for column in select_cols:
try:
meta = column_metadata(column, table_proxy, table_schema,
chunks, exemplar_row)
except ColumnMetadataError as e:
exc_info = logging.DEBUG >= log.getEffectiveLevel()
log.warning("Ignoring '%s': %s", column, e,
exc_info=exc_info)
continue
full_dims = ("row",) + meta.dims
args = [row_runs, ("row",)]
# We only need to pass in dimension extent arrays if
# there is more than one chunk in any of the non-row columns.
# In that case getcolslice is required, otherwise we can use getcol
if not all(len(c) == 1 for c in meta.chunks):
for d, c in zip(meta.dims, meta.chunks):
# Create an array describing the dimension chunk extents
args.append(dim_extents_array(d, c))
args.append((d,))
# Disable getcolslice caching
# https://github.com/ska-sa/dask-ms/issues/92
# https://github.com/casacore/casacore/issues/1018
table_proxy.setmaxcachesize(column, 1).result()
new_axes = {}
else:
# We need to inform blockwise about the size of our
# new dimensions as no arrays with them are supplied
new_axes = {d: s for d, s in zip(meta.dims, meta.shape)}
# Add other variables
args.extend([table_proxy, None,
column, None,
meta.shape, None,
meta.dtype, None])
# Name of the dask array representing this column
token = dask.base.tokenize(args)
name = "~".join(("read", column, array_suffix)) + "-" + token
# Construct the array
dask_array = da.blockwise(getter_wrapper, full_dims,
*args,
name=name,
new_axes=new_axes,
dtype=meta.dtype)
dask_array = inlined_array(dask_array)
# Assign into variable and dimension dataset
dataset_vars[column] = (full_dims, dask_array)
return dataset_vars
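# Minimal usage sketch (hypothetical column names, mirroring what
# _single_dataset() does further down): every value in the returned dict
# is a (dims, dask_array) pair that can be handed straight to Dataset.
#
#   variables = _dataset_variable_factory(proxy, schema, {"DATA", "FLAG"},
#                                         0, orders, {"row": 10000}, "MAIN")
#   rowid = variables.pop("ROWID")
#   ds = Dataset(variables, coords={"ROWID": rowid})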
def _col_keyword_getter(table):
""" Gets column keywords for all columns in table """
return {c: table.getcolkeywords(c) for c in table.colnames()}
class DatasetFactory(object):
def __init__(self, table, select_cols, group_cols, index_cols, **kwargs):
if not table_exists(table):
raise ValueError(f"'{table}' does not appear to be a CASA Table")
chunks = kwargs.pop('chunks', [{'row': _DEFAULT_ROW_CHUNKS}])
# Create or promote chunks to a list of dicts
if isinstance(chunks, dict):
chunks = [chunks]
elif not isinstance(chunks, (tuple, list)):
raise TypeError("'chunks' must be a dict or sequence of dicts")
self.canonical_name = table
self.table_path = str(Path(*table_path_split(table)))
self.select_cols = select_cols
self.group_cols = [] if group_cols is None else group_cols
self.index_cols = [] if index_cols is None else index_cols
self.chunks = chunks
self.table_schema = kwargs.pop('table_schema', None)
self.taql_where = kwargs.pop('taql_where', '')
self.table_keywords = kwargs.pop('table_keywords', False)
self.column_keywords = kwargs.pop('column_keywords', False)
self.table_proxy = kwargs.pop('table_proxy', False)
if len(kwargs) > 0:
raise ValueError(f"Unhandled kwargs: {kwargs}")
def _table_proxy_factory(self):
return TableProxy(pt.table, self.table_path, ack=False,
readonly=True, lockoptions='user',
__executor_key__=executor_key(self.canonical_name))
def _table_schema(self):
return lookup_table_schema(self.canonical_name, self.table_schema)
def _single_dataset(self, table_proxy, orders, exemplar_row=0):
_, t, s = table_path_split(self.canonical_name)
short_table_name = "/".join((t, s)) if s else t
table_schema = self._table_schema()
select_cols = set(self.select_cols or table_proxy.colnames().result())
variables = _dataset_variable_factory(table_proxy, table_schema,
select_cols, exemplar_row,
orders, self.chunks[0],
short_table_name)
try:
rowid = variables.pop("ROWID")
except KeyError:
coords = None
else:
coords = {"ROWID": rowid}
attrs = {DASKMS_PARTITION_KEY: ()}
return Dataset(variables, coords=coords, attrs=attrs)
def _group_datasets(self, table_proxy, groups, exemplar_rows, orders):
_, t, s = table_path_split(self.canonical_name)
short_table_name = '/'.join((t, s)) if s else t
table_schema = self._table_schema()
datasets = []
group_ids = list(zip(*groups))
assert len(group_ids) == len(orders)
# Select columns, excluding grouping columns
select_cols = set(self.select_cols or table_proxy.colnames().result())
select_cols -= set(self.group_cols)
# Create a dataset for each group
it = enumerate(zip(group_ids, exemplar_rows, orders))
for g, (group_id, exemplar_row, order) in it:
# Extract group chunks
try:
group_chunks = self.chunks[g] # Get group chunking strategy
except IndexError:
group_chunks = self.chunks[-1] # Re-use last group's chunks
# Prefix dataset
gid_str = ",".join(str(gid) for gid in group_id)
array_suffix = f"[{gid_str}]-{short_table_name}"
# Create dataset variables
group_var_dims = _dataset_variable_factory(table_proxy,
table_schema,
select_cols,
exemplar_row,
order, group_chunks,
array_suffix)
# Extract ROWID
try:
rowid = group_var_dims.pop("ROWID")
except KeyError:
coords = None
else:
coords = {"ROWID": rowid}
# Assign values for the dataset's grouping columns
# as attributes
partitions = tuple((c, g.dtype.name) for c, g
in zip(self.group_cols, group_id))
attrs = {DASKMS_PARTITION_KEY: partitions}
# Use python types which are json serializable
group_id = [gid.item() for gid in group_id]
attrs.update(zip(self.group_cols, group_id))
datasets.append(Dataset(group_var_dims, attrs=attrs,
coords=coords))
return datasets
def datasets(self):
table_proxy = self._table_proxy_factory()
# No grouping case
if len(self.group_cols) == 0:
order_taql = ordering_taql(table_proxy, self.index_cols,
self.taql_where)
orders = row_ordering(order_taql, self.index_cols, self.chunks[0])
datasets = [self._single_dataset(table_proxy, orders)]
# Group by row
elif len(self.group_cols) == 1 and self.group_cols[0] == "__row__":
order_taql = ordering_taql(table_proxy, self.index_cols,
self.taql_where)
sorted_rows,
<filename>USBIP.py
# Copyright (c) 2014 <NAME> <<EMAIL>>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# This software is provided ``as is'' and any express or implied
# warranties, including, but not limited to, the implied warranties of
# merchantability and fitness for a particular purpose are
# disclaimed. In no event shall author or contributors be liable for any
# direct, indirect, incidental, special, exemplary, or consequential
# damages (including, but not limited to, procurement of substitute
# goods or services; loss of use, data, or profits; or business
# interruption) however caused and on any theory of liability, whether
# in contract, strict liability, or tort (including negligence or
# otherwise) arising in any way out of the use of this software, even if
# advised of the possibility of such damage.
import SocketServer
import struct
import types
from bitstring import Bits
# Hey StackOverflow !
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class BaseStucture:
_fields_ = []
def __init__(self, **kwargs):
self.init_from_dict(**kwargs)
for field in self._fields_:
if len(field) > 2:
if not hasattr(self, field[0]):
setattr(self, field[0], field[2])
def init_from_dict(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def size(self):
return struct.calcsize(self.format())
def format(self):
pack_format = '>'
for field in self._fields_:
if type(field[1]) is types.InstanceType:
if BaseStucture in field[1].__class__.__bases__:
pack_format += str(field[1].size()) + 's'
elif 'si' == field[1]:
pack_format += 'c'
elif '<' in field[1]:
pack_format += field[1][1:]
else:
pack_format += field[1]
return pack_format
def formatDevicesList(self, devicesCount):
pack_format = '>'
i = 0
for field in self._fields_:
if (i == devicesCount + 2):
break
if type(field[1]) is types.InstanceType:
if BaseStucture in field[1].__class__.__bases__:
pack_format += str(field[1].size()) + 's'
elif 'si' == field[1]:
pack_format += 'c'
elif '<' in field[1]:
pack_format += field[1][1:]
else:
pack_format += field[1]
i += 1
return pack_format
def pack(self):
values = []
for field in self._fields_:
if type(field[1]) is types.InstanceType:
if BaseStucture in field[1].__class__.__bases__:
values.append(getattr(self, field[0], 0).pack())
else:
if 'si' == field[1]:
values.append(chr(getattr(self, field[0], 0)))
else:
values.append(getattr(self, field[0], 0))
return struct.pack(self.format(), *values)
def packDevicesList(self, devicesCount):
values = []
i = 0
for field in self._fields_:
if (i == devicesCount + 2):
break
if type(field[1]) is types.InstanceType:
if BaseStucture in field[1].__class__.__bases__:
values.append(getattr(self, field[0], 0).pack())
else:
if 'si' == field[1]:
values.append(chr(getattr(self, field[0], 0)))
else:
values.append(getattr(self, field[0], 0))
i += 1
return struct.pack(self.formatDevicesList(devicesCount), *values)
def unpack(self, buf):
values = struct.unpack(self.format(), buf)
i=0
keys_vals = {}
for val in values:
if '<' in self._fields_[i][1][0]:
val = struct.unpack('<' +self._fields_[i][1][1], struct.pack('>' + self._fields_[i][1][1], val))[0]
keys_vals[self._fields_[i][0]]=val
i+=1
self.init_from_dict(**keys_vals)
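# Illustrative example of how _fields_ drives both the struct format string
# and the pack order (Demo is a made-up subclass, not part of this module):
#
#   class Demo(BaseStucture):
#       _fields_ = [('a', 'H', 1), ('b', 'I')]
#
#   Demo(b=2).format()  # -> '>HI'
#   Demo(b=2).pack()    # -> struct.pack('>HI', 1, 2)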
def int_to_hex_string(val):
sval= format(val, 'x')
if len(sval) < 16:
for i in range(len(sval),16):
sval= '0'+sval
#sval= sval+'0'
return sval.decode('hex')
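# e.g. int_to_hex_string(0x8006000100004000) zero-pads the hex digits to 16
# characters and returns the corresponding 8 raw bytes; this is how the
# 64-bit 'setup' field of a submitted URB is turned back into a
# control-request buffer in handle_usb_control() below.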
class USBIPHeader(BaseStucture):
_fields_ = [
('version', 'H', 273),
('command', 'H'),
('status', 'I')
]
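# Note: the default 'version' of 273 above is 0x0111, the USB/IP protocol
# version expected by the Linux usbip client.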
class USBInterface(BaseStucture):
_fields_ = [
('bInterfaceClass', 'B'),
('bInterfaceSubClass', 'B'),
('bInterfaceProtocol', 'B'),
('align', 'B', 0)
]
class USBIPDevice(BaseStucture):
_fields_ = [
('usbPath', '256s'),
('busID', '32s'),
('busnum', 'I'),
('devnum', 'I'),
('speed', 'I'),
('idVendor', 'H'),
('idProduct', 'H'),
('bcdDevice', 'H'),
('bDeviceClass', 'B'),
('bDeviceSubClass', 'B'),
('bDeviceProtocol', 'B'),
('bConfigurationValue', 'B'),
('bNumConfigurations', 'B'),
('bNumInterfaces', 'B'),
('interfaces', USBInterface())
]
class OPREPDevList(BaseStucture):
def __init__(self, dictArg, count):
self._fields_ = [
('base', USBIPHeader(), USBIPHeader(command=5,status=0)), # Declare this here to make sure it's in the right order
('nExportedDevice', 'I', count) # Same for this guy
]
for key, value in dictArg.iteritems():
field = (str(key), value[0], value[1])
self._fields_.append(field)
for field in self._fields_:
if len(field) > 2:
if not hasattr(self, field[0]):
setattr(self, field[0], field[2])
class OPREPImport(BaseStucture):
_fields_ = [
('base', USBIPHeader()),
('usbPath', '256s'),
('busID', '32s'),
('busnum', 'I'),
('devnum', 'I'),
('speed', 'I'),
('idVendor', 'H'),
('idProduct', 'H'),
('bcdDevice', 'H'),
('bDeviceClass', 'B'),
('bDeviceSubClass', 'B'),
('bDeviceProtocol', 'B'),
('bConfigurationValue', 'B'),
('bNumConfigurations', 'B'),
('bNumInterfaces', 'B')
]
class USBIPRETSubmit(BaseStucture):
_fields_ = [
('command', 'I'),
('seqnum', 'I'),
('devid', 'I'),
('direction', 'I'),
('ep', 'I'),
('status', 'I'),
('actual_length', 'I'),
('start_frame', 'I'),
('number_of_packets', 'I'),
('error_count', 'I'),
('setup', 'Q')
]
def pack(self):
packed_data = BaseStucture.pack(self)
packed_data += self.data
return packed_data
class USBIPCMDUnlink(BaseStucture):
_fields_ = [
('seqnum', 'I'),
('devid', 'I'),
('direction', 'I'),
('ep', 'I'),
('seqnum2', 'I'),
]
class USBIPCMDSubmit(BaseStucture):
_fields_ = [
('seqnum', 'I'),
('devid', 'I'),
('direction', 'I'),
('ep', 'I'),
('transfer_flags', 'I'),
('transfer_buffer_length', 'I'),
('start_frame', 'I'),
('number_of_packets', 'I'),
('interval', 'I'),
('setup', 'Q')
]
class USBIPUnlinkReq(BaseStucture):
_fields_ = [
('command', 'I', 0x2),
('seqnum', 'I'),
('devid', 'I', 0x2),
('direction', 'I'),
('ep', 'I'),
('transfer_flags', 'I'),
('transfer_buffer_length', 'I'),
('start_frame', 'I'),
('number_of_packets', 'I'),
('interval', 'I'),
('setup', 'Q')
]
class StandardDeviceRequest(BaseStucture):
_fields_ = [
('bmRequestType', 'B'),
('bRequest', 'B'),
('wValue', 'H'),
('wIndex', 'H'),
('wLength', '<H')
]
class DeviceDescriptor(BaseStucture):
_fields_ = [
('bLength', 'B', 18),
('bDescriptorType', 'B', 1),
('bcdUSB', 'H', 0x1001),
('bDeviceClass', 'B'),
('bDeviceSubClass', 'B'),
('bDeviceProtocol', 'B'),
('bMaxPacketSize0', 'B'),
('idVendor', 'H'),
('idProduct', 'H'),
('bcdDevice', 'H'),
('iManufacturer', 'B'),
('iProduct', 'B'),
('iSerialNumber', 'B'),
('bNumConfigurations', 'B')
]
class DeviceConfigurations(BaseStucture):
_fields_ = [
('bLength', 'B', 9),
('bDescriptorType', 'B', 2),
('wTotalLength', 'H', 0x2200),
('bNumInterfaces', 'B', 1),
('bConfigurationValue', 'B', 1),
('iConfiguration', 'B', 0),
('bmAttributes', 'B', 0x80),
('bMaxPower', 'B', 0x32)
]
class InterfaceDescriptor(BaseStucture):
_fields_ = [
('bLength', 'B', 9),
('bDescriptorType', 'B', 4),
('bInterfaceNumber', 'B', 0),
('bAlternateSetting', 'B', 0),
('bNumEndpoints', 'B', 1),
('bInterfaceClass', 'B', 3),
('bInterfaceSubClass', 'B', 1),
('bInterfaceProtocol', 'B', 2),
('iInterface', 'B', 0)
]
class EndPoint(BaseStucture):
_fields_ = [
('bLength', 'B', 7),
('bDescriptorType', 'B', 0x5),
('bEndpointAddress', 'B', 0x81),
('bmAttributes', 'B', 0x3),
('wMaxPacketSize', 'H', 0x8000),
('bInterval', 'B', 0x0A)
]
class USBRequest():
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
class USBDevice():
'''interfaces = [USBInterface(bInterfaceClass=0x3, bInterfaceSubClass=0x0, bInterfaceProtocol=0x0)]
speed = 2
vendorID = 0xc410
productID = 0x0
bcdDevice = 0x0
bDeviceClass = 0x0
bDeviceSubClass = 0x0
bDeviceProtocol = 0x0
bNumConfigurations = 1
bConfigurationValue = 1
bNumInterfaces = 1'''
def __init__(self, container):
self.generate_raw_configuration()
self.usb_container = container
def generate_raw_configuration(self):
str = self.configurations[0].pack()
str += self.configurations[0].interfaces[0].pack()
str += self.configurations[0].interfaces[0].descriptions[0].pack()
str += self.configurations[0].interfaces[0].endpoints[0].pack()
self.all_configurations = str
def send_usb_req(self, usb_req, usb_res, usb_len, status=0):
self.connection.sendall(USBIPRETSubmit(command=0x3,
seqnum=usb_req.seqnum,
ep=0,
status=status,
actual_length=usb_len,
start_frame=0x0,
number_of_packets=0x0,
interval=0x0,
data=usb_res).pack())
def handle_get_descriptor(self, control_req, usb_req):
handled = False
#print "handle_get_descriptor {}".format(control_req.wValue,'n')
if control_req.wValue == 0x1: # Device
handled = True
ret=DeviceDescriptor(bDeviceClass=self.bDeviceClass,
bDeviceSubClass=self.bDeviceSubClass,
bDeviceProtocol=self.bDeviceProtocol,
bMaxPacketSize0=0x8,
idVendor=self.vendorID,
idProduct=self.productID,
bcdDevice=self.bcdDevice,
iManufacturer=0,
iProduct=0,
iSerialNumber=0,
bNumConfigurations=1).pack()
self.send_usb_req(usb_req, ret, len(ret))
elif control_req.wValue == 0x2: # configuration
handled = True
ret= self.all_configurations[:control_req.wLength]
self.send_usb_req(usb_req, ret, len(ret))
elif control_req.wValue == 0xA: # config status ???
handled = True
self.send_usb_req(usb_req,'',0,1)
return handled
def handle_set_configuration(self, control_req, usb_req):
handled = False
#print "handle_set_configuration {}".format(control_req.wValue,'n')
handled = True
self.send_usb_req(usb_req,'',0,0)
return handled
def handle_usb_control(self, usb_req):
control_req = StandardDeviceRequest()
control_req.unpack(int_to_hex_string(usb_req.setup))
handled = False
#print " UC Request Type {}".format(control_req.bmRequestType)
#print " UC Request {}".format(control_req.bRequest)
#print " UC Value {}".format(control_req.wValue)
#print " UCIndex {}".format(control_req.wIndex)
#print " UC Length {}".format(control_req.wLength)
if control_req.bmRequestType == 0x80: # Host Request
if control_req.bRequest == 0x06: # Get Descriptor
handled = self.handle_get_descriptor(control_req, usb_req)
if control_req.bRequest == 0x00: # Get STATUS
self.send_usb_req(usb_req, "\x01\x00", 2);
handled = True
if control_req.bmRequestType == 0x00: # Host Request
if control_req.bRequest == 0x09: # Set Configuration
handled = self.handle_set_configuration(control_req, usb_req)
if not handled:
self.handle_unknown_control(control_req, usb_req)
def handle_usb_request(self, usb_req):
if usb_req.ep == 0:
self.handle_usb_control(usb_req)
else:
self.handle_data(usb_req)
class USBContainer:
usb_devices = {}
attached_devices = {}
devices_count = 0
def add_usb_device(self, usb_device):
self.devices_count += 1
busID = '1-1.' + str(self.devices_count)
self.usb_devices[busID] = usb_device
self.attached_devices[busID] = False
def remove_usb_device(self, usb_device):
for busid, dev in self.usb_devices.iteritems():
if dev == usb_device:
del self.attached_devices[busid]
del self.usb_devices[busid]
break
self.devices_count -= 1
def detach_all(self):
self.attached_devices = {}
self.usb_devices = {}
self.devices_count = 0
def handle_attach(self, busid):
if self.usb_devices[busid] is not None:
busnum = int(busid[4:])
return OPREPImport(base=USBIPHeader(command=3, status=0),
usbPath='/sys/devices/pci0000:00/0000:00:01.2/usb1/' + busid,
busID=busid,
busnum=busnum,
devnum=2,
speed=2,
idVendor=self.usb_devices[busid].vendorID,
idProduct=self.usb_devices[busid].productID,
bcdDevice=self.usb_devices[busid].bcdDevice,
bDeviceClass=self.usb_devices[busid].bDeviceClass,
bDeviceSubClass=self.usb_devices[busid].bDeviceSubClass,
bDeviceProtocol=self.usb_devices[busid].bDeviceProtocol,
bNumConfigurations=self.usb_devices[busid].bNumConfigurations,
bConfigurationValue=self.usb_devices[busid].bConfigurationValue,
bNumInterfaces=self.usb_devices[busid].bNumInterfaces)
def handle_device_list(self):
devices = {}
i = 0
for busid, usb_dev in self.usb_devices.iteritems():
i += 1
devices['device' + str(i)] = [USBIPDevice(), USBIPDevice(
usbPath='/sys/devices/pci0000:00/0000:00:01.2/usb1/' + busid,
busID=busid,
busnum=i,
devnum=2,
speed=2,
idVendor=self.usb_devices[busid].vendorID,
idProduct=self.usb_devices[busid].productID,
bcdDevice=self.usb_devices[busid].bcdDevice,
bDeviceClass=self.usb_devices[busid].bDeviceClass,
bDeviceSubClass=self.usb_devices[busid].bDeviceSubClass,
bDeviceProtocol=self.usb_devices[busid].bDeviceProtocol,
bNumConfigurations=self.usb_devices[busid].bNumConfigurations,
bConfigurationValue=self.usb_devices[busid].bConfigurationValue,
bNumInterfaces=self.usb_devices[busid].bNumInterfaces,
import colorsys
import operator
from enum import Enum
from io import BytesIO
from PIL import Image
from fs_helpers import *
try:
import pyfastbti
PY_FAST_BTI_INSTALLED = True
except ImportError:
PY_FAST_BTI_INSTALLED = False
try:
import pyfasttextureutils
PY_FAST_TEXTURE_UTILS_INSTALLED = True
except ImportError:
PY_FAST_TEXTURE_UTILS_INSTALLED = False
class TooManyColorsError(Exception):
pass
class ImageFormat(Enum):
I4 = 0
I8 = 1
IA4 = 2
IA8 = 3
RGB565 = 4
RGB5A3 = 5
RGBA32 = 6
C4 = 8
C8 = 9
C14X2 = 0xA
CMPR = 0xE
class PaletteFormat(Enum):
IA8 = 0
RGB565 = 1
RGB5A3 = 2
BLOCK_WIDTHS = {
ImageFormat.I4: 8,
ImageFormat.I8: 8,
ImageFormat.IA4: 8,
ImageFormat.IA8: 4,
ImageFormat.RGB565: 4,
ImageFormat.RGB5A3: 4,
ImageFormat.RGBA32: 4,
ImageFormat.C4: 8,
ImageFormat.C8: 8,
ImageFormat.C14X2: 4,
ImageFormat.CMPR: 8,
}
BLOCK_HEIGHTS = {
ImageFormat.I4: 8,
ImageFormat.I8: 4,
ImageFormat.IA4: 4,
ImageFormat.IA8: 4,
ImageFormat.RGB565: 4,
ImageFormat.RGB5A3: 4,
ImageFormat.RGBA32: 4,
ImageFormat.C4: 8,
ImageFormat.C8: 4,
ImageFormat.C14X2: 4,
ImageFormat.CMPR: 8,
}
BLOCK_DATA_SIZES = {
ImageFormat.I4: 32,
ImageFormat.I8: 32,
ImageFormat.IA4: 32,
ImageFormat.IA8: 32,
ImageFormat.RGB565: 32,
ImageFormat.RGB5A3: 32,
ImageFormat.RGBA32: 64,
ImageFormat.C4: 32,
ImageFormat.C8: 32,
ImageFormat.C14X2: 32,
ImageFormat.CMPR: 32,
}
IMAGE_FORMATS_THAT_USE_PALETTES = [
ImageFormat.C4,
ImageFormat.C8,
ImageFormat.C14X2,
]
GREYSCALE_IMAGE_FORMATS = [
ImageFormat.I4,
ImageFormat.I8,
ImageFormat.IA4,
ImageFormat.IA8,
]
GREYSCALE_PALETTE_FORMATS = [
PaletteFormat.IA8,
]
PALETTE_FORMATS_WITH_ALPHA = [
PaletteFormat.IA8,
PaletteFormat.RGB5A3,
]
MAX_COLORS_FOR_IMAGE_FORMAT = {
ImageFormat.C4: 1 << 4,
ImageFormat.C8: 1 << 8,
ImageFormat.C14X2: 1 << 14,
}
def get_rgba(color):
if len(color) == 4:
r, g, b, a = color
else:
r, g, b = color
a = 0xFF
return (r, g, b, a)
def swizzle_3_bit_to_8_bit(v):
# 00000123 -> 12312312
return (v << 5) | (v << 2) | (v >> 1)
def swizzle_4_bit_to_8_bit(v):
# 00001234 -> 12341234
return (v << 4) | (v >> 0)
def swizzle_5_bit_to_8_bit(v):
# 00012345 -> 12345123
return (v << 3) | (v >> 2)
def swizzle_6_bit_to_8_bit(v):
# 00123456 -> 12345612
return (v << 2) | (v >> 4)
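# Worked example: swizzle_5_bit_to_8_bit(0b10110) == 0b10110101 (0xB5).
# Each swizzle repeats the source bits into the low-order bits so that the
# maximum input (e.g. 0x1F for 5 bits) maps to exactly 0xFF.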
def convert_rgb_to_greyscale(r, g, b):
return round(((r * 30) + (g * 59) + (b * 11)) / 100)
def convert_rgb565_to_color(rgb565):
r = (rgb565 >> 11) & 0x1F
g = (rgb565 >> 5) & 0x3F
b = (rgb565 >> 0) & 0x1F
r = swizzle_5_bit_to_8_bit(r)
g = swizzle_6_bit_to_8_bit(g)
b = swizzle_5_bit_to_8_bit(b)
return (r, g, b, 255)
def convert_color_to_rgb565(color):
r, g, b, a = get_rgba(color)
r = r >> 3
g = g >> 2
b = b >> 3
rgb565 = 0x0000
rgb565 |= (r & 0x1F) << 11
rgb565 |= (g & 0x3F) << 5
rgb565 |= (b & 0x1F) << 0
return rgb565
def convert_rgb5a3_to_color(rgb5a3):
# RGB5A3 format.
# Each color takes up two bytes.
# Format depends on the most significant bit. Two possible formats:
# Top bit is 0: 0AAARRRRGGGGBBBB
# Top bit is 1: 1RRRRRGGGGGBBBBB (Alpha set to 0xff)
if (rgb5a3 & 0x8000) == 0:
a = (rgb5a3 >> 12) & 0x7
r = (rgb5a3 >> 8) & 0xF
g = (rgb5a3 >> 4) & 0xF
b = (rgb5a3 >> 0) & 0xF
a = swizzle_3_bit_to_8_bit(a)
r = swizzle_4_bit_to_8_bit(r)
g = swizzle_4_bit_to_8_bit(g)
b = swizzle_4_bit_to_8_bit(b)
else:
a = 255
r = (rgb5a3 >> 10) & 0x1F
g = (rgb5a3 >> 5) & 0x1F
b = (rgb5a3 >> 0) & 0x1F
r = swizzle_5_bit_to_8_bit(r)
g = swizzle_5_bit_to_8_bit(g)
b = swizzle_5_bit_to_8_bit(b)
return (r, g, b, a)
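# Worked example: convert_rgb5a3_to_color(0x4ABC) == (0xAA, 0xBB, 0xCC, 0x92),
# since the top bit is clear (0AAARRRRGGGGBBBB): A=0b100 -> 0x92,
# R=0xA -> 0xAA, G=0xB -> 0xBB, B=0xC -> 0xCC.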
def convert_color_to_rgb5a3(color):
r, g, b, a = get_rgba(color)
if a != 255:
a = a >> 5
r = r >> 4
g = g >> 4
b = b >> 4
rgb5a3 = 0x0000
rgb5a3 |= (a & 0x7) << 12
rgb5a3 |= (r & 0xF) << 8
rgb5a3 |= (g & 0xF) << 4
rgb5a3 |= (b & 0xF) << 0
else:
r = r >> 3
g = g >> 3
b = b >> 3
rgb5a3 = 0x8000
rgb5a3 |= (r & 0x1F) << 10
rgb5a3 |= (g & 0x1F) << 5
rgb5a3 |= (b & 0x1F) << 0
return rgb5a3
def convert_ia4_to_color(ia4):
low_nibble = ia4 & 0xF
high_nibble = (ia4 >> 4) & 0xF
r = g = b = swizzle_4_bit_to_8_bit(low_nibble)
a = swizzle_4_bit_to_8_bit(high_nibble)
return (r, g, b, a)
def convert_color_to_ia4(color):
r, g, b, a = get_rgba(color)
l = convert_rgb_to_greyscale(r, g, b)
ia4 = 0x00
ia4 |= (l >> 4) & 0xF
ia4 |= a & 0xF0
return ia4
def convert_ia8_to_color(ia8):
low_byte = ia8 & 0xFF
high_byte = (ia8 >> 8) & 0xFF
r = g = b = low_byte
a = high_byte
return (r, g, b, a)
def convert_color_to_ia8(color):
r, g, b, a = get_rgba(color)
l = convert_rgb_to_greyscale(r, g, b)
ia8 = 0x0000
ia8 |= l & 0x00FF
ia8 |= (a << 8) & 0xFF00
return ia8
def convert_i4_to_color(i4):
r = g = b = a = swizzle_4_bit_to_8_bit(i4)
return (r, g, b, a)
def convert_color_to_i4(color):
r, g, b, a = get_rgba(color)
l = convert_rgb_to_greyscale(r, g, b)
i4 = (l >> 4) & 0xF
return i4
def convert_i8_to_color(i8):
r = g = b = a = i8
return (r, g, b, a)
def convert_color_to_i8(color):
r, g, b, a = get_rgba(color)
l = convert_rgb_to_greyscale(r, g, b)
i8 = l & 0xFF
return i8
def get_interpolated_cmpr_colors(color_0_rgb565, color_1_rgb565):
color_0 = convert_rgb565_to_color(color_0_rgb565)
color_1 = convert_rgb565_to_color(color_1_rgb565)
r0, g0, b0, _ = color_0
r1, g1, b1, _ = color_1
if color_0_rgb565 > color_1_rgb565:
color_2 = (
(2 * r0 + 1 * r1) // 3,
(2 * g0 + 1 * g1) // 3,
(2 * b0 + 1 * b1) // 3,
255,
)
color_3 = (
(1 * r0 + 2 * r1) // 3,
(1 * g0 + 2 * g1) // 3,
(1 * b0 + 2 * b1) // 3,
255,
)
else:
color_2 = (r0 // 2 + r1 // 2, g0 // 2 + g1 // 2, b0 // 2 + b1 // 2, 255)
color_3 = (0, 0, 0, 0)
colors = [color_0, color_1, color_2, color_3]
return colors
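# Note: the ordering of the two key colors selects the CMPR block mode.
# When color_0 > color_1 the block gets two interpolated colors (four
# opaque colors in total); otherwise it gets a single midpoint color plus
# a fully transparent entry (1-bit punch-through alpha).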
def get_best_cmpr_key_colors(all_colors):
if PY_FAST_BTI_INSTALLED:
return pyfastbti.get_best_cmpr_key_colors(all_colors)
max_dist = -1
color_1 = None
color_2 = None
for i in range(len(all_colors)):
curr_color_1 = all_colors[i]
for j in range(i + 1, len(all_colors)):
curr_color_2 = all_colors[j]
curr_dist = get_color_distance_fast(curr_color_1, curr_color_2)
if curr_dist > max_dist:
max_dist = curr_dist
color_1 = curr_color_1
color_2 = curr_color_2
if max_dist == -1:
return ((0, 0, 0, 0xFF), (0xFF, 0xFF, 0xFF, 0xFF))
else:
r1, g1, b1, a1 = color_1
color_1 = (r1, g1, b1, 0xFF)
r2, g2, b2, a2 = color_2
color_2 = (r2, g2, b2, 0xFF)
if (r1 >> 3) == (r2 >> 3) and (g1 >> 2) == (g2 >> 2) and (b1 >> 3) == (b2 >> 3):
if (r1 >> 3) == 0 and (g1 >> 2) == 0 and (b1 >> 3) == 0:
color_2 = (0xFF, 0xFF, 0xFF, 0xFF)
else:
color_2 = (0, 0, 0, 0xFF)
return (color_1, color_2)
# Picks a color from a palette that is visually the closest to the given color.
# Based off Aseprite's code: https://github.com/aseprite/aseprite/blob/cc7bde6cd1d9ab74c31ccfa1bf41a000150a1fb2/src/doc/palette.cpp#L226-L272
def get_nearest_color_slow(color, palette):
if color in palette:
return color
r, g, b, a = get_rgba(color)
if a == 0: # Transparent
for indexed_color in palette:
if len(indexed_color) == 4 and indexed_color[3] == 0:
return indexed_color
min_dist = 9999999999.0
value = None
col_diff_g = []
col_diff_r = []
col_diff_b = []
col_diff_a = []
for i in range(128):
col_diff_g.append(0)
col_diff_r.append(0)
col_diff_b.append(0)
col_diff_a.append(0)
for i in range(1, 63 + 1):
k = i * i
col_diff_g[i] = col_diff_g[128 - i] = k * 59 * 59
col_diff_r[i] = col_diff_r[128 - i] = k * 30 * 30
col_diff_b[i] = col_diff_b[128 - i] = k * 11 * 11
col_diff_a[i] = col_diff_a[128 - i] = k * 8 * 8
for indexed_color in palette:
r1, g1, b1, a1 = get_rgba(color)
r2, g2, b2, a2 = get_rgba(indexed_color)
r1 >>= 3
g1 >>= 3
b1 >>= 3
a1 >>= 3
r2 >>= 3
g2 >>= 3
b2 >>= 3
a2 >>= 3
coldiff = col_diff_g[g2 - g1 & 127]
if coldiff < min_dist:
coldiff += col_diff_r[r2 - r1 & 127]
if coldiff < min_dist:
coldiff += col_diff_b[b2 - b1 & 127]
if coldiff < min_dist:
coldiff += col_diff_a[a2 - a1 & 127]
if coldiff < min_dist:
min_dist = coldiff
value = indexed_color
return value
def get_nearest_color_fast(color, palette):
if color in
from __future__ import with_statement
import subprocess
import unittest
import sys
import os
import imp
from tempfile import mkdtemp
from shutil import rmtree
import mozunit
from UserString import UserString
# Create a controlled configuration for use by expandlibs
config_win = {
'AR_EXTRACT': '',
'DLL_PREFIX': '',
'LIB_PREFIX': '',
'OBJ_SUFFIX': '.obj',
'LIB_SUFFIX': '.lib',
'DLL_SUFFIX': '.dll',
'IMPORT_LIB_SUFFIX': '.lib',
'LIBS_DESC_SUFFIX': '.desc',
'EXPAND_LIBS_LIST_STYLE': 'list',
}
config_unix = {
'AR_EXTRACT': 'ar -x',
'DLL_PREFIX': 'lib',
'LIB_PREFIX': 'lib',
'OBJ_SUFFIX': '.o',
'LIB_SUFFIX': '.a',
'DLL_SUFFIX': '.so',
'IMPORT_LIB_SUFFIX': '',
'LIBS_DESC_SUFFIX': '.desc',
'EXPAND_LIBS_LIST_STYLE': 'linkerscript',
}
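# Register a stub module under the name 'expandlibs_config' before importing
# the expandlibs modules below, so they resolve their configuration from this
# controlled object; each replicated test then fills it in with config_win or
# config_unix through wrap_method().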
config = sys.modules['expandlibs_config'] = imp.new_module('expandlibs_config')
from expandlibs import LibDescriptor, ExpandArgs, relativize, ExpandLibsDeps
from expandlibs_gen import generate
from expandlibs_exec import ExpandArgsMore, SectionFinder
def Lib(name):
return config.LIB_PREFIX + name + config.LIB_SUFFIX
def Obj(name):
return name + config.OBJ_SUFFIX
def Dll(name):
return config.DLL_PREFIX + name + config.DLL_SUFFIX
def ImportLib(name):
if not len(config.IMPORT_LIB_SUFFIX): return Dll(name)
return config.LIB_PREFIX + name + config.IMPORT_LIB_SUFFIX
class TestRelativize(unittest.TestCase):
def test_relativize(self):
'''Test relativize()'''
os_path_exists = os.path.exists
def exists(path):
return True
os.path.exists = exists
self.assertEqual(relativize(os.path.abspath(os.curdir)), os.curdir)
self.assertEqual(relativize(os.path.abspath(os.pardir)), os.pardir)
self.assertEqual(relativize(os.path.join(os.curdir, 'a')), 'a')
self.assertEqual(relativize(os.path.join(os.path.abspath(os.curdir), 'a')), 'a')
# relativize is expected to return the absolute path if it is shorter
self.assertEqual(relativize(os.sep), os.sep)
os.path.exists = os_path_exists  # restore the original
class TestLibDescriptor(unittest.TestCase):
def test_serialize(self):
'''Test LibDescriptor's serialization'''
desc = LibDescriptor()
desc[LibDescriptor.KEYS[0]] = ['a', 'b']
self.assertEqual(str(desc), "%s = a b" % LibDescriptor.KEYS[0])
desc['unsupported-key'] = ['a']
self.assertEqual(str(desc), "%s = a b" % LibDescriptor.KEYS[0])
desc[LibDescriptor.KEYS[1]] = ['c', 'd', 'e']
self.assertEqual(str(desc), "%s = a b\n%s = c d e" % (LibDescriptor.KEYS[0], LibDescriptor.KEYS[1]))
desc[LibDescriptor.KEYS[0]] = []
self.assertEqual(str(desc), "%s = c d e" % (LibDescriptor.KEYS[1]))
def test_read(self):
'''Test LibDescriptor's initialization'''
desc_list = ["# Comment",
"%s = a b" % LibDescriptor.KEYS[1],
"", # Empty line
"foo = bar", # Should be discarded
"%s = c d e" % LibDescriptor.KEYS[0]]
desc = LibDescriptor(desc_list)
self.assertEqual(desc[LibDescriptor.KEYS[1]], ['a', 'b'])
self.assertEqual(desc[LibDescriptor.KEYS[0]], ['c', 'd', 'e'])
self.assertEqual(False, 'foo' in desc)
def wrap_method(conf, wrapped_method):
'''Wrapper used to call a test with a specific configuration'''
def _method(self):
for key in conf:
setattr(config, key, conf[key])
self.init()
try:
wrapped_method(self)
except:
raise
finally:
self.cleanup()
return _method
class ReplicateTests(type):
'''Replicates tests for unix and windows variants'''
def __new__(cls, clsName, bases, dict):
for name in [key for key in dict if key.startswith('test_')]:
dict[name + '_unix'] = wrap_method(config_unix, dict[name])
dict[name + '_unix'].__doc__ = dict[name].__doc__ + ' (unix)'
dict[name + '_win'] = wrap_method(config_win, dict[name])
dict[name + '_win'].__doc__ = dict[name].__doc__ + ' (win)'
del dict[name]
return type.__new__(cls, clsName, bases, dict)
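# e.g. a subclass defining test_expand() ends up exposing test_expand_unix()
# and test_expand_win(), each of which loads config_unix or config_win into
# the stub expandlibs_config module (via wrap_method) before running the
# original test body.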
class TestCaseWithTmpDir(unittest.TestCase):
__metaclass__ = ReplicateTests
def init(self):
self.tmpdir = os.path.abspath(mkdtemp(dir=os.curdir))
def cleanup(self):
rmtree(self.tmpdir)
def touch(self, files):
for f in files:
open(f, 'w').close()
def tmpfile(self, *args):
return os.path.join(self.tmpdir, *args)
class TestExpandLibsGen(TestCaseWithTmpDir):
def test_generate(self):
'''Test library descriptor generation'''
files = [self.tmpfile(f) for f in
[Lib('a'), Obj('b'), Lib('c'), Obj('d'), Obj('e'), Lib('f')]]
self.touch(files[:-1])
self.touch([files[-1] + config.LIBS_DESC_SUFFIX])
desc = generate(files)
self.assertEqual(desc['OBJS'], [self.tmpfile(Obj(s)) for s in ['b', 'd', 'e']])
self.assertEqual(desc['LIBS'], [self.tmpfile(Lib(s)) for s in ['a', 'c', 'f']])
self.assertRaises(Exception, generate, files + [self.tmpfile(Obj('z'))])
self.assertRaises(Exception, generate, files + [self.tmpfile(Lib('y'))])
class TestExpandInit(TestCaseWithTmpDir):
def init(self):
''' Initializes test environment for library expansion tests'''
super(TestExpandInit, self).init()
# Create 2 fake libraries, each containing 3 objects, and the second
# including the first one and another library.
os.mkdir(self.tmpfile('libx'))
os.mkdir(self.tmpfile('liby'))
self.libx_files = [self.tmpfile('libx', Obj(f)) for f in ['g', 'h', 'i']]
self.liby_files = [self.tmpfile('liby', Obj(f)) for f in ['j', 'k', 'l']] + [self.tmpfile('liby', Lib('z'))]
self.touch(self.libx_files + self.liby_files)
with open(self.tmpfile('libx', Lib('x') + config.LIBS_DESC_SUFFIX), 'w') as f:
f.write(str(generate(self.libx_files)))
with open(self.tmpfile('liby', Lib('y') + config.LIBS_DESC_SUFFIX), 'w') as f:
f.write(str(generate(self.liby_files + [self.tmpfile('libx', Lib('x'))])))
# Create various objects and libraries
self.arg_files = [self.tmpfile(f) for f in [Lib('a'), Obj('b'), Obj('c'), Lib('d'), Obj('e')]]
# We always give library names (LIB_PREFIX/SUFFIX), even for
# dynamic/import libraries
self.files = self.arg_files + [self.tmpfile(ImportLib('f'))]
self.arg_files += [self.tmpfile(Lib('f'))]
self.touch(self.files)
def assertRelEqual(self, args1, args2):
self.assertEqual(args1, [relativize(a) for a in args2])
class TestExpandArgs(TestExpandInit):
def test_expand(self):
'''Test library expansion'''
# Expanding arguments means libraries with a descriptor are expanded
# with the descriptor content, and import libraries are used when
# a library doesn't exist
args = ExpandArgs(['foo', '-bar'] + self.arg_files + [self.tmpfile('liby', Lib('y'))])
self.assertRelEqual(args, ['foo', '-bar'] + self.files + self.liby_files + self.libx_files)
# When a library exists at the same time as a descriptor, we just use
# the library
self.touch([self.tmpfile('libx', Lib('x'))])
args = ExpandArgs(['foo', '-bar'] + self.arg_files + [self.tmpfile('liby', Lib('y'))])
self.assertRelEqual(args, ['foo', '-bar'] + self.files + self.liby_files + [self.tmpfile('libx', Lib('x'))])
self.touch([self.tmpfile('liby', Lib('y'))])
args = ExpandArgs(['foo', '-bar'] + self.arg_files + [self.tmpfile('liby', Lib('y'))])
self.assertRelEqual(args, ['foo', '-bar'] + self.files + [self.tmpfile('liby', Lib('y'))])
class TestExpandLibsDeps(TestExpandInit):
def test_expandlibsdeps(self):
'''Test library expansion for dependencies'''
# Dependency list for a library with a descriptor is equivalent to
# the arguments expansion, to which we add each descriptor
args = self.arg_files + [self.tmpfile('liby', Lib('y'))]
self.assertRelEqual(ExpandLibsDeps(args), ExpandArgs(args) + [self.tmpfile('libx', Lib('x') + config.LIBS_DESC_SUFFIX), self.tmpfile('liby', Lib('y') + config.LIBS_DESC_SUFFIX)])
# When a library exists at the same time as a descriptor, the
# descriptor is not a dependency
self.touch([self.tmpfile('libx', Lib('x'))])
args = self.arg_files + [self.tmpfile('liby', Lib('y'))]
self.assertRelEqual(ExpandLibsDeps(args), ExpandArgs(args) + [self.tmpfile('liby', Lib('y') + config.LIBS_DESC_SUFFIX)])
self.touch([self.tmpfile('liby', Lib('y'))])
args = self.arg_files + [self.tmpfile('liby', Lib('y'))]
self.assertRelEqual(ExpandLibsDeps(args), ExpandArgs(args))
class TestExpandArgsMore(TestExpandInit):
def test_makelist(self):
'''Test grouping object files in lists'''
# ExpandArgsMore does the same as ExpandArgs
with ExpandArgsMore(['foo', '-bar'] + self.arg_files + [self.tmpfile('liby', Lib('y'))]) as args:
self.assertRelEqual(args, ['foo', '-bar'] + self.files + self.liby_files + self.libx_files)
# But also has an extra method replacing object files with a list
args.makelist()
# self.files has objects at #1, #2, #4
self.assertRelEqual(args[:3], ['foo', '-bar'] + self.files[:1])
self.assertRelEqual(args[4:], [self.files[3]] + self.files[5:] + [self.tmpfile('liby', Lib('z'))])
# Check the list file content
objs = [f for f in self.files + self.liby_files + self.libx_files if f.endswith(config.OBJ_SUFFIX)]
if config.EXPAND_LIBS_LIST_STYLE == "linkerscript":
self.assertNotEqual(args[3][0], '@')
filename = args[3]
content = ['INPUT("%s")' % relativize(f) for f in objs]
with open(filename, 'r') as f:
self.assertEqual([l.strip() for l in f.readlines() if len(l.strip())], content)
elif config.EXPAND_LIBS_LIST_STYLE == "list":
self.assertEqual(args[3][0], '@')
filename = args[3][1:]
content = objs
with open(filename, 'r') as f:
self.assertRelEqual([l.strip() for l in f.readlines() if len(l.strip())], content)
tmp = args.tmp
# Check that all temporary files are properly removed
self.assertEqual(True, all([not os.path.exists(f) for f in tmp]))
def test_extract(self):
'''Test library extraction'''
# Divert subprocess.call
subprocess_call = subprocess.call
extracted = {}
def call(args, **kargs):
# The command called is always AR_EXTRACT
ar_extract = config.AR_EXTRACT.split()
self.assertRelEqual(args[:len(ar_extract)], ar_extract)
# Remaining argument is always one library
self.assertRelEqual([os.path.splitext(arg)[1] for arg in args[len(ar_extract):]], [config.LIB_SUFFIX])
# Simulate AR_EXTRACT extracting one object file for the library
lib = os.path.splitext(os.path.basename(args[len(ar_extract)]))[0]
extracted[lib] = os.path.join(kargs['cwd'], "%s" % Obj(lib))
self.touch([extracted[lib]])
subprocess.call = call
# ExpandArgsMore does the same as ExpandArgs
self.touch([self.tmpfile('liby', Lib('y'))])
with ExpandArgsMore(['foo', '-bar'] + self.arg_files + [self.tmpfile('liby', Lib('y'))]) as args:
self.assertRelEqual(args, ['foo', '-bar'] + self.files + [self.tmpfile('liby', Lib('y'))])
# ExpandArgsMore also has an extra method extracting static libraries
# when possible
args.extract()
files = self.files + self.liby_files + self.libx_files
if not len(config.AR_EXTRACT):
# If we don't have an AR_EXTRACT, extract() expands libraries with a
# descriptor when the corresponding library exists (which ExpandArgs
# alone doesn't)
self.assertRelEqual(args, ['foo', '-bar'] + files)
else:
# With AR_EXTRACT, it uses the descriptors when there are, and actually
# extracts the remaining libraries
self.assertRelEqual(args, ['foo', '-bar'] + [extracted[os.path.splitext(os.path.basename(f))[0]] if f.endswith(config.LIB_SUFFIX) else f for f in files])
tmp = args.tmp
# Check that all temporary files are properly removed
self.assertEqual(True, all([not os.path.exists(f) for f in tmp]))
# Restore subprocess.call
subprocess.call = subprocess_call
class FakeProcess(object):
def __init__(self, out, err = ''):
self.out = out
self.err = err
def communicate(self):
return (self.out, self.err)
OBJDUMPS = {
'foo.o': '''
00000000 g F .text\t00000001 foo
00000000 g F .text._Z6foobarv\t00000001 _Z6foobarv
00000000 g F .text.hello\t00000001 hello
00000000 g F .text._ZThn4_6foobarv\t00000001 _ZThn4_6foobarv
''',
'bar.o': '''
00000000 g F .text.hi\t00000001 hi
00000000 g F .text.hot._Z6barbazv\t00000001 .hidden _Z6barbazv
''',
}
PRINT_ICF = '''
ld: ICF folding section '.text.hello' in file 'foo.o'into '.text.hi' in file 'bar.o'
ld: ICF folding section '.foo' in file 'foo.o'into '.foo' in file 'bar.o'
'''
class SubprocessPopen(object):
def __init__(self, test):
self.test = test
def __call__(self, args, stdout = None, stderr = None):
self.test.assertEqual(stdout, subprocess.PIPE)
self.test.assertEqual(stderr, subprocess.PIPE)
if args[0] == 'objdump':
self.test.assertEqual(args[1], '-t')
self.test.assertTrue(args[2] in OBJDUMPS)
return FakeProcess(OBJDUMPS[args[2]])
else:
return FakeProcess('', PRINT_ICF)
class TestSectionFinder(unittest.TestCase):
def test_getSections(self):
'''Test SectionFinder'''
# Divert subprocess.Popen
subprocess_popen = subprocess.Popen
subprocess.Popen = SubprocessPopen(self)
config.EXPAND_LIBS_ORDER_STYLE = 'linkerscript'
config.OBJ_SUFFIX = '.o'
config.LIB_SUFFIX = '.a'
finder = SectionFinder(['foo.o', 'bar.o'])
self.assertEqual(finder.getSections('foobar'), [])
self.assertEqual(finder.getSections('_Z6barbazv'), ['.text.hot._Z6barbazv'])
self.assertEqual(finder.getSections('_Z6foobarv'), ['.text._Z6foobarv', '.text._ZThn4_6foobarv'])
self.assertEqual(finder.getSections('_ZThn4_6foobarv'), ['.text._Z6foobarv', '.text._ZThn4_6foobarv'])
subprocess.Popen = subprocess_popen
class TestSymbolOrder(unittest.TestCase):
def test_getOrderedSections(self):
'''Test ExpandMoreArgs' _getOrderedSections'''
@property
def pretty_platforms(self):
""" Returns a prettified list of related pipelines """
return list(set([sample.pretty_platform for sample in self.samples.all()]))
@property
def processed_samples(self):
return list([sample.accession_code for sample in self.samples.all() if sample.is_processed])
@property
def organism_names(self):
""" Get a list of unique organism names that has at least one downloadable sample """
result = self.samples\
.filter(is_processed=True, organism__qn_target__isnull=False)\
.values_list('organism__name', flat=True)\
.distinct()
return list(result)
@property
def downloadable_samples(self):
"""
Returns the accession codes of the downloadable samples in this experiment.
This is indexed on elastic search and used to count the number of samples
on the filters.
"""
return list(self.samples.filter(is_processed=True, organism__qn_target__isnull=False)\
.values_list('accession_code', flat=True))
class ExperimentAnnotation(models.Model):
""" Semi-standard information associated with an Experiment """
class Meta:
db_table = "experiment_annotations"
base_manager_name = 'public_objects'
# Managers
objects = models.Manager()
public_objects = PublicObjectsManager()
# Relations
experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
# Properties
data = JSONField(default=dict)
is_ccdl = models.BooleanField(default=False)
# Common Properties
is_public = models.BooleanField(default=True)
created_at = models.DateTimeField(editable=False, default=timezone.now)
last_modified = models.DateTimeField(default=timezone.now)
def save(self, *args, **kwargs):
""" On save, update timestamps """
current_time = timezone.now()
if not self.id:
self.created_at = current_time
self.last_modified = current_time
return super(ExperimentAnnotation, self).save(*args, **kwargs)
class Pipeline(models.Model):
"""Pipeline that is associated with a series of ComputationalResult records."""
name = models.CharField(max_length=255)
steps = ArrayField(models.IntegerField(), default=list)
class Meta:
db_table = "pipelines"
class Processor(models.Model):
"""Processor associated with a certain ComputationalResult."""
name = models.CharField(max_length=255)
version = models.CharField(max_length=64)
docker_image = models.CharField(max_length=255)
environment = JSONField(default=dict)
class Meta:
db_table = "processors"
unique_together = ('name', 'version', 'docker_image', 'environment')
def __str__(self):
return "Processor: %s (version: %s, docker_image: %s)" % (self.name, self.version, self.docker_image)
class ComputationalResult(models.Model):
""" Meta-information about the output of a computer process. (Ex Salmon) """
class Meta:
db_table = "computational_results"
base_manager_name = 'public_objects'
def __str__(self):
processor_name_str = ""
if self.processor:
processor_name_str = ": " + str(self.processor.name)
return "ComputationalResult " + str(self.pk) + processor_name_str
# Managers
objects = models.Manager()
public_objects = PublicObjectsManager()
commands = ArrayField(models.TextField(), default=list)
processor = models.ForeignKey(Processor, blank=True, null=True, on_delete=models.CASCADE)
samples = models.ManyToManyField('Sample', through='SampleResultAssociation')
# The Organism Index used to process the sample.
organism_index = models.ForeignKey('OrganismIndex', blank=True, null=True, on_delete=models.SET_NULL)
is_ccdl = models.BooleanField(default=True)
# Stats
time_start = models.DateTimeField(blank=True, null=True)
time_end = models.DateTimeField(blank=True, null=True)
# Common Properties
is_public = models.BooleanField(default=True)
created_at = models.DateTimeField(editable=False, default=timezone.now)
last_modified = models.DateTimeField(default=timezone.now)
def save(self, *args, **kwargs):
""" On save, update timestamps """
current_time = timezone.now()
if not self.id:
self.created_at = current_time
self.last_modified = current_time
return super(ComputationalResult, self).save(*args, **kwargs)
def remove_computed_files_from_s3(self):
""" Removes all associated computed files from S3. Use this before deleting a computational result. """
for computed_file in self.computedfile_set.all():
computed_file.delete_s3_file()
class ComputationalResultAnnotation(models.Model):
""" Non-standard information associated with an ComputationalResult """
class Meta:
db_table = "computational_result_annotations"
base_manager_name = 'public_objects'
# Managers
objects = models.Manager()
public_objects = PublicObjectsManager()
# Relations
result = models.ForeignKey(
ComputationalResult, blank=False, null=False, on_delete=models.CASCADE)
# Properties
data = JSONField(default=dict)
is_ccdl = models.BooleanField(default=True)
# Common Properties
is_public = models.BooleanField(default=True)
created_at = models.DateTimeField(editable=False, default=timezone.now)
last_modified = models.DateTimeField(default=timezone.now)
def save(self, *args, **kwargs):
""" On save, update timestamps """
current_time = timezone.now()
if not self.id:
self.created_at = current_time
self.last_modified = current_time
return super(ComputationalResultAnnotation, self).save(*args, **kwargs)
# Compendium Computational Result
class CompendiumResult(models.Model):
""" Computational Result For A Compendium """
class Meta:
db_table = "compendium_results"
base_manager_name = "public_objects"
def __str__(self):
return "CompendiumResult " + str(self.pk)
SVD_ALGORITHM_CHOICES = (
('NONE', 'None'),
('RANDOMIZED', 'randomized'),
('ARPACK', 'arpack'),
)
# Managers
objects = models.Manager()
public_objects = PublicObjectsManager()
# Relations
result = models.ForeignKey(ComputationalResult,
blank=False,
null=False,
related_name='compendium_result',
on_delete=models.CASCADE)
primary_organism = models.ForeignKey(Organism,
blank=False,
null=False,
related_name='primary_compendium_results',
on_delete=models.CASCADE)
organisms = models.ManyToManyField(Organism,
related_name='compendium_results',
through='CompendiumResultOrganismAssociation')
# Properties
quant_sf_only = models.BooleanField(default=False)
compendium_version = models.IntegerField(blank=True, null=True)
svd_algorithm = models.CharField(
max_length=255,
choices=SVD_ALGORITHM_CHOICES,
default="NONE",
help_text='The SVD algorithm that was used to impute the compendium result.'
)
# Common Properties
is_public = models.BooleanField(default=True)
#helper
def get_computed_file(self):
""" Short hand method for getting the computed file for this compendium"""
return ComputedFile.objects.filter(result=self.result).first()
# TODO
# class Gene(models.Model):
""" A representation of a Gene """
# class Meta:
# db_table = "genes"
class OrganismIndex(models.Model):
""" A special type of process result, necessary for processing other SRA samples """
class Meta:
db_table = "organism_index"
base_manager_name = 'public_objects'
def __str__(self):
return "OrganismIndex " + str(self.pk) + ": " + self.organism.name + \
' [' + self.index_type + '] - ' + str(self.salmon_version)
# Managers
objects = models.Manager()
public_objects = PublicObjectsManager()
# Relations
organism = models.ForeignKey(Organism, blank=False, null=False, on_delete=models.CASCADE)
result = models.ForeignKey(
ComputationalResult, blank=False, null=False, on_delete=models.CASCADE)
# ex., "TRANSCRIPTOME_LONG", "TRANSCRIPTOME_SHORT"
index_type = models.CharField(max_length=255)
# This corresponds to Ensembl's release number:
# http://ensemblgenomes.org/info/about/release_cycle
# Determined by hitting:
# http://rest.ensembl.org/info/software?content-type=application/json
source_version = models.CharField(max_length=255, default="93")
# The name of the genome assembly used which corresponds to 'GRCh38' in:
# ftp://ftp.ensembl.org/pub/release-93/fasta/homo_sapiens/dna/Homo_sapiens.GRCh38.dna.primary_assembly.fa.gz
assembly_name = models.CharField(max_length=255, default="UNKNOWN")
# This matters, for instance salmon 0.9.0 indexes don't work with 0.10.0
salmon_version = models.CharField(max_length=255, default=CURRENT_SALMON_VERSION)
# We keep the directory unextracted on the shared filesystem so all
# Salmon jobs can access it.
absolute_directory_path = models.CharField(max_length=255, blank=True, null=True, default="")
# Common Properties
is_public = models.BooleanField(default=True)
created_at = models.DateTimeField(editable=False, default=timezone.now)
last_modified = models.DateTimeField(default=timezone.now)
def get_computed_file(self):
""" Short hand method for getting the computed file for this organism index"""
return self.result.computedfile_set.first()
def save(self, *args, **kwargs):
""" On save, update timestamps """
current_time = timezone.now()
if not self.id:
self.created_at = current_time
self.last_modified = current_time
return super(OrganismIndex, self).save(*args, **kwargs)
"""
# Files
These are the database representations of files
which live on local disk, on ephemeral storage,
or on AWS cloud services.
"""
class OriginalFile(models.Model):
""" A representation of a file from an external source """
class Meta:
db_table = "original_files"
indexes = [
models.Index(fields=['filename']),
models.Index(fields=['source_filename']),
]
def __str__(self):
return "OriginalFile: " + self.get_display_name()
# Managers
objects = models.Manager()
public_objects = PublicObjectsManager()
# File Properties
filename = models.CharField(max_length=255)
absolute_file_path = models.CharField(max_length=255, blank=True, null=True)
size_in_bytes = models.BigIntegerField(blank=True, null=True)
sha1 = models.CharField(max_length=64)
# AWS
s3_bucket = models.CharField(max_length=255, blank=True, null=True)
s3_key = models.CharField(max_length=255, blank=True, null=True)
# Relations
samples = models.ManyToManyField('Sample', through='OriginalFileSampleAssociation')
processor_jobs = models.ManyToManyField('data_refinery_common.ProcessorJob', through='ProcessorJobOriginalFileAssociation')
downloader_jobs = models.ManyToManyField('data_refinery_common.DownloaderJob', through='DownloaderJobOriginalFileAssociation')
# Historical Properties
source_url = models.TextField()
is_archive = models.BooleanField(default=True)
source_filename = models.CharField(max_length=255, blank=False)
# Scientific Properties
has_raw = models.BooleanField(default=True) # Did this sample have a raw data source?
# Crunch Properties
is_downloaded = models.BooleanField(default=False)
# Common Properties
is_public = models.BooleanField(default=True)
created_at = models.DateTimeField(editable=False, default=timezone.now)
last_modified = models.DateTimeField(default=timezone.now)
def save(self, *args, **kwargs):
""" On save, update timestamps """
current_time = timezone.now()
if not self.id:
self.created_at = current_time
self.last_modified = current_time
return super(OriginalFile, self).save(*args, **kwargs)
def set_downloaded(self, absolute_file_path, filename = None):
""" Marks the file as downloaded, if `filename` is not provided it will
be parsed from the `absolute_file_path` """
self.is_downloaded = True
self.is_archive = FileUtils.is_archive(absolute_file_path)
self.absolute_file_path = absolute_file_path
self.filename = filename if filename else os.path.basename(absolute_file_path)
self.calculate_size()
self.calculate_sha1()
self.save()
def calculate_sha1(self) -> str:
""" Calculate the SHA1 value of a given file.
"""
self.sha1 = calculate_sha1(self.absolute_file_path)
return self.sha1
def calculate_size(self) -> int:
""" Calculate the number of bytes in a given file.
"""
self.size_in_bytes = calculate_file_size(self.absolute_file_path)
return self.size_in_bytes
def get_display_name(self):
""" For dev convenience """
if not self.filename:
return self.source_filename
else:
return self.filename
def get_extension(self):
""" Returns the lowercased extension of the filename
Thanks to https://stackoverflow.com/a/541408/763705 """
return FileUtils.get_extension(self.filename)
def is_blacklisted(self):
return self.get_extension() in [".xml", ".chp", ".exp"]
def delete_local_file(self):
""" Deletes this file from the local file system."""
try:
os.remove(self.absolute_file_path)
except OSError:
pass
except TypeError:
pass
except Exception as e:
logger.exception("Unexpected delete file exception.",
absolute_file_path=self.absolute_file_path
)
self.is_downloaded = False
self.save()
def has_blocking_jobs(self, own_processor_id=None) -> bool:
# If the file has a processor job that should not have been
# retried, then it still shouldn't be retried.
# Exclude the ones that were aborted.
no_retry_processor_jobs = self.processor_jobs.filter(no_retry=True).exclude(abort=True)
# If the file has a processor job that hasn't even started
# yet, then it doesn't need another.
incomplete_processor_jobs = self.processor_jobs.filter(
end_time__isnull=True,
success__isnull=True,
retried=False
)
if own_processor_id:
incomplete_processor_jobs = incomplete_processor_jobs.exclude(id=own_processor_id)
# Check if there's any jobs which should block another
# processing attempt.
blocking_jobs = no_retry_processor_jobs | incomplete_processor_jobs
return blocking_jobs.first() is not None
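    # Hedged usage sketch (not part of this model; `queue_processor_job` is a hypothetical
    # helper): a dispatcher could guard against duplicate work roughly like this:
    #     original_file = OriginalFile.objects.get(pk=some_id)
    #     if not original_file.has_blocking_jobs():
    #         queue_processor_job(original_file)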
def needs_processing(self, own_processor_id=None) -> bool:
"""Returns False if original_file has been or is being processed.
Returns True otherwise.
If own_processor_id is supplied then it will be ignored so
that processor jobs can use this function without their job
being counted as currently processing | |
####################################################################
# Author: <EMAIL>
# License: BSD
####################################################################
from __future__ import print_function, absolute_import
import os, sys
import cgi
import inspect
import re
import types
import threading
from werkzeug import Request as OriginalRequest, Response as OriginalResponse
from werkzeug import ClosingIterator, Local, LocalManager, BaseResponse
from werkzeug.exceptions import HTTPException, NotFound, BadRequest, InternalServerError
from werkzeug.routing import Map
import json as jsn
from . import template
from .js import json_dumps
from . import dispatch
from uliweb.utils.storage import Storage
from uliweb.utils.common import (pkg, log, import_attr,
myimport, wraps, norm_path)
import uliweb.utils.pyini as pyini
from uliweb.i18n import gettext_lazy, i18n_ini_convertor
from uliweb.utils.localproxy import LocalProxy, Global
from uliweb import UliwebError
# from rules import Mapping, add_rule
from . import rules
CONTENT_TYPE_JSON = 'application/json; charset=utf-8'
CONTENT_TYPE_TEXT = 'text/plain; charset=utf-8'
from ..utils._compat import string_types, callable, import_, get_class, ismethod
try:
set
except:
from sets import Set as set
local = Local()
local.request = None
local.response = None
__global__ = Global()
local_manager = LocalManager([local])
url_map = Map(strict_slashes=False)
static_views = []
use_urls = False
url_adapters = {}
__app_dirs__ = {}
__app_alias__ = {}
_xhr_redirect_json = True
r_callback = re.compile(r'^[\w_]+$')
# Initialize pyini env
pyini.set_env({
'env':{'_':gettext_lazy, 'gettext_lazy':gettext_lazy},
'convertors':i18n_ini_convertor,
})
__global__.settings = pyini.Ini(lazy=True)
#Users can define decorator functions in the settings section DECORATORS
#and then use @decorators.function_name in views.
#This usage requires settings to be initialized before the decorator is invoked.
class Finder(object):
def __init__(self, section):
self.__objects = {}
self.__section = section
def __contains__(self, name):
if name in self.__objects:
return True
if name not in settings[self.__section]:
return False
else:
return True
def __getattr__(self, name):
if name in self.__objects:
return self.__objects[name]
if name not in settings[self.__section]:
raise UliwebError("Object %s is not existed!" % name)
obj = import_attr(settings[self.__section].get(name))
self.__objects[name] = obj
return obj
def __setitem__(self, name, value):
if isinstance(value, string_types):
value = import_attr(value)
self.__objects[name] = value
decorators = Finder('DECORATORS')
functions = Finder('FUNCTIONS')
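# Hedged usage sketch (assumption, not from this module): with a settings section such as
#     [DECORATORS]
#     cached = myapp.utils.cached
# a view module can write
#     @decorators.cached
#     def index():
#         ...
# and the dotted path is imported lazily, via import_attr, on first attribute access.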
class Request(OriginalRequest):
GET = OriginalRequest.args
POST = OriginalRequest.form
params = OriginalRequest.values
FILES = OriginalRequest.files
def json(self):
"""
        Return the request body parsed as JSON; the client is expected to send JSON data
:return: dict
"""
return jsn.loads(self.data)
class Response(OriginalResponse):
def write(self, value):
self.stream.write(value)
class HTTPError(Exception):
def __init__(self, errorpage=None, **kwargs):
self.errorpage = errorpage or settings.GLOBAL.ERROR_PAGE
self.errors = kwargs
def __str__(self):
return repr(self.errors)
def redirect(location, code=302):
global _xhr_redirect_json, request
if _xhr_redirect_json and request.is_xhr:
response = json({'success':False, 'redirect':location}, status=500)
else:
response = Response(
'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
'<title>Redirecting...</title>\n'
'<h1>Redirecting...</h1>\n'
'<p>You should be redirected automatically to target URL: '
'<a href="%s">%s</a>. If not click the link.' %
(cgi.escape(location), cgi.escape(location)), status=code, content_type='text/html')
response.headers['Location'] = location
return response
class RedirectException(Exception):
"""
This is an exception, which can be raised in view function
"""
def __init__(self, location, code=302):
self.response = redirect(location, code)
def get_response(self):
return self.response
def Redirect(url):
raise RedirectException(url)
def error(message='', errorpage=None, request=None, appname=None, **kwargs):
kwargs.setdefault('message', message)
if request:
kwargs.setdefault('link', functions.request_url())
raise HTTPError(errorpage, **kwargs)
def function(fname, *args, **kwargs):
func = settings.get_var('FUNCTIONS/'+fname)
if func:
if args or kwargs:
return import_attr(func)(*args, **kwargs)
else:
return import_attr(func)
else:
raise UliwebError("Can't find the function [%s] in settings" % fname)
def json(data, **json_kwargs):
def set_content_type():
from uliweb import request
if 'content_type' not in json_kwargs:
if request and 'Accept' in request.headers:
Accept = request.headers['Accept']
if Accept == '*/*':
json_kwargs['content_type'] = CONTENT_TYPE_JSON
else:
if 'application/json' in [x.strip() for x in request.headers['Accept'].split(',')]:
json_kwargs['content_type'] = CONTENT_TYPE_JSON
else:
json_kwargs['content_type'] = CONTENT_TYPE_TEXT
else:
json_kwargs['content_type'] = CONTENT_TYPE_TEXT
if callable(data):
@wraps(data)
def f(*arg, **kwargs):
set_content_type()
ret = data(*arg, **kwargs)
return Response(json_dumps(ret), **json_kwargs)
return f
else:
set_content_type()
return Response(json_dumps(data), **json_kwargs)
def jsonp(data, **json_kwargs):
"""
jsonp is callback key name
"""
from uliweb import request
if 'jsonp' in json_kwargs:
cb = json_kwargs.pop('jsonp')
else:
cb = 'callback'
begin = str(request.GET.get(cb))
if not begin:
raise BadRequest("Can't found %s parameter in request's query_string" % cb)
if not r_callback.match(begin):
raise BadRequest("The callback name is not right, it can be alphabetic, number and underscore only")
if callable(data):
@wraps(data)
def f(*arg, **kwargs):
ret = data(*arg, **kwargs)
return Response(begin + '(' + json_dumps(ret) + ');', **json_kwargs)
return f
else:
return Response(begin + '(' + json_dumps(data) + ');', **json_kwargs)
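# Hedged usage sketch (assumption, not from this module): both helpers can wrap a view
# function as a decorator or convert a plain value directly, e.g.
#     @json
#     def user():
#         return {'name': 'alice'}
# jsonp() additionally wraps the payload in the callback named by the request's
# "callback" query parameter (or the key passed via jsonp='...').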
def CORS(func=None):
"""
CORS support
"""
def w(r=None):
from uliweb import request, response
if request.method == 'OPTIONS':
response = Response(status=204)
response.headers['Access-Control-Allow-Credentials'] = 'true'
response.headers['Access-Control-Allow-Origin'] = request.headers['Origin']
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, DELETE, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range'
response.headers['Access-Control-Max-Age'] = 24*3600
response.headers['Content-Type'] = 'text/plain; charset=utf-8'
response.headers['Content-Length'] = 0
return response
elif request.method in ('GET', 'POST'):
if isinstance(r, Response):
response = r
response.headers['Access-Control-Allow-Credentials'] = 'true'
if 'Origin' in request.headers:
response.headers['Access-Control-Allow-Origin'] = request.headers['Origin']
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, DELETE, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range'
response.headers['Access-Control-Expose-Headers'] = 'Content-Length,Content-Range'
if callable(func):
@wraps(func)
def f(*arg, **kwargs):
if request.method == 'OPTIONS':
return w()
ret = func(*arg, **kwargs)
w(ret)
return ret
return f
else:
w()
def expose(rule=None, **kwargs):
e = rules.Expose(rule, **kwargs)
if e.parse_level == 1:
return rule
else:
return e
def POST(rule, **kw):
kw['methods'] = ['POST']
return expose(rule, **kw)
def GET(rule, **kw):
kw['methods'] = ['GET']
return expose(rule, **kw)
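# Hedged usage sketch (assumption, not from this module): expose() registers a view under
# a werkzeug routing rule, and GET()/POST() restrict the allowed methods, e.g.
#     @expose('/hello/<name>')
#     def hello(name):
#         return '<h1>Hello %s</h1>' % name
#     @POST('/items')
#     def create_item():
#         return json({'ok': True})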
def get_url_adapter(_domain_name):
"""
Fetch a domain url_adapter object, and bind it to according domain
"""
from werkzeug._compat import wsgi_decoding_dance
domain = application.domains.get(_domain_name, {})
server_name = None
if domain.get('domain', ''):
server_name = domain['domain']
try:
env = {}
environ = request.environ
env['url_scheme'] = environ['wsgi.url_scheme']
env['default_method'] = environ['REQUEST_METHOD']
def _get_wsgi_string(name):
val = environ.get(name)
if val is not None:
return wsgi_decoding_dance(val, "utf-8")
env['script_name'] = _get_wsgi_string('SCRIPT_NAME')
env['path_info'] = _get_wsgi_string('PATH_INFO')
env['query_args'] = _get_wsgi_string('QUERY_STRING')
except:
env = {}
adapter = url_map.bind(server_name, **env)
else:
try:
env = request.environ
except:
            # this env is for testing only
env = {
'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;'
'q=0.9,*/*;q=0.8',
'HTTP_ACCEPT_CHARSET': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'HTTP_ACCEPT_ENCODING': 'gzip,deflate,sdch',
'HTTP_ACCEPT_LANGUAGE': 'uk,en-US;q=0.8,en;q=0.6',
'HTTP_CACHE_CONTROL': 'max-age=0',
'HTTP_CONNECTION': 'keep-alive',
# 'HTTP_HOST': 'localhost:8080',
'HTTP_USER_AGENT': 'Mozilla/5.0 (X11; Linux i686)',
# 'PATH_INFO': '/',
# 'QUERY_STRING': '',
'REMOTE_ADDR': '127.0.0.1',
'REQUEST_METHOD': 'GET',
'REQUEST_URI': '/',
'SCRIPT_NAME': '',
'SERVER_NAME': 'localhost',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.errors': None,
'wsgi.file_wrapper': None,
# 'wsgi.input': BytesIO(ntob('', 'utf-8')),
'wsgi.multiprocess': False,
'wsgi.multithread': False,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0),
}
adapter = url_map.bind_to_environ(env)
return adapter
def get_rule(url):
from werkzeug.test import EnvironBuilder
builder = EnvironBuilder(url)
env = builder.get_environ()
url_adapter = url_map.bind_to_environ(env)
result = {}
try:
rule, values = url_adapter.match(return_rule=True)
result['rule'] = rule.rule
result['endpoint'] = rule.endpoint
result['doc'] = ''
mod, handler_cls, func = application.get_handler(rule.endpoint)
if func.__doc__:
result['doc'] = func.__doc__.strip()
except NotFound:
pass
return result
def _sub(matcher):
return '{%s}' % matcher.group().strip('<>').strip().split(':')[-1]
def url_for(endpoint, **values):
urljoin = import_('urllib.parse', 'urljoin')
point = rules.get_endpoint(endpoint)
#if the endpoint is string format, then find and replace
#the module prefix with app alias which matched
for k, v in __app_alias__.items():
if point.startswith(k):
point = v + point[len(k):]
break
if point in rules.__url_names__:
point = rules.__url_names__[point]
_domain_name = values.pop('_domain_name', 'default')
_external = values.pop('_external', False)
domain = application.domains.get(_domain_name, {})
if not _external:
_external = domain.get('display', False)
adapter = get_url_adapter(_domain_name)
#process format
#it'll replace <argu> to {argu} so that you can use format
#to create url
_format = values.pop('_format', None)
if _format:
#then replace argument with {name} format
_rules = url_map._rules_by_endpoint.get(point)
if _rules:
rule = _rules[0]
url = re.sub(r'<.*?>', _sub, rule.rule)
if _external:
url = urljoin(domain.get('domain', ''), url)
return url
else:
raise ValueError("Can't found rule of endpoint %s" % point)
return adapter.build(point, values, force_external=_external)
def get_app_dir(app):
"""
Get an app's directory
"""
path = __app_dirs__.get(app)
if path is not None:
return path
else:
p = app.split('.')
try:
path = pkg.resource_filename(p[0], '')
except ImportError as e:
log.error("Can't import app %s" % app)
log.exception(e)
path = ''
if len(p) > 1:
path = os.path.join(path, *p[1:])
__app_dirs__[app] = path
return path
def get_app_depends(app, existed_apps=None, installed_apps=None):
installed_apps = installed_apps or []
if existed_apps is None:
s = set()
else:
s = existed_apps
    if app in s:
        # returning ends the generator; raising StopIteration inside a generator is
        # an error under PEP 479 (Python 3.7+)
        return
if isinstance(app, (tuple, list)):
app, name = app
__app_alias__[name+'.'] = app + '.'
configfile = os.path.join(get_app_dir(app), 'config.ini')
if os.path.exists(configfile):
x = pyini.Ini(configfile)
apps = x.get_var('DEPENDS/REQUIRED_APPS', [])
for i in apps:
if i not in s and i not in installed_apps:
for j in get_app_depends(i, s, installed_apps):
yield j
s.add(app)
yield app
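# Hedged usage sketch (assumption, not from this module): if an app's config.ini contains
#     [DEPENDS]
#     REQUIRED_APPS = ['uliweb.contrib.orm', 'uliweb.contrib.session']
# then iterating get_app_depends('myapp') yields each required app first (recursively,
# skipping apps already seen or already installed) and finally 'myapp' itself.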
def set_var(key, value):
"""
Default set_var function
"""
from uliweb import settings
settings.set_var(key, value)
def get_var(key, default=None):
"""
Default get_var function
"""
from uliweb import settings
return settings.get_var(key, default)
def get_local_cache(key, creator=None):
global local
if not hasattr(local, 'local_cache'):
local.local_cache = {}
value = local.local_cache.get(key)
if value:
return value
if callable(creator):
value = creator()
else:
value = creator
if value:
local.local_cache[key] = value
return value
def get_apps(apps_dir, include_apps=None, settings_file='settings.ini', local_settings_file='local_settings.ini'):
include_apps = include_apps or []
inifile = norm_path(os.path.join(apps_dir, settings_file))
apps = []
visited = | |
<gh_stars>1-10
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# these are system modules
import sys
# these are my local ones
from env import gidgetConfigVars
import chrArms
import refData
import tsvIO
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# this function looks for the gene or genes that overlap the segment defined
# by the input 'curRowLabel' coordinates
def overlap(curRowLabel, GAF_geneCoordDict_bySymbol):
tokenList = curRowLabel.split(':')
geneList = []
if (tokenList[3] != ''):
# first try to parse out genomic coordinates from
# the current row label ... it is possible that
# the row label does not have coordinates
try:
chrName = tokenList[3]
chrStart = int(tokenList[4])
chrStop = int(tokenList[5])
# print chrName, chrStart, chrStop
except:
return ([])
# if we get here, then we have coordinates so we now
# loop over the genes in our GAF_geneCoordDict_bySymbol and look for
# any that overlap ...
for aGene in GAF_geneCoordDict_bySymbol:
# if this gene is not even on the same chromosome we're done ...
if (not GAF_geneCoordDict_bySymbol[aGene].startswith(chrName + ':')):
continue
# but if it is, then we need to check start/stop
posInfo = refData.parseCoordinates(GAF_geneCoordDict_bySymbol[aGene])
if (chrStop < posInfo[1]):
continue
if (chrStart > posInfo[2]):
continue
# print posInfo
# there seem to be some "bad" gene names ???
if (aGene == '?'):
continue
if (len(aGene) == 1):
print " how is this happening ??? "
print curRowLabel, aGene, GAF_geneCoordDict_bySymbol[aGene]
sys.exit(-1)
geneList += [aGene]
if (len(geneList) > 0):
if (0):
if (len(geneList) > 1):
print " got multiple genes ... ", geneList, tokenList
return (geneList)
else:
# print " in overlap ... nada? ", curRowLabel
return ([])
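# Hedged usage sketch (assumption, not from this script): GAF_geneCoordDict_bySymbol maps
# gene symbols to "chr:start-stop:strand" strings, so a call might look like
#     GAF_geneCoordDict_bySymbol = {'TP53': 'chr17:7565097-7590856:-'}
#     overlap('N:CNVR:TP53:chr17:7570000:7580000:', GAF_geneCoordDict_bySymbol)
# which returns ['TP53'] because the segment overlaps the gene's coordinates.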
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def stripLastNameFromDir(d1Name):
ii = len(d1Name) - 1
while (d1Name[ii] != '/'):
ii -= 1
tumorType = d1Name[ii + 1:]
return (tumorType)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def read_pairs_from_file(SLpairsFile):
genePairs = []
fh = file(SLpairsFile)
for aLine in fh:
aLine = aLine.strip()
tokenList = aLine.split()
if (len(tokenList) != 2):
continue
if (tokenList[0] == tokenList[1]):
continue
if (tokenList[0] < tokenList[1]):
curPair = (tokenList[0], tokenList[1])
else:
curPair = (tokenList[1], tokenList[0])
if (curPair not in genePairs):
genePairs += [curPair]
return (genePairs)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def lookAtDict(pairAssocDict):
print " "
print " "
print " in lookAtDict ... ", len(pairAssocDict)
keyList = pairAssocDict.keys()
keyList.sort()
# print keyList[:5]
# print keyList[-5:]
maxCount = 0
maxKeys = []
for aKey in keyList:
# print aKey, len(pairAssocDict[aKey])
# print pairAssocDict[aKey]
typeCounts = {}
for aTuple in pairAssocDict[aKey]:
aType = (aTuple[0], aTuple[1])
if (aType not in typeCounts):
typeCounts[aType] = 0
typeCounts[aType] += 1
curCount = len(pairAssocDict[aKey])
if (0):
if (curCount > 3):
print curCount, aKey, typeCounts
# write out the number of types of associations, then pair, and then
# the typeCounts
print len(typeCounts), aKey, typeCounts
if (curCount > maxCount):
maxCount = curCount
maxKeys = [aKey]
elif (curCount == maxCount):
maxKeys += [aKey]
if (0):
# so, at first I was looking at the number of associations, but that
# really depends mostly on the # of different features and in particular
# there could be a lot of GNAB or CNVR or METH features for one gene
# and only one of each for another ...
print " "
print " keys with the most associations: ", maxCount
for aKey in maxKeys:
typeCounts = {}
for aTuple in pairAssocDict[aKey]:
aType = (aTuple[0], aTuple[1])
if (aType not in typeCounts):
typeCounts[aType] = 0
typeCounts[aType] += 1
curCount = len(pairAssocDict[aKey])
print curCount, aKey, typeCounts
print " "
print " keys with at least %d associations: " % (maxCount / 2)
for aKey in keyList:
typeCounts = {}
for aTuple in pairAssocDict[aKey]:
aType = (aTuple[0], aTuple[1])
if (aType not in typeCounts):
typeCounts[aType] = 0
typeCounts[aType] += 1
curCount = len(pairAssocDict[aKey])
if (curCount >= (maxCount / 2)):
print curCount, aKey, typeCounts
print " "
print " "
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def hasSpecialChar(aName):
if (aName.find(":") >= 0):
return (1)
if (aName.find("?") >= 0):
return (1)
if (aName.find("'") >= 0):
return (1)
if (aName.find("|") >= 0):
return (1)
return (0)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# curLabel : N:RPPA:ACACA:::::ACC1-R-C
# curGene : ACACA
# posInfo : chr17:32516040-32841015:-
def annotateLabel(curLabel, curGene, posString):
print " in annotateLabel ... "
print curLabel
print curGene
print posString
i1 = posString.find(":")
i2 = posString.find("-", i1 + 1)
i3 = posString.find(":", i1 + 1)
chrName = posString[:i1].lower()
if (chrName.endswith('x')):
chrName = chrName[:-1] + "X"
elif (chrName.endswith('y')):
chrName = chrName[:-1] + "Y"
iStart = int(posString[i1 + 1:i2])
iStop = int(posString[i2 + 1:i3])
aStrand = posString[-1]
# print chrName, iStart, iStop, aStrand
if (0):
# before, we were assuming that the gene name did not change ...
i1 = curLabel.find(":", 7)
newLabel = curLabel[:i1] + ":" + chrName + ":" + \
str(iStart) + ":" + str(iStop) + \
":" + aStrand + curLabel[(i1 + 4):]
else:
# but now we are allowing for the incoming curGene to be a new symbol ...
# print curLabel
tokenList = curLabel.split(':')
newLabel = tokenList[0]
newLabel += ":" + tokenList[1]
newLabel += ":" + curGene
newLabel += ":" + chrName + ":" + \
str(iStart) + ":" + str(iStop) + ":" + aStrand
if (len(tokenList) > 7):
newLabel += ":" + tokenList[7]
# print newLabel
if (len(tokenList) > 8):
print " ERROR ??? too many tokens ??? "
print curLabel
print len(tokenList), tokenList
print newLabel
sys.exit(-1)
print " --> newLabel : ", newLabel
return (newLabel)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def getCytobandLabel(curLabel, cytoDict):
tokenList = curLabel.split(':')
chrName = tokenList[3].lower()
if (not chrName.startswith("chr")):
print " chrName does not start with chr ??? ", chrName
sys.exit(-1)
return ("")
try:
iStart = int(tokenList[4])
if (tokenList[5] != ''):
iStop = int(tokenList[5])
else:
iStop = iStart
# print chrName, iStart, iStop
# print cytoDict[chrName]
tList = []
oList = []
for aTuple in cytoDict[chrName]:
if (aTuple[1] > iStop):
continue
if (aTuple[2] < iStart):
continue
tList += [aTuple[0]]
oLap = min(aTuple[2], iStop) - max(aTuple[1], iStart) + 1
oList += [oLap]
except:
print " failed to find cytoband ??? ", curLabel
print cytoDict.keys()
print cytoDict[chrName]
sys.exit(-1)
return ("")
if (0):
print " "
print " curLabel : ", curLabel
print " tList : ", tList
print " oList : ", oList
if (len(tList) == 0):
print " why didn't we find any cytobands ??? "
print cytoDict.keys()
print cytoDict[chrName]
return ("")
# print len(tList), tList, oList
if (len(tList) == 1):
cbName = tList[0]
if (chrName.startswith("chr")):
# tack on the chromosome # before returning ...
if ( chrName[3].lower() == "x" ):
cbName = "X" + cbName
elif ( chrName[3].lower() == "y" ):
cbName = "Y" + cbName
else:
cbName = chrName[3:] + cbName
return (cbName)
else:
print " ERROR ??? ", chrName, cbName
sys.exit(-1)
# if we have more than one label, then we need to find the
# shortest common substring ...
cbName = ""
minLen = len(tList[0])
for aName in tList:
if (minLen > len(aName)):
minLen = len(aName)
done = 0
ii = 0
while not done:
sameFlag = 1
for aName in tList:
if (aName[ii] != tList[0][ii]):
sameFlag = 0
done = 1
if (sameFlag):
cbName += aName[ii]
ii += 1
if (ii >= minLen):
done = 1
if (len(cbName) > 1):
if (cbName[-1] == '.'):
cbName = cbName[:-1]
## elif (cbName.find('.') > 0):
## print " CHECK THIS : ", cbName, tList
## elif (cbName.find('.') < 0):
## print " AND THIS TOO : ", cbName, tList, len(cbName)
# print " --> cbName : ", cbName
if (len(cbName) < 3):
# or if that didn't work well, then choose the one
# cytoband with the largest overlap ...
# FIXME: OR, we could just go down to 'p' or 'q' ???
# OR, if there is a list of several, try to remove one and
# then look again for the common substring?
maxOlap = 0
for ii in range(len(tList)):
if (maxOlap < oList[ii]):
maxOlap = oList[ii]
cbName = tList[ii]
## print " SWITCHING TO: ", cbName, maxOlap
if (chrName.startswith("chr")):
# tack on the chromosome # before returning ...
if ( chrName[3].lower() == "x" ):
cbName = "X" + cbName
elif ( chrName[3].lower() == "y" ):
cbName = "Y" + cbName
else:
| |
#Main
#<NAME>
import random  # needed for random.choice below; assumed missing from this excerpt
# the scores for each section are stored here
puntos_mate = 0
puntos_cien = 0
puntos_esp = 0
def main():
print('Bienvenido a tu examen prueba')
print('Se desplegarán ciertos menus a elegir, selecciona la seccion que quieras realizar:')
print('\nMENU')
print('\n1. MATEMATICAS')
print('2. ESPAÑOL ')
print('3. CIENCIAS')
print('0. SALIR')
print("\n SELECCION:")
    # initialize the per-section scores so they are always defined when returned
    puntos_mate = 0
    puntos_esp = 0
    puntos_cien = 0
    for veces in range (3):
        puntaje_total = 0
        op = int(input(" \n Ingresa tu menu a elegir: "))
        # score variables
        # Mathematics questions
if (op == 1):
for veces in range (5):
pazar = random.choice(banco_mate)
puntaje_total = mostrar_pregunta_mate(puntaje_total, pazar[0], pazar[1], pazar[2], pazar[3])
banco_mate.remove(pazar)
puntos_mate = puntaje_total
if puntaje_total == 100:
print("Haz dominado matemáticas")
elif puntaje_total == 60 or puntaje_total == 80:
print("Te hace falta un poco de estudio")
else:
print("Te hace falta mucha practica en matemáticas")
        # Spanish section
elif(op == 2):
for filas in range (4):
if 0 <= filas <=3:
archivo_G = open('Lectura_gripe.txt', 'r')
for linea in archivo_G:
print(f' {linea}', end = '')
archivo_G.close()
elif 4 <= filas <=7 :
archivo_P = open('Lectura_pies_secos.txt', 'r')
for linea in archivo_P:
print(f' {linea}', end = '')
archivo_P.close()
else:
archivo_D = open('Lectura_dientes.txt', 'r')
for linea in archivo_D:
print(f' {linea}', end = '')
archivo_D.close()
puntaje_total = mostrar_pregunta_espanol(puntaje_total, p_español[filas][0], p_español[filas][1], p_español[filas][2], p_español[filas][3], p_español[filas][4], p_español[filas][5], p_español[filas][6])
puntos_esp = puntaje_total
if puntaje_total == 100:
print("Haz dominado español")
elif puntaje_total == 75:
print("Te hace falta un poco de estudio")
else:
print("Te hace falta mucha practica en español")
        # Science section
elif (op == 3):
for veces in range (5):
pazar = random.choice(banco_ciencias)
puntaje_total = mostrar_pregunta_ciencias(puntaje_total, pazar[0], pazar[1], pazar[2], pazar[3])
banco_ciencias.remove(pazar)
puntos_cien = puntaje_total
if puntaje_total == 100:
print("Haz dominado ciencias")
elif puntaje_total == 60 or puntaje_total == 80:
print("Te hace falta un poco de estudio")
else:
print("Te hace falta mucha practica en ciencias")
elif (op == 0):
print("\nSALIDA")
print("¡Hasta luego!")
quit()
else:
print("Opción inválida")
return puntos_mate, puntos_esp, puntos_cien
# Spanish questions
#<NAME>
def mostrar_pregunta_espanol(ptotal, preg, opc1, opc2, opc3, opc4, correcta, puntaje):
print (preg)
print (opc1, opc2, opc3, opc4)
resp_usuario = input ('Teclea tu respuesta: ')
if resp_usuario== correcta:
ptotal = ptotal + puntaje
return ptotal
p_español = [['\n ¿Cuál de las siguientes afirmaciones describe una característica del programa de inmunización de ACOL contra la gripe?\n',
' a) Se darán clases de ejercicio físico durante el invierno.\n' ,
'b) La vacunación se llevará a cabo durante las horas de trabajo.\n' ,
'c) Se ofrecerá un pequeño bono a los participantes.\n',
'd) Un médico pondrá las inyecciones \n',
'b',
25], ['\n Esta hoja informativa sugiere que si uno quiere protegerse del virus de la gripe, la inyección de una vacuna de la gripe es...\n',
'a) Más eficaz que el ejercicio y una dieta saludable, pero más arriesgada.\n' ,
'b) Una buena idea, pero no un sustituto del ejercicio y la dieta saludable.\n' ,
'c) Tan eficaz como el ejercicio y una dieta saludable y menos problemática.\n',
'd) No es necesaria si se hace ejercicio y se sigue una dieta sana.\n',
'b',
25], ['\n Según la hoja informativa, ¿cuál de estos empleados de la empresa debería contactar con Raquel?\n',
'a) Ramón, del almacén, que no quiere vacunarse porque prefiere confiar en su sistema inmunológico natural.\n' ,
'b) Julia, de ventas, que quiere saber si el programa de vacunación es obligatorio.\n' ,
'c) Alicia, de recepción, que querría vacunarse este invierno pero dará a luz dentro de dos meses.\n',
'd) Miguel, de contabilidad, al que le gustaría vacunarse pero tiene que salir de viaje la semana del 17 de mayo. \n',
'd',
25], ['\n Podemos hablar sobre el contenido de un escrito (lo que dice). Podemos hablar sobre su estilo (el modo en el que se presenta). Raquel quería que esta hoja informativa tuviera un estilo cordial y que animase a vacunarse. ¿Crees que lo consiguió?\n',
'a) No, no funciona. \n' ,
'b) Sí, el estilo es relajado e informal Utiliza sus propios términos (“relajado”, “informal”) para valorar uno de los aspectos mencionados en el enunciado de la pregunta.\n' ,
'c) No, porque una parte de la información no es correcta. Alicia, de recepción, que querría vacunarse este invierno pero dará a luz dentro de dos meses.\n',
'd) Sí, las ilustraciones animan a la vacunación y el estilo de la nota también es aceptable.\n',
'b',
25],['\n ¿Qué intenta demostrar el autor en este texto? \n',
'a) Que la calidad de muchas zapatillas deportivas ha mejorado mucho.\n' ,
'b) Que es mejor no jugar al fútbol si eres menor de 12 años.\n' ,
'c) Que los jóvenes sufren cada vez más lesiones debido a su baja forma física.\n',
'd) Que es muy importante para los deportistas jóvenes calzar unas buenas zapatillas deportivas.\n',
'd',
25],['\n Según el artículo, ¿por qué no deberían ser demasiado rígidas las zapatillas deportivas? \n',
'a) Impiden que puedas correr fácilmente.\n' ,
'b) Para evitar lesiones.\n' ,
'c) No pueden sujetar el pie.\n',
'd) Porque necesita apoyar el pie y el tobillo.\n',
'a',
25],['\n Fíjate en esta frase que está casi al final del artículo. Aquí se presenta en dos partes: “Para evitar molestias menores, pero dolorosas, como ampollas, grietas o “pie de atleta” (infección por hongos)” (primera parte). “el calzado debe permitir la evaporación del sudor y evitar que penetre la humedad exterior” (segunda parte). ¿Cuál es la relación entre la primera y la segunda parte de la frase? La segunda parte…\n',
'a) Contradice la primera parte.\n' ,
'b) Repite la primera parte.\n' ,
'c) Describe el problema planteado en la primera parte.\n',
'd) Describe la solución al problema planteado en la primera parte. \n',
'd',
25], ['\n Una parte del artículo afirma: “Un buen calzado deportivo debe cumplir cuatro requisitos.” ¿Cuáles son esos requisitos?\n',
'a) Hacer frente a las desigualdades del terreno.\n' ,
'b) Mantener el pie caliente y seco.\n' ,
'c) Tienen que proteger tu pie de los golpes.\n',
'd) Sujetar el pie.\n',
'c',
25],['\n ¿De qué trata el artículo? \n',
'a) De la mejor manera de cepillarse los dientes.\n' ,
'b) Del mejor tipo de cepillo de dientes a utilizar.\n' ,
'c) De la importancia de una buena dentadura.\n',
'd) De la manera en que las distintas personas se cepillan los dientes.\n',
'a',
25], ['\n ¿Qué recomiendan los investigadores británicos? \n',
'a) Cepillarse los dientes tanto como sea posible. \n' ,
'b) No intentar cepillarse la lengua. \n' ,
'c) No cepillarse los dientes demasiado fuerte.\n',
'd) De la manera en que las distintas personas se cepillan los dientes.\n',
'c',
25],['\n Según Bente Hansen, ¿por qué debes cepillarte la lengua? \n',
'a) Para que no te olvides. \n' ,
'b) Para eliminar las bacterias y por tanto evitar que tengas mal aliento.\n' ,
'c) Para quitar los restos de comida.\n',
'd) Para eliminar la placa dental.\n',
'b',
25], ['\n ¿Por qué se menciona un bolígrafo en el texto? \n',
'a) Para ayudarte a comprender cómo se sujeta un cepillo de dientes.\n' ,
'b) Porque comienzas por una esquina tanto con el bolígrafo como con el cepillo de dientes.\n' ,
'c) Para mostrarte que puedes cepillarte los dientes de muchas formas diferentes.\n',
'd) Porque debes tomarte el cepillo de dientes tan en serio como la escritura.\n',
'b',
25],['\n ¿Cuál es el nivel actual de profundidad del lago Chad?\n',
'a) Alrededor de los dos metros. \n' ,
'b) Alrededor de los quince metros.\n' ,
'c) Alrededor de los cincuenta metros\n',
'd) Ha desaparecido por completo.\n',
'a',
25],['\n ¿Cuál es la fecha de comienzo del gráfico en la figura 1? \n',
'a) 10.000 a.C. \n' ,
'b) 20.000 a.C. \n' ,
'c) 11.000 a.C.\n',
'd) 8.000 a.C. \n',
'c',
25],['\n La desaparición en el arte rupestre sahariano del rinoceronte, el hipopótamo y el uro ocurrió... \n',
'a) A principios de la última era glacial.\n' ,
'b) A mediados del período en el que el | |
= info[re.search("Lower Right\s*\(\s*\-*\d+\.*\d*,\s*",info).end(0) : re.search("Lower Right\s*\(\s*\-*\d+\.*\d*,\s*\-*\d+\.*\d*",info).end(0)];
ll_1_x = info[re.search("Lower Left\s*\(\s*",info).end(0) : re.search("Lower Left\s*\(\s*\-*\d+\.*\d*",info).end(0)];
ll_1_y = info[re.search("Lower Left\s*\(\s*\-*\d+\.*\d*,\s*",info).end(0) : re.search("Lower Left\s*\(\s*\-*\d+\.*\d*,\s*\-*\d+\.*\d*",info).end(0)];
cmd = "\ngdalinfo " + later_image_path + "\n";
pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
info = pipe.read();
pipe.close();
info = info.decode("utf-8")
ul_2_x = info[re.search("Upper Left\s*\(\s*",info).end(0) : re.search("Upper Left\s*\(\s*\-*\d+\.*\d*",info).end(0)];
ul_2_y = info[re.search("Upper Left\s*\(\s*\-*\d+\.*\d*,\s*",info).end(0) : re.search("Upper Left\s*\(\s*\-*\d+\.*\d*,\s*\-*\d+\.*\d*",info).end(0)];
ur_2_x = info[re.search("Upper Right\s*\(\s*",info).end(0) : re.search("Upper Right\s*\(\s*\-*\d+\.*\d*",info).end(0)];
ur_2_y = info[re.search("Upper Right\s*\(\s*\-*\d+\.*\d*,\s*",info).end(0) : re.search("Upper Right\s*\(\s*\-*\d+\.*\d*,\s*\-*\d+\.*\d*",info).end(0)];
lr_2_x = info[re.search("Lower Right\s*\(\s*",info).end(0) : re.search("Lower Right\s*\(\s*\-*\d+\.*\d*",info).end(0)];
lr_2_y = info[re.search("Lower Right\s*\(\s*\-*\d+\.*\d*,\s*",info).end(0) : re.search("Lower Right\s*\(\s*\-*\d+\.*\d*,\s*\-*\d+\.*\d*",info).end(0)];
ll_2_x = info[re.search("Lower Left\s*\(\s*",info).end(0) : re.search("Lower Left\s*\(\s*\-*\d+\.*\d*",info).end(0)];
ll_2_y = info[re.search("Lower Left\s*\(\s*\-*\d+\.*\d*,\s*",info).end(0) : re.search("Lower Left\s*\(\s*\-*\d+\.*\d*,\s*\-*\d+\.*\d*",info).end(0)];
ul_x = str(max([float(ul_1_x), float(ul_2_x)]));
ul_y = str(min([float(ul_1_y), float(ul_2_y)]));
lr_x = str(min([float(lr_1_x), float(lr_2_x)]));
lr_y = str(max([float(lr_1_y), float(lr_2_y)]));
early_cut_path = early_image_path[ : early_image_path.rfind(".")] + "_cut.img";
later_cut_path = later_image_path[ : later_image_path.rfind(".")] + "_cut.img";
if not os.path.exists(early_cut_path):
cmd = "\ngdal_translate -of ENVI -ot Float32 -projwin " + ul_x + " " + ul_y + " " + lr_x + " " + lr_y + " " + early_image_path + " " + early_cut_path + "\n";
subprocess.call(cmd, shell=True);
if not os.path.exists(later_cut_path):
cmd = "\ngdal_translate -of ENVI -ot Float32 -projwin " + ul_x + " " + ul_y + " " + lr_x + " " + lr_y + " " + later_image_path + " " + later_cut_path + "\n";
subprocess.call(cmd,shell=True);
if not os.path.exists(early_cut_path) or not os.path.exists(later_cut_path):
print("\n***** ERROR: \"gdal_translate\" to cut images to common region unsuccessful skipping \"" + pair_label + "...\n");
ampcor_label = "r" + REF_X + "x" + REF_Y + "_s" + SEARCH_X + "x" + SEARCH_Y;
if not os.path.exists(pair_path + "/ampcor_" + ampcor_label + "_1.in"):
print("\nRunning \"splitAmpcor.py\" to create \"ampcor\" input files...\n");
splitAmpcor(early_cut_path, later_cut_path, pair_path, PROCESSORS, RESOLUTION, REF_X, REF_Y, SEARCH_X, SEARCH_Y, STEP);
else:
print("***** \"" + pair_path + "/ampcor_" + ampcor_label + "_1.in\" already exists, assuming \"ampcor\" input files already made...\n");
if not os.path.exists(pair_path + "/ampcor_" + ampcor_label + "_1.off"):
amp_file = open(pair_path+"/run_amp.cmd", 'w')
amps_complete = open(pair_path+"/amps_complete.txt", 'w')
amps_complete.close()
for i in range(1, int(PROCESSORS) + 1):
amp_file.write("(ampcor " + pair_path + "/ampcor_" + ampcor_label + "_" + str(i) + ".in rdf > " + pair_path + "/ampcor_" + ampcor_label + "_" + str(i) + ".out; echo " + str(i) + " >> amps_complete.txt) &\n")
amp_file.close()
# Options for processing with gnu parallel or as backgrounded processes
if GNU_PARALLEL == "True":
print("\n\"ampcor\" running as " + PROCESSORS + " separate processes, this step may take several hours to complete...\n");
        # For whatever reason, ampcor will not run unless it is executed from within the pair_path.
        # This is a sloppy fix that just hops in and out of the pair_path to run ampcor
cmd = "cd "+pair_path+"\n";
if NODE_LIST == "None":
cmd += '''\nawk '{$NF=""; print $0}' run_amp.cmd | parallel --workdir $PWD\n''';
else:
print("Using node file "+NODE_LIST)
cmd += '''\nawk '{$NF=""; print $0}' run_amp.cmd | parallel --sshloginfile ''' + NODE_LIST + ''' --workdir $PWD\n''';
cmd += "cd ../\n";
subprocess.call(cmd, shell=True);
    # If not using GNU parallel, this branch will exit the script gracefully and give
    # instructions for the next step
elif GNU_PARALLEL == "False":
cmd = "cd " + pair_path + "\n";
cmd += "bash run_amp.cmd\n";
cmd += "cd ../\n"
subprocess.call(cmd, shell=True);
print("\n\"ampcor\" running as " + PROCESSORS + " separate processes, this step may take several hours to complete...\n");
print("After all ampcor processes have completed, please rerun the landsatPX.py script.\n")
return
pair_done = True;
with open(pair_path+"/amps_complete.txt") as ac:
amps_comp_num = ac.readlines()
if len(amps_comp_num) < int(PROCESSORS):
print("\n***** It looks like not all ampcor processes have completed.")
print(" Skipping....")
return
#for i in range(1, int(PROCESSORS) + 1):
# infile = open(pair_path + "/ampcor_" + ampcor_label + "_" + str(i) + ".in", "r");
# for line in infile:
# if line.find("Start, End and Skip Lines in Reference Image") > -1:
# end_line = line.split("=")[1].split()[1];
# infile.close();
# last_line_processed = tail(pair_path + "/ampcor_" + ampcor_label + "_" + str(i) + ".off", 1).split()[2];
# Checks if the .off files have completed. This does not work correctly now, so I will write a different section
# soon.
#if (int(end_line) - int(last_line_processed)) > (int(REF_Y) + int(SEARCH_Y) + 2000):
# print("\n***** ERROR, last line processed in \"" + pair_path + "/ampcor_" + ampcor_label + "_" + str(i) + ".off\" is " \
# + last_line_processed + ", last line to be processes is " + end_line + ", the difference is greater than the \
# search window size in lines plus the reference window size in lines (" \
# + str(int(REF_Y) + int(SEARCH_Y)) + "), pair might still be processing, skipping...\n");
# pair_done = False;
#if not pair_done:
# return;
print("\n***** Offset files in \"" + pair_path + "\" appear to have finished processing, composing results...\n");
if os.path.exists(pair_path + "/ampcor_" + ampcor_label + ".off"):
print("\n***** \"" + pair_path + "/ampcor_" + ampcor_label + ".off\" already exists, assuming it contains all offsets for this run...\n");
else:
cat_cmd = "\ncat ";
for i in range(1, int(PROCESSORS) + 1):
cmd = "\nsed -i '/\*/d' " + pair_path + "/ampcor_" + ampcor_label + "_" + str(i) + ".off\n";
cat_cmd += pair_path + "/ampcor_" + ampcor_label + "_" + str(i) + ".off ";
subprocess.call(cmd, shell=True);
cat_cmd += "> " + pair_path + "/ampcor_" + ampcor_label + ".off\n";
subprocess.call(cat_cmd, shell=True);
ref_samples = "";
ref_lines = "";
infile = open(early_cut_path.replace(".img",".hdr"), "r");
for line in infile:
if line.lower().find("samples") > -1:
ref_samples = line.split("=")[1].strip();
if line.lower().find("lines") > -1:
ref_lines = line.split("=")[1].strip();
infile.close();
east_name = pair_label + "_" + ampcor_label + "_eastxyz";
north_name = pair_label + "_" + ampcor_label + "_northxyz";
mag_name = pair_label + "_" + ampcor_label + "_mag";
east_xyz_path = pair_path + "/" + east_name + ".txt";
north_xyz_path = pair_path + "/" + north_name + ".txt";
if not os.path.exists(east_xyz_path):
print("\n***** \"getxyzs.py\" running to create E-W and N-S ASCII files with offsets (in m) in the third column and SNR in the 4th column\n \
***** NOTE: E-W MAY BE FLIPPED DEPENDING ON HEMISPHERE, PLEASE CHECK MANUALLY...\n");
getxyzs(pair_path, ampcor_label, STEP, STEP, "1", RESOLUTION, ref_samples, ref_lines, ul_x, ul_y, pair_label);
else:
print("\n***** \"" + east_xyz_path + "\" already exists, assuming E-W and N-S ASCII offsets (in m) files created properly for this run...\n");
east_grd_path = pair_path + "/" + east_name + ".grd";
north_grd_path = pair_path + "/" + north_name + ".grd";
mag_grd_path = pair_path + "/" + mag_name + ".grd";
snr_grd_path = pair_path + "/" + north_name.replace("north", "snr") + ".grd";
R = "-R" + ul_x + "/" + lr_y + "/" + lr_x + "/" + ul_y + "r";
if not os.path.exists(east_grd_path):
print("\n***** Creating \"" + east_grd_path + "\" and \"" + north_grd_path + "\" using \"xyz2grd\"...\n");
early_datetime = datetime.datetime(int(early_date[0:4]), int(early_date[4:6]), int(early_date[6:8]), \
int(early_date[8:10]), int(early_date[10:12]), int(early_date[12:14]));
later_datetime = datetime.datetime(int(later_date[0:4]), int(later_date[4:6]), int(later_date[6:8]), \
int(later_date[8:10]), int(later_date[10:12]), int(later_date[12:14]));
day_interval = str((later_datetime - early_datetime).total_seconds() / (60. * 60. * 24.));
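        # Quick worked example of the conversion below (illustrative numbers only):
        # if the two scenes are 32 days apart and ampcor measured a 6.4 m eastward offset,
        # the east velocity grid holds 6.4 / 32 = 0.2 m/day, and the magnitude grid is
        # hypot(north, east) of the two per-day components.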
cmd = "\nxyz2grd " + east_xyz_path + " " + R + " -G" + east_grd_path + " -I" + str(int(STEP) * int(RESOLUTION)) + "=\n";
cmd += "\nxyz2grd " + north_xyz_path + " " + R + " -G" + north_grd_path + " -I" + str(int(STEP) * int(RESOLUTION)) + "=\n";
cmd += "\ngawk '{print $1\" \"$2\" \"$4}' " + north_xyz_path + " | xyz2grd " + R + " \
-G" + snr_grd_path + " -I" + str(int(STEP) * int(RESOLUTION)) + "=\n";
cmd += "\ngrdmath " + east_grd_path + " " + day_interval + " DIV --IO_NC4_CHUNK_SIZE=c = " + east_grd_path + "\n";
cmd += "\ngrdmath " + north_grd_path + " " + day_interval + " DIV --IO_NC4_CHUNK_SIZE=c = " + north_grd_path + "\n";
cmd += "\ngrdmath " + north_grd_path + " " + east_grd_path + " HYPOT --IO_NC4_CHUNK_SIZE=c = " + mag_grd_path + "\n";
subprocess.call(cmd, shell=True);
else:
print("\n***** \"" + east_grd_path + "\" already exists, assuming m/day velocity grids already made for this run...\n");
| |
import random
import time
import numpy as np
import gym
from gym import error
from gym import spaces
from gym_bridge_auction.envs.solver_results import *
from gym_bridge_auction.envs.dynamic_space import Dynamic
from gym_bridge_auction.envs.render import Window
class AuctionEnv(gym.Env):
"""Środowisko wieloagentowe (czterech graczy) symulujące licytację brydżową.
Jest to przykład środowiska, gdzie poszczególni agenci nie dysponują pełnym zestawem informacji na temat stanu gry.
Mają dostęp tylko do historii licytacji oraz własnych kart, a ręce przeciwników nie są znane.
Agenci w ustalonej kolejności zegarowej (rozpoczyna rozdający) wykonują pojedyńcze akcje (licytują) wybierane z
dostępnej przestrzeni. Działania graczy są wartościowane za pomocą nagrody oceniającej skuteczność licytacji.
W każdym kroku zwracana jest różnica od przypadku idealnego. Definiując funkcję nagrody wspomagano się dostępnymi
narzędziami, czyli Double Dummy Solver. Cel każdego z epizodów to ustalenie kontraktu, który stanowi zobowiązanie
do wzięcia określonej liczby lew przez parę wygrywającą licytację.
Przestrzeń akcji:
Typ: Dynamic(38) - przestrzeń dziedzicząca po Discrete
| Liczba | Działanie |
| 0 | pass |
| 1 | 7NT |
| 2 | 7S |
| 3 | 7H |
| 4 | 7D |
| 5 | 7C |
| . | . |
| . | . |
| 31 | 1NT |
| 32 | 1S |
| 33 | 1H |
| 34 | 1D |
| 35 | 1C |
| 36 | double |
| 37 | redouble |
Typ Dynamic to specjalnie zdefiniowana klasa, dziedzicząca po Discrete, zapewniająca zmieniającą się przestrzeń
akcji w kolejnych krokach licytacji. Zawiera ona w danym momencie tylko takie odzywki lub zapowiedzi
(kontra, rekontra, pas), które może zgłosić gracz podczas licytacji, według zasad brydża. W każdym etapie na
przestrzeń akcji składają się: odzywki wyższe w hierarchi od ostatniej zgłoszonej, zapowiedź pas oraz kontra
(dostępna dla przeciwników pary, która zgłosiła ostatnią odzywkę) i rekontra (dostępna dla pary z najwyższą
obecnie zgłoszoną odzywką po kontrze przeciwników). Na początku licytacji dostępne są wszystkie odzywki
i zapowiedź pas.
Przestrzeń obserwacji:
Typ: Dict - zawierający stany: 'whose turn', 'whose next turn', 'LAST_contract', 'Player_contract',
'winning_pair', 'double/redouble', 'Players hands'.
Stan 'whose turn' - oznacza który z graczy licytował w danym kroku:
Typ: Discrete(4)
| Liczba | Nazwa gracza |
| 0 | N |
| 1 | E |
| 2 | S |
| 3 | W |
Stan 'whose next turn' - oznacza gracza, który ma licytować następny w kolejności:
Typ: Discrete(4)
Oznaczenia liczb zgodne ze stanem 'whose turn'.
Stan 'LAST_contract' - oznacza najwyższy zgłoszony kontrakt po każdym z kroków, a po zakończeniu kontrakt
ostateczny:
Typ: Discrete(36)
Oznaczenia liczb są zgodne z tymi przyjętymi w przestrzeni akcji (oprócz wartości 36 i 37, które w tym
przypadku nie występują).
Stan 'Player_contract' - oznacza odzywkę/zapowiedź gracza licytującego w danym kroku:
Typ: Discrete(38)
Oznaczenia liczb są zgodne z tymi przyjętymi w przestrzeni akcji.
Stan 'winning_pair' - oznacza, która z par graczy ma w danym kroku najwyższy zgłoszony kontrakt
(aktualnie wygrywa licytację):
Typ: Discrete(2)
| Liczba | Nazwa pary |
| 0 | N/S |
| 1 | E/W |
Stan 'double/redouble' - oznacza czy wystąpiła kontra, rekontra lub żadne z nich:
Typ: Discrete(3)
| Liczba | Obserwacja |
| 0 | no double/redouble |
| 1 | double - 'X' |
| 2 | redouble - 'XX' |
Stan 'Players hands' - oznacza reprezentację rąk graczy w formie 0/1:
Typ: Tuple(MultiDiscrete, MultiDiscrete, MultiDiscrete, MultiDiscrete)
Kolejność rąk graczy jest następująca: N, E, S, W.
Reprezentacja ręki danego gracza jest w formie listy 52-elementowej. Każdy jej element to jedna z cyfr:
0 (nie posiada karty), 1 (posiada kartę).
Karty ustawione są od 2 do A kolejno kolorami trefl, karo, kier i na końcu pik:
[2♣, ..., A♣, 2♦, ..., A♦, 2♥, ..., A♥, 2♠, ..., A♠].
Nagroda:
W każdym kroku wyznaczona jest nagroda wartościująca działania agentów.
Postać: Lista 2-elementowa, gdzie elementy to liczby całkowite z zakresu od -8520 do 8520.
| Indeks | Nazwa pary |
| 0 | N/S |
| 1 | E/W |
Nagroda to różnica pomiędzy otrzymanym zapisem brydżowym pary wygrywającej licytację a optymalną dla niej
wartością punktową (wynik z Double Dummy Solver), gdy wszyscy gracze licytują idealnie. Zapis brydżowy jest
wyznaczony na podstawie zgłoszonego kontraktu i rezultatów z Double Dummy Solver dotyczących realizowalności
obowiązującego zobowiązania, co do ilości lew jakie może wziąć dana para przy ustalonym kolorze atutowym.
Nagroda dla pary, która przegrywa licytację jest wartością przeciwną nagrody pary wygrywającej.
Stan początkowy środowiska:
Po zresetowaniu środowiska do stanu początkowego ustalone zostają następujące stany przestrzeni obserwacji:
- 'Players hands' - reprezentacja rąk graczy w formie 0/1 jest dostępna tylko po użyciu funkcji reset(),
podczas kolejnych kroków epizodu przestaje być dostępna (należy ją od razu przypisać do innej zmiennej)
- ’double/redouble’ - gdzie wstawiono liczbę 0, oznaczającą, że nie wystąpiła jeszcze kontra lub rekontra
- 'whose next turn' - indeks rozdającego, który rozpoczyna licytację
Pozostałe stany przestrzeni obserwacji pozostają niedefiniowane (wartość None).
Przestrzeń akcji zostaje przywrócona do początkowej ilości elementów - wszystkie odzywki plus zapowiedź pas
(brak możliwości kontry lub rekontry).
Koniec epizodu (licytacji):
Licytacja, czyli jeden epizod kończy się w następujących przypadkach:
- wystąpienie kolejno trzech pasów po ustalonym kontrakcie,
- nie ustalono kontraktu - na początku licytacji wszyscy gracze spasowali,
- ostateczny kontrakt to 7NT, a po tym nastąpiła kontra i rekontra."""
metadata = {'render.modes': ['human', 'console'], 'video.frames_per_second': 0.5}
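    # Hedged usage sketch (assumption, not from this module): a minimal interaction loop
    # with this environment could look roughly like
    #     env = AuctionEnv()
    #     obs = env.reset()
    #     hands = obs['Players hands']        # only available right after reset()
    #     done = False
    #     while not done:
    #         action = env.action_space.sample()   # Dynamic space: only currently legal calls
    #         obs, reward, done, info = env.step(action)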
def __init__(self):
        self._win = None  # instance of the graphical interface
        self._n_players = 4  # number of players
        self._deck = Deck()  # create the deck
        self._dealer_name = ''  # name of the player who is the dealer
        self._players_order = []  # list of players arranged in bidding order
        self._index_order = None  # index of the currently bidding player (according to self._players_order)
        self._deck.shuffle()  # shuffle the deck
        hands = self._deck.deal(self._n_players)  # deal the cards to the players
        self._players = [Player(NAMES[i], hands[i]) for i in range(0, self._n_players)]  # create the list of players
        # split the players' hands by suit (each row holds the honours/numbers of one suit)
        for j in range(0, len(self._players)):
            self._players[j].split_hand()
            for i in range(0, 4):
                self._players[j].hand_splitted[i] = self._players[j].hand_to_display(self._players[j].hand_splitted[i])
        self._choose_dealer_and_order()  # choose the dealer and set the bidding order
        # create the available contracts (a list of Contract objects)
        self._available_contracts = create_available_contracts()
        self._optimum_contract_score = [None, None]  # optimal scores for the pairs according to the solver
        self._insert_solver_results()  # insert the solver results for the individual players
        self._viewer = None  # helper variable for rendering
        self._last_contract = None  # highest contract declared at the current point of the auction
        self._first_bind_pass = False  # whether the first call was a pass
        self._double = False  # whether a double has occurred
        self._redouble = False  # whether a redouble has occurred
        self._pass_number = 0  # counter of consecutive passes
        self._score = [0, 0]  # bridge score for the pairs at the current point of the auction
        # highest bid according to the solver for the player declaring the highest contract at the current point of the auction
        self._max_contract = None
        # maximum number of tricks according to the solver that the player declaring the highest contract can take
        # at the current point of the auction
        self._max_number_of_tricks = None
        self._reward = [None, None]  # reward for the pairs
        self.reward_range = (-8520, 8520)  # range of reward values
        # observation space
self.observation_space = spaces.Dict({'whose turn': spaces.Discrete(self._n_players),
'whose next turn': spaces.Discrete(self._n_players),
'LAST_contract': spaces.Discrete(36),
'Player_contract': spaces.Discrete(38),
'winning_pair': spaces.Discrete(self._n_players / 2),
'double/redouble': spaces.Discrete(3),
'Players hands': spaces.Tuple(
[spaces.MultiDiscrete([2 for _ in range(0, len(self._deck.deck))])
for _ in range(0, self._n_players)])})
        self.action_space = Dynamic(38)  # space of actions available to the agent
        self._info = {}  # additional information about the environment, not available to the agent
self.reset()
def step(self, action):
"""Przesuwa licytację o krok do gracza następnego w kolejności oraz wyznacza dostępną dla niego przestrzeń
akcji.
Parametr:
action (int) - działanie agenta (zgłoszona odzywka z dostępnej przestrzeni akcji), który zgodnie z ustaloną
kolejnością powinien licytować
Zwraca:
state, reward, done, info : tuple
state (dict) - stan środowiska po wykonaniu działania przez | |
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
__all__ = ["Matrix"]
import os
from typing import Dict, Optional, Sequence, Tuple, Union, TYPE_CHECKING, Iterable
import numpy as np
from py4j.java_gateway import JavaObject, JVMView
from systemds.operator import OperationNode, Scalar
from systemds.utils.consts import VALID_INPUT_TYPES
from systemds.utils.converters import numpy_to_matrix_block, matrix_block_to_numpy
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES, BINARY_OPERATIONS, VALID_ARITHMETIC_TYPES
class Matrix(OperationNode):
    _np_array: np.ndarray
def __init__(self, sds_context: 'SystemDSContext', operation: str,
unnamed_input_nodes: Union[str,
Iterable[VALID_INPUT_TYPES]] = None,
named_input_nodes: Dict[str, VALID_INPUT_TYPES] = None,
local_data: np.array = None) -> 'Matrix':
is_python_local_data = False
if local_data is not None:
self._np_array = local_data
is_python_local_data = True
else:
self._np_array = None
super().__init__(sds_context, operation, unnamed_input_nodes,
named_input_nodes, OutputType.MATRIX, is_python_local_data)
def pass_python_data_to_prepared_script(self, sds, var_name: str, prepared_script: JavaObject) -> None:
assert self.is_python_local_data, 'Can only pass data to prepared script if it is python local!'
if self._is_numpy():
prepared_script.setMatrix(var_name, numpy_to_matrix_block(
sds, self._np_array), True) # True for reuse
def code_line(self, var_name: str, unnamed_input_vars: Sequence[str],
named_input_vars: Dict[str, str]) -> str:
code_line = super().code_line(var_name, unnamed_input_vars, named_input_vars)
if self._is_numpy():
code_line = code_line.format(file_name=var_name)
return code_line
    def compute(self, verbose: bool = False, lineage: bool = False) -> np.ndarray:
if self._is_numpy():
if verbose:
print('[Numpy Array - No Compilation necessary]')
return self._np_array
else:
return super().compute(verbose, lineage)
def _parse_output_result_variables(self, result_variables):
return matrix_block_to_numpy(self.sds_context.java_gateway.jvm,
result_variables.getMatrixBlock(self._script.out_var_name[0]))
def _is_numpy(self) -> bool:
return self._np_array is not None
def __add__(self, other: VALID_ARITHMETIC_TYPES) -> 'Matrix':
return Matrix(self.sds_context, '+', [self, other])
# Left hand side
def __radd__(self, other: VALID_ARITHMETIC_TYPES) -> 'Matrix':
return Matrix(self.sds_context, '+', [other, self])
def __sub__(self, other: VALID_ARITHMETIC_TYPES) -> 'Matrix':
return Matrix(self.sds_context, '-', [self, other])
# Left hand side
def __rsub__(self, other: VALID_ARITHMETIC_TYPES) -> 'Matrix':
return Matrix(self.sds_context, '-', [other, self])
def __mul__(self, other: VALID_ARITHMETIC_TYPES) -> 'Matrix':
return Matrix(self.sds_context, '*', [self, other])
def __rmul__(self, other: VALID_ARITHMETIC_TYPES) -> 'Matrix':
return Matrix(self.sds_context, '*', [other, self])
def __truediv__(self, other: VALID_ARITHMETIC_TYPES) -> 'Matrix':
return Matrix(self.sds_context, '/', [self, other])
def __rtruediv__(self, other: VALID_ARITHMETIC_TYPES) -> 'Matrix':
return Matrix(self.sds_context, '/', [other, self])
def __floordiv__(self, other: VALID_ARITHMETIC_TYPES) -> 'Matrix':
return Matrix(self.sds_context, '//', [self, other])
def __rfloordiv__(self, other: VALID_ARITHMETIC_TYPES) -> 'Matrix':
return Matrix(self.sds_context, '//', [other, self])
def __lt__(self, other) -> 'Matrix':
return Matrix(self.sds_context, '<', [self, other])
def __rlt__(self, other) -> 'Matrix':
return Matrix(self.sds_context, '<', [other, self])
def __le__(self, other) -> 'Matrix':
return Matrix(self.sds_context, '<=', [self, other])
def __rle__(self, other) -> 'Matrix':
return Matrix(self.sds_context, '<=', [other, self])
def __gt__(self, other) -> 'Matrix':
return Matrix(self.sds_context, '>', [self, other])
def __rgt__(self, other) -> 'Matrix':
return Matrix(self.sds_context, '>', [other, self])
def __ge__(self, other) -> 'Matrix':
return Matrix(self.sds_context, '>=', [self, other])
def __rge__(self, other) -> 'Matrix':
return Matrix(self.sds_context, '>=', [other, self])
def __eq__(self, other) -> 'Matrix':
return Matrix(self.sds_context, '==', [self, other])
def __req__(self, other) -> 'Matrix':
return Matrix(self.sds_context, '==', [other, self])
def __ne__(self, other) -> 'Matrix':
return Matrix(self.sds_context, '!=', [self, other])
def __rne__(self, other) -> 'Matrix':
return Matrix(self.sds_context, '!=', [other, self])
def __matmul__(self, other: 'Matrix') -> 'Matrix':
return Matrix(self.sds_context, '%*%', [self, other])
def sum(self, axis: int = None) -> 'OperationNode':
"""Calculate sum of matrix.
:param axis: can be 0 or 1 to do either row or column sums
:return: `Matrix` representing operation
"""
if axis == 0:
return Matrix(self.sds_context, 'colSums', [self])
elif axis == 1:
return Matrix(self.sds_context, 'rowSums', [self])
elif axis is None:
return Scalar(self.sds_context, 'sum', [self])
raise ValueError(
f"Axis has to be either 0, 1 or None, for column, row or complete {self.operation}")
def mean(self, axis: int = None) -> 'OperationNode':
"""Calculate mean of matrix.
:param axis: can be 0 or 1 to do either row or column means
:return: `Matrix` representing operation
"""
if axis == 0:
return Matrix(self.sds_context, 'colMeans', [self])
elif axis == 1:
return Matrix(self.sds_context, 'rowMeans', [self])
elif axis is None:
return Scalar(self.sds_context, 'mean', [self])
raise ValueError(
f"Axis has to be either 0, 1 or None, for column, row or complete {self.operation}")
def var(self, axis: int = None) -> 'OperationNode':
"""Calculate variance of matrix.
:param axis: can be 0 or 1 to do either row or column vars
:return: `Matrix` representing operation
"""
if axis == 0:
return Matrix(self.sds_context, 'colVars', [self])
elif axis == 1:
return Matrix(self.sds_context, 'rowVars', [self])
elif axis is None:
return Scalar(self.sds_context, 'var', [self])
raise ValueError(
f"Axis has to be either 0, 1 or None, for column, row or complete {self.operation}")
def abs(self) -> 'Matrix':
"""Calculate absolute.
:return: `Matrix` representing operation
"""
return Matrix(self.sds_context, 'abs', [self])
def sin(self) -> 'Matrix':
"""Calculate sin.
:return: `Matrix` representing operation
"""
return Matrix(self.sds_context, 'sin', [self])
def cos(self) -> 'Matrix':
"""Calculate cos.
:return: `Matrix` representing operation
"""
return Matrix(self.sds_context, 'cos', [self])
def tan(self) -> 'Matrix':
"""Calculate tan.
:return: `Matrix` representing operation
"""
return Matrix(self.sds_context, 'tan', [self])
def asin(self) -> 'Matrix':
"""Calculate arcsin.
:return: `Matrix` representing operation
"""
return Matrix(self.sds_context, 'asin', [self])
def acos(self) -> 'Matrix':
"""Calculate arccos.
:return: `Matrix` representing operation
"""
return Matrix(self.sds_context, 'acos', [self])
def atan(self) -> 'Matrix':
"""Calculate arctan.
:return: `Matrix` representing operation
"""
return Matrix(self.sds_context, 'atan', [self])
def sinh(self) -> 'Matrix':
"""Calculate sin.
:return: `Matrix` representing operation
"""
return Matrix(self.sds_context, 'sinh', [self])
def cosh(self) -> 'Matrix':
"""Calculate cos.
:return: `Matrix` representing operation
"""
return Matrix(self.sds_context, 'cosh', [self])
def tanh(self) -> 'Matrix':
"""Calculate tan.
:return: `Matrix` representing operation
"""
return Matrix(self.sds_context, 'tanh', [self])
def moment(self, moment: int, weights: OperationNode = None) -> 'Matrix':
"""Calculate the central moment of the matrix, optionally weighted.
:param moment: the order of the moment to compute
:param weights: (optional) node holding weights to apply to each value
:return: node representing the operation
"""
unnamed_inputs = [self]
if weights is not None:
unnamed_inputs.append(weights)
unnamed_inputs.append(moment)
return Matrix(self.sds_context, 'moment', unnamed_inputs, output_type=OutputType.DOUBLE)
def cholesky(self, safe: bool = False) -> 'Matrix':
""" Computes the Cholesky decomposition of a symmetric, positive definite matrix
:param safe: default value is False, if flag is True additional checks to ensure
that the matrix is symmetric positive definite are applied, if False, checks will be skipped
:return: the OperationNode representing this operation
"""
return Matrix(self.sds_context, 'cholesky', [self])
def to_one_hot(self, num_classes: int) -> 'Matrix':
""" OneHot encode the matrix.
It is assumed that there is only one column to encode, and all values are whole numbers > 0
:param num_classes: The number of classes to encode into. max value contained in the matrix must be <= num_classes
:return: The OperationNode containing the oneHotEncoded values
"""
if num_classes < 2:
raise ValueError("Number of classes should be larger than 1")
named_input_nodes = {"X": self, "numClasses": num_classes}
return Matrix(self.sds_context, 'toOneHot', named_input_nodes=named_input_nodes)
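# Hedged illustration of to_one_hot (values are hypothetical): a single-column matrix of
# positive integer labels is expanded into an indicator matrix with num_classes columns, e.g.
#   labels [[1], [3], [2]] with num_classes=3 ->
#   [[1, 0, 0],
#    [0, 0, 1],
#    [0, 1, 0]]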
def rbind(self, other) -> 'Matrix':
"""
Row-wise matrix concatenation, appending the second matrix as additional rows to the first matrix.
:param other: The other matrix to append below (as additional rows)
:return: The OperationNode containing the concatenated matrices/frames.
"""
return Matrix(self.sds_context, "rbind", [self, other])
def cbind(self, other) -> 'Matrix':
"""
Column-wise matrix concatenation, appending the second matrix as additional columns to the first matrix.
:param other: The other matrix to append to the right-hand side (as additional columns).
:return: The OperationNode containing the concatenated matrices/frames.
"""
return Matrix(self.sds_context, "cbind", [self, other])
def t(self) -> 'Matrix':
""" Transposes the input
:return: the OperationNode representing this operation
"""
return Matrix(self.sds_context, 't', [self])
def order(self, by: int = 1, decreasing: bool = False,
index_return: bool = False) -> 'Matrix':
""" Sort by a column of the matrix X in increasing/decreasing order and returns either the index or data
:param by: sort matrix by this column number
:param decreasing: If true the matrix will be sorted in decreasing order
:param index_return: If true, the index numbers will be returned
:return: the OperationNode representing this operation
"""
named_input_nodes = {'target': self, 'by': by, 'decreasing': str(decreasing).upper(),
'index.return': str(index_return).upper()}
return Matrix(self.sds_context, 'order', [], named_input_nodes=named_input_nodes)
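# Hedged usage note for order(): sorting a hypothetical matrix M by its second column in
# decreasing order, returning the permutation indices instead of the sorted data, would be
#   M.order(by=2, decreasing=True, index_return=True)
# which maps onto the DML builtin order(target=M, by=2, decreasing=TRUE, index.return=TRUE).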
def
#!/usr/bin/env python
"""
Usage:
CCGVAE.py [options]
Options:
-h --help Show this screen
--dataset NAME Dataset name: ZINC or QM9
--config-file FILE Hyperparameter configuration file path (in JSON format)
--config CONFIG Hyperparameter configuration dictionary (in JSON format)
--data_dir NAME Data dir name
--restore FILE File to restore weights from.
--freeze-graph-model Freeze weights of graph model components
--restrict_data NAME [0,1] Load only a subset of the entire dataset
"""
import os
import sys
import time
import traceback
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'  # 0, 1, 2, 3; must be set before importing TensorFlow to take effect
import tensorflow as tf
from docopt import docopt
from rdkit import Chem
from rdkit.Chem import QED
from model.GGNN_core import ChemModel
from model.data_augmentation import *
"""
Comments provide the expected tensor shapes where helpful.
Key to symbols in comments:
---------------------------
[...]: a tensor
; ; : a list
b: batch size
e: number of edge types (3)
es: maximum number of BFS transitions in this batch
v: number of vertices per graph in this batch
h: GNN hidden size
"""
class CCGVAE(ChemModel):
def __init__(self, args):
super().__init__(args)
@classmethod
def default_params(cls):
params = dict(super().default_params())
params.update({
'suffix': None,
'log_dir': './results',
'task_sample_ratios': {},
'use_edge_bias': True, # whether use edge bias in gnn
'clamp_gradient_norm': 1.0,
'out_layer_dropout_keep_prob': 1.0,
'tie_fwd_bkwd': True,
'task_ids': [0], # id of property prediction
'random_seed': 0, # fixed for reproducibility
'batch_size': 13 if dataset == 'zinc' else 150,  # qm9: 128 -> 8431 MB, 150 -> 14403 MB
# zinc: 8 -> 8431 MB, 13 -> 14401 MB
"qed_trade_off_lambda": 10,  # original value: 10
'prior_learning_rate': 0.05,
'stop_criterion': 0.01,
'num_epochs': 1000 if dataset == 'zinc' else 1000,
'num_teacher_forcing': 1000 if dataset == 'zinc' else 1000,
'number_of_generation': 20000,
'optimization_step': 0,
'maximum_distance': 50,
"use_argmax_nodes": False, # use random sampling or argmax during node sampling
"use_argmax_bonds": False, # use random sampling or argmax during bonds generations
'use_mask': False, # true to use node mask
'residual_connection_on': True, # whether residual connection is on
'residual_connections': {
2: [0],
4: [0, 2],
6: [0, 2, 4],
8: [0, 2, 4, 6],
10: [0, 2, 4, 6, 8],
12: [0, 2, 4, 6, 8, 10],
14: [0, 2, 4, 6, 8, 10, 12],
16: [0, 2, 4, 6, 8, 10, 12, 14],
18: [0, 2, 4, 6, 8, 10, 12, 14, 16],
20: [0, 2, 4, 6, 8, 10, 12, 14, 16, 18],
},
'num_timesteps': 12, # gnn propagation step
'hidden_size_decoder': 200, # decoder hidden size dimension
'hidden_size_encoder': 100, # encoder hidden size dimension
"kl_trade_off_lambda": 0.05, # kl tradeoff originale 0.3
'learning_rate': 0.001,
'graph_state_dropout_keep_prob': 1,
"compensate_num": 1, # how many atoms to be added during generation
'train_file': 'data/molecules_train_%s.json' % dataset,
'valid_file': 'data/molecules_valid_%s.json' % dataset,
'test_file': 'data/molecules_test_%s.json' % dataset,
'try_different_starting': True,
"num_different_starting": 6,
'generation': 0, # 0 = do training, 1 = do only gen, 2 = do only rec
'reconstruction_en': 20, # number of encoding in reconstruction
'reconstruction_dn': 1, # number of decoding in reconstruction
'use_graph': True, # use gnn
'gin_epsilon': 0, # gin epsilon
"label_one_hot": False, # one hot label or not
"multi_bfs_path": False, # whether sample several BFS paths for each molecule
"bfs_path_count": 30,
"path_random_order": False, # False: canonical order, True: random order
"sample_transition": False, # whether use transition sampling
'edge_weight_dropout_keep_prob': 1,
'check_overlap_edge': False,
"truncate_distance": 10,
"use_gpu": True,
"use_rec_multi_threads": True,
})
return params
def prepare_specific_graph_model(self) -> None:
# params
num_symbols = self.params['num_symbols']
h_dim_en = self.params['hidden_size_encoder']
h_dim_de = self.params['hidden_size_decoder']
expanded_h_dim = h_dim_de + h_dim_en + 1 # 1 for focus bit
hist_dim = self.histograms['hist_dim']
self.placeholders['graph_state_keep_prob'] = tf.placeholder(tf.float32, None, name='graph_state_keep_prob')
self.placeholders['edge_weight_dropout_keep_prob'] = tf.placeholder(tf.float32, None,
name='edge_weight_dropout_keep_prob')
# mask out invalid node
self.placeholders['node_mask'] = tf.placeholder(tf.float32, [None, None], name='node_mask') # [b x v]
self.placeholders['num_vertices'] = tf.placeholder(tf.int32, (), name="num_vertices")
# adj for encoder
self.placeholders['adjacency_matrix'] = tf.placeholder(tf.float32, [None, self.num_edge_types, None, None],
name="adjacency_matrix") # [b, e, v, v]
# labels for node symbol prediction
self.placeholders['node_symbols'] = tf.placeholder(tf.float32, [None, None, self.params[
'num_symbols']])  # [b, v, num_symbols]
# mask out cross entropies in decoder
self.placeholders['iteration_mask'] = tf.placeholder(tf.float32, [None, None]) # [b, es]
# adj matrices used in decoder
self.placeholders['incre_adj_mat'] = tf.placeholder(tf.float32, [None, None, self.num_edge_types, None, None],
name='incre_adj_mat') # [b, es, e, v, v]
# distance
self.placeholders['distance_to_others'] = tf.placeholder(tf.int32, [None, None, None],
name='distance_to_others') # [b, es,v]
# maximum iteration number of this batch
self.placeholders['max_iteration_num'] = tf.placeholder(tf.int32, [], name='max_iteration_num') # number
# node number in focus at each iteration step
self.placeholders['node_sequence'] = tf.placeholder(tf.float32, [None, None, None],
name='node_sequence') # [b, es, v]
# mask out invalid edge types at each iteration step
self.placeholders['edge_type_masks'] = tf.placeholder(tf.float32, [None, None, self.num_edge_types, None],
name='edge_type_masks') # [b, es, e, v]
# ground truth edge type labels at each iteration step
self.placeholders['edge_type_labels'] = tf.placeholder(tf.float32, [None, None, self.num_edge_types, None],
name='edge_type_labels') # [b, es, e, v]
# mask out invalid edge at each iteration step
self.placeholders['edge_masks'] = tf.placeholder(tf.float32, [None, None, None],
name='edge_masks') # [b, es, v]
# ground truth edge labels at each iteration step
self.placeholders['edge_labels'] = tf.placeholder(tf.float32, [None, None, None],
name='edge_labels') # [b, es, v]
# ground truth labels for whether it stops at each iteration step
self.placeholders['local_stop'] = tf.placeholder(tf.float32, [None, None], name='local_stop') # [b, es]
# z_prior sampled from standard normal distribution
self.placeholders['z_prior'] = tf.placeholder(tf.float32, [None, None, h_dim_en],
name='z_prior') # the prior of z sampled from normal distribution
# put in front of kl latent loss
self.placeholders['kl_trade_off_lambda'] = tf.placeholder(tf.float32, [], name='kl_trade_off_lambda') # number
# overlapped edge features
self.placeholders['overlapped_edge_features'] = tf.placeholder(tf.int32, [None, None, None],
name='overlapped_edge_features') # [b, es, v]
# weights for encoder and decoder GNN.
if self.params['use_graph']:
if self.params["residual_connection_on"]:
# weights for encoder and decoder GNN. Different weights for each iteration
for scope in ['_encoder', '_decoder']:
if scope == '_encoder':
new_h_dim = h_dim_en
else:
new_h_dim = expanded_h_dim
# For each GNN iteration
for iter_idx in range(self.params['num_timesteps']):
with tf.variable_scope("gru_scope" + scope + str(iter_idx), reuse=False):
self.weights['edge_weights' + scope + str(iter_idx)] = tf.Variable(
glorot_init([self.num_edge_types, new_h_dim, new_h_dim]))
if self.params['use_edge_bias']:
self.weights['edge_biases' + scope + str(iter_idx)] = tf.Variable(
np.zeros([self.num_edge_types, 1, new_h_dim]).astype(np.float32))
cell = tf.contrib.rnn.GRUCell(new_h_dim)
cell = tf.nn.rnn_cell.DropoutWrapper(cell, state_keep_prob=self.placeholders[
'graph_state_keep_prob'])
self.weights['node_gru' + scope + str(iter_idx)] = cell
else:
for scope in ['_encoder', '_decoder']:
if scope == '_encoder':
new_h_dim = h_dim_en
else:
new_h_dim = expanded_h_dim
self.weights['edge_weights' + scope] = tf.Variable(
glorot_init([self.num_edge_types, new_h_dim, new_h_dim]))
if self.params['use_edge_bias']:
self.weights['edge_biases' + scope] = tf.Variable(
np.zeros([self.num_edge_types, 1, new_h_dim]).astype(np.float32))
with tf.variable_scope("gru_scope" + scope):
cell = tf.contrib.rnn.GRUCell(new_h_dim)
cell = tf.nn.rnn_cell.DropoutWrapper(cell,
state_keep_prob=self.placeholders['graph_state_keep_prob'])
self.weights['node_gru' + scope] = cell
# Weights for the final part of the encoder. They map all nodes to one point in the latent space
self.weights['mean_weights'] = tf.Variable(glorot_init([h_dim_en, h_dim_en]), name="mean_weights")
self.weights['mean_biases'] = tf.Variable(np.zeros([1, h_dim_en]).astype(np.float32), name="mean_biases")
self.weights['variance_weights'] = tf.Variable(glorot_init([h_dim_en, h_dim_en]), name="variance_weights")
self.weights['variance_biases'] = tf.Variable(np.zeros([1, h_dim_en]).astype(np.float32),
name="variance_biases")
# histograms for the first part of the decoder
self.placeholders['histograms'] = tf.placeholder(tf.int32, (None, hist_dim), name="histograms")
self.placeholders['n_histograms'] = tf.placeholder(tf.int32, (None), name="n_histograms")
self.placeholders['hist'] = tf.placeholder(tf.int32, (None, hist_dim), name="hist")
self.weights['latent_space_dec0'] = tf.Variable(glorot_init([h_dim_en + 2 * hist_dim, h_dim_en]))
self.weights['latent_space_bias_dec0'] = tf.Variable(np.zeros([1, h_dim_en]).astype(np.float32))
# The weights for generating node symbol logits
self.weights['node_symbol_weights'] = tf.Variable(glorot_init([h_dim_de, self.params['num_symbols']]))
self.weights['node_symbol_biases'] = tf.Variable(np.zeros([1, self.params['num_symbols']]).astype(np.float32))
feature_dimension = 6 * expanded_h_dim
# record the total number of features
self.params["feature_dimension"] = 6
# weights for generating edge type logits
for i in range(self.num_edge_types):
self.weights['edge_type_%d' % i] = tf.Variable(glorot_init([feature_dimension, feature_dimension]))
self.weights['edge_type_biases_%d' % i] = tf.Variable(np.zeros([1, feature_dimension]).astype(np.float32))
self.weights['edge_type_output_%d' % i] = tf.Variable(glorot_init([feature_dimension, 1]))
# weights for generating edge logits
self.weights['edge_iteration'] = tf.Variable(glorot_init([feature_dimension, feature_dimension]))
self.weights['edge_iteration_biases'] = tf.Variable(np.zeros([1, feature_dimension]).astype(np.float32))
self.weights['edge_iteration_output'] = tf.Variable(glorot_init([feature_dimension, 1]))
# Weights for the stop node
self.weights["stop_node"] = tf.Variable(glorot_init([1, expanded_h_dim]))
# Weight for distance embedding
self.weights['distance_embedding'] = tf.Variable(glorot_init([self.params['maximum_distance'], expanded_h_dim]))
# Weight for overlapped edge feature
self.weights["overlapped_edge_weight"] = tf.Variable(glorot_init([2, expanded_h_dim]))
# weights for linear projection on qed prediction input
self.weights['qed_weights'] = tf.Variable(glorot_init([h_dim_en, h_dim_en]))
self.weights['qed_biases'] = tf.Variable(np.zeros([1, h_dim_en]).astype(np.float32))
# use node embeddings
self.weights["node_embedding"] = tf.Variable(glorot_init([self.params["num_symbols"], h_dim_en]))
# graph state mask
self.ops['graph_state_mask'] = tf.expand_dims(self.placeholders['node_mask'], 2)
# transform one hot vector to dense embedding vectors
def get_node_embedding_state(self, one_hot_state):
node_nums = tf.argmax(one_hot_state, axis=2)
return tf.nn.embedding_lookup(self.weights["node_embedding"], node_nums) * self.ops['graph_state_mask']
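# Shape note added for clarity (inferred from the surrounding code): one_hot_state is
# [b, v, num_symbols]; the argmax over axis 2 yields integer symbol ids of shape [b, v], and the
# embedding lookup returns dense vectors [b, v, hidden_size_encoder], multiplied by the graph
# state mask so that padding vertices stay zero.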
def compute_final_node_representations_with_residual(self, h, adj, scope_name): # scope_name: _encoder or _decoder
# h: initial representation, adj: adjacency matrix, different GNN parameters for encoder and decoder
v = self.placeholders['num_vertices']
# _decoder uses a larger latent space because concat of symbol and latent representation
if scope_name == "_decoder":
h_dim = self.params['hidden_size_encoder'] + self.params['hidden_size_decoder'] + 1
else:
h_dim = self.params['hidden_size_encoder']
h = tf.reshape(h, [-1, h_dim]) # [b*v, h]
# record all hidden states at each iteration
all_hidden_states = [h]
for iter_idx
'''Body Composition is a Slicer module that allows segmenting different parts of the lungs on a manual or semi-automatic basis
with the help of a customized Slicer Editor.
It also performs a set of operations to analyze the different structures of
the volume based on its label map, like Area, Mean, Std.Dev., etc.
First version: <NAME> (ACIL, <EMAIL>). 11/2014'''
import qt, vtk, ctk, slicer
import numpy as np
from slicer.ScriptedLoadableModule import *
from CIP.logic.SlicerUtil import SlicerUtil
import CIP.ui as CIPUI
class CIP_Calibration(ScriptedLoadableModule):
"""Module that allows to segment different parts of the lungs in a manual or semi-automatic basis"""
def __init__(self, parent):
"""Constructor for main class"""
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "Calibration"
self.parent.categories = SlicerUtil.CIP_ModulesCategory
self.parent.dependencies = [SlicerUtil.CIP_ModuleName]
self.parent.contributors = ["<NAME> (<EMAIL>)", "Applied Chest Imaging Laboratory", "Brigham and Women's Hospital"]
self.parent.helpText = "Calibrate a scan with air and blood"
self.parent.acknowledgementText = SlicerUtil.ACIL_AcknowledgementText
######################################
# CIP_CalibrationWidget
#######################################
class CIP_CalibrationWidget(ScriptedLoadableModuleWidget):
"""GUI object"""
def __init__(self, parent):
ScriptedLoadableModuleWidget.__init__(self, parent)
# from functools import partial
# def __onNodeAddedObserver__(self, caller, eventId, callData):
# """Node added to the Slicer scene"""
# # if callData.GetClassName() == 'vtkMRMLScalarVolumeNode' \
# # and slicer.util.mainWindow().moduleSelector().selectedModule == self.moduleName:
# # self.__onNewVolumeLoaded__(callData)
# #if callData.GetClassName() == 'vtkMRMLLabelMapVolumeNode':
# self._onNewLabelmapLoaded_(callData)
#
#
# self.__onNodeAddedObserver__ = partial(__onNodeAddedObserver__, self)
# self.__onNodeAddedObserver__.CallDataType = vtk.VTK_OBJECT
self.firstLoad = True
self.activeEditorTools = None
self.pendingChangesIdsList = []
@property
def labelmapNodeNameExtension(self):
return "calibrationLabelMap"
################
# Main methods
################
def setup(self):
"""Init the widget """
# self.firstLoad = True
ScriptedLoadableModuleWidget.setup(self)
self.disableEvents = False
# Create objects that can be used anywhere in the module. Example: in most cases there should be just one
# object of the logic class
self._initLogic_()
##########
# Main area
self.mainAreaCollapsibleButton = ctk.ctkCollapsibleButton()
self.mainAreaCollapsibleButton.text = "Main area"
self.layout.addWidget(self.mainAreaCollapsibleButton, SlicerUtil.ALIGNMENT_VERTICAL_TOP)
# self.layout.addWidget(self.mainAreaCollapsibleButton)
# self.mainLayout = qt.QGridLayout(self.mainAreaCollapsibleButton)
self.mainLayout = qt.QFormLayout(self.mainAreaCollapsibleButton)
row = 0
# Node selector
volumeLabel = qt.QLabel("Active volume: ")
volumeLabel.setStyleSheet("margin-left:5px")
# self.mainLayout.addWidget(volumeLabel, row, 0)
self.volumeSelector = slicer.qMRMLNodeComboBox()
self.volumeSelector.nodeTypes = ("vtkMRMLScalarVolumeNode", "")
self.volumeSelector.selectNodeUponCreation = True
self.volumeSelector.autoFillBackground = True
self.volumeSelector.addEnabled = False
self.volumeSelector.noneEnabled = False
self.volumeSelector.removeEnabled = False
self.volumeSelector.showHidden = False
self.volumeSelector.showChildNodeTypes = False
self.volumeSelector.setMRMLScene(slicer.mrmlScene)
self.volumeSelector.setMinimumWidth(150)
# self.volumeSelector.setStyleSheet("margin: 15px 0")
# self.volumeSelector.selectNodeUponCreation = False
#self.mainLayout.addWidget(self.volumeSelector, row, 1)
self.mainLayout.addRow(volumeLabel, self.volumeSelector)
self.volumeSelector.connect('currentNodeChanged(vtkMRMLNode*)', self._onMainVolumeChanged_)
row += 1
lb = qt.QLabel("Click to select the calibration type and, if needed, modify the HU value expected for that area")
lb.setStyleSheet("margin:10px 0 10px 5px")
self.mainLayout.addRow(lb)
#self.mainLayout.addWidget(lb, row, 0, 1, 2)
self.typeRadioButtonGroup = qt.QButtonGroup()
self.typeRadioButtonGroup.connect("buttonClicked (QAbstractButton*)", self.__onTypeRadioButtonClicked__)
row += 1
self.rbAir = qt.QRadioButton("Air")
self.rbAir.setStyleSheet("margin-left:10px; margin-top: 5px")
self.typeRadioButtonGroup.addButton(self.rbAir, 1)
# self.mainLayout.addWidget(self.rbAir, row, 0)
self.txtAir = qt.QLineEdit()
self.txtAir.setText("-1000")
self.txtAir.setFixedWidth(80)
self.txtAir.setValidator(qt.QIntValidator())
self.mainLayout.addRow(self.rbAir, self.txtAir)
row += 1
self.rbBlood = qt.QRadioButton("Blood")
self.rbBlood.setStyleSheet("margin-left:10px; margin-top: 5px")
self.typeRadioButtonGroup.addButton(self.rbBlood, 2)
# self.mainLayout.addWidget(self.rbBlood, row, 0)
self.txtBlood = qt.QLineEdit()
self.txtBlood.setText("50")
self.txtBlood.setFixedWidth(80)
self.txtBlood.setValidator(qt.QIntValidator())
# self.mainLayout.addWidget(self.txtBlood, row, 1)
self.mainLayout.addRow(self.rbBlood, self.txtBlood)
row += 1
# Calibrate button
self.calibrateButton = ctk.ctkPushButton()
self.calibrateButton.setText("Calibrate")
self.calibrateButton.toolTip = "Run the calibration"
self.calibrateButton.setIcon(qt.QIcon("{0}/scale.png".format(SlicerUtil.CIP_ICON_DIR)))
self.calibrateButton.setIconSize(qt.QSize(20, 20))
self.calibrateButton.setFixedWidth(135)
self.mainLayout.addRow(None, self.calibrateButton)
self.calibrateButton.connect('clicked()', self._onCalibrateButtonClicked_)
self._createEditorWidget_()
self.setEditorValues()
@property
def currentVolumeLoaded(self):
return self.volumeSelector.currentNode()
@property
def colorNode(self):
nodeName = "{}_colorNode".format(self.moduleName)
colorTableNode = SlicerUtil.getNode(nodeName)
if colorTableNode is None:
colorTableNode = self.logic.createColormapNode(nodeName)
return colorTableNode
def _initLogic_(self):
"""Create a new logic object for the plugin"""
self.logic = CIP_CalibrationLogic()
def checkMasterAndLabelMapNodes(self):
"""Set an appropiate MasterNode LabelMapNode to the Editor.
The options are:
- There is no masterNode => try to load the one that the user is watching right now, and go on if so.
- There is masterNode and there is no label map => create a default label map node with the name "MasterNodeName_structuresDetection" and set the StructuresDetectionColorMap
- There is masterNode and there is label map => check if the name of the label map is "MasterNodeName_structuresDetection".
- If so: set this one
- Otherwise: create a new labelmap with the name 'MasterNodeName_structureslabelMap' """
if self.disableEvents: return # To avoid infinite loops
if self.editorWidget.masterVolume:
masterNode = self.editorWidget.masterVolume
SlicerUtil.logDevelop("Master node in Editor = " + masterNode.GetName(), True)
else:
SlicerUtil.logDevelop("No master node in Editor. Retrieving it from the selector...", False)
masterNode = self.getCurrentGrayscaleNode()
if not masterNode:
# There is no volume node that the user is currently viewing
SlicerUtil.logDevelop("Still not master node. Exit", False)
return
labelmapNode = self.getOrCreateLabelmap(masterNode)
displayNode = labelmapNode.GetDisplayNode()
if displayNode:
displayNode.SetAndObserveColorNodeID(self.colorNode.GetID())
else:
SlicerUtil.logDevelop("There is no DisplayNode for label map " + labelmapNode.GetName(), True)
slicer.app.applicationLogic().PropagateVolumeSelection(0)
SlicerUtil.changeLabelmapOpacity(0.5)
# Set the right volumes
self.disableEvents = True
#self.editorWidget.masterVolume = masterNode
#self.editorWidget.labelmapVolume = labelmapNode
# trigger editor events
self.editorWidget.helper.setVolumes(masterNode, labelmapNode)
self.disableEvents = False
slicer.app.applicationLogic().FitSliceToAll()
def getOrCreateLabelmap(self, masterNode):
labelmapName = "{0}_{1}".format(masterNode.GetName(), self.labelmapNodeNameExtension)
labelmapNode = SlicerUtil.getNode(labelmapName)
if labelmapNode is None:
# Create a labelmap for this scalar
labelmapNode = slicer.modules.volumes.logic().CreateAndAddLabelVolume(slicer.mrmlScene, masterNode, labelmapName)
# Make sure that the labelmap has this name (no suffixes)
labelmapNode.SetName(labelmapName)
SlicerUtil.logDevelop("New label map node created: " + labelmapName, includePythonConsole=True)
else:
SlicerUtil.logDevelop("Labelmap loaded", includePythonConsole=True)
return labelmapNode
def getCurrentGrayscaleNode(self):
"""Get the grayscale node that is currently active in the widget"""
#return self.editorWidget.masterVolume
return self.volumeSelector.currentNode()
def getCurrentLabelMapNode(self):
"""Get the labelmap node that is currently active in the widget"""
return self.editorWidget.labelmapVolume
def setCurrentGrayscaleNode(self, node):
"""Get the grayscale node that is currently active in the widget"""
self.editorWidget.masterVolume = node
def setCurrentLabelMapNode(self, node):
"""Get the labelmap node that is currently active in the widget"""
self.editorWidget.labelmapVolume = node
def setEditorValues(self):
"""Set the right color in the editor"""
self.editorWidget.toolsColor.colorSpin.setValue(self.typeRadioButtonGroup.checkedId())
self.editorWidget.setActiveEffect("PaintEffect")
self.editorWidget.changePaintEffectRadius(1.5)
# Show the paint tools
self.editorWidget.editLabelMapsFrame.collapsed = False
##############
# Aux methods
##############
def _onMainVolumeChanged_(self, newVolumeNode):
""" A volume was changed in the main volume selector
:param newVolumeNode:
:return:
"""
if not self.disableEvents:
self.setCurrentGrayscaleNode(newVolumeNode)
self.checkMasterAndLabelMapNodes()
def _onPreNavigatorLabelmapLoaded_(self, volumeNodeName):
self.labelmapToBeRemoved = SlicerUtil.getNode(volumeNodeName)
def _onNavigatorLabelmapLoaded_(self, volumeNode, region, type):
"""When a labelmap is loaded in the CaseNavigator, remove possible preexisting nodes"""
if self.labelmapToBeRemoved:
slicer.mrmlScene.RemoveNode(self.labelmapToBeRemoved)
self.labelmapToBeRemoved = None
self.checkMasterAndLabelMapNodes()
def _createEditorWidget_(self):
"""Create and initialize a customize Slicer Editor which contains just some the tools that we need for the segmentation"""
if self.activeEditorTools is None:
# We don't want Paint effect by default
self.activeEditorTools = (
"DefaultTool", "DrawEffect", "PaintEffect", "RectangleEffect", "EraseLabel", "PreviousCheckPoint", "NextCheckPoint")
self.editorWidget = CIPUI.CIP_EditorWidget(self.parent, showVolumesFrame=True, activeTools=self.activeEditorTools)
self.editorWidget.setup()
self.editorWidget.setThresholds(-50000, 50000) # Remove thresholds
# Collapse Volumes selector by default
self.editorWidget.volumes.collapsed = True
# Remove current listeners for helper box and override them
self.editorWidget.helper.masterSelector.disconnect("currentNodeChanged(vtkMRMLNode*)")
self.editorWidget.helper.mergeSelector.disconnect("currentNodeChanged(vtkMRMLNode*)")
# Force to select always a node. It is important to do this at this point, when the events are disconnected,
# because otherwise the editor would display the color selector (just noisy for the user)
self.editorWidget.helper.masterSelector.noneEnabled = False
# Listen to the event when there is a Master Node selected in the HelperBox
self.editorWidget.helper.masterSelector.connect("currentNodeChanged(vtkMRMLNode*)", self._onMasterNodeSelect_)
def _collapseEditorWidget_(self, collapsed=True):
"""Collapse/expand the items in EditorWidget"""
self.editorWidget.volumes.collapsed = collapsed
self.editorWidget.editLabelMapsFrame.collapsed = collapsed
def _onCalibrateButtonClicked_(self):
error = self.logic.calibrate(self.currentVolumeLoaded, self.getCurrentLabelMapNode(), int(self.txtAir.text), int(self.txtBlood.text))
if error:
slicer.util.warningDisplay(error)
else:
slicer.util.infoDisplay("Calibration completed")
#########
# Events
#########
def enter(self):
"""Method that is invoked when we switch to the module in slicer user interface"""
self.disableEvents = False
if self.firstLoad:
self.firstLoad = False
else:
self.checkMasterAndLabelMapNodes()
self.editorWidget.helper.masterSelector.connect("currentNodeChanged(vtkMRMLNode*)", self._onMasterNodeSelect_)
def _onMasterNodeSelect_(self, node):
if node:
nodeName = node.GetName()
if self.getCurrentGrayscaleNode() and self.getCurrentGrayscaleNode().GetName() != nodeName:
SlicerUtil.logDevelop(
"There was a selection of a new master node: {0}. Previous: {1}. We will invoke checkMasterAndLabelMapNodes".
format(node.GetName(), self.editorWidget.masterVolume.GetName()), includePythonConsole=True)
# Update Editor Master node to perform the needed actions.
# We don't use "setVolumes" function because the interface must not be refeshed yet (it will be in checkMasterAndLabelMapNodes)
self.setCurrentGrayscaleNode(node)
# Remove label node to refresh the values properly
self.setCurrentLabelMapNode(None)
self.checkMasterAndLabelMapNodes()
else:
SlicerUtil.logDevelop("No master node selected. Trying to remove label map", False)
self.editorWidget.cleanVolumes()
self.setEditorValues()
def __onTypeRadioButtonClicked__(self, button):
""" One of the radio buttons has been pressed
:param button:
:return:
"""
self.setEditorValues()
def __onSceneClosed__(self, arg1, arg2):
self.pendingChangesIdsList = []
self.logic = CIP_CalibrationLogic()
def exit(self):
self.editorWidget.helper.masterSelector.disconnect("currentNodeChanged(vtkMRMLNode*)")
self.disableEvents = True
def cleanup(self):
pass
# CIP_CalibrationLogic
# This class performs all the operations not related to the user interface (download and handle volumes, etc.)
#
class CIP_CalibrationLogic(ScriptedLoadableModuleLogic):
def __init__(self):
"""Constructor. """
ScriptedLoadableModuleLogic.__init__(self)
def createColormapNode(self, nodeName):
"""
Create a new colormap node for the editor
@param nodeName:
"""
colorNode = SlicerUtil.createNewColormapNode(nodeName, numberOfColors=3)
colorNode.SetColor(0, "Background", 0, 0, 0, 0)
colorNode.SetColor(1, "Air", 0, 1.0, 0)
colorNode.SetColor(2, "Blood", 1.0, 0, 0)
return colorNode
def calibrate(self, scalarNode, labelmapNode, air_output, blood_output):
"""
Calibrate the volume. Take the mean value of each region marked and rescale
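# NOTE: the body of calibrate() is truncated above. For reference only, a two-point linear
# rescale of the kind the docstring describes (a hedged sketch, NOT the module's actual code,
# assuming label 1 marks the air region and label 2 the blood region) would be:
#   air_mean, blood_mean = mean(HU under label 1), mean(HU under label 2)
#   slope = (blood_output - air_output) / (blood_mean - air_mean)
#   calibrated_values = air_output + slope * (original_values - air_mean)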
import math
from collections import defaultdict
from typing import Any, Dict, List, Union
import cv2
import numpy as np
from .producing import FeatureProducerBase
from ..proto import Scene
from ..utils import (
get_tracks_polygons,
transform_2d_points,
transform_2d_vectors,
)
from ..utils.map import (
get_crosswalk_availability,
get_lane_availability,
repeated_points_to_array,
get_section_to_state,
)
MAX_HISTORY_LENGTH = 25
def _create_feature_maps(rows, cols, num_channels):
shape = [num_channels, rows, cols]
return np.zeros(shape, dtype=np.float32)
class FeatureMapRendererBase:
LINE_TYPE = cv2.LINE_AA
LINE_THICKNESS = 1
def __init__(
self,
config: List[str],
feature_map_params: Dict[str, Union[int, float]],
time_grid_params: Dict[str, int],
to_feature_map_tf: np.ndarray,
):
"""A base class for feature renderers.
Args:
config (List[str]): list of channels to render
feature_map_params (Dict[str, Union[int, float]]): feature map parameters dict
specifying pixel height, width and resolution in meters
time_grid_params (Dict[str, int]): time grid parameters dict
specifying number of historical steps to render
to_feature_map_tf (np.ndarray): transform to feature map coordinate system
"""
self._config = config
self._feature_map_params = feature_map_params
self._history_indices = self._get_history_indices(time_grid_params)
self._num_channels = self._get_num_channels()
self._to_feature_map_tf = to_feature_map_tf
def render(self, feature_map: np.ndarray, scene: Scene, to_track_transform: np.ndarray):
"""Renders objects from scene to the feature map using OpenCV.
All object coordinates are transformed from the global coordinate system to the feature map system
using to_track_transform and self._to_feature_map_tf.
Args:
feature_map (np.ndarray): input feature map for rendering
scene (Scene): input scene proto message
to_track_transform (np.ndarray): transform to agent-centric coordinates
Raises:
NotImplementedError: to overload in child classes
"""
raise NotImplementedError()
def _get_num_channels(self):
raise NotImplementedError()
def _get_history_indices(self, time_grid_params):
return list(range(
-time_grid_params['stop'] - 1,
-time_grid_params['start'],
time_grid_params['step'],
))
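# Worked example added for clarity: with time_grid_params == {'start': 0, 'stop': 24, 'step': 1}
# this returns list(range(-25, 0, 1)) == [-25, -24, ..., -1], i.e. the 25 most recent history
# frames, addressed with negative indices into the scene's per-timestamp track lists.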
@property
def n_history_steps(self) -> int:
"""Number of history steps in the resulting feature map
Returns:
int:
"""
return len(self._history_indices)
@property
def num_channels(self) -> int:
"""Number of channels in the resulting feature map
Returns:
int:
"""
return self._num_channels
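# Hedged example of the constructor arguments (the feature_map_params key names are assumptions
# based on the docstring, not taken verbatim from the project's config):
#   feature_map_params = {'rows': 400, 'cols': 400, 'resolution': 0.25}  # 0.25 m per pixel
#   time_grid_params   = {'start': 0, 'stop': 24, 'step': 1}             # 25 history steps
#   config             = ['occupancy', 'velocity']                       # channels to render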
class TrackRendererBase(FeatureMapRendererBase):
"""A base class for pedestrian and vehicle tracks renderers.
"""
def _get_tracks_at_timestamp(self, scene, ts_ind):
raise NotImplementedError
def _get_fm_values(self, tracks, transform):
raise NotImplementedError
def render(self, feature_map: np.ndarray, scene: Scene, to_track_transform: np.ndarray):
"""Renders tracks as polygons on the feature map.
Args:
feature_map (np.ndarray): input feature map for rendering
scene (Scene): input scene proto message
to_track_transform (np.ndarray): transform to agent-centric coordinates
"""
transform = self._to_feature_map_tf @ to_track_transform
for ts_ind in self._history_indices:
tracks_at_frame = self._get_tracks_at_timestamp(scene, ts_ind)
if not tracks_at_frame:
continue
polygons = get_tracks_polygons(tracks_at_frame)
polygons = transform_2d_points(polygons.reshape(-1, 2), transform).reshape(-1, 4, 2)
polygons = np.around(polygons - 0.5).astype(np.int32)
fm_values = self._get_fm_values(tracks_at_frame, to_track_transform)
for channel_idx in range(fm_values.shape[1]):
fm_channel_slice = feature_map[ts_ind * self.num_channels + channel_idx, :, :]
for track_idx in range(fm_values.shape[0]):
cv2.fillPoly(
fm_channel_slice,
[polygons[track_idx]],
fm_values[track_idx, channel_idx],
lineType=self.LINE_TYPE,
)
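# Indexing note added for clarity: ts_ind comes from self._history_indices and is negative, so
# ts_ind * self.num_channels + channel_idx addresses channel blocks from the end of the feature
# map. For example, with num_channels == 3, ts_ind == -1 selects slices -3, -2 and -1, i.e. the
# block for the most recent timestamp.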
def _get_occupancy_values(self, tracks):
return np.ones((len(tracks), 1), dtype=np.float32)
def _get_velocity_values(self, tracks, transform):
velocities = np.asarray(
[[track.linear_velocity.x, track.linear_velocity.y] for track in tracks],
dtype=np.float32)
velocities = transform_2d_vectors(velocities, transform)
return velocities
def _get_acceleration_values(self, tracks, transform):
accelerations = np.asarray(
[[track.linear_acceleration.x, track.linear_acceleration.y] for track in tracks],
dtype=np.float32)
accelerations = transform_2d_vectors(accelerations, transform)
return accelerations
def _get_yaw_values(self, tracks):
return np.asarray([track.yaw for track in tracks])[:, np.newaxis]
class VehicleTracksRenderer(TrackRendererBase):
def _get_num_channels(self):
num_channels = 0
if 'occupancy' in self._config:
num_channels += 1
if 'velocity' in self._config:
num_channels += 2
if 'acceleration' in self._config:
num_channels += 2
if 'yaw' in self._config:
num_channels += 1
return num_channels
def _get_tracks_at_timestamp(self, scene, ts_ind):
return [
track for track in scene.past_vehicle_tracks[ts_ind].tracks
] + [scene.past_ego_track[ts_ind]]
def _get_fm_values(self, tracks, transform):
values = []
if 'occupancy' in self._config:
values.append(self._get_occupancy_values(tracks))
if 'velocity' in self._config:
values.append(self._get_velocity_values(tracks, transform))
if 'acceleration' in self._config:
values.append(self._get_acceleration_values(tracks, transform))
if 'yaw' in self._config:
values.append(self._get_yaw_values(tracks))
return np.concatenate(values, axis=1).astype(np.float64)
class PedestrianTracksRenderer(TrackRendererBase):
def _get_num_channels(self):
num_channels = 0
if 'occupancy' in self._config:
num_channels += 1
if 'velocity' in self._config:
num_channels += 2
return num_channels
def _get_tracks_at_timestamp(self, scene, ts_ind):
return [
track for track in scene.past_pedestrian_tracks[ts_ind].tracks
]
def _get_fm_values(self, tracks, transform):
values = []
if 'occupancy' in self._config:
values.append(self._get_occupancy_values(tracks))
if 'velocity' in self._config:
values.append(self._get_velocity_values(tracks, transform))
return np.concatenate(values, axis=1).astype(dtype=np.float64)
class RoadGraphRenderer(FeatureMapRendererBase):
def render(self, feature_map: np.ndarray, scene: Scene, to_track_transform: np.ndarray):
"""Render path graph elements, such as lanes, crosswalks, road polygons,
as well as its properties (geometry, occupancy, etc.)
Args:
feature_map (np.ndarray): input feature map for rendering
scene (Scene): input scene proto message
to_track_transform (np.ndarray): transform to agent-centric coordinates
"""
transform = self._to_feature_map_tf @ to_track_transform
path_graph = scene.path_graph
for channel_ind in range(len(self._history_indices)):
traffic_light_sections = scene.traffic_lights[self._history_indices[channel_ind]]
if self._get_crosswalk_feature_map_size() > 0:
self._render_crosswalks(
feature_map[self._get_crosswalk_feature_map_slice(channel_ind), :, :],
path_graph,
traffic_light_sections,
transform,
)
if self._get_lane_feature_map_size() > 0:
self._render_lanes(
feature_map[self._get_lanes_feature_map_slice(channel_ind), :, :],
path_graph,
traffic_light_sections,
transform,
)
if self._get_road_feature_map_size() > 0:
self._render_road_polygons(
feature_map[self._get_road_polygon_feature_map_slice(channel_ind), :, :],
path_graph,
transform,
)
def _render_crosswalks(self, feature_map, path_graph, traffic_light_sections, transform):
crosswalk_polygons = []
for crosswalk in path_graph.crosswalks:
polygon = repeated_points_to_array(crosswalk.geometry)
polygon = transform_2d_points(polygon, transform)
polygon = np.around(polygon - 0.5).astype(np.int32)
crosswalk_polygons.append(polygon)
channel = 0
if 'crosswalk_occupancy' in self._config:
cv2.fillPoly(
feature_map[channel, ...],
crosswalk_polygons,
1.,
lineType=self.LINE_TYPE,
)
channel += 1
if 'crosswalk_availability' in self._config:
availability_to_polygons = defaultdict(list)
for i, crosswalk in enumerate(path_graph.crosswalks):
availability = get_crosswalk_availability(crosswalk, traffic_light_sections)
availability_to_polygons[availability].append(crosswalk_polygons[i])
for availability, polygons in availability_to_polygons.items():
cv2.fillPoly(
feature_map[channel, ...],
polygons,
availability,
lineType=self.LINE_TYPE,
)
def _render_lanes(self, feature_map, path_graph, traffic_light_sections, transform):
lane_lengths = []
lane_centers_concatenated = []
for lane in path_graph.lanes:
lane_lengths.append(len(lane.centers))
for p in lane.centers:
lane_centers_concatenated.append([p.x, p.y])
lane_centers_concatenated = np.array(lane_centers_concatenated, dtype=np.float32)
lane_centers_concatenated = transform_2d_points(lane_centers_concatenated, transform)
lane_centers_concatenated = np.around(lane_centers_concatenated - 0.5).astype(np.int32)
lane_centers = []
bounds = [0] + np.cumsum(lane_lengths).tolist()
for i in range(1, len(bounds)):
lane_centers.append(lane_centers_concatenated[bounds[i - 1]:bounds[i]])
channel = 0
if 'lane_availability' in self._config:
self._render_lane_availability(
feature_map[channel, ...], lane_centers, path_graph, traffic_light_sections)
channel += 1
if 'lane_direction' in self._config:
self._render_lane_direction(feature_map[channel, ...], lane_centers)
channel += 1
if 'lane_occupancy' in self._config:
self._render_lane_occupancy(feature_map[channel, ...], lane_centers)
channel += 1
if 'lane_priority' in self._config:
self._render_lane_priority(feature_map[channel, ...], lane_centers, path_graph)
channel += 1
if 'lane_speed_limit' in self._config:
self._render_lane_speed_limit(feature_map[channel, ...], lane_centers, path_graph)
channel += 1
def _render_lane_availability(self, feature_map, lane_centers, path_graph, tl_sections):
section_to_state = get_section_to_state(tl_sections)
availability_to_lanes = defaultdict(list)
for lane_idx, lane in enumerate(path_graph.lanes):
availability = get_lane_availability(lane, section_to_state)
availability_to_lanes[availability].append(lane_centers[lane_idx])
for v, lanes in availability_to_lanes.items():
cv2.polylines(
feature_map,
lanes,
isClosed=False,
color=v,
thickness=self.LINE_THICKNESS,
lineType=self.LINE_TYPE,
)
def _render_lane_direction(self, feature_map, lane_centers):
for lane in lane_centers:
for i in range(1, lane.shape[0]):
p1 = (lane[i - 1, 0], lane[i - 1, 1])
p2 = (lane[i, 0], lane[i, 1])
cv2.line(
feature_map,
p1,
p2,
math.atan2(p2[1] - p1[1], p2[0] - p1[0]),
thickness=self.LINE_THICKNESS,
lineType=self.LINE_TYPE,
)
def _render_lane_occupancy(self, feature_map, lane_centers):
cv2.polylines(
feature_map,
lane_centers,
isClosed=False,
color=1.,
thickness=self.LINE_THICKNESS,
lineType=self.LINE_TYPE,
)
def _render_lane_priority(self, feature_map, lane_centers, path_graph):
non_priority_lanes = []
for i, lane in enumerate(path_graph.lanes):
if lane.gives_way_to_some_lane:
non_priority_lanes.append(lane_centers[i])
cv2.polylines(
feature_map,
non_priority_lanes,
isClosed=False,
color=1.,
thickness=self.LINE_THICKNESS,
lineType=self.LINE_TYPE,
)
def _render_lane_speed_limit(self, feature_map, lane_centers, path_graph):
limit_to_lanes = defaultdict(list)
for i, lane in enumerate(path_graph.lanes):
limit_to_lanes[lane.max_velocity].append(lane_centers[i])
for limit, lanes in limit_to_lanes.items():
cv2.polylines(
feature_map,
lanes,
isClosed=False,
color=limit / 15.0,
thickness=self.LINE_THICKNESS,
lineType=self.LINE_TYPE,
)
def _render_road_polygons(self, feature_map, path_graph, transform):
road_polygons = []
for road_polygon in path_graph.road_polygons:
polygon = repeated_points_to_array(road_polygon.geometry)
polygon = transform_2d_points(polygon, transform)
polygon = np.around(polygon - 0.5).astype(np.int32)
road_polygons.append(polygon)
cv2.fillPoly(
feature_map[0, :, :],
road_polygons,
1.0,
lineType=self.LINE_TYPE,
)
def _get_num_channels(self):
return (
self._get_crosswalk_feature_map_size() +
self._get_lane_feature_map_size() +
self._get_road_feature_map_size()
)
def _get_crosswalk_feature_map_size(self):
num_channels = 0
if 'crosswalk_occupancy' in self._config:
num_channels += 1
if 'crosswalk_availability' in self._config:
num_channels += 1
return num_channels
def _get_crosswalk_feature_map_slice(self, ts_ind):
return slice(
ts_ind * self.num_channels,
ts_ind * self.num_channels + self._get_crosswalk_feature_map_size()
)
def _get_crosswalk_feature_map_values(self, crosswalk, traffic_light_sections):
values = []
if 'crosswalk_occupancy' in self._config:
values.append(1.)
if 'crosswalk_availability' in self._config:
values.append(get_crosswalk_availability(crosswalk, traffic_light_sections))
return values
def _get_lane_feature_map_size(self):
num_channels = 0
if 'lane_availability' in self._config:
num_channels += 1
if 'lane_direction' in self._config:
num_channels += 1
if 'lane_occupancy' in self._config:
num_channels += 1
if 'lane_priority' in self._config:
num_channels += 1
if 'lane_speed_limit' in self._config:
num_channels += 1
return num_channels
def _get_lanes_feature_map_slice(self, ts_ind):
offset = (
ts_ind * self._num_channels +
self._get_crosswalk_feature_map_size()
)
return slice(offset, offset + self._get_lane_feature_map_size())
def _get_road_feature_map_size(self):
num_channels = 0
if 'road_polygons' in self._config:
num_channels += 1
return num_channels
def _get_road_polygon_feature_map_slice(self, ts_ind):
offset = (
ts_ind * self._num_channels +
self._get_crosswalk_feature_map_size() +
self._get_lane_feature_map_size()
)
return slice(offset, offset + self._get_road_feature_map_size())
def _get_road_polygon_feature_map_values(self):
values = []
if 'road_polygons' in self._config:
values.append(1.0)
return values
class FeatureRenderer(FeatureProducerBase):
def __init__(self, config: Any):
"""A class implementing FeatureProducerBase interface for individual feature renderers.
Args:
config (Any): dict with feature map params and renderer groups params.
Find an example in the example.ipynb.
"""
self._feature_map_params = config['feature_map_params']
self._to_feature_map_tf = self._get_to_feature_map_transform()
self._renderers = self._create_renderers_list(config)
self._num_channels = self._get_num_channels()
def produce_features(
self, scene: Scene, to_track_frame_tf: np.ndarray) -> Dict[str, np.ndarray]:
"""Produces feature maps given the scene and the transform from global coordinates to
an actor-centric coordinate system.
Args:
scene (Scene): current scene to render
to_track_frame_tf (np.ndarray): transform from global coordinates to the actor-centric
system.
Returns:
uncorrelated snapshot n \in 1..N_k[k] from state k.
for k in range(0,K):
# Determine indices of statistically independent configurations by analyzing the correlation structure of the timeseries data.
indices = timeseries.subsampleCorrelatedData(u_klt[k,k,0:T_k[k]])
# Subsample data.
N_k[k] = len(indices)
for l in range(0,K):
u_kln[k,l,0:N_k[k]] = u_klt[k,l,indices]
# Initialize MBAR with reduced energies u_kln and number of uncorrelated configurations from each state N_k.
#
# u_kln[k,l,n] is the reduced potential energy beta*U_l(x_kn), where U_l(x) is the potential energy function for state l,
# beta is the inverse temperature, and x_kn denotes uncorrelated configuration n from state k.
#
# N_k[k] is the number of configurations from state k stored in u_kln
#
# Note that this step may take some time, as the relative dimensionless free energies f_k are determined at this point.
mbar = MBAR(u_kln, N_k)
# Extract dimensionless free energy differences and their statistical uncertainties.
(Deltaf_ij, dDeltaf_ij) = mbar.getFreeEnergyDifferences()
print 'Unit-bearing free energy difference between states 1 and K: %f +- %f' % ( (1./beta) * Deltaf_ij[0,K-1], (1./beta) * dDeltaf_ij[0,K-1])
# Compute the expectation of some observable A(x) at each state i, and associated uncertainty matrix.
# Here, A_kn[k,n] = A(x_{kn})
(A_k, dA_k) = mbar.computeExpectations(A_kn)
"""
#=============================================================================================
def __init__(self, u_kln, N_k, maximum_iterations=10000, relative_tolerance=1.0e-7, verbose=False, initial_f_k=None, method='adaptive', use_optimized=None, newton_first_gamma = 0.1, newton_self_consistent = 2, maxrange = 1.0e5, initialize='zeros'):
"""
Initialize multistate Bennett acceptance ratio (MBAR) on a set of simulation data.
Upon initialization, the dimensionless free energies for all states are computed.
This may take anywhere from seconds to minutes, depending upon the quantity of data.
After initialization, the computed free energies may be obtained by a call to 'getFreeEnergies()', or
free energies or expectation at any state of interest can be computed by calls to 'computeFreeEnergy()' or
'computeExpectations()'.
REQUIRED ARGUMENTS
u_kln (KxKxNmax float array) - u_kln[k,l,n] is the reduced potential energy of uncorrelated configuration n sampled from state k, evaluated at state l
N_k (K int array) - N_k[k] is the number of uncorrelated snapshots sampled from state k -- this can be zero if the expectation or free energy
of this state is desired but no samples were drawn from this state
NOTES
The reduced potential energy u_kln[k,l,n] = u_l(x_{kn}), where the reduced potential energy u_l(x) is defined (as in the text) by:
u_l(x) = beta_l [ U_l(x) + p_l V(x) + mu_l' n(x) ]
where
beta_l = 1/(kB T_l) is the inverse temperature of condition l, where kB is Boltzmann's constant
U_l(x) is the potential energy function for state l
p_l is the pressure at state l (if an isobaric ensemble is specified)
V(x) is the volume of configuration x
mu_l is the M-vector of chemical potentials for the various species, if a (semi)grand ensemble is specified, and ' denotes transpose
n(x) is the M-vector of numbers of the various molecular species for configuration x, corresponding to the chemical potential components of mu_m.
The configurations x_kn must be uncorrelated. This can be ensured by subsampling a correlated timeseries with a period larger than the statistical inefficiency,
which can be estimated from the potential energy timeseries {u_k(x_kn)}_{n=1}^{N_k} using the provided utility function 'statisticalInefficiency()'.
See the help for this function for more information.
OPTIONAL ARGUMENTS
maximum_iterations (int) - can be set to limit the maximum number of iterations performed (default 10000)
relative_tolerance (float) - can be set to determine the relative tolerance convergence criteria (default 1.0e-7)
verbose (logical) - should be set to True if verbose debug output is desired (default False)
initial_f_k (numpy K float64 array) - should be set to a numpy K-array with initial dimensionless free energies to use as a guess (default None, which sets all f_k = 0)
method (string) - choose method for determination of dimensionless free energies: 'self-consistent-iteration','Newton-Raphson', or 'adaptive' (default: 'adaptive')
Newton-Raphson is deprecated and defaults to adaptive
use_optimized - if False, will explicitly disable use of C++ extensions; if None or True, extensions will be autodetected (default: None)
initialize (string) - option for initialization. if equal to 'BAR', use BAR between the pairwise state to initialize the free energies. Eventually, should specify a path; for now, it just does it zipping up the states. (default: 'zeros', unless specific values are passed in.)
newton_first_gamma (float) - initial gamma for newton-raphson (default = 0.1)
newton_self_consistent (int) - minimum number of self-consistent iterations before Newton-Raphson iteration (default = 2)
TEST
>>> import testsystems
>>> [x_kn, u_kln, N_k] = testsystems.HarmonicOscillatorsSample()
>>> mbar = MBAR(u_kln, N_k)
"""
if method == 'Newton-Raphson':
print "Warning: Newton-Raphson is deprecated. Switching to method 'adaptive' which uses the most quickly converging between Newton-Raphson and self-consistent iteration."
method = 'adaptive'
# Determine whether embedded C++ helper code is available
self.use_embedded_helper_code = False
if (use_optimized != None):
# If user specifies an option, use this.
self.use_embedded_helper_code = use_optimized
else:
# Test whether we can import the helper code.
try:
import _pymbar # import the helper code
self.use_embedded_helper_code = True # if we have succeeded, use it
if verbose: print "Using embedded C++ helper code."
except ImportError:
# import failed
self.use_embedded_helper_code = False
if verbose: print "Could not import working embedded C++ helper code -- using pure Python version instead."
# Store local copies of necessary data.
self.N_k = numpy.array(N_k, dtype=numpy.int32) # N_k[k] is the number of samples from state k
self.u_kln = numpy.array(u_kln, dtype=numpy.float64) # u_kln[k,l,n] is the reduced potential energy of sample n from state k evaluated at state l
# Get dimensions of reduced potential energy matrix.
[K, L, N_max] = self.u_kln.shape
if verbose: print "K = %d, L = %d, N_max = %d, total samples = %d" % (K, L, N_max, self.N_k.sum())
# Perform consistency checks on dimensions.
if K != L:
raise ParameterError('u_kln[0:K, 0:L, 0:N_max] must have dimensions K == L.')
if numpy.any(N_k > N_max):
raise ParameterError('All N_k must be <= N_max, the third dimension of u_kln[0:K, 0:L, 0:N_max].')
# Store local copies of other data
self.K = K # number of thermodynamic states
self.N_max = N_max # maximum number of configurations per state
self.N = sum(self.N_k) # N = \sum_{k=1}^K N_k is the total number of uncorrelated configurations pooled across all states
self.verbose = verbose # verbosity level -- if True, will print extra debug information
# perform consistency checks on the data.
# if, for any set of data, all reduced potential energies are the same,
# they are probably the same state. We check to within relative_tolerance.
self.samestates = []
if (self.K >100):
print 'Skipping check of whether the states have the same potential energy,'
print 'as the number of states is greater than 100'
else:
for k in range(K):
for l in range(k):
diffsum = 0
for j in range(K): # find the nonzero sets of data:
if (self.N_k[j] > 0):
uzero = u_kln[j,k,:] - u_kln[j,l,:]
diffsum += numpy.dot(uzero,uzero);
if (diffsum < relative_tolerance):
self.samestates.append([k,l])
self.samestates.append([l,k])
print ''
print 'Warning: states %d and %d have the same energies on the dataset.' % (l,k)
print 'They are therefore likely to be the same thermodynamic state. This can occasionally cause'
print 'numerical problems with computing the covariance of their energy difference, which must be'
print 'identically zero in any case. Consider combining them into a single state.'
print ''
# Create a list of indices of all configurations in kn-indexing.
mask_kn = numpy.zeros([self.K,self.N_max], dtype=numpy.bool_)
for k in range(0,self.K):
mask_kn[k,0:N_k[k]] = True
# Create a list from this mask.
self.indices = numpy.where(mask_kn)
# Determine list of k indices for which N_k != 0
self.nonzero_N_k_indices = numpy.where(self.N_k != 0)[0]
self.nonzero_N_k_indices = self.nonzero_N_k_indices.astype(numpy.int32)
# Store versions of the variables restricted to the nonzero-N_k indices.
# Number of states with samples.
self.K_nonzero = self.nonzero_N_k_indices.size
if verbose:
print "There are %d states with samples." % self.K_nonzero
self.N_nonzero = self.N_k[self.nonzero_N_k_indices].copy()
# Print number of
will block indefinitely until data is available.
Args:
dtype: A `tf.DType`. The type of elements in the tensor.
shape: A `tf.TensorShape` or list of `ints`. The shape of the tensor.
device_ordinal: An optional `int`. Defaults to `-1`.
The TPU device to use. This should be -1 when the Op
is running on a TPU device, and >= 0 when the Op is running on the CPU
device.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "OutfeedDequeue", name,
tld.op_callbacks, "dtype", dtype, "shape", shape, "device_ordinal",
device_ordinal)
return _result
except _core._FallbackException:
try:
return outfeed_dequeue_eager_fallback(
dtype=dtype, shape=shape, device_ordinal=device_ordinal,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
dtype = _execute.make_type(dtype, "dtype")
shape = _execute.make_shape(shape, "shape")
if device_ordinal is None:
device_ordinal = -1
device_ordinal = _execute.make_int(device_ordinal, "device_ordinal")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"OutfeedDequeue", dtype=dtype, shape=shape,
device_ordinal=device_ordinal, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("dtype", _op._get_attr_type("dtype"), "shape",
_op.get_attr("shape"), "device_ordinal",
_op._get_attr_int("device_ordinal"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"OutfeedDequeue", _inputs_flat, _attrs, _result)
_result, = _result
return _result
OutfeedDequeue = tf_export("raw_ops.OutfeedDequeue")(_ops.to_raw_op(outfeed_dequeue))
def outfeed_dequeue_eager_fallback(dtype, shape, device_ordinal, name, ctx):
dtype = _execute.make_type(dtype, "dtype")
shape = _execute.make_shape(shape, "shape")
if device_ordinal is None:
device_ordinal = -1
device_ordinal = _execute.make_int(device_ordinal, "device_ordinal")
_inputs_flat = []
_attrs = ("dtype", dtype, "shape", shape, "device_ordinal", device_ordinal)
_result = _execute.execute(b"OutfeedDequeue", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"OutfeedDequeue", _inputs_flat, _attrs, _result)
_result, = _result
return _result
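# Hedged usage sketch (not part of the generated file): these wrappers are normally reached via
# tf.raw_ops on a host driving a TPU, e.g.
#   value = tf.raw_ops.OutfeedDequeue(dtype=tf.float32, shape=[128], device_ordinal=0)
# The call blocks until a matching OutfeedEnqueue has placed data on the outfeed queue, so it
# only makes sense inside an outfeed-draining loop.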
def outfeed_dequeue_tuple(dtypes, shapes, device_ordinal=-1, name=None):
r"""Retrieve multiple values from the computation outfeed.
This operation will block indefinitely until data is available. Output `i`
corresponds to XLA tuple element `i`.
Args:
dtypes: A list of `tf.DTypes` that has length `>= 1`.
The element types of each element in `outputs`.
shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`).
The shapes of each tensor in `outputs`.
device_ordinal: An optional `int`. Defaults to `-1`.
The TPU device to use. This should be -1 when the Op
is running on a TPU device, and >= 0 when the Op is running on the CPU
device.
name: A name for the operation (optional).
Returns:
A list of `Tensor` objects of type `dtypes`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "OutfeedDequeueTuple", name,
tld.op_callbacks, "dtypes", dtypes, "shapes", shapes,
"device_ordinal", device_ordinal)
return _result
except _core._FallbackException:
try:
return outfeed_dequeue_tuple_eager_fallback(
dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'outfeed_dequeue_tuple' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if not isinstance(shapes, (list, tuple)):
raise TypeError(
"Expected list for 'shapes' argument to "
"'outfeed_dequeue_tuple' Op, not %r." % shapes)
shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
if device_ordinal is None:
device_ordinal = -1
device_ordinal = _execute.make_int(device_ordinal, "device_ordinal")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"OutfeedDequeueTuple", dtypes=dtypes, shapes=shapes,
device_ordinal=device_ordinal, name=name)
_result = _outputs[:]
if not _result:
return _op
if _execute.must_record_gradient():
_attrs = ("dtypes", _op.get_attr("dtypes"), "shapes",
_op.get_attr("shapes"), "device_ordinal",
_op._get_attr_int("device_ordinal"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"OutfeedDequeueTuple", _inputs_flat, _attrs, _result)
return _result
OutfeedDequeueTuple = tf_export("raw_ops.OutfeedDequeueTuple")(_ops.to_raw_op(outfeed_dequeue_tuple))
def outfeed_dequeue_tuple_eager_fallback(dtypes, shapes, device_ordinal, name, ctx):
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'outfeed_dequeue_tuple' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if not isinstance(shapes, (list, tuple)):
raise TypeError(
"Expected list for 'shapes' argument to "
"'outfeed_dequeue_tuple' Op, not %r." % shapes)
shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
if device_ordinal is None:
device_ordinal = -1
device_ordinal = _execute.make_int(device_ordinal, "device_ordinal")
_inputs_flat = []
_attrs = ("dtypes", dtypes, "shapes", shapes, "device_ordinal",
device_ordinal)
_result = _execute.execute(b"OutfeedDequeueTuple", len(dtypes),
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"OutfeedDequeueTuple", _inputs_flat, _attrs, _result)
return _result
def outfeed_enqueue(input, name=None):
r"""Enqueue a Tensor on the computation outfeed.
Args:
input: A `Tensor`. A tensor that will be inserted into the outfeed queue.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "OutfeedEnqueue", name,
tld.op_callbacks, input)
return _result
except _core._FallbackException:
try:
return outfeed_enqueue_eager_fallback(
input, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"OutfeedEnqueue", input=input, name=name)
return _op
OutfeedEnqueue = tf_export("raw_ops.OutfeedEnqueue")(_ops.to_raw_op(outfeed_enqueue))
def outfeed_enqueue_eager_fallback(input, name, ctx):
_attr_dtype, (input,) = _execute.args_to_matching_eager([input], ctx)
_inputs_flat = [input]
_attrs = ("dtype", _attr_dtype)
_result = _execute.execute(b"OutfeedEnqueue", 0, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
_result = None
return _result
def outfeed_enqueue_tuple(inputs, name=None):
r"""Enqueue multiple Tensor values on the computation outfeed.
Args:
inputs: A list of `Tensor` objects.
A list of tensors that will be inserted into the outfeed queue as an
XLA tuple.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "OutfeedEnqueueTuple", name,
tld.op_callbacks, inputs)
return _result
except _core._FallbackException:
try:
return outfeed_enqueue_tuple_eager_fallback(
inputs, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"OutfeedEnqueueTuple", inputs=inputs, name=name)
return _op
OutfeedEnqueueTuple = tf_export("raw_ops.OutfeedEnqueueTuple")(_ops.to_raw_op(outfeed_enqueue_tuple))
def outfeed_enqueue_tuple_eager_fallback(inputs, name, ctx):
_attr_dtypes, inputs = _execute.convert_to_mixed_eager_tensors(inputs, ctx)
_inputs_flat = list(inputs)
_attrs = ("dtypes", _attr_dtypes)
_result = _execute.execute(b"OutfeedEnqueueTuple", 0, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
_result = None
return _result
def prelinearize(input, shape=[], layout=[], name=None):
r"""An op which linearizes one Tensor value to an opaque variant tensor.
Args:
input: A `Tensor`. A tensor that will be linearized.
shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `[]`.
The shape of the tensor.
layout: An optional list of `ints`. Defaults to `[]`.
A vector holding the requested layout in minor-to-major sequence. If a layout
attribute is passed but its values are all -1 the layout will be computed by
the infeed operation.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `variant`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "Prelinearize", name,
tld.op_callbacks, input, "shape", shape, "layout", layout)
return _result
except _core._FallbackException:
try:
return prelinearize_eager_fallback(
input, shape=shape, layout=layout, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
if shape is None:
shape = []
shape = _execute.make_shape(shape, "shape")
if layout is None:
layout = []
if not isinstance(layout, (list, tuple)):
raise TypeError(
"Expected list for 'layout' argument to "
"'prelinearize' Op, not %r." % layout)
layout = [_execute.make_int(_i, "layout") for _i in layout]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Prelinearize", input=input, shape=shape, layout=layout, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("dtype", _op._get_attr_type("dtype"), "shape",
_op.get_attr("shape"), "layout", _op.get_attr("layout"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Prelinearize", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Prelinearize = tf_export("raw_ops.Prelinearize")(_ops.to_raw_op(prelinearize))
def prelinearize_eager_fallback(input, shape, layout, name, ctx):
if shape is None:
shape = []
shape = _execute.make_shape(shape, "shape")
if layout is None:
layout = []
if not isinstance(layout, (list, tuple)):
raise TypeError(
"Expected list for 'layout' argument to "
"'prelinearize' Op, not %r." % layout)
layout = [_execute.make_int(_i, "layout") for _i in layout]
_attr_dtype, (input,) = _execute.args_to_matching_eager([input], ctx)
_inputs_flat = [input]
_attrs = ("dtype", _attr_dtype, "shape", shape, "layout", layout)
_result = _execute.execute(b"Prelinearize", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Prelinearize", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def prelinearize_tuple(inputs, shapes, layouts=[], name=None):
r"""An op which linearizes multiple Tensor values to an opaque variant tensor.
Args:
inputs: A list of `Tensor` objects.
A list of tensors that will be provided using the infeed mechanism.
    shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`).
self._is_closed:
return
if x_position is None:
x_position = self.width / 2
if y_position is None:
y_position = self.height - 20
anchor_point = Point(x_position, y_position)
text = Text(anchor_point, message)
# FIXME: Really should do all this on a per-RoseCanvas basis.
if self.initial_canvas:
text.attach_to(self.initial_canvas)
self.initial_canvas._renderShape(text, render_NOW=True)
click_position = self.get_next_mouse_click()
if erase_it and self.initial_canvas:
text.detach_from(self.initial_canvas)
if close_it:
self.close() # then close the window
return click_position
def get_next_mouse_click(self):
"""
Waits for the user to click in the window.
Then returns the rg.Point that represents the point where the user clicked.
Example:
If this method is called and then the user clicks near the upper-right corner of a 300 x 500 window,
this function would return something like rg.Point(295, 5).
"""
self.mouse.position = None
while True:
if self._is_closed:
return None
if self.mouse.position is not None:
break
self.update()
time.sleep(.05) # allow time for other events to be handled
click_point = self.mouse.position
self.mouse.position = None
return click_point
def _on_mouse_click(self, event):
self.mouse._update(event)
def _on_key_press(self, event):
self.keyboard._update(event)
# def add_canvas(self, width=None, height=None, background_color=0):
# FIXME: Set defaults based on the main canvas.
# new_canvas = RoseCanvas(self, background_color='white')
# self.widgets.append(new_canvas)
#
# _root.update()
def __serialize_shapes(self):
"""Returns a list of strings representing the shapes in sorted order."""
return _serialize_shapes(self)
class RoseWidget():
"""
A Widget is a thing that one can put on a Window,
e.g. a Canvas, FortuneTeller, etc.
"""
def __init__(self, window):
self._window = window
def get_window(self):
return self._window
class RoseCanvas(RoseWidget):
    """
    A RoseCanvas is a RoseWidget (i.e., a thing on a RoseWindow)
    upon which one can draw shapes and other Drawable things.
    """
    defaults = {'colors': [None, 'yellow', 'light blue', 'dark grey']}
    count = 0
def __init__(self, window, width=200, height=200,
background_color=0):
super().__init__(window)
RoseCanvas.count = RoseCanvas.count + 1
# FIXME: Deal with default background colors.
# FIXME: Store background color as a property
# so that modifying it changes the tkinter canvas.
# Ditto width and height.
# if background_color == 0:
# index = RoseCanvas.count % len(defaults['colors'])
# self.background_color = defaults['colors'][index]
# else:
# self.background_color = background_color
tk_canvas = tkinter.Canvas(window.toplevel,
width=width, height=height,
background=background_color)
self._tkinter_canvas = tk_canvas
# FIXME: Automate gridding better.
self._tkinter_canvas.grid(padx=5, pady=5)
self.shapes = []
def render(self, seconds_to_pause=None):
"""
Updates all the Shapes attached to this RoseCanvas, then draws all those Shapes.
After doing so, pauses the given number of seconds.
:type seconds_to_pause: int
"""
self._update_shapes()
self._window.update()
if seconds_to_pause:
time.sleep(seconds_to_pause)
def _renderShape(self, shape, render_NOW=False):
"""Renders a shape."""
coordinates = shape._get_coordinates_for_drawing()
options = shape._get_options_for_drawing()
if shape.shape_id_by_canvas[self] is None:
shape.shape_id_by_canvas[self] = \
shape._method_for_drawing(self._tkinter_canvas, *coordinates)
try:
self._tkinter_canvas.coords(shape.shape_id_by_canvas[self],
*coordinates)
except tkinter.TclError:
msg = 'Could not place the shape\n'
msg += 'on the given window.\n'
msg += 'Did you accidentally close a window\n'
msg += 'that later needed to be rendered again?'
raise Exception(msg) from None
self._tkinter_canvas.itemconfigure(shape.shape_id_by_canvas[self],
options)
if render_NOW:
# redraw NOW
self._window.update()
def _draw(self, shape):
"""Queues a shape for being drawn. Does NOT draw it just yet."""
shapeInList = False
for listShape in self.shapes:
if listShape is shape:
shapeInList = True
break
if not shapeInList:
shape.shape_id_by_canvas[self] = None
self.shapes.append(shape)
def _undraw(self, shape):
if shape in self.shapes:
for i in range(len(self.shapes)):
if self.shapes[i] is shape:
self._tkinter_canvas.delete(shape.shape_id_by_canvas[self])
del self.shapes[i]
break
def _update_shapes(self):
for shape in self.shapes:
self._renderShape(shape)
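# Illustrative sketch (hypothetical names; assumes a RoseWindow called `window`
# already exists): construct a canvas, attach a Shape to it, and render it.
#
#   canvas = RoseCanvas(window, width=300, height=200, background_color='white')
#   greeting = Text(Point(150, 100), 'Hello, RoseCanvas!')
#   greeting.attach_to(canvas)
#   canvas.render(seconds_to_pause=2)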
class Mouse(object):
def __init__(self):
self.position = None
def _update(self, event):
self.position = Point(event.x, event.y)
class Keyboard(object):
def __init__(self):
self.key_pressed = None
def _update(self, event):
pass
class __FreezeClass__ (type):
"""Prevents class variable assignment."""
def __setattr__(self, name, _ignored): # last parameter is the value
err = "You tried to set the instance variable '" + name + "'\n"
err += " on the CLASS '" + self.__name__ + "'"
err += ", which is not an OBJECT.\n"
err += " Did you forget the () after the word "
err += self.__name__ + ",\n"
err += " on the line where you constructed the object?"
raise SyntaxError(err)
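# Illustrative sketch only (not used by the library): demonstrates the error
# that __FreezeClass__ raises when code assigns to a CLASS instead of an object.
def _freeze_class_demo():
    """Returns the SyntaxError message produced by assigning to a frozen class."""
    class _Demo(object, metaclass=__FreezeClass__):
        pass
    try:
        _Demo.some_attribute = 42  # intercepted by __FreezeClass__.__setattr__
    except SyntaxError as error:
        return str(error)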
class _Shape(object, metaclass=__FreezeClass__):
"""
A Shape is a thing that can be drawn on a RoseCanvas
(which itself draws on a tkinter Canvas).
Its constructor provides the tkinter method to be used to
draw this Shape.
This abstract type has concrete subclasses that include:
Arc, Bitmap, Circle, Ellipse, Image, Line, Path, Polygon,
Rectangle, RoundedRectangle, Square, Text and Window.
Public data attributes: None.
Public methods: attach_to.
"""
def __init__(self, method_for_drawing):
""" Arguments:
-- the tkinter method for drawing the Shape.
"""
self._method_for_drawing = method_for_drawing
self.shape_id_by_canvas = {}
def __eq__(self, other):
"""
Two Shape objects are equal (==) if all their attributes
are equal to each other.
"""
# check before we go deleting keys that may or may not exist
        if not isinstance(other, self.__class__):
return False
self_dict = self.__dict__.copy()
other_dict = other.__dict__.copy()
del self_dict["shape_id_by_canvas"]
del other_dict["shape_id_by_canvas"]
return (self_dict == other_dict)
def __ne__(self, other):
return not self.__eq__(other)
def attach_to(self, window_or_canvas):
"""
'draws' this Shape. More precisely:
Attaches this Shape to the given
RoseWindow or RoseCanvas. When that
RoseWindow/RoseCanvas is rendered, this shape
will appear on that RoseWindow/RoseCanvas.
"""
if isinstance(window_or_canvas, RoseWindow):
window_or_canvas = window_or_canvas.initial_canvas
window_or_canvas._draw(self)
def detach_from(self, rose_canvas):
"""
'undraws' this Shape. More precisely:
Detaches this Shape from the given
RoseWindow or RoseCanvas. When that
RoseWindow/RoseCanvas is rendered,
this shape will no longer appear
on that RoseWindow/RoseCanvas.
"""
        if isinstance(rose_canvas, RoseWindow):
rose_canvas = rose_canvas.initial_canvas
rose_canvas._undraw(self)
class _ShapeWithOutline(object):
"""
A Shape that has an interior (which can be filled with a color)
and an outline (which has a color and thickness).
This abstract type has concrete subclasses that include:
Arc, Circle, Ellipse, Image, Line, Path,
Polygon, Rectangle, Square, Text and Window.
Public data attributes: fill_color, outline_color, outline_thickness.
Public methods: _initialize_options.
"""
defaults = {'fill_color': None,
'outline_color': 'black',
'outline_thickness': 1}
def _initialize_options(self):
self.fill_color = _ShapeWithOutline.defaults['fill_color']
self.outline_color = _ShapeWithOutline.defaults['outline_color']
self.outline_thickness = _ShapeWithOutline.defaults[
'outline_thickness']
def _get_options_for_drawing(self):
options = {'fill': self.fill_color,
'outline': self.outline_color,
'width': self.outline_thickness}
# If a color is None, that means transparent here:
for option in ('fill', 'outline'):
if not options[option]:
options[option] = ''
return options
class _ShapeWithThickness(object):
"""
A Shape that can be (and almost always is) filled with a color
and has a thickness but no outline.
This abstract type has concrete subclasses that include:
Line and Path.
Public data attributes: color, thickness.
Public methods: _initialize_options.
"""
defaults = {'color': 'black',
'thickness': 1,
'arrow': None}
def _initialize_options(self):
self.color = _ShapeWithThickness.defaults['color']
self.thickness = _ShapeWithThickness.defaults['thickness']
self.arrow = _ShapeWithThickness.defaults['arrow']
def _get_options_for_drawing(self):
options = {'fill': self.color,
'width': self.thickness,
'arrow': self.arrow}
# If a color is None, that means 'black' here:
if options['fill'] is None:
options['fill'] = 'black'
return options
class _ShapeWithText(object):
"""
A Shape that has text and a font for displaying that text.
This abstract type has concrete subclasses that include:
Text.
Public data attributes: font_family, font_size,
is_bold, is_italic, is_underline, is_overstrike.
Public methods: _initialize_options.
"""
# FIXME: Add more to the above docstring.
defaults = {'font_family': 'helvetica',
'font_size': 14,
'weight': 'normal',
'slant': 'roman',
'underline': 0,
'overstrike': 0,
'justify': tkinter.CENTER,
'text_box_width': None,
'text_color': 'black',
'text': ''}
def _initialize_options(self):
self.font_family = _ShapeWithText.defaults['font_family']
self.font_size = _ShapeWithText.defaults['font_size']
self.is_bold = _ShapeWithText.defaults['weight'] == 'bold'
self.is_italic = _ShapeWithText.defaults['slant'] == 'italic'
self.is_underline = _ShapeWithText.defaults['underline'] == 1
self.is_overstrike = _ShapeWithText.defaults['overstrike'] == 1
self.justify = _ShapeWithText.defaults['justify']
self.text_box_width = _ShapeWithText.defaults['text_box_width']
self.text_color = _ShapeWithText.defaults['text_color']
self.text = _ShapeWithText.defaults['text']
def _get_options_for_drawing(self):
weight = 'bold' if self.is_bold else 'normal'
slant = 'italic' if self.is_italic else 'roman'
underline = 1 if self.is_underline else 0
overstrike = 1 if self.is_overstrike else 0
font = tkinter_font.Font(family=self.font_family,
size=self.font_size,
weight=weight,
slant=slant,
underline=underline,
overstrike=overstrike)
options = {'font': font,
'justify': self.justify,
'fill': self.text_color,
'text': self.text}
if self.text_box_width:
options['width'] = self.text_box_width
return options
class _ShapeWithCenter(_Shape):
"""
A Shape that has a center (and for which moving its center
moves the entire Shape). Its constructor provides the center
of the Shape along with its method for drawing this Shape.
This abstract type has concrete subclasses that include:
Arc, Bitmap, Circle, Ellipse, Image,
Rectangle, RoundedRectangle, Square, Text and Window.
Public data attributes: center.
Public methods: move_by, move_center_to.
"""
def __init__(self, center, method_for_drawing):
"""
Arguments:
-- the Point that is the center of the Shape
(the Shape stores a CLONE of that Point)
-- the tkinter method for drawing the Shape.
"""
# Clone | |
"綞",
"缎": "緞",
"缏": "緶",
"缑": "緱",
"缒": "縋",
"缓": "緩",
"缔": "締",
"缕": "縷",
"编": "編",
"缗": "緡",
"缘": "緣",
"缙": "縉",
"缚": "縛",
"缛": "縟",
"缜": "縝",
"缝": "縫",
"缞": "縗",
"缟": "縞",
"缠": "纏",
"缡": "縭",
"缢": "縊",
"缣": "縑",
"缤": "繽",
"缥": "縹",
"缦": "縵",
"缧": "縲",
"缨": "纓",
"缩": "縮",
"缪": "繆",
"缫": "繅",
"缬": "纈",
"缭": "繚",
"缮": "繕",
"缯": "繒",
"缱": "繾",
"缲": "繰",
"缳": "繯",
"缴": "繳",
"缵": "纘",
"罂": "罌",
"网": "網",
"罗": "羅",
"罚": "罰",
"罢": "罷",
"罴": "羆",
"羁": "羈",
"羟": "羥",
"翘": "翹",
"耢": "耮",
"耧": "耬",
"耸": "聳",
"耻": "恥",
"聂": "聶",
"聋": "聾",
"职": "職",
"聍": "聹",
"联": "聯",
"聩": "聵",
"聪": "聰",
"肃": "肅",
"肠": "腸",
"肤": "膚",
"肮": "骯",
"肴": "餚",
"肾": "腎",
"肿": "腫",
"胀": "脹",
"胁": "脅",
"胆": "膽",
"胧": "朧",
"胨": "腖",
"胪": "臚",
"胫": "脛",
"胶": "膠",
"脉": "脈",
"脍": "膾",
"脐": "臍",
"脑": "腦",
"脓": "膿",
"脔": "臠",
"脚": "腳",
"脱": "脫",
"脶": "腡",
"脸": "臉",
"腭": "齶",
"腻": "膩",
"腼": "靦",
"腽": "膃",
"腾": "騰",
"膑": "臏",
"臜": "臢",
"舆": "輿",
"舣": "艤",
"舰": "艦",
"舱": "艙",
"舻": "艫",
"艰": "艱",
"艳": "艷",
"艺": "藝",
"节": "節",
"芈": "羋",
"芗": "薌",
"芜": "蕪",
"芦": "蘆",
"苁": "蓯",
"苇": "葦",
"苈": "藶",
"苋": "莧",
"苌": "萇",
"苍": "蒼",
"苎": "苧",
"茎": "莖",
"茏": "蘢",
"茑": "蔦",
"茔": "塋",
"茕": "煢",
"茧": "繭",
"荆": "荊",
"荐": "薦",
"荙": "薘",
"荚": "莢",
"荛": "蕘",
"荜": "蓽",
"荞": "蕎",
"荟": "薈",
"荠": "薺",
"荣": "榮",
"荤": "葷",
"荥": "滎",
"荦": "犖",
"荧": "熒",
"荨": "蕁",
"荩": "藎",
"荪": "蓀",
"荫": "蔭",
"荬": "蕒",
"荭": "葒",
"荮": "葤",
"莅": "蒞",
"莱": "萊",
"莲": "蓮",
"莳": "蒔",
"莴": "萵",
"莶": "薟",
"莸": "蕕",
"莹": "瑩",
"莺": "鶯",
"萝": "蘿",
"萤": "螢",
"营": "營",
"萦": "縈",
"萧": "蕭",
"萨": "薩",
"葱": "蔥",
"蒇": "蕆",
"蒉": "蕢",
"蒋": "蔣",
"蒌": "蔞",
"蓝": "藍",
"蓟": "薊",
"蓠": "蘺",
"蓣": "蕷",
"蓥": "鎣",
"蓦": "驀",
"蔂": "虆",
"蔷": "薔",
"蔹": "蘞",
"蔺": "藺",
"蔼": "藹",
"蕰": "薀",
"蕲": "蘄",
"薮": "藪",
"䓕": "薳",
"藓": "蘚",
"蘖": "櫱",
"虏": "虜",
"虑": "慮",
"虚": "虛",
"虬": "虯",
"虮": "蟣",
"虽": "雖",
"虾": "蝦",
"虿": "蠆",
"蚀": "蝕",
"蚁": "蟻",
"蚂": "螞",
"蚕": "蠶",
"蚬": "蜆",
"蛊": "蠱",
"蛎": "蠣",
"蛏": "蟶",
"蛮": "蠻",
"蛰": "蟄",
"蛱": "蛺",
"蛲": "蟯",
"蛳": "螄",
"蛴": "蠐",
"蜕": "蛻",
"蜗": "蝸",
"蝇": "蠅",
"蝈": "蟈",
"蝉": "蟬",
"蝼": "螻",
"蝾": "蠑",
"螀": "螿",
"螨": "蟎",
"䗖": "螮",
"蟏": "蠨",
"衅": "釁",
"衔": "銜",
"补": "補",
"衬": "襯",
"衮": "袞",
"袄": "襖",
"袅": "裊",
"袆": "褘",
"袜": "襪",
"袭": "襲",
"袯": "襏",
"装": "裝",
"裆": "襠",
"裈": "褌",
"裢": "褳",
"裣": "襝",
"裤": "褲",
"裥": "襇",
"褛": "褸",
"褴": "襤",
"䙓": "襬",
"见": "見",
"观": "觀",
"觃": "覎",
"规": "規",
"觅": "覓",
"视": "視",
"觇": "覘",
"览": "覽",
"觉": "覺",
"觊": "覬",
"觋": "覡",
"觌": "覿",
"觍": "覥",
"觎": "覦",
"觏": "覯",
"觐": "覲",
"觑": "覷",
"觞": "觴",
"触": "觸",
"觯": "觶",
"訚": "誾",
"䜣": "訢",
"誉": "譽",
"誊": "謄",
"䜧": "譅",
"讠": "訁",
"计": "計",
"订": "訂",
"讣": "訃",
"认": "認",
"讥": "譏",
"讦": "訐",
"讧": "訌",
"讨": "討",
"让": "讓",
"讪": "訕",
"讫": "訖",
"讬": "託",
"训": "訓",
"议": "議",
"讯": "訊",
"记": "記",
"讱": "訒",
"讲": "講",
"讳": "諱",
"讴": "謳",
"讵": "詎",
"讶": "訝",
"讷": "訥",
"许": "許",
"讹": "訛",
"论": "論",
"讻": "訩",
"讼": "訟",
"讽": "諷",
"设": "設",
"访": "訪",
"诀": "訣",
"证": "證",
"诂": "詁",
"诃": "訶",
"评": "評",
"诅": "詛",
"识": "識",
"诇": "詗",
"诈": "詐",
"诉": "訴",
"诊": "診",
"诋": "詆",
"诌": "謅",
"词": "詞",
"诎": "詘",
"诏": "詔",
"诐": "詖",
"译": "譯",
"诒": "詒",
"诓": "誆",
"诔": "誄",
"试": "試",
"诖": "詿",
"诗": "詩",
"诘": "詰",
"诙": "詼",
"诚": "誠",
"诛": "誅",
"诜": "詵",
"话": "話",
"诞": "誕",
"诟": "詬",
"诠": "詮",
"诡": "詭",
"询": "詢",
"诣": "詣",
"诤": "諍",
"该": "該",
"详": "詳",
"诧": "詫",
"诨": "諢",
"诩": "詡",
"诪": "譸",
"诫": "誡",
"诬": "誣",
"语": "語",
"诮": "誚",
"误": "誤",
"诰": "誥",
"诱": "誘",
"诲": "誨",
"诳": "誑",
"诵": "誦",
"诶": "誒",
"请": "請",
"诸": "諸",
"诹": "諏",
"诺": "諾",
"读": "讀",
"诼": "諑",
"诽": "誹",
"课": "課",
"诿": "諉",
"谀": "諛",
"谁": "誰",
"谂": "諗",
"调": "調",
"谄": "諂",
"谅": "諒",
"谆": "諄",
"谇": "誶",
"谈": "談",
"谊": "誼",
"谋": "謀",
"谌": "諶",
"谍": "諜",
"谎": "謊",
"谏": "諫",
"谐": "諧",
"谑": "謔",
"谒": "謁",
"谓": "謂",
"谔": "諤",
"谕": "諭",
"谖": "諼",
"谗": "讒",
"谘": "諮",
"谙": "諳",
"谚": "諺",
"谛": "諦",
"谜": "謎",
"谝": "諞",
"谞": "諝",
"谟": "謨",
"谠": "讜",
"谡": "謖",
"谢": "謝",
"谤": "謗",
"谥": "謚",
"谦": "謙",
"谧": "謐",
"谨": "謹",
"谩": "謾",
"谪": "謫",
"谬": "謬",
"谭": "譚",
"谮": "譖",
"谯": "譙",
"谰": "讕",
"谱": "譜",
"谲": "譎",
"谳": "讞",
"谴": "譴",
"谵": "譫",
"谶": "讖",
"豮": "豶",
"䝙": "貙",
"䞐": "賰",
"贝": "貝",
"贞": "貞",
"负": "負",
"贠": "貟",
"贡": "貢",
"财": "財",
"责": "責",
"贤": "賢",
"败": "敗",
"账": "賬",
"货": "貨",
"质": "質",
"贩": "販",
"贪": "貪",
"贫": "貧",
"贬": "貶",
"购": "購",
"贮": "貯",
"贯": "貫",
"贰": "貳",
"贱": "賤",
"贲": "賁",
"贳": "貰",
"贴": "貼",
"贵": "貴",
"贶": "貺",
"贷": "貸",
"贸": "貿",
"费": "費",
"贺": "賀",
"贻": "貽",
"贼": "賊",
"贽": "贄",
"贾": "賈",
"贿": "賄",
"赀": "貲",
"赁": "賃",
"赂": "賂",
"资": "資",
"赅": "賅",
"赆": "贐",
"赇": "賕",
"赈": "賑",
"赉": "賚",
"赊": "賒",
"赋": "賦",
"赌": "賭",
"赎": "贖",
"赏": "賞",
"赐": "賜",
"赑": "贔",
"赒": "賙",
"赓": "賡",
"赔": "賠",
"赕": "賧",
"赖": "賴",
"赗": "賵",
"赘": "贅",
"赙": "賻",
"赚": "賺",
"赛": "賽",
"赜": "賾",
"赞": "贊",
"赟": "贇",
"赠": "贈",
"赡": "贍",
"赢": "贏",
"赣": "贛",
"赪": "赬",
"赵": "趙",
"赶": "趕",
"趋": "趨",
"趱": "趲",
"趸": "躉",
"跃": "躍",
"跄": "蹌",
"跞": "躒",
"践": "踐",
"跶": "躂",
"跷": "蹺",
"跸": "蹕",
"跹": "躚",
"跻": "躋",
"踊": "踴",
"踌": "躊",
"踪": "蹤",
"踬": "躓",
"踯": "躑",
"蹑": "躡",
"蹒": "蹣",
"蹰": "躕",
"蹿": "躥",
"躏": "躪",
"躜": "躦",
"躯": "軀",
"车": "車",
"轧": "軋",
"轨": "軌",
"轩": "軒",
"轪": "軑",
"轫": "軔",
"转": "轉",
"轭": "軛",
"轮": "輪",
"软": "軟",
"轰": "轟",
"轱": "軲",
"轲": "軻",
"轳": "轤",
"轴": "軸",
"轵": "軹",
"轶": "軼",
"轷": "軤",
"轸": "軫",
"轹": "轢",
"轺": "軺",
"轻": "輕",
"轼": "軾",
"载": "載",
"轾": "輊",
"轿": "轎",
"辀": "輈",
"辁": "輇",
"辂": "輅",
"较": "較",
"辄": "輒",
"辅": "輔",
"辆": "輛",
"辇": "輦",
"辈": "輩",
"辉": "輝",
"辊": "輥",
"辋": "輞",
"辌": "輬",
"辍": "輟",
"辎": "輜",
"辏": "輳",
"辐": "輻",
"辑": "輯",
"辒": "轀",
"输": "輸",
"辔": "轡",
"辕": "轅",
"辖": "轄",
"辗": "輾",
"辘": "轆",
"辙": "轍",
"辚": "轔",
"辞": "辭",
"辩": "辯",
"辫": "辮",
"边": "邊",
"辽": "遼",
"达": "達",
"迁": "遷",
"过": "過",
"迈": "邁",
"运": "運",
"还": "還",
"这": "這",
"进": "進",
"远": "遠",
"违": "違",
"连": "連",
"迟": "遲",
"迩": "邇",
"迳": "逕",
"迹": "跡",
"选": "選",
"逊": "遜",
"递": "遞",
"逦": "邐",
"逻": "邏",
"遗": "遺",
"遥": "遙",
"邓": "鄧",
"邝": "鄺",
"邬": "鄔",
"邮": "郵",
"邹": "鄒",
"邺": "鄴",
"邻": "鄰",
"郏": "郟",
"郐": "鄶",
"郑": "鄭",
"郓": "鄆",
"郦": "酈",
"郧": "鄖",
"郸": "鄲",
"酂": "酇",
"酦": "醱",
"酱": "醬",
"酽": "釅",
"酾": "釃",
"酿": "釀",
"释": "釋",
"鉴": "鑒",
"銮": "鑾",
"錾": "鏨",
"𨱏": "鎝",
"钅": "釒",
"钆": "釓",
"钇": "釔",
"针": "針",
"钉": "釘",
"钊": "釗",
"钋": "釙",
"钌": "釕",
"钍": "釷",
"钎": "釺",
"钏": "釧",
"钐": "釤",
"钑": "鈒",
"钒": "釩",
"钓": "釣",
"钔": "鍆",
"钕": "釹",
"钖": "鍚",
"钗": "釵",
"钘": "鈃",
"钙": "鈣",
"钚": "鈈",
"钛": "鈦",
"钜": "鉅",
"钝": "鈍",
"钞": "鈔",
"钠": "鈉",
"钡": "鋇",
"钢": "鋼",
"钣": "鈑",
"钤": "鈐",
"钥": "鑰",
"钦": "欽",
"钧": "鈞",
"钨": "鎢",
"钪": "鈧",
"钫": "鈁",
"钬": "鈥",
"钭": "鈄",
"钮": "鈕",
"钯": "鈀",
"钰": "鈺",
"钱": "錢",
"钲": "鉦",
"钳": "鉗",
"钴": "鈷",
"钶": "鈳",
"钷": "鉕",
"钸": "鈽",
"钹": "鈸",
"钺": "鉞",
"钻": "鑽",
"钼": "鉬",
"钽": "鉭",
"钾": "鉀",
"钿": "鈿",
"铀": "鈾",
"铁": "鐵",
"铂": "鉑",
"铃": "鈴",
"铄": "鑠",
"铅": "鉛",
"铆": "鉚",
"铇": "鉋",
"铈": "鈰",
"铉": "鉉",
"铊": "鉈",
"铋": "鉍",
"铌": "鈮",
"铍": "鈹",
"铎": "鐸",
"铏": "鉶",
"铐": "銬",
"铑": "銠",
"铒": "鉺",
"铓": "鋩",
"铔": "錏",
"铕": "銪",
"铖": "鋮",
"铗": "鋏",
"铘": "鋣",
"铙": "鐃",
"铚": "銍",
"铛": "鐺",
"铜": "銅",
"铝": "鋁",
"铞": "銱",
"铟": "銦",
"铠": "鎧",
"铡": "鍘",
"铢": "銖",
"铣": "銑",
"铤": "鋌",
"铥": "銩",
"铦": "銛",
"铧": "鏵",
"铨": "銓",
"铩": "鎩",
"铪": "鉿",
"铫": "銚",
"铬": "鉻",
"铭": "銘",
"铮": "錚",
"铯": "銫",
"铰": "鉸",
"铱": "銥",
"铲": "鏟",
"铳": "銃",
"铴": "鐋",
"铵": "銨",
"银": "銀",
"铷": "銣",
"铸": "鑄",
"铹": "鐒",
"铺": "鋪",
"铻": "鋙",
"铼": "錸",
"铽": "鋱",
"链": "鏈",
"铿": "鏗",
"销": "銷",
"锁": "鎖",
"锂": "鋰",
"锃": "鋥",
"锄": "鋤",
"锅": "鍋",
"锆": "鋯",
"锇": "鋨",
"锉": "銼",
"锊": "鋝",
"锋": "鋒",
"锌": "鋅",
"锍": "鋶",
"锎": "鐦",
"锏": "鐧",
"锑": "銻",
"锒": "鋃",
"锓": "鋟",
"锔": "鋦",
"锕": "錒",
"锖": "錆",
"锗": "鍺",
"锘": "鍩",
"错": "錯",
"锚": "錨",
"锛": "錛",
"锜": "錡",
"锝": "鍀",
"锞": "錁",
"锟": "錕",
"锠": "錩",
"锡": "錫",
"锢": "錮",
"锣": "鑼",
"锥": "錐",
"锦": "錦",
"锧": "鑕",
"锩": "錈",
"锪": "鍃",
"锫": "錇",
"锬": "錟",
"锭": "錠",
"键": "鍵",
"锯": "鋸",
"锰": "錳",
"锱": "錙",
"锲": "鍥",
"锳": "鍈",
"锴": "鍇",
"锵": "鏘",
"锶": "鍶",
"锷": "鍔",
"锸": "鍤",
"锹": "鍬",
"锺": "鍾",
"锻": "鍛",
"锼": "鎪",
"锽": "鍠",
"锾": "鍰",
"锿": "鎄",
"镀": "鍍",
"镁": "鎂",
"镂": "鏤",
"镃": "鎡",
"镄": "鐨",
"镅": "鎇",
"镆": "鏌",
"镇": "鎮",
"镈": "鎛",
"镉": "鎘",
"镊": "鑷",
"镋": "鎲",
"镍": "鎳",
"镎": "鎿",
"镏": "鎦",
"镐": "鎬",
"镑": "鎊",
"镒": "鎰",
"镓": "鎵",
"镔": "鑌",
"镕": "鎔",
"镖": "鏢",
"镗": "鏜",
"镘": "鏝",
"镙": "鏍",
"镚": "鏰",
"镛": "鏞",
"镜": "鏡",
"镝": "鏑",
"镞": "鏃",
"镟": "鏇",
"镠": "鏐",
"镡": "鐔",
"镣": "鐐",
"镤": "鏷",
"镥": "鑥",
"镦": "鐓",
"镧": "鑭",
"镨": "鐠",
"镩": "鑹",
"镪": "鏹",
"镫": "鐙",
"镬": "鑊",
"镭": "鐳",
"镮": "鐶",
"镯": "鐲",
"镰": "鐮",
"镱": "鐿",
"镲": "鑔",
"镳": "鑣",
"镴": "鑞",
"镵": "鑱",
"镶": "鑲",
"长": "長",
"门": "門",
"闩": "閂",
"闪": "閃",
"闫": "閆",
"闬": "閈",
"闭": "閉",
"问": "問",
"闯": "闖",
"闰": "閏",
"闱": "闈",
"闲": "閑",
"闳": "閎",
"间": "間",
"闵": "閔",
"闶": "閌",
"闷": "悶",
"闸": "閘",
"闹": "鬧",
"闺": "閨",
"闻": "聞",
"闼": "闥",
"闽": "閩",
"闾": "閭",
"闿": "闓",
"阀": "閥",
"阁": "閣",
"阂": "閡",
"阃": "閫",
"阄": "鬮",
"阆": "閬",
"阇": "闍",
"阈": "閾",
"阉": "閹",
"阊": "閶",
"阋": "鬩",
"阌": "閿",
"阍": "閽",
"阎": "閻",
"阏": "閼",
"阐": "闡",
"阑": "闌",
"阒": "闃",
"阓": "闠",
"阔": "闊",
"阕": "闋",
"阖": "闔",
"阗": "闐",
"阘": "闒",
"阙": "闕",
"阚": "闞",
"阛": "闤",
"队": "隊",
"阳": "陽",
"阴": "陰",
"阵": "陣",
"阶": "階",
"际": "際",
"陆": "陸",
"陇": "隴",
"陈": "陳",
"陉": "陘",
"陕": "陝",
"陧": "隉",
"陨": "隕",
"险": "險",
"随": "隨",
"隐": "隱",
"隶": "隸",
"隽": "雋",
"难": "難",
"雏": "雛",
"雠": "讎",
"雳": "靂",
"雾": "霧",
"霁": "霽",
"霡": "霢",
"霭": "靄",
"靓": "靚",
"静": "靜",
"靥": "靨",
"䩄": "靦",
"鞑": "韃",
"鞒": "鞽",
"鞯": "韉",
"韦": "韋",
"韧": "韌",
"韨": "韍",
"韩": "韓",
"韪": "韙",
"韫": "韞",
"韬": "韜",
"韵": "韻",
"页": "頁",
"顶": "頂",
"顷": "頃",
"顸": "頇",
"项": "項",
"顺": "順",
"顼": "頊",
"顽": "頑",
"顾": "顧",
"顿": "頓",
"颀": "頎",
"颁": "頒",
"颂": "頌",
"颃": "頏",
"预": "預",
"颅": "顱",
"领": "領",
"颇": "頗",
"颈": "頸",
"颉": "頡",
"颊": "頰",
"颋": "頲",
"颌": "頜",
"颍": "潁",
"颎": "熲",
"颏": "頦",
"颐": "頤",
"频": "頻",
"颒": "頮",
"颔": "頷",
"颕": "頴",
"颖": "穎",
"颗": "顆",
"题": "題",
"颙": "顒",
"颚": "顎",
"颛": "顓",
"额": "額",
"颞": "顳",
"颟": "顢",
"颠": "顛",
"颡": "顙",
"颢": "顥",
"颤": "顫",
"颥": "顬",
"颦": "顰",
"颧": "顴",
"风": "風",
"飏": "颺",
"飐": "颭",
"飑": "颮",
"飒": "颯",
"飓": "颶",
"飔": "颸",
"飕": "颼",
"飖": "颻",
"飗": "飀",
"飘": "飄",
"飙": "飆",
"飚": "飈",
"飞": "飛",
"飨": "饗",
"餍": "饜",
"饣": "飠",
"饤": "飣",
"饦": "飥",
"饧": "餳",
"饨": "飩",
"饩": "餼",
"饪": "飪",
"饫": "飫",
"饬": "飭",
"饭": "飯",
"饮": "飲",
"饯": "餞",
"饰": "飾",
"饱": "飽",
"饲": "飼",
"饳": "飿",
"饴": "飴",
"饵": "餌",
"饶": "饒",
"饷": "餉",
"饸": "餄",
"饹": "餎",
"饺": "餃",
"饻": "餏",
"饼": "餅",
"饽": "餑",
"饾": "餖",
"饿": "餓",
"馀": "餘",
"馁": "餒",
"馂": "餕",
"馃": "餜",
"馄": "餛",
"馅": "餡",
"馆": "館",
"馇": "餷",
"馈": "饋",
"馉": "餶",
"馊": "餿",
"馋": "饞",
"馌": "饁",
"馍": "饃",
"馎": "餺",
"馏": "餾",
"馐": "饈",
"馑": "饉",
"馒": "饅",
"馓": "饊",
"馔": "饌",
"馕": "饢",
"䯄": "騧",
"马": "馬",
"驭": "馭",
"驮": "馱",
"驯": "馴",
"驰": "馳",
"驱": "驅",
"驲": "馹",
"驳": "駁",
"驴": "驢",
"驵": "駔",
"驶": "駛",
"驷": "駟",
"驸": "駙",
"驹": "駒",
"驺": "騶",
"驻": "駐",
"驼": "駝",
"驽": "駑",
"驾": "駕",
"驿": "驛",
"骀": "駘",
"骁": "驍",
"骃": "駰",
"骄": "驕",
"骅": "驊",
"骆": "駱",
"骇": "駭",
"骈": "駢",
"骉": "驫",
"骊": "驪",
"骋": "騁",
"验": "驗",
"骍": "騂",
"骎": "駸",
"骏": "駿",
"骐": "騏",
"骑": "騎",
"骒": "騍",
"骓": "騅",
"骔": "騌",
"骕": "驌",
"骖": "驂",
"骗": "騙",
"骘": "騭",
"骙": "騤",
"骚": "騷",
"骛": "騖",
"骜": "驁",
"骝": "騮",
"骞": "騫",
"骟": "騸",
"骠": "驃",
"骡": "騾",
"骢": "驄",
"骣": "驏",
"骤": "驟",
"骥": "驥",
"骦": "驦",
"骧": "驤",
"髅": "髏",
"髋": "髖",
"髌": "髕",
"鬓": "鬢",
"魇": "魘",
"魉": "魎",
"鱼": "魚",
"鱽": "魛",
"鱾": "魢",
"鱿": "魷",
"鲀": "魨",
"鲁": "魯",
"鲂": "魴",
"鲃": "䰾",
"鲄": "魺",
"鲅": "鮁",
"鲆": "鮃",
"鲈": "鱸",
"鲉": "鮋",
"鲊": "鮓",
"鲋": "鮒",
"鲌": "鮊",
"鲍": "鮑",
"鲎": "鱟",
"鲏": "鮍",
"鲐": "鮐",
"鲑": "鮭",
"鲒": "鮚",
"鲓": "鮳",
"鲔": "鮪",
"鲕": "鮞",
"鲖": "鮦",
"鲗": "鰂",
"鲘": "鮜",
"鲙": "鱠",
"鲚": "鱭",
"鲛": "鮫",
"鲜": "鮮",
"鲝": "鮺",
"鲟": "鱘",
"鲠": "鯁",
"鲡": "鱺",
"鲢": "鰱",
"鲣": "鰹",
"鲤": "鯉",
"鲥": "鰣",
"鲦": "鰷",
"鲧": "鯀",
"鲨": "鯊",
"鲩": "鯇",
"鲪": "鮶",
"鲫": "鯽",
"鲬": "鯒",
"鲭": "鯖",
"鲮": "鯪",
"鲯": "鯕",
"鲰": "鯫",
"鲱": "鯡",
"鲲": "鯤",
"鲳": "鯧",
"鲴": "鯝",
"鲵": "鯢",
"鲶": "鯰",
"鲷": "鯛",
"鲸": "鯨",
"鲹": "鰺",
"鲺": "鯴",
"鲻": "鯔",
"鲼": "鱝",
"鲽": "鰈",
"鲾": "鰏",
"鲿": "鱨",
"鳀": "鯷",
"鳁": "鰮",
"鳂": "鰃",
"鳃": "鰓",
"鳅": "鰍",
"鳆": "鰒",
"鳇": "鰉",
"鳈": "鰁",
"鳉": "鱂",
"鳊": "鯿",
"鳋": "鰠",
"鳌": "鰲",
"鳍": "鰭",
"鳎": "鰨",
"鳏": "鰥",
"鳐": "鰩",
"鳑": "鰟",
"鳒": "鰜",
"鳓": "鰳",
"鳔": "鰾",
"鳕": "鱈",
"鳖": "鱉",
"鳗": "鰻",
"鳘": "鰵",
"鳙": "鱅",
"鳚": "䲁",
"鳛": "鰼",
"鳜": "鱖",
"鳝": "鱔",
"鳞": "鱗",
"鳟": "鱒",
"鳠": "鱯",
"鳡": "鱤",
"鳢": "鱧",
"鳣": "鱣",
"䴓": "鳾",
"䴕": "鴷",
"䴔": "鵁",
"䴖": "鶄",
"䴗": "鶪",
"䴘": "鷈",
"䴙": "鷿",
"㶉": "鸂",
"鸟": "鳥",
"鸠": "鳩",
"鸢": "鳶",
"鸣": "鳴",
"鸤": "鳲",
"鸥": "鷗",
"鸦": "鴉",
"鸧": "鶬",
"鸨": "鴇",
"鸩": "鴆",
"鸪": "鴣",
"鸫": "鶇",
"鸬": "鸕",
"鸭": "鴨",
"鸮": "鴞",
"鸯": "鴦",
"鸰": "鴒",
"鸱": "鴟",
"鸲": "鴝",
"鸳": "鴛",
"鸴": "鷽",
"鸵": "鴕",
"鸶": "鷥",
"鸷": "鷙",
"鸸": "鴯",
"鸹": "鴰",
"鸺": "鵂",
"鸻": "鴴",
"鸼": "鵃",
"鸽": "鴿",
"鸾": "鸞",
"鸿": "鴻",
"鹀": "鵐",
"鹁": "鵓",
"鹂": "鸝",
"鹃": "鵑",
"鹄": "鵠",
"鹅": "鵝",
"鹆": "鵒",
"鹇": "鷳",
"鹈": "鵜",
"鹉": "鵡",
"鹊": "鵲",
"鹋": "鶓",
"鹌": "鵪",
"鹍": "鵾",
"鹎": "鵯",
"鹏": "鵬",
"鹐": "鵮",
"鹑": "鶉",
"鹒": "鶊",
"鹓": "鵷",
"鹔": "鷫",
"鹕": "鶘",
"鹖": "鶡",
"鹗": "鶚",
"鹘": "鶻",
"鹙": "鶖",
"鹛": "鶥",
"鹜": "鶩",
"鹝": "鷊",
"鹞": "鷂",
"鹟": | |
an event of this type.
event_schema_version = ndb.IntegerProperty(indexed=True)
@staticmethod
def get_deletion_policy():
"""RateExplorationEventLogEntryModels are anonymized, and cannot be tied
back to an individual user.
"""
return base_models.DELETION_POLICY.NOT_APPLICABLE
@staticmethod
def get_user_id_migration_policy():
"""RateExplorationEventLogEntryModel doesn't have any field with
user ID.
"""
return base_models.USER_ID_MIGRATION_POLICY.NOT_APPLICABLE
@classmethod
def get_new_event_entity_id(cls, exp_id, user_id):
"""Generates entity ID for a new rate exploration event based on its
exploration_id and user_id of the learner.
Args:
exp_id: str. ID of the exploration currently being played.
user_id: str. ID of the user.
Returns:
str. New unique ID for this entity class.
"""
timestamp = datetime.datetime.utcnow()
return cls.get_new_id('%s:%s:%s' % (
utils.get_time_in_millisecs(timestamp),
exp_id,
user_id))
@classmethod
def create(cls, exp_id, user_id, rating, old_rating):
"""Creates a new rate exploration event and then writes it to the
datastore.
Args:
exp_id: str. ID of the exploration currently being played.
user_id: str. ID of the user.
rating: int. Value of rating assigned to exploration.
old_rating: int or None. Will be None if the user rates an
exploration for the first time.
"""
entity_id = cls.get_new_event_entity_id(
exp_id, user_id)
cls(
id=entity_id,
event_type=feconf.EVENT_TYPE_RATE_EXPLORATION,
exploration_id=exp_id,
rating=rating,
old_rating=old_rating,
event_schema_version=feconf.CURRENT_EVENT_MODELS_SCHEMA_VERSION
).put()
@staticmethod
def get_export_policy():
"""Model does not contain user data."""
return base_models.EXPORT_POLICY.NOT_APPLICABLE
class StateHitEventLogEntryModel(base_models.BaseModel):
"""An event triggered by a student getting to a particular state. The
definitions of the fields are as follows:
- event_type: 'state_hit'.
- exploration_id: ID of exploration currently being played.
- exploration_version: Version of exploration.
- state_name: Name of current state.
- play_type: 'normal'.
- event_schema_version: 1.
- session_id: ID of current student's session.
- params: Current parameter values, in the form of a map of parameter name
to its value.
NOTE TO DEVELOPERS: Unlike other events, this event does not have a
client_time_spent_in_secs. Instead, it is the reference event for
all other client_time_spent_in_secs values, which each represent the
amount of time between this event (i.e., the learner entering the
state) and the other event.
"""
# Which specific type of event this is.
event_type = ndb.StringProperty(indexed=True)
# Id of exploration currently being played.
exploration_id = ndb.StringProperty(indexed=True)
# Current version of exploration.
exploration_version = ndb.IntegerProperty(indexed=True)
# Name of current state.
state_name = ndb.StringProperty(indexed=True)
# ID of current student's session.
session_id = ndb.StringProperty(indexed=True)
# Current parameter values, map of parameter name to value.
params = ndb.JsonProperty(indexed=False)
# Which type of play-through this is (editor preview, or learner view).
# Note that the 'playtest' option is legacy, since editor preview
# playthroughs no longer emit events.
play_type = ndb.StringProperty(indexed=True,
choices=[feconf.PLAY_TYPE_PLAYTEST,
feconf.PLAY_TYPE_NORMAL])
# The version of the event schema used to describe an event of this type.
event_schema_version = ndb.IntegerProperty(indexed=True)
@staticmethod
def get_deletion_policy():
"""StateHitEventLogEntryModels are anonymized, and cannot be tied back
to an individual user.
"""
return base_models.DELETION_POLICY.NOT_APPLICABLE
@staticmethod
def get_user_id_migration_policy():
"""StateHitEventLogEntryModel doesn't have any field with user ID."""
return base_models.USER_ID_MIGRATION_POLICY.NOT_APPLICABLE
@classmethod
def get_new_event_entity_id(cls, exp_id, session_id):
"""Generates entity ID for a new event based on its
exploration and session ID.
Args:
exp_id: str. ID of the exploration currently being played.
session_id: str. ID of current student's session.
Returns:
str. New unique ID for this entity class.
"""
timestamp = datetime.datetime.utcnow()
return cls.get_new_id('%s:%s:%s' % (
utils.get_time_in_millisecs(timestamp),
exp_id,
session_id))
@classmethod
def create(
cls, exp_id, exp_version, state_name, session_id, params,
play_type):
"""Creates a new state hit event entity and then writes
it to the datastore.
Args:
exp_id: str. ID of the exploration currently being played.
exp_version: int. Version of exploration.
state_name: str. Name of current state.
session_id: str. ID of current student's session.
params: dict. Current parameter values, map of parameter name
to value.
play_type: str. Type of play-through.
Returns:
str. The ID of the entity.
"""
# TODO(sll): Some events currently do not have an entity ID that was
# set using this method; it was randomly set instead due to an error.
# Might need to migrate them.
entity_id = cls.get_new_event_entity_id(exp_id, session_id)
state_event_entity = cls(
id=entity_id,
event_type=feconf.EVENT_TYPE_STATE_HIT,
exploration_id=exp_id,
exploration_version=exp_version,
state_name=state_name,
session_id=session_id,
params=params,
play_type=play_type,
event_schema_version=feconf.CURRENT_EVENT_MODELS_SCHEMA_VERSION)
state_event_entity.put()
return entity_id
@staticmethod
def get_export_policy():
"""Model does not contain user data."""
return base_models.EXPORT_POLICY.NOT_APPLICABLE
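# Illustrative sketch (hypothetical IDs, not part of the models module): logging
# that a learner reached a state. The entity ID generated for the event has the
# form '[timestamp]:[exp_id]:[session_id]'.
#
#   StateHitEventLogEntryModel.create(
#       'exp_1', 2, 'Introduction', 'session_abc', {}, feconf.PLAY_TYPE_NORMAL)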
class StateCompleteEventLogEntryModel(base_models.BaseModel):
"""An event triggered by a student completing a state."""
# Id of exploration currently being played.
exp_id = ndb.StringProperty(indexed=True)
# Current version of exploration.
exp_version = ndb.IntegerProperty(indexed=True)
# Name of current state.
state_name = ndb.StringProperty(indexed=True)
# ID of current student's session.
session_id = ndb.StringProperty(indexed=True)
# Time since start of this state before this event occurred (in sec).
time_spent_in_state_secs = ndb.FloatProperty()
# The version of the event schema used to describe an event of this type.
event_schema_version = ndb.IntegerProperty(indexed=True)
@staticmethod
def get_deletion_policy():
"""StateCompleteEventLogEntryModels are anonymized, and cannot be tied
back to an individual user.
"""
return base_models.DELETION_POLICY.NOT_APPLICABLE
@staticmethod
def get_user_id_migration_policy():
"""StateCompleteEventLogEntryModel doesn't have any field with
user ID.
"""
return base_models.USER_ID_MIGRATION_POLICY.NOT_APPLICABLE
@classmethod
def get_new_event_entity_id(cls, exp_id, session_id):
"""Generates a unique id for the event model of the form
'[timestamp]:[exp_id]:[session_id]'.
"""
timestamp = datetime.datetime.utcnow()
return cls.get_new_id('%s:%s:%s' % (
utils.get_time_in_millisecs(timestamp),
exp_id,
session_id))
@classmethod
def create(
cls, exp_id, exp_version, state_name, session_id,
time_spent_in_state_secs):
"""Creates a new state complete event."""
entity_id = cls.get_new_event_entity_id(
exp_id, session_id)
state_finish_event_entity = cls(
id=entity_id,
exp_id=exp_id,
exp_version=exp_version,
state_name=state_name,
session_id=session_id,
time_spent_in_state_secs=time_spent_in_state_secs,
event_schema_version=feconf.CURRENT_EVENT_MODELS_SCHEMA_VERSION)
state_finish_event_entity.put()
return entity_id
@staticmethod
def get_export_policy():
"""Model does not contain user data."""
return base_models.EXPORT_POLICY.NOT_APPLICABLE
class LeaveForRefresherExplorationEventLogEntryModel(base_models.BaseModel):
"""An event triggered by a student leaving for a refresher exploration."""
# ID of exploration currently being played.
exp_id = ndb.StringProperty(indexed=True)
# ID of the refresher exploration.
refresher_exp_id = ndb.StringProperty(indexed=True)
# Current version of exploration.
exp_version = ndb.IntegerProperty(indexed=True)
# Name of current state.
state_name = ndb.StringProperty(indexed=True)
# ID of current student's session.
session_id = ndb.StringProperty(indexed=True)
# Time since start of this state before this event occurred (in sec).
time_spent_in_state_secs = ndb.FloatProperty()
# The version of the event schema used to describe an event of this type.
event_schema_version = ndb.IntegerProperty(indexed=True)
@staticmethod
def get_deletion_policy():
"""LeaveForRefresherExplorationEventLogEntryModels are anonymized, and
cannot be tied back to an individual user.
"""
return base_models.DELETION_POLICY.NOT_APPLICABLE
@staticmethod
def get_user_id_migration_policy():
"""LeaveForRefresherExplorationEventLogEntryModel doesn't have any field
with user ID.
"""
return base_models.USER_ID_MIGRATION_POLICY.NOT_APPLICABLE
@classmethod
def get_new_event_entity_id(cls, exp_id, session_id):
"""Generates a unique id for the event model of the form
'[timestamp]:[exp_id]:[session_id]'.
"""
timestamp = datetime.datetime.utcnow()
return cls.get_new_id('%s:%s:%s' % (
utils.get_time_in_millisecs(timestamp),
exp_id,
session_id))
@classmethod
def create(
cls, exp_id, refresher_exp_id, exp_version, state_name,
session_id, time_spent_in_state_secs):
"""Creates a new leave for refresher exploration event."""
entity_id = cls.get_new_event_entity_id(
exp_id, session_id)
leave_for_refresher_exp_entity = cls(
id=entity_id,
exp_id=exp_id,
refresher_exp_id=refresher_exp_id,
exp_version=exp_version,
state_name=state_name,
session_id=session_id,
time_spent_in_state_secs=time_spent_in_state_secs,
event_schema_version=feconf.CURRENT_EVENT_MODELS_SCHEMA_VERSION)
leave_for_refresher_exp_entity.put()
return entity_id
@staticmethod
def get_export_policy():
"""Model does not contain user data."""
return base_models.EXPORT_POLICY.NOT_APPLICABLE
class ExplorationStatsModel(base_models.BaseModel):
"""Model for storing analytics data for an exploration. This model contains
statistics data aggregated from version 1 to the version given in the key.
The ID of instances of this class has the form [exp_id].[exp_version].
"""
# ID of exploration.
exp_id = ndb.StringProperty(indexed=True)
# Version of exploration.
exp_version = ndb.IntegerProperty(indexed=True)
# Number of learners starting the exploration (v1 - data collected before
# Dec 2017).
num_starts_v1 = ndb.IntegerProperty(indexed=True)
num_starts_v2 = ndb.IntegerProperty(indexed=True)
# Number of students who actually attempted the exploration. Only learners
# who spent a minimum time on the exploration are considered to have
# actually started the exploration (v1 - data collected before Dec 2017).
num_actual_starts_v1 = ndb.IntegerProperty(indexed=True)
num_actual_starts_v2 = ndb.IntegerProperty(indexed=True)
# Number of students who completed the exploration (v1 - data collected
# before Dec 2017).
num_completions_v1 = ndb.IntegerProperty(indexed=True)
num_completions_v2 = ndb.IntegerProperty(indexed=True)
# Keyed by state name that describes the analytics for that state.
# {state_name: {
# 'total_answers_count_v1': ...,
# 'total_answers_count_v2': ...,
# 'useful_feedback_count_v1': ...,
# 'useful_feedback_count_v2': ...,
# 'total_hit_count_v1': ...,
# 'total_hit_count_v2': ...,
# 'first_hit_count_v1': ...,
# 'first_hit_count_v2': ...,
# 'num_times_solution_viewed_v2': ...,
# 'num_completions_v1': ...,
# 'num_completions_v2': ...}}
state_stats_mapping = ndb.JsonProperty(indexed=False)
@staticmethod
def get_deletion_policy():
"""ExplorationStatsModels are aggregated and anonymized, and cannot be
tied back to an individual user.
"""
return base_models.DELETION_POLICY.NOT_APPLICABLE
@staticmethod
def get_user_id_migration_policy():
"""ExplorationStatsModel doesn't have any field with user ID."""
return base_models.USER_ID_MIGRATION_POLICY.NOT_APPLICABLE
@classmethod
def get_entity_id(cls, exp_id, exp_version):
"""Generates an ID for the instance of the form
'[exp_id].[exp_version]'.
Args:
exp_id: str. ID of the exploration.
exp_version: int. Version of the exploration.
Returns:
str. ID of the new ExplorationStatsModel instance.
"""
        return '%s.%s' % (exp_id, exp_version)
#////////1/////////2/////////3/////////4/////////5/////////6/////////7/////////8
# Name :
# Author : Avi
# Revision : $Revision: #10 $
#
# Copyright 2009-2020 ECMWF.
# This software is licensed under the terms of the Apache Licence version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#////////1/////////2/////////3/////////4/////////5/////////6/////////7/////////8
# code for testing errors in creation of defs file in python
import os
from ecflow import Day, Date, Meter, Event, Queue, Clock, Variable, Label, Limit, InLimit, \
RepeatDate, RepeatEnumerated, RepeatInteger, RepeatString, \
Task, Family, Suite, Defs, Client, debug_build, Trigger
import ecflow_test_util as Test
def check_day(day):
try:
Day(day)
return True
except RuntimeError:
return False
def check_date(day,month,year):
try:
Date(day,month,year)
return True
except IndexError:
return False
def check_date_str(str_date):
try:
Date( str_date)
return True
except IndexError:
return False
except RuntimeError:
return False
def check_meter(name,min_meter_value,max_meter_value,color_change):
try:
Meter(name,min_meter_value,max_meter_value,color_change)
return True
except IndexError:
return False
except RuntimeError:
return False
def check_queue(name,queue_items):
try:
Queue(name, queue_items)
return True
except IndexError:
return False
except RuntimeError:
return False
def check_event_number_and_name(number,name):
try:
Event(number,name)
return True
except:
return False
def check_event(number):
try:
Event(number)
return True
except RuntimeError:
return False
def check_clock(day_of_month,month,year):
try:
Clock(day_of_month,month,year)
return True
except IndexError:
return False
def check_variable(name,value):
try:
Variable(name,value)
return True
except RuntimeError:
return False
def check_label(name,value):
try:
Label(name,value)
return True
except RuntimeError:
return False
def check_limit(name,int_token):
try:
Limit(name,int_token)
return True
except RuntimeError:
return False
except TypeError:
return False
def check_inlimit(name,path_to_node,int_token):
try:
InLimit(name,path_to_node,int_token)
return True
except RuntimeError:
return False
except TypeError:
return False
def check_repeat_date(name, start, end, step):
try:
RepeatDate(name,start,end,step)
return True
except RuntimeError:
return False
def check_repeat_integer(name, start, end, step):
try:
RepeatInteger(name,start,end,step)
return True
except RuntimeError:
return False
except TypeError:
return False
def check_repeat_enumerated(name, list_of_strings):
try:
RepeatEnumerated(name,list_of_strings)
return True
except RuntimeError:
return False
except TypeError:
return False
def check_repeat_string(name, list_of_strings):
try:
RepeatString(name,list_of_strings)
return True
except RuntimeError:
return False
except TypeError:
return False
def check_node_name(name):
try:
Task(name)
Family(name)
Suite(name)
        return True
except RuntimeError:
return False
def check_defs(path_to_defs):
try:
Defs(path_to_defs)
return True
except RuntimeError:
return False
if __name__ == "__main__":
Test.print_test_start(os.path.basename(__file__))
# Names with leading '.' should not be allowed. Will interfere with triggers
# Empty names not allowed
# Spaces not allowed
invalid_names = [ ".", "", " "," ", "fred doc", "1 "]
# Allow names with leading underscore
valid_names = [ "_", "__", "_._", "1.2", "fred.doc", "_.1"]
assert check_day("monday"), "Expected valid day"
assert check_day("tuesday"), "Expected valid day"
assert check_day("wednesday"), "Expected valid day"
assert check_day("thursday"), "Expected valid day"
assert check_day("friday"), "Expected valid day"
assert check_day("saturday"), "Expected valid day"
assert check_day("sunday"), "Expected valid day"
    assert check_day("") == False, "Expected exception"
    assert check_day("sunday1") == False, "Expected exception"
    assert check_day("2") == False, "Expected exception"
assert check_date(0,1,2010), "Expected valid date"
assert check_date(10,0,2010), "Expected valid date"
assert check_date(10,1,0), "Expected valid date"
assert check_date(0,0,0), "Expected valid date"
assert check_date(40,1,2010) == False, "Expected exception since day > 31"
assert check_date(-10,1,2010) == False, "Expected exception since day >= 0"
assert check_date(1,14,2010) == False, "Expected exception since month > 12"
assert check_date(1,-1,2010) == False, "Expected exception since month >= 0"
assert check_date(1,1,-2) == False, "Expected exception since year >= 0"
assert check_date_str("*.1.2010"), "Expected valid date"
assert check_date_str("10.*.2010"), "Expected valid date"
assert check_date_str("10.1.*"), "Expected valid date"
assert check_date_str("*.*.*"), "Expected valid date"
assert check_date_str("40.1.2010") == False, "Expected exception since day > 31"
assert check_date_str("-10.1.2010") == False, "Expected exception since day >= 0"
assert check_date_str("1.14.2010") == False, "Expected exception since month > 12"
assert check_date_str("1.-1.2010") == False, "Expected exception since month >= 0"
assert check_date_str("1.1.-2") == False, "Expected exception since year >= 0"
# clock do not support wild carding hence we cant use 0 like in Date
assert check_clock(12,1,2010), "Expected valid date"
assert check_clock(10,1,2010), "Expected valid date"
assert check_clock(10,1,1400), "Expected valid date"
assert check_clock(31,12,2010), "Expected valid date"
assert check_clock(40,1,2010) == False, "Expected exception since day > 31"
assert check_clock(-10,1,2010) == False, "Expected exception since day >= 0"
assert check_clock(1,14,2010) == False, "Expected exception since month > 12"
assert check_clock(1,-1,2010) == False, "Expected exception since month >= 0"
assert check_clock(1,1,-2) == False, "Expected exception since year >= 0"
assert check_meter("m",0,100,100), "Expected valid Meter"
assert check_meter("m",0,100,0), "Expected valid Meter"
assert check_meter("m",200,100,100) == False, "Expected exception since min > max"
    assert check_meter("m",0,100,-20) == False, "Expected exception since color_change should be between min-max"
    assert check_meter("m",0,100,200) == False, "Expected exception since color_change should be between min-max"
assert check_meter("",0,100,100) == False, "Expected exception since no name specified"
assert check_meter(" ",0,100,100) == False, "Expected Exception cannot have spaces for a name"
assert check_queue("m",["a"]), "Expected valid Queue"
assert check_queue("m",["a","b"]), "Expected valid Queue"
assert check_queue("",["a","b"]) == False, "Expected exception queue name is empty"
assert check_queue(" ",["a","b"]) == False, "Expected Exception cannot have spaces for a name"
assert check_queue(".",["a","b"]) == False, "Expected Exception cannot start name with a ."
assert check_queue("m",[]) == False, "Expected Exception queue items list is empty"
assert check_event(1), "Expected valid Event"
assert check_event(2), "Expected valid Event"
assert check_event_number_and_name(2,"fred"), "Expected valid Event"
assert check_event_number_and_name(2,2) == False, "Expected failure since the name is not a string"
assert check_repeat_date("m",20000101,20001201,200), "Expected valid repeat"
assert check_repeat_date("m",20001201,20000101,200) == False, "Expected exception since end YMD > start YMD"
assert check_repeat_date("m",200001011,20001201,200)== False, "Expected Exception since start is invalid."
    assert check_repeat_date("m",20000101,200012013,200)== False, "Expected Exception since end is invalid."
    assert check_repeat_date("m",00000000,00000000,200)== False, "Expected Exception since start/end are not valid dates."
assert check_repeat_date("",20000101,20001201,200)==False, "Expected Exception since no name specified"
assert check_repeat_date(" ",20000101,20001201,200)==False, "Expected Exception cannot have spaces for a name"
assert check_repeat_integer("name",0, 10, 2 ), "Expected valid repeat"
assert check_repeat_integer("",0, 10, 2 )==False, "Expected Exception since no name specified"
assert check_repeat_integer(" ",0, 10, 2 )==False, "Expected Exception cannot have spaces for a name"
assert check_repeat_string("name",[ "a" ]), "Expected valid repeat"
assert check_repeat_string("",["a"] )==False, "Expected Exception since no name specified"
assert check_repeat_string(" ",["a"] )==False, "Expected Exception cannot have spaces for a name"
assert check_repeat_string("name",[ 1,2 ])==False, "Expected Exception since a list of strings was expected"
assert check_repeat_string("name",[])==False, "Expected Exception since list of strings is empty"
assert check_repeat_enumerated("name",[ "a" ]), "Expected valid repeat"
assert check_repeat_enumerated("",["a"] )==False, "Expected Exception since no name specified"
    assert check_repeat_enumerated(" ",["a"] )==False, "Expected Exception cannot have spaces for a name"
assert check_repeat_enumerated("name",[ 1,2 ])==False, "Expected Exception since a list of strings was expected"
assert check_repeat_enumerated("name",[])==False, "Expected Exception since list is empty"
assert check_variable("name","value"), "Expected valid Variable"
assert check_variable("name",""), "Expected valid Variable"
assert check_variable("name"," "), "Expected valid Variable"
assert check_variable("name","12"), "Expected valid Variable"
assert check_variable("","12")==False, "Expected Exception name must be specified"
assert check_variable(" ","12")==False, "Expected Exception cannot have spaces for a name"
assert check_label("name","value"), "Expected valid label"
assert check_label("name",""), "Expected valid label"
assert check_label("name"," "), "Expected valid label"
assert check_label("name","12"), "Expected valid label"
assert check_label("","12")==False, "Expected exception name must be specified"
assert check_label(" ","12")==False, "Expected Exception cannot have spaces for a name"
assert check_limit("name",1), "Expected valid limit"
assert check_limit("name",20000), "Expected valid limit"
    assert check_limit("name","ten")==False, "Expected exception, token must be an integer"
    assert check_limit("name","2")==False, "Expected exception, token must be an integer"
assert check_limit("","2")==False, "Expected exception, no name specified"
assert check_limit(" ","2")==False, "Expected exception, cannot have spaces for a name"
assert check_inlimit("limit_name","/path/to/limit",1), "Expected valid in limit"
assert check_inlimit("limit_name","/path/to/limit",999999), "Expected valid in limit"
assert check_inlimit("limit_name","",1), "Expected valid in limit"
assert check_inlimit("","",1)==False, "Expected exception, no limit name specified"
assert check_inlimit(" ","",1)==False, "Expected exception, cannot have spaces for a name"
# ========================================================================
print("Check node names")
for i in range(25):
assert check_node_name(str(i)), "Integer names should be allowed"
for name in valid_names:
assert check_node_name(name), "Expected valid name " + name
for name in invalid_names:
assert check_node_name(name)==False, "Expected exception for invalid name " + name
assert check_defs("a_made_up_path_that_doesnt_not_exit.def") == False, "Expected exception, Defs file does not exist"
# =================================================================================
print("test save_as_defs")
    defs = Defs()  # create an empty definition
    s1
"""
Train Class1 pan-allele models.
"""
import argparse
import os
from os.path import join
import signal
import sys
import time
import traceback
import random
import pprint
import hashlib
import pickle
import uuid
from functools import partial
import numpy
import pandas
import yaml
import tqdm # progress bar
tqdm.monitor_interval = 0 # see https://github.com/tqdm/tqdm/issues/481
from .class2_affinity_predictor import Class2AffinityPredictor
from .class2_neural_network import Class2NeuralNetwork
from .common import configure_logging
from .local_parallelism import (
add_local_parallelism_args,
worker_pool_with_gpu_assignments_from_args,
call_wrapped_kwargs)
from .cluster_parallelism import (
add_cluster_parallelism_args,
cluster_results_from_args)
from .allele_encoding import AlleleEncoding
from .encodable_sequences import EncodableSequences
# To avoid pickling large matrices to send to child processes when running in
# parallel, we use this global variable as a place to store data. Data that is
# stored here before creating the worker pool will be inherited by the child
# processes upon fork(), allowing us to share large data with the workers
# via shared memory.
GLOBAL_DATA = {}
# Note on parallelization:
# It seems essential currently (tensorflow==1.4.1) that no processes are forked
# after tensorflow has been used at all, which includes merely importing
# keras.backend. So we must make sure not to use tensorflow in the main process
# if we are running in parallel.
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument(
"--data",
metavar="FILE.csv",
help=(
"Training data CSV. Expected columns: "
"allele, peptide, measurement_value"))
parser.add_argument(
"--pretrain-data",
metavar="FILE.csv",
help=(
"Pre-training data CSV. Expected columns: "
"allele, peptide, measurement_value"))
parser.add_argument(
"--out-models-dir",
metavar="DIR",
required=True,
help="Directory to write models and manifest")
parser.add_argument(
"--hyperparameters",
metavar="FILE.json",
help="JSON or YAML of hyperparameters")
parser.add_argument(
"--held-out-measurements-per-allele-fraction-and-max",
type=float,
metavar="X",
nargs=2,
default=[0.25, 100],
help="Fraction of measurements per allele to hold out, and maximum number")
parser.add_argument(
"--ignore-inequalities",
action="store_true",
default=False,
help="Do not use affinity value inequalities even when present in data")
parser.add_argument(
"--num-folds",
type=int,
default=4,
metavar="N",
help="Number of training folds.")
parser.add_argument(
"--num-replicates",
type=int,
metavar="N",
default=1,
help="Number of replicates per (architecture, fold) pair to train.")
parser.add_argument(
"--max-epochs",
type=int,
metavar="N",
help="Max training epochs. If specified here it overrides any 'max_epochs' "
"specified in the hyperparameters.")
parser.add_argument(
"--allele-sequences",
metavar="FILE.csv",
help="Allele sequences file.")
parser.add_argument(
"--verbosity",
type=int,
help="Keras verbosity. Default: %(default)s",
default=0)
parser.add_argument(
"--debug",
action="store_true",
default=False,
help="Launch python debugger on error")
parser.add_argument(
"--continue-incomplete",
action="store_true",
default=False,
help="Continue training models from an incomplete training run. If this is "
"specified then the only required argument is --out-models-dir")
parser.add_argument(
"--only-initialize",
action="store_true",
default=False,
help="Do not actually train models. The initialized run can be continued "
"later with --continue-incomplete.")
add_local_parallelism_args(parser)
add_cluster_parallelism_args(parser)
def assign_folds(df, num_folds, held_out_fraction, held_out_max):
"""
    Split training data into multiple test/train pairs, which we refer to as
folds. Note that a given data point may be assigned to multiple test or
train sets; these folds are NOT a non-overlapping partition as used in cross
validation.
A fold is defined by a boolean value for each data point, indicating whether
it is included in the training data for that fold. If it's not in the
training data, then it's in the test data.
Folds are balanced in terms of allele content.
Parameters
----------
df : pandas.DataFrame
training data
num_folds : int
held_out_fraction : float
Fraction of data to hold out as test data in each fold
held_out_max
For a given allele, do not hold out more than held_out_max number of
data points in any fold.
Returns
-------
pandas.DataFrame
index is same as df.index, columns are "fold_0", ... "fold_N" giving
whether the data point is in the training data for the fold
"""
result_df = pandas.DataFrame(index=df.index)
for fold in range(num_folds):
result_df["fold_%d" % fold] = True
for (allele, sub_df) in df.groupby("allele"):
medians = sub_df.groupby("peptide").measurement_value.median()
low_peptides = medians[medians < medians.median()].index.values
high_peptides = medians[medians >= medians.median()].index.values
held_out_count = int(
min(len(medians) * held_out_fraction, held_out_max))
held_out_peptides = set()
if held_out_count == 0:
pass
elif held_out_count < 2:
held_out_peptides = set(
medians.index.to_series().sample(n=held_out_count))
else:
held_out_low_count = min(
len(low_peptides),
int(held_out_count / 2))
held_out_high_count = min(
len(high_peptides),
held_out_count - held_out_low_count)
held_out_low = pandas.Series(low_peptides).sample(
n=held_out_low_count) if held_out_low_count else set()
held_out_high = pandas.Series(high_peptides).sample(
n=held_out_high_count) if held_out_high_count else set()
held_out_peptides = set(held_out_low).union(set(held_out_high))
result_df.loc[
sub_df.index[sub_df.peptide.isin(held_out_peptides)],
"fold_%d" % fold
] = False
print("Training points per fold")
print(result_df.sum())
print("Test points per fold")
print((~result_df).sum())
return result_df
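# Illustrative usage sketch for assign_folds (alleles, peptides and values
# below are made-up assumptions). It exercises the contract described in the
# docstring above: the returned frame has one boolean column per fold, where
# True means the row is in that fold's training split.
def _example_assign_folds():
    toy = pandas.DataFrame({
        "allele": ["HLA-A*02:01"] * 4 + ["HLA-B*07:02"] * 4,
        "peptide": ["SIINFEKL", "LLFGYPVYV", "GILGFVFTL", "NLVPMVATV",
                    "KLGGALQAK", "RAKFKQLL", "YVLDHLIVV", "TPRVTGGGAM"],
        "measurement_value": [50., 500., 5000., 20000.] * 2,
    })
    folds = assign_folds(toy, num_folds=2, held_out_fraction=0.25, held_out_max=10)
    assert list(folds.columns) == ["fold_0", "fold_1"]
    return folds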
def pretrain_data_iterator(
filename,
master_allele_encoding,
peptides_per_chunk=1024):
"""
Step through a CSV file giving predictions for a large number of peptides
(rows) and alleles (columns).
Parameters
----------
filename : string
master_allele_encoding : AlleleEncoding
peptides_per_chunk : int
Returns
-------
Generator of (AlleleEncoding, EncodableSequences, float affinities) tuples
"""
empty = pandas.read_csv(filename, index_col=0, nrows=0)
usable_alleles = [
c for c in empty.columns
if c in master_allele_encoding.allele_to_sequence
]
print("Using %d / %d alleles" % (len(usable_alleles), len(empty.columns)))
print("Skipped alleles: ", [
c for c in empty.columns
if c not in master_allele_encoding.allele_to_sequence
])
allele_encoding = AlleleEncoding(
numpy.tile(usable_alleles, peptides_per_chunk),
borrow_from=master_allele_encoding)
while True:
synthetic_iter = pandas.read_csv(
filename, index_col=0, chunksize=peptides_per_chunk)
for (k, df) in enumerate(synthetic_iter):
if len(df) != peptides_per_chunk:
continue
df = df[usable_alleles]
encodable_peptides = EncodableSequences(
numpy.repeat(
df.index.values,
len(usable_alleles)))
yield (allele_encoding, encodable_peptides, df.stack().values)
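# Illustrative layout of the pre-train CSV consumed by pretrain_data_iterator
# (values are made-up assumptions; the index column holds peptides and every
# other column is an allele):
#
#   peptide,HLA-A*02:01,HLA-B*07:02
#   SIINFEKL,120.5,8001.2
#   GILGFVFTL,35.0,15000.0
#
# Each full chunk of `peptides_per_chunk` rows is flattened with df.stack(),
# matching the tiled allele encoding and repeated peptide encoding built above.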
def run(argv=sys.argv[1:]):
# On sigusr1 print stack trace
print("To show stack trace, run:\nkill -s USR1 %d" % os.getpid())
signal.signal(signal.SIGUSR1, lambda sig, frame: traceback.print_stack())
args = parser.parse_args(argv)
if args.debug:
try:
return main(args)
except Exception as e:
print(e)
import ipdb # pylint: disable=import-error
ipdb.set_trace()
raise
else:
return main(args)
def main(args):
print("Arguments:")
print(args)
args.out_models_dir = os.path.abspath(args.out_models_dir)
configure_logging(verbose=args.verbosity > 1)
if not args.continue_incomplete:
initialize_training(args)
if not args.only_initialize:
train_models(args)
def initialize_training(args):
required_arguments = [
"data",
"out_models_dir",
"hyperparameters",
"num_folds",
]
for arg in required_arguments:
if getattr(args, arg) is None:
parser.error("Missing required arg: %s" % arg)
print("Initializing training.")
    hyperparameters_lst = yaml.load(open(args.hyperparameters), Loader=yaml.SafeLoader)
assert isinstance(hyperparameters_lst, list)
print("Loaded hyperparameters list:")
pprint.pprint(hyperparameters_lst)
allele_sequences = pandas.read_csv(
args.allele_sequences, index_col=0).iloc[:,0]
df = pandas.read_csv(args.data)
print("Loaded training data: %s" % (str(df.shape)))
df = df.loc[
(df.peptide.str.len() >= 8) & (df.peptide.str.len() <= 15)
]
print("Subselected to 8-15mers: %s" % (str(df.shape)))
df = df.loc[~df.measurement_value.isnull()]
print("Dropped NaNs: %s" % (str(df.shape)))
df = df.loc[df.allele.isin(allele_sequences.index)]
print("Subselected to alleles with sequences: %s" % (str(df.shape)))
print("Data inequalities:")
print(df.measurement_inequality.value_counts())
if args.ignore_inequalities and "measurement_inequality" in df.columns:
print("Dropping measurement_inequality column")
del df["measurement_inequality"]
# Allele names in data are assumed to be already normalized.
print("Training data: %s" % (str(df.shape)))
(held_out_fraction, held_out_max) = (
args.held_out_measurements_per_allele_fraction_and_max)
folds_df = assign_folds(
df=df,
num_folds=args.num_folds,
held_out_fraction=held_out_fraction,
held_out_max=held_out_max)
allele_sequences_in_use = allele_sequences[
allele_sequences.index.isin(df.allele)
]
print("Will use %d / %d allele sequences" % (
len(allele_sequences_in_use), len(allele_sequences)))
# All alleles, not just those with training data.
full_allele_encoding = AlleleEncoding(
alleles=allele_sequences.index.values,
allele_to_sequence=allele_sequences.to_dict()
)
# Only alleles with training data. For efficiency we perform model training
# using only these alleles in the neural network embedding layer.
allele_encoding = AlleleEncoding(
alleles=allele_sequences_in_use.index.values,
allele_to_sequence=allele_sequences_in_use.to_dict())
if not os.path.exists(args.out_models_dir):
print("Attempting to create directory: %s" % args.out_models_dir)
os.mkdir(args.out_models_dir)
print("Done.")
predictor = Class1AffinityPredictor(
allele_to_sequence=allele_encoding.allele_to_sequence,
metadata_dataframes={
'train_data': pandas.merge(
df,
folds_df,
left_index=True,
right_index=True)
})
work_items = []
for (h, hyperparameters) in enumerate(hyperparameters_lst):
if 'n_models' in hyperparameters:
raise ValueError("n_models is unsupported")
if args.max_epochs:
hyperparameters['max_epochs'] = args.max_epochs
if hyperparameters.get("train_data", {}).get("pretrain", False):
if not args.pretrain_data:
raise ValueError("--pretrain-data is required")
for fold in range(args.num_folds):
for replicate in range(args.num_replicates):
work_dict = {
'work_item_name': str(uuid.uuid4()),
'architecture_num': h,
'num_architectures': len(hyperparameters_lst),
'fold_num': fold,
'num_folds': args.num_folds,
'replicate_num': replicate,
'num_replicates': args.num_replicates,
'hyperparameters': hyperparameters,
'pretrain_data_filename': args.pretrain_data,
}
work_items.append(work_dict)
training_init_info = {}
training_init_info["train_data"] = df
training_init_info["folds_df"] = folds_df
training_init_info["allele_encoding"] = allele_encoding
training_init_info["full_allele_encoding"] = full_allele_encoding
training_init_info["work_items"] = work_items
# Save empty predictor (for metadata)
predictor.save(args.out_models_dir)
# Write training_init_info.
with open(join(args.out_models_dir, "training_init_info.pkl"), "wb") as fd:
pickle.dump(training_init_info, fd, protocol=pickle.HIGHEST_PROTOCOL)
print("Done initializing training.")
def train_models(args):
global GLOBAL_DATA
print("Beginning training.")
predictor = Class1AffinityPredictor.load(
args.out_models_dir, optimization_level=0)
print("Loaded predictor with %d networks" % len(predictor.neural_networks))
with open(join(args.out_models_dir, "training_init_info.pkl"), "rb") as fd:
GLOBAL_DATA.update(pickle.load(fd))
print("Loaded training init info.")
all_work_items = GLOBAL_DATA["work_items"]
complete_work_item_names = [
network.fit_info[-1]["training_info"]["work_item_name"] for network in
predictor.neural_networks
]
work_items = [
item for item in all_work_items
if item["work_item_name"] not in complete_work_item_names
]
print("Found %d work items, of which %d are incomplete and will run now." % (
len(all_work_items), len(work_items)))
serial_run = not args.cluster_parallelism and args.num_jobs == 0
# The estimated time to completion is more accurate if we randomize
# the order of the work.
random.shuffle(work_items)
for (work_item_num, item) in enumerate(work_items):
item['work_item_num'] = work_item_num
item['num_work_items'] = len(work_items)
item['progress_print_interval'] = 60.0 if not serial_run else 5.0
item['predictor'] = predictor if serial_run else None
item['save_to'] = args.out_models_dir if serial_run else None
item['verbose'] = args.verbosity
if args.pretrain_data:
item['pretrain_data_filename'] = args.pretrain_data
start = time.time()
worker_pool = None
if serial_run:
# Run in serial. Every worker is passed the same predictor,
# which it adds models to, so no merging is required. It also saves
# as it goes so no saving | |
import utils
import torch
import numpy as np
from torch import nn
import torchgeometry
from kornia import color
import torch.nn.functional as F
from time import time
from torchvision.transforms import RandomResizedCrop
class Dense(nn.Module):
def __init__(self, in_features, out_features, activation='relu'):
super(Dense, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.activation = activation
self.linear = nn.Linear(in_features, out_features)
self.IN = nn.InstanceNorm1d(self.out_features)
nn.init.kaiming_normal_(self.linear.weight)
def forward(self, inputs):
outputs = self.linear(inputs)
outputs = outputs.unsqueeze(1)
outputs = self.IN(outputs)
outputs = outputs.squeeze(1)
if self.activation is not None:
if self.activation == 'relu':
outputs = nn.ReLU(inplace=True)(outputs)
elif self.activation == 'tanh':
outputs = nn.Tanh()(outputs)
elif self.activation == 'sigmoid':
outputs = nn.Sigmoid()(outputs)
else:
raise NotImplementedError
return outputs
class Conv2D(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, activation='relu', strides=1, pad=None):
super(Conv2D, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.activation = activation
self.strides = strides
if pad is None:
self.pad = int((kernel_size - 1) / 2)
else:
self.pad = pad
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, strides, self.pad)
self.IN = nn.InstanceNorm2d(self.out_channels)
# default: using he_normal as the kernel initializer
nn.init.kaiming_normal_(self.conv.weight)
def forward(self, inputs):
outputs = self.conv(inputs)
outputs = self.IN(outputs)
if self.activation is not None:
if self.activation == 'relu':
outputs = nn.ReLU(inplace=True)(outputs)
elif self.activation == 'tanh':
outputs = nn.Tanh()(outputs)
elif self.activation == 'sigmoid':
outputs = nn.Sigmoid()(outputs)
else:
raise NotImplementedError
return outputs
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, input):
return input.view(input.size(0), -1)
class StegaStampEncoder(nn.Module):
def __init__(self):
super(StegaStampEncoder, self).__init__()
self.secret_dense = Dense(100, 7500, activation='relu')
self.conv1 = Conv2D(6, 32, 3, activation='relu')
self.conv2 = Conv2D(32, 32, 3, activation='relu', strides=2)
self.conv3 = Conv2D(32, 64, 3, activation='relu', strides=2)
self.conv4 = Conv2D(64, 128, 3, activation='relu', strides=2)
self.conv5 = Conv2D(128, 256, 3, activation='relu', strides=2)
self.up6 = Conv2D(256, 128, 3, activation='relu')
self.conv6 = Conv2D(256, 128, 3, activation='relu')
self.up7 = Conv2D(128, 64, 3, activation='relu')
self.conv7 = Conv2D(128, 64, 3, activation='relu')
self.up8 = Conv2D(64, 32, 3, activation='relu')
self.conv8 = Conv2D(64, 32, 3, activation='relu')
self.up9 = Conv2D(32, 32, 3, activation='relu')
self.conv9 = Conv2D(70, 32, 3, activation='relu')
self.residual = Conv2D(32, 3, 1, activation=None)
def forward(self, inputs):
secret, image = inputs
secret = secret - .5
image = image - .5
secret = self.secret_dense(secret)
secret = secret.reshape(-1, 3, 50, 50)
secret_enlarged = nn.Upsample(scale_factor=(8, 8))(secret)
inputs = torch.cat([secret_enlarged, image], dim=1)
conv1 = self.conv1(inputs)
conv2 = self.conv2(conv1)
conv3 = self.conv3(conv2)
conv4 = self.conv4(conv3)
conv5 = self.conv5(conv4)
up6 = self.up6(nn.Upsample(scale_factor=(2, 2))(conv5))
merge6 = torch.cat([conv4, up6], dim=1)
conv6 = self.conv6(merge6)
up7 = self.up7(nn.Upsample(scale_factor=(2, 2))(conv6))
merge7 = torch.cat([conv3, up7], dim=1)
conv7 = self.conv7(merge7)
up8 = self.up8(nn.Upsample(scale_factor=(2, 2))(conv7))
merge8 = torch.cat([conv2, up8], dim=1)
conv8 = self.conv8(merge8)
up9 = self.up9(nn.Upsample(scale_factor=(2, 2))(conv8))
merge9 = torch.cat([conv1, up9, inputs], dim=1)
conv9 = self.conv9(merge9)
residual = self.residual(conv9)
return residual
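# Illustrative shape check for the encoder above (a minimal sketch; the batch
# size and random inputs are assumptions). The 100-dimensional secret is
# projected to 3x50x50, upsampled 8x to 400x400, concatenated with the RGB
# image (6 input channels) and passed through the U-Net to give a residual.
def _example_encoder_shapes():
    encoder = StegaStampEncoder()
    secret = torch.rand(2, 100)
    image = torch.rand(2, 3, 400, 400)
    residual = encoder((secret, image))
    assert tuple(residual.shape) == (2, 3, 400, 400)
    return residual.shape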
class SpatialTransformerNetwork(nn.Module):
def __init__(self):
super(SpatialTransformerNetwork, self).__init__()
self.localization = nn.Sequential(
Conv2D(3, 32, 3, strides=2, activation='relu'),
Conv2D(32, 64, 3, strides=2, activation='relu'),
Conv2D(64, 128, 3, strides=2, activation='relu'),
Flatten(),
Dense(320000, 128, activation='relu'),
nn.Linear(128, 6)
)
self.localization[-1].weight.data.fill_(0)
self.localization[-1].bias.data = torch.FloatTensor([1, 0, 0, 0, 1, 0])
def forward(self, image):
theta = self.localization(image)
theta = theta.view(-1, 2, 3)
grid = F.affine_grid(theta, image.size(), align_corners=False)
transformed_image = F.grid_sample(image, grid, align_corners=False)
return transformed_image
class StegaStampDecoder(nn.Module):
def __init__(self, secret_size=100):
super(StegaStampDecoder, self).__init__()
self.secret_size = secret_size
self.stn = SpatialTransformerNetwork()
self.decoder = nn.Sequential(
Conv2D(3, 32, 3, strides=2, activation='relu'),
Conv2D(32, 32, 3, activation='relu'),
Conv2D(32, 64, 3, strides=2, activation='relu'),
Conv2D(64, 64, 3, activation='relu'),
Conv2D(64, 64, 3, strides=2, activation='relu'),
Conv2D(64, 128, 3, strides=2, activation='relu'),
Conv2D(128, 128, 3, strides=2, activation='relu'),
Flatten(),
Dense(21632, 512, activation='relu'),
Dense(512, secret_size, activation='sigmoid'))
def forward(self, image):
image = image - .5
transformed_image = self.stn(image)
return self.decoder(transformed_image)
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.model = nn.Sequential(
Conv2D(3, 8, 3, strides=2, activation='relu'),
Conv2D(8, 16, 3, strides=2, activation='relu'),
Conv2D(16, 32, 3, strides=2, activation='relu'),
Conv2D(32, 64, 3, strides=2, activation='relu'),
Conv2D(64, 1, 3, activation=None))
def forward(self, image):
x = image - .5
x = self.model(x)
output = torch.mean(x)
return output, x
def transform_net(encoded_image, args, global_step):
sh = encoded_image.size()
ramp_fn = lambda ramp: np.min([global_step / ramp, 1.])
rnd_bri = ramp_fn(args.rnd_bri_ramp) * args.rnd_bri
rnd_hue = ramp_fn(args.rnd_hue_ramp) * args.rnd_hue
rnd_brightness = utils.get_rnd_brightness_torch(rnd_bri, rnd_hue, args.batch_size) # [batch_size, 3, 1, 1]
jpeg_quality = 100. - torch.rand(1)[0] * ramp_fn(args.jpeg_quality_ramp) * (100. - args.jpeg_quality)
rnd_noise = torch.rand(1)[0] * ramp_fn(args.rnd_noise_ramp) * args.rnd_noise
contrast_low = 1. - (1. - args.contrast_low) * ramp_fn(args.contrast_ramp)
contrast_high = 1. + (args.contrast_high - 1.) * ramp_fn(args.contrast_ramp)
contrast_params = [contrast_low, contrast_high]
rnd_sat = torch.rand(1)[0] * ramp_fn(args.rnd_sat_ramp) * args.rnd_sat
# blur
N_blur = 7
f = utils.random_blur_kernel(probs=[.25, .25], N_blur=N_blur, sigrange_gauss=[1., 3.], sigrange_line=[.25, 1.],
wmin_line=3)
if args.cuda:
f = f.cuda()
encoded_image = F.conv2d(encoded_image, f, bias=None, padding=int((N_blur - 1) / 2))
# noise
noise = torch.normal(mean=0, std=rnd_noise, size=encoded_image.size(), dtype=torch.float32)
if args.cuda:
noise = noise.cuda()
encoded_image = encoded_image + noise
encoded_image = torch.clamp(encoded_image, 0, 1)
# contrast & brightness
contrast_scale = torch.Tensor(encoded_image.size()[0]).uniform_(contrast_params[0], contrast_params[1])
contrast_scale = contrast_scale.reshape(encoded_image.size()[0], 1, 1, 1)
if args.cuda:
contrast_scale = contrast_scale.cuda()
rnd_brightness = rnd_brightness.cuda()
encoded_image = encoded_image * contrast_scale
encoded_image = encoded_image + rnd_brightness
encoded_image = torch.clamp(encoded_image, 0, 1)
# saturation
sat_weight = torch.FloatTensor([.3, .6, .1]).reshape(1, 3, 1, 1)
if args.cuda:
sat_weight = sat_weight.cuda()
encoded_image_lum = torch.mean(encoded_image * sat_weight, dim=1).unsqueeze_(1)
encoded_image = (1 - rnd_sat) * encoded_image + rnd_sat * encoded_image_lum
# jpeg
encoded_image = encoded_image.reshape([-1, 3, 400, 400])
if not args.no_jpeg:
encoded_image = utils.jpeg_compress_decompress(encoded_image, rounding=utils.round_only_at_0,
quality=jpeg_quality)
crop_scale = 1 - 0.4 * ramp_fn(2e4)
cropper = RandomResizedCrop((400, 400), (crop_scale, 1.))
encoded_image = cropper(encoded_image)
return encoded_image
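# Orientation note for the augmentation chain above: blur -> additive Gaussian
# noise -> contrast/brightness jitter -> desaturation -> (optional)
# differentiable JPEG -> random resized crop. Each perturbation strength is
# ramped linearly with global_step via ramp_fn, so early training sees nearly
# clean encoded images and later training sees the full distortion pipeline.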
def get_secret_acc(secret_true, secret_pred):
if 'cuda' in str(secret_pred.device):
secret_pred = secret_pred.cpu()
secret_true = secret_true.cpu()
secret_pred = torch.round(secret_pred)
correct_pred = torch.sum((secret_pred - secret_true) == 0, dim=1)
str_acc = 1.0 - torch.sum((correct_pred - secret_pred.size()[1]) != 0).numpy() / correct_pred.size()[0]
bit_acc = torch.sum(correct_pred).numpy() / secret_pred.numel()
return bit_acc, str_acc
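# Worked example for the accuracy metrics above (a minimal sketch with made-up
# 4-bit secrets): one secret decoded perfectly and one with a single wrong bit
# give bit_acc = 7/8 = 0.875 and str_acc (all bits correct) = 1/2 = 0.5.
def _example_secret_acc():
    secret_true = torch.tensor([[1., 0., 1., 0.], [1., 1., 0., 0.]])
    secret_pred = torch.tensor([[0.9, 0.1, 0.8, 0.2], [0.9, 0.2, 0.1, 0.1]])
    bit_acc, str_acc = get_secret_acc(secret_true, secret_pred)
    assert abs(bit_acc - 0.875) < 1e-6 and abs(str_acc - 0.5) < 1e-6
    return bit_acc, str_acc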
class LossCombine(nn.Module):
def __init__(self, initial):
super(LossCombine, self).__init__()
self.weight = nn.Parameter(torch.tensor(initial, dtype=torch.float32), requires_grad=True)
def forward(self, losses):
positive_weight = F.relu(self.weight)
num = self.weight.shape[-1]
loss_combine = torch.log(positive_weight + 1e-6).sum()
for i in range(num):
loss_combine += losses[i] / positive_weight[i]
return loss_combine
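# Minimal usage sketch for LossCombine (initial weights and loss values are
# assumptions). The module learns one positive weight per loss term and
# returns sum_i(loss_i / w_i) + sum_i(log(w_i + 1e-6)), an uncertainty-style
# weighting whose weights are updated by backpropagation alongside the model.
def _example_loss_combine():
    combiner = LossCombine([1.0, 1.0, 1.0])
    losses = [torch.tensor(0.5), torch.tensor(2.0), torch.tensor(1.0)]
    total = combiner(losses)  # scalar tensor suitable for total.backward()
    return total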
def build_model(encoder, decoder, discriminator, loss_combine, lpips_fn, secret_input, image_input, l2_edge_gain,
M, loss_scales, yuv_scales, args, global_step, writer):
input_warped = torchgeometry.warp_perspective(image_input, M[:, 1, :, :], dsize=(400, 400), flags='bilinear')
mask_warped = torchgeometry.warp_perspective(torch.ones_like(input_warped), M[:, 1, :, :], dsize=(400, 400),
flags='bilinear')
input_warped += (1 - mask_warped) * image_input
residual_warped = encoder((secret_input, input_warped))
image_rate = 5 * np.min([global_step / 2e4, 1.])
encoded_warped = residual_warped + (input_warped - 0.5)
encoded_warped = nn.Sigmoid()(encoded_warped)
mask = torchgeometry.warp_perspective(torch.ones_like(encoded_warped), M[:, 0, :, :], dsize=(400, 400),
flags='bilinear')
encoded_image = torchgeometry.warp_perspective(encoded_warped, M[:, 0, :, :], dsize=(400, 400),
flags='bilinear')
encoded_image += (1 - mask) * image_input
borders = args.borders
if borders == 'no_edge':
D_output_real, _ = discriminator(image_input)
D_output_fake, D_heatmap = discriminator(encoded_image)
else:
D_output_real, _ = discriminator(input_warped)
D_output_fake, D_heatmap = discriminator(encoded_warped)
transformed_image = transform_net(encoded_image, args, global_step)
decoded_secret = decoder(transformed_image)
bit_acc, str_acc = get_secret_acc(secret_input, decoded_secret)
normalized_input = image_input * 2 - 1
normalized_encoded = encoded_image * 2 - 1
lpips_loss = torch.mean(lpips_fn(normalized_input, normalized_encoded))
cross_entropy = nn.BCELoss()
if args.cuda:
cross_entropy = cross_entropy.cuda()
secret_loss = cross_entropy(decoded_secret, secret_input)
'''
size = (int(image_input.shape[2]), int(image_input.shape[3]))
gain = 10
falloff_speed = 4
falloff_im = np.ones(size)
for i in range(int(falloff_im.shape[0] / falloff_speed)): # for i in range 100
falloff_im[-i, :] *= (np.cos(4 * np.pi * i / size[0] + np.pi) + 1) / 2 # [cos[(4*pi*i/400)+pi] + 1]/2
falloff_im[i, :] *= (np.cos(4 * np.pi * i / size[0] + np.pi) + 1) / 2 # [cos[(4*pi*i/400)+pi] + 1]/2
for j in range(int(falloff_im.shape[1] / falloff_speed)):
falloff_im[:, -j] *= (np.cos(4 * np.pi * j / size[0] + np.pi) + 1) / 2
falloff_im[:, j] *= (np.cos(4 * np.pi * j / size[0] + np.pi) + 1) / 2
falloff_im = 1 - falloff_im
falloff_im = torch.from_numpy(falloff_im).float()
if args.cuda:
falloff_im = falloff_im.cuda()
falloff_im *= l2_edge_gain
'''
encoded_image_yuv = color.rgb_to_yuv(encoded_image)
image_input_yuv = color.rgb_to_yuv(image_input)
im_diff = encoded_image_yuv - image_input_yuv
# im_diff += im_diff * falloff_im.unsqueeze_(0)
yuv_loss = torch.mean((im_diff) ** 2, axis=[0, 2, 3])
yuv_scales = torch.Tensor(yuv_scales)
if args.cuda:
yuv_scales = yuv_scales.cuda()
image_loss = torch.dot(yuv_loss, yuv_scales)
D_loss = D_output_real - D_output_fake
G_loss = D_output_fake
if args.no_gan:
loss = loss_combine([secret_loss, image_loss, lpips_loss])
else:
loss = loss_combine([secret_loss, image_loss, lpips_loss, G_loss])
writer.add_scalar('loss/image_loss', image_loss, global_step)
writer.add_scalar('loss/lpips_loss', lpips_loss, global_step)
writer.add_scalar('loss/secret_loss', secret_loss, global_step)
writer.add_scalar('loss/G_loss', G_loss, global_step)
writer.add_scalar('loss/secret_weight', loss_combine.weight[0], global_step)
writer.add_scalar('loss/image_weight', loss_combine.weight[1], global_step)
writer.add_scalar('loss/lpips_weight', loss_combine.weight[2], global_step)
writer.add_scalar('residual/max', residual_warped.max(), global_step)
writer.add_scalar('residual/min', residual_warped.min(), global_step)
writer.add_scalar('metric/bit_acc', bit_acc, global_step)
writer.add_scalar('metric/str_acc', str_acc, global_step)
if global_step % 100 == 0:
'''
writer.add_image('input/image_input', image_input[0], global_step)
| |
<filename>tests/unit/test_parser.py
from pprint import pprint
import pytest
def test_select_star(parser):
tree = parser.parse('SELECT * from bar')
assert tree.query_type == "SELECT"
assert tree.table == 'bar'
assert tree.expressions == [
(
('*', None),
None
)
]
def test_select_table_wild(parser):
tree = parser.parse('SELECT bar.* from bar')
assert tree.query_type == "SELECT"
assert tree.table == 'bar'
assert tree.expressions == [
(
('*', 'bar'),
None
)
]
@pytest.mark.parametrize('query', [
'SELECT VERSION()',
'select version()'
])
def test_select_version(query, parser):
tree = parser.parse(query)
assert tree.query_type == "SELECT"
assert tree.table is None
pprint(tree.expressions)
assert tree.expressions == [
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('function_call', 'VERSION')
)
)
)
),
None
)
]
def test_select_version_table_wild(parser):
tree = parser.parse('SELECT VERSION(), bar.* FROM bar')
assert tree.query_type == "SELECT"
assert tree.table == 'bar'
pprint(tree.expressions)
assert tree.expressions == [
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('function_call', 'VERSION')
)
)
)
),
None
),
(
('*', 'bar'),
None
)
]
@pytest.mark.parametrize('query,table,fields', [
(
"""CREATE TABLE `django_migrations` (
`id` INTEGER AUTO_INCREMENT NOT NULL PRIMARY KEY,
`app` VARCHAR(255) NOT NULL,
`name` VARCHAR(255) NOT NULL,
`applied` DATETIME NOT NULL)""",
'django_migrations',
{
'id': {
'type': 'INTEGER',
'options': {
'auto_increment': True,
'nullable': False,
'primary': True
}
},
'app': {
'type': 'VARCHAR',
'options': {
'nullable': False
}
},
'name': {
'type': 'VARCHAR',
'options': {
'nullable': False
}
},
'applied': {
'type': 'DATETIME',
'options': {
'nullable': False
}
}
}
),
(
'CREATE TABLE `auth_group_permissions` ('
'`id` INTEGER AUTO_INCREMENT NOT NULL PRIMARY KEY, '
'`group_id` INTEGER NOT NULL, '
'`permission_id` INTEGER NOT NULL)',
'auth_group_permissions',
{
'id': {
'type': 'INTEGER',
'options': {
'auto_increment': True,
'nullable': False,
'primary': True
}
},
'group_id': {
'type': 'INTEGER',
'options': {
'nullable': False
}
},
'permission_id': {
'type': 'INTEGER',
'options': {
'nullable': False
}
}
}
),
(
'CREATE TABLE `t1` ('
'`id` INT)',
't1',
{
'id': {
'type': 'INT',
'options': {
'nullable': True
}
}
}
),
(
'CREATE TABLE t2 ('
'`id` INT)',
't2',
{
'id': {
'type': 'INT',
'options': {
'nullable': True
}
}
}
),
(
'CREATE TABLE 1t ('
'`id` INT)',
'1t',
{
'id': {
'type': 'INT',
'options': {
'nullable': True
}
}
}
),
(
'CREATE TABLE 1t1 ('
'`id` INT)',
'1t1',
{
'id': {
'type': 'INT',
'options': {
'nullable': True
}
}
}
),
(
"""
CREATE TABLE `django_admin_log` (
`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY,
`action_time` datetime NOT NULL,
`object_id` longtext NULL,
`object_repr` varchar(200) NOT NULL,
`action_flag` smallint UNSIGNED NOT NULL,
`change_message` longtext NOT NULL,
`content_type_id` integer NULL,
`user_id` integer NOT NULL)
""",
'django_admin_log',
{
'id': {
'type': 'INTEGER',
'options': {
'auto_increment': True,
'nullable': False,
'primary': True
}
},
'action_time': {
'type': 'DATETIME',
'options': {
'nullable': False,
}
},
'object_id': {
'type': 'LONGTEXT',
'options': {
'nullable': True,
}
},
'object_repr': {
'type': 'VARCHAR',
'options': {
'nullable': False,
}
},
'action_flag': {
'type': 'SMALLINT',
'options': {
'nullable': False
}
},
'change_message': {
'type': 'LONGTEXT',
'options': {
'nullable': False
}
},
'content_type_id': {
'type': 'INTEGER',
'options': {
'nullable': True
}
},
'user_id': {
'type': 'INTEGER',
'options': {
'nullable': False
}
}
}
),
(
"""
CREATE TABLE `auth_user` (
`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY,
`password` varchar(128) NOT NULL,
`last_login` datetime(6) NOT NULL,
`is_superuser` bool NOT NULL,
`username` varchar(30) NOT NULL UNIQUE,
`first_name` varchar(30) NOT NULL,
`last_name` varchar(30) NOT NULL,
`email` varchar(75) NOT NULL,
`is_staff` bool NOT NULL,
`is_active` bool NOT NULL,
`date_joined` datetime(6) NOT NULL)
""",
'auth_user',
{
'id': {
'type': 'INTEGER',
'options': {
'auto_increment': True,
'nullable': False,
'primary': True
}
},
'password': {
'type': 'VARCHAR',
'options': {
'nullable': False,
}
},
'last_login': {
'type': 'DATETIME',
'options': {
'nullable': False,
}
},
'is_superuser': {
'type': 'BOOL',
'options': {
'nullable': False,
}
},
'username': {
'type': 'VARCHAR',
'options': {
'unique': True,
'nullable': False
}
},
'first_name': {
'type': 'VARCHAR',
'options': {
'nullable': False,
}
},
'last_name': {
'type': 'VARCHAR',
'options': {
'nullable': False,
}
},
'email': {
'type': 'VARCHAR',
'options': {
'nullable': False,
}
},
'is_staff': {
'type': 'BOOL',
'options': {
'nullable': False,
}
},
'is_active': {
'type': 'BOOL',
'options': {
'nullable': False,
}
},
'date_joined': {
'type': 'DATETIME',
'options': {
'nullable': False,
}
}
}
),
(
"CREATE TABLE `auth_group` ("
"`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, "
"`name` varchar(80) NOT NULL UNIQUE)",
'auth_group',
{
'id': {
'type': 'INTEGER',
'options': {
'auto_increment': True,
'nullable': False,
'primary': True
}
},
'name': {
'type': 'VARCHAR',
'options': {
'unique': True,
'nullable': False
}
}
}
)
])
def test_create_table(query, table, fields, parser):
tree = parser.parse(query)
assert tree.query_type == "CREATE_TABLE"
assert tree.table == table
pprint(tree.fields)
assert tree.fields == fields
def test_create_database(parser):
query = "CREATE DATABASE `foo`"
tree = parser.parse(query)
assert tree.query_type == "CREATE_DATABASE"
assert tree.db == 'foo'
def test_show_databases(parser):
query = "SHOW DATABASES"
tree = parser.parse(query)
assert tree.query_type == "SHOW_DATABASES"
def test_show_tables(parser):
query = "SHOW TABLES"
tree = parser.parse(query)
assert tree.query_type == "SHOW_TABLES"
assert not tree.options['full']
assert tree.success
def test_show_full_tables(parser):
query = "SHOW FULL TABLES"
tree = parser.parse(query)
assert tree.query_type == "SHOW_TABLES"
assert tree.options['full']
assert tree.success
def test_use_database(parser):
query = "USE `foo`"
tree = parser.parse(query)
assert tree.query_type == "USE_DATABASE"
assert tree.db == 'foo'
def test_create_table_int(parser):
tree = parser.parse('CREATE TABLE t(id INT)')
assert tree.success
def test_select_fields_from(parser):
query = """SELECT `django_migrations`.`app`, `django_migrations`.`name` FROM `django_migrations`"""
tree = parser.parse(query)
assert tree.success
assert tree.query_type == "SELECT"
assert tree.table == "django_migrations"
pprint(tree.expressions)
assert tree.expressions == [
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('IDENTIFIER', 'django_migrations.app')
)
)
)
),
None),
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('IDENTIFIER', 'django_migrations.name')
)
)
)
),
None
)
]
@pytest.mark.parametrize('query', [
"SELECT app, foo FROM `django_migrations`",
"SELECT `app`, `foo` FROM `django_migrations`"
])
def test_select_short_fields_from(query, parser):
tree = parser.parse(query)
assert tree.success
assert tree.query_type == "SELECT"
assert tree.table == "django_migrations"
print(tree.expressions)
assert tree.expressions == [
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('IDENTIFIER', 'app')
)
)
)
),
None),
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('IDENTIFIER', 'foo')
)
)
)
),
None
)
]
def test_select_two_func(parser):
query = "SELECT VERSION(), VERSION()"
tree = parser.parse(query)
assert tree.success
assert tree.query_type == "SELECT"
print(tree.expressions)
assert tree.expressions == [
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('function_call', 'VERSION')
)
)
)
),
None
),
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('function_call', 'VERSION')
)
)
)
),
None
)
]
def test_select_var(parser):
query = "SELECT @@sql_mode"
tree = parser.parse(query)
assert tree.success
assert tree.query_type == "SELECT"
assert tree.expressions == [
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('variable', 'sql_mode')
)
)
)
),
None
)
]
def test_select_var_SQL_AUTO_IS_NULL(parser):
query = "SELECT @@SQL_AUTO_IS_NULL"
tree = parser.parse(query)
assert tree.success
assert tree.query_type == "SELECT"
assert tree.expressions == [
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('variable', 'SQL_AUTO_IS_NULL')
)
)
)
),
None
)
]
def test_commit(parser):
query = "COMMIT"
tree = parser.parse(query)
assert tree.success
assert tree.query_type == "COMMIT"
def test_select_from_tbl(parser):
query = """SELECT f1 FROM t1"""
tree = parser.parse(query)
assert tree.success
assert tree.table == 't1'
assert tree.db is None
assert tree.expressions == [
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('IDENTIFIER', 'f1')
)
)
)
),
None
),
]
def test_select_from_db_tbl(parser):
query = """SELECT f1, f2 FROM d1.t1"""
tree = parser.parse(query)
assert tree.success
assert tree.table == 't1'
assert tree.db == 'd1'
assert tree.expressions == [
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('IDENTIFIER', 'f1')
)
)
)
),
None
),
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('IDENTIFIER', 'f2')
)
)
)
),
None
),
]
@pytest.mark.parametrize('query,table,expressions,where', [
(
"SELECT f1, f2 FROM t1 WHERE f1 = 'foo'",
't1',
[
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('IDENTIFIER', 'f1')
)
)
)
),
None
),
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('IDENTIFIER', 'f2')
)
)
)
),
None
),
],
('bool_primary',
('=',
('predicate', ('bit_expr', ('simple_expr', ('IDENTIFIER', 'f1')))),
('bit_expr', ('simple_expr', ('literal', 'foo')))))
),
(
"""
SELECT `id`,
`app_label`,
`model`
FROM `django_content_type`
WHERE `model` = 'logentry'
AND `app_label` = 'admin'
""",
'django_content_type',
[
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('IDENTIFIER', 'id')
)
)
)
),
None
),
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('IDENTIFIER', 'app_label')
)
)
)
),
None
),
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('IDENTIFIER', 'model')
)
)
)
),
None
),
],
('AND',
('bool_primary',
('=',
('predicate', ('bit_expr', ('simple_expr', ('IDENTIFIER', 'model')))),
('bit_expr', ('simple_expr', ('literal', 'logentry'))))),
('bool_primary',
('=',
('predicate', ('bit_expr', ('simple_expr', ('IDENTIFIER', 'app_label')))),
('bit_expr', ('simple_expr', ('literal', 'admin'))))))
),
(
"""
SELECT `django_content_type`.`id`,
`django_content_type`.`app_label`,
`django_content_type`.`model`
FROM `django_content_type`
WHERE `django_content_type`.`model` = 'logentry'
AND | |
== 2 :
s += 'bi'
l[elem] = s
# if mutants :
else :
for elem in lumen_list[:, 0] :
if elem in border_set :
if elem in wild_list :
l[elem] = 'TEmi'
elif elem in mutants_list :
l[elem] = 'mutantsmi'
else :
l[elem] = 'wild_mutantsmi'
else :
l[elem] = 'ICMmi'
return l
def gen_folders(nfold, name='config', abs_path = '~/cavitation/network/') :
"""
    gen_folders(nfold, name='config', abs_path = '~/cavitation/network/')
Generate folders for simulations
Inputs
------
nfold : int
Number of folders to generate
name : str, optional, default : config
Name of the folders
abs_path : str, path, default : ~/cavitation/network/
Absolute path for the folders.
Returns
-------
list_dir : list
List of the directories created
"""
list_dir = []
for n in range(nfold) :
s = os.path.join(abs_path, name+ str(n).zfill(4))
list_dir += [s]
if not os.path.isdir(s) :
os.mkdir(s)
os.mkdir(os.path.join(s, 'network'))
return list_dir
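# Illustrative usage sketch (the base path is an assumption; os is assumed to
# be imported as elsewhere in this module). gen_folders(3, ...) creates
# config0000..config0002, each with an empty 'network' subfolder, and returns
# their paths.
def _example_gen_folders(base='/tmp/cavitation_demo'):
    if not os.path.isdir(base):
        os.mkdir(base)
    dirs = gen_folders(3, name='config', abs_path=base)
    # dirs == [base + '/config0000', base + '/config0001', base + '/config0002']
    return dirs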
def area_distribution(vertices, vol_avg, vol_std, threshold = 0.1) :
"""
    area_distribution(vertices, vol_avg, vol_std, threshold=0.1)
Parameters
----------
vertices : list
List of the vertex of the network
vol_avg : float
Average volume
vol_std : float
        Standard deviation of the volume
threshold : float, optional, default : 0.1
Minimal area allowed for the distribution
Returns
-------
area_list : dict
        Dictionary of areas, indexed by corresponding vertex index.
"""
    # Area distribution
area_list = {}
for elem in vertices :
p = np.random.normal(loc=vol_avg, scale=vol_std)
while p <= threshold :
p = np.random.normal(loc=vol_avg, scale=vol_std)
area_list[elem] = p
return area_list
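# Illustrative usage sketch (vertex indices and distribution parameters are
# assumptions): one area per vertex is drawn from N(vol_avg, vol_std),
# redrawing until the value exceeds the threshold, keyed by vertex index.
def _example_area_distribution():
    areas = area_distribution(vertices=[0, 1, 2], vol_avg=1.0, vol_std=0.3)
    # e.g. {0: 1.12, 1: 0.87, 2: 1.40}; every value is strictly above 0.1
    return areas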
def write_init(init_config, pathname) :
"""
write_init(init_config, pathname)
Write into the init_config file the path of the folder where to run the simulation
Inputs
------
init_config : str, path
Init configuration file, usually filename.ini
pathname : str, path
Path of the folder where to run the simulation.
"""
config = configparser.ConfigParser()
config.read(init_config)
config.set('network', 'path', pathname)
    with open(init_config, 'w') as configfile :
config.write(configfile)
def write_lumen_coord(folder, lumen_list, lumen_positions) :
"""
write_lumen_coord(folder, lumen_list, lumen_positions)
Write the coordinates of the lumens in file 'lumen_coord.dat'. The line index in the file is the vertex index.
Inputs
------
folder : str, path
Folder where to store the file.
lumen_list : array or list
Array of vertices of the graph, namely the lumens.
lumen_positions : array or list
Array of the coordinates of the lumens.
Returns
-------
0 if there is no problem.
Structure
---------
pos_x pos_y
0. 0.
1.4 3.5
... ...
"""
filename = 'lumen_coord.dat'
pos_x, pos_y = lumen_positions[lumen_list[:]][:,0], lumen_positions[lumen_list[:]][:,1]
fi = open(os.path.join(folder, filename), 'w')
fi.write('# # coordinates of each lumen, row represents the ID of the lumen \n')
fi.write('# # coordinates in this model do not change over time, empty lumen are included\n')
fi.write('# # x y\n')
for i in range(len(pos_x)) :
fi.write(str(pos_x[i]) + '\t' + str(pos_y[i]) + '\n' )
fi.close()
return 0
def write_lumen(folder, lumen_list, gamma_list, gamma_c_list, border_list, area_list, lum_type) :
"""
write_lumen(folder, lumen_list, gamma_list, gamma_c_list, border_list, area_list, lum_type)
Write the lumen.dat file
Parameters
----------
folder : str, path
Folder where to store the file.
lumen_list : array or list
Array of vertices of the graph, namely the lumens.
gamma_list : list
List of the tension gamma associated with each vertex
gamma_c_list : list
List of the (adhesion) tension gamma_c associated with each vertex
border_list : list
List of the vertex belonging to the borders of the graph.
area_list : list
List of the initial area of each vertex.
lum_type : list
List of the type of each vertex (0 : ICM-multi, 1 : TE-multi, 2 : ICM-bi, 3 : TE-multi, ...)
Returns
-------
1 if no problem.
Structure
---------
gamma1 gamma2 gamma_c area boundary
1 1. 0.4 3 0
1 1. 0.4 2.3 1
"""
def flag(type_lum) :
if type_lum == 'ICMmi' :
return 0
elif type_lum == 'TEmi' :
return 1
elif type_lum == 'ICMbi' :
return 2
elif type_lum == 'TEbi' :
return 3
elif type_lum == 'mutantsmi' :
return 4
elif type_lum == 'wild_mutantsmi' :
return 5
elif type_lum == 'mutantsbi' :
return 6
filename = 'lumen.dat'
fi = open(os.path.join(folder, filename), 'w')
e = np.sort(list(lumen_list))
for i in range(len(e)) :
fi.write(str(gamma_list[e[i]]) + '\t' + str(gamma_list[e[i]]) + '\t' + str(gamma_c_list[e[i]]) + '\t' + str(area_list[e[i]]) + '\t' + str(flag(lum_type[i])) + '\n')
fi.close()
return 1
def write_lumen_lumen(folder, edge_list, R_list) :
"""
write_lumen_lumen(folder, edge_list, R_list)
Write the lumen_lumen.dat file
Parameters
----------
folder : str, path
Folder where to store the file.
edge_list : array or list
Array of the edges of the graph.
R_list : list
List of the hydraulic resistance of each edge (no friction for the moment)
Returns
-------
2 if no problem.
NB : the file is such that for edge = [ID1, ID2], ID1 < ID2. Moreover, from one line to the next,
the ID1 is in growing order.
Structure
---------
ID1 ID2 distance
0 1 1.0
0 4 4.
1 2 1.0
1 4 3.
2 3 4.5
3 4 3.1
"""
filename = 'lumen_lumen.dat'
fi = open(os.path.join(folder, filename), 'w')
fi.write('# # triangle with lumen in the center\n')
    fi.write('# # contains the connections between lumen, in the structure of a graph \n')
fi.write('# # lumen have an ID starting from 0, sorted such that, smallest on top and ID1 < ID2 \n')
fi.write('# # ID1 ID2 distance\n')
for i in range(len(edge_list)) :
fi.write(str(edge_list[i][0]) + '\t' + str(edge_list[i][1]) + '\t' + str(R_list[i]) + '\n' )
fi.close()
return 2
def write_bridge_lumen(folder, edge_list, bridge_list, R_list) :
"""
write_bridge_lumen(folder, edge_list, bridge_list, R_list)
Write the bridge_lumen.dat file
Parameters
----------
folder : str, path
Folder where to store the file.
edge_list : array or list
Array of the edges of the graph.
bridge_list : array or list
Array of the bridges of the graph.
R_list : list
List of the hydraulic resistance of each edge (no friction for the moment)
Returns
-------
3 if no problem.
NB : the file is such that for edge = [ID1, ID2], ID1 < ID2. Moreover, from one line to the next,
the ID1 is in growing order.
NB : the ID1 is ALWAYS a bridge
NB : the indices of the bridges start from 0 when created.
Structure
---------
ID1 ID2 distance
0 1 1.0
0 4 4.
1 2 1.0
1 4 3.
2 3 4.5
3 4 3.1
"""
filename = 'bridge_lumen.dat'
fi = open(os.path.join(folder, filename), 'w')
fi.write('# # triangle with lumen in the center\n')
    fi.write('# # contains the connections between lumen and bridges (empty lumen), in the structure of a graph\n')
fi.write('# # lumen and bridges have an ID starting from 0; there are lumen and bridges with the same ID, sorted by the ID of the bridge\n')
fi.write('# # bridge lumen distance\n')
fi.close()
return 3
def write_bridge_bridge(folder, bridge_list, R_list) :
"""
write_bridge_bridge(folder, bridge_list, R_list)
Write the bridge_bridge.dat file
Parameters
----------
folder : str, path
Folder where to store the file.
bridge_list : array or list
Array of the bridges of the graph.
bridge_list : array or list
Array of the bridges of the graph.
R_list : list
List of the hydraulic resistance of each edge (no friction for the moment)
Returns
-------
4 if no problem.
NB : the file is such that for edge = [ID1, ID2], ID1 < ID2. Moreover, from one line to the next,
the ID1 is in growing order.
NB : the indices of the bridges start from 0 when created.
    See the correspondence with preexisting vertices in conversion.dat
Structure
---------
ID1 ID2 distance
0 1 1.0
0 4 4.
1 2 1.0
1 4 3.
2 3 4.5
3 4 3.1
"""
filename = 'bridge_bridge.dat'
fi = open(os.path.join(folder, filename), 'w')
fi.write('# # triangle | |
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/swissborg",
"youtube": ""
}
},
"NMR": {
"symbol": "NMR",
"address": "0x1776e1F26f98b1A5dF9cD347953a26dd3Cb46671",
"decimals": 18,
"name": "Numerai",
"ens_address": "",
"website": "https://numer.ai",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/numerai",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/numerai",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/numerai",
"youtube": ""
}
},
"VIBE": {
"symbol": "VIBE",
"address": "0xe8Ff5C9c75dEb346acAc493C463C8950Be03Dfba",
"decimals": 18,
"name": "<NAME>",
"ens_address": "",
"website": "https://www.vibehub.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://www.facebook.com/vibehubvr",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/VibeHub",
"slack": "https://join.slack.com/t/vibehub/shared_invite/<KEY>",
"telegram": "",
"twitter": "https://twitter.com/VibeHubVR",
"youtube": ""
}
},
"KEE": {
"symbol": "KEE",
"address": "0x72D32ac1c5E66BfC5b08806271f8eEF915545164",
"decimals": 0,
"name": "CryptoKEE",
"ens_address": "",
"website": "",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"MRV": {
"symbol": "MRV",
"address": "0xAB6CF87a50F17d7F5E1FEaf81B6fE9FfBe8EBF84",
"decimals": 18,
"name": "MRV",
"ens_address": "",
"website": "https://macroverse.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "https://matrix.to/#/#macroverse:matrix.org",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"BIT": {
"symbol": "BIT",
"name": "BlockEstate Investment Token",
"type": "ERC20",
"address": "0x089B85FA15f72c1088CBbef23a49DB80B91DD521",
"ens_address": "",
"decimals": 8,
"website": "https://www.blockestate.net",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/blockestate",
"chat": "",
"facebook": "https://www.facebook.com/BlockEstate",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "https://www.linkedin.com/company/block-estate/",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/block_estate",
"youtube": ""
}
},
"MRL": {
"symbol": "MRL",
"address": "0x82125AFe01819Dff1535D0D6276d57045291B6c0",
"decimals": 18,
"name": "Marcelo",
"ens_address": "",
"website": "https://moneyrebel.io/",
"logo": {
"src": "https://static.wixstatic.com/media/3e9a6a_15e519bd6672449182b4c3c557e49660~mv2.jpg/v1/fill/w_110,h_108,al_c,q_80,usm_0.66_1.00_0.01/3e9a6a_15e519bd6672449182b4c3c557e49660~mv2.webp",
"width": "86",
"height": "86",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://t.me/joinchat/IGyNLA9UybLEpfDZYX5xpQ"
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://www.facebook.com/marcelomining/",
"forum": "",
"github": "https://github.com/MarceloMRL",
"gitter": "",
"instagram": "https://www.instagram.com/marcelo.mrl/",
"linkedin": "https://www.linkedin.com/company/marcelo/",
"reddit": "",
"slack": "",
"telegram": "https://t.me/joinchat/IGyNLA9UybLEpfDZYX5xp",
"twitter": "https://twitter.com/Mrl_io",
"youtube": ""
}
},
"IG": {
"symbol": "IG",
"name": "IGToken",
"type": "ERC20",
"address": "0x8a88f04e0c905054D2F33b26BB3A46D7091A039A",
"ens_address": "",
"decimals": 18,
"website": "http://igtoken.net",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/IGToken_net",
"youtube": ""
}
},
"Hdp": {
"symbol": "Hdp",
"address": "0xE9fF07809CCff05daE74990e25831d0Bc5cbe575",
"decimals": 18,
"name": "HEdpAY",
"ens_address": "",
"website": "http://hedpay.com",
"logo": {
"src": "http://hedpay.com/content/images/systemCustom/o5VT92nyPRvA7E5j7ij265rHsezBdwnk04bXYqoY0OTsUF4IzFEIubdyfRlkLcDH_28x28.png?version=4.7.1&width=809&height=509",
"width": 28,
"height": 28,
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "<EMAIL>"
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://www.facebook.com/hedpayltd",
"forum": "https://bitcointalk.org/index.php/HedPay",
"github": "https://github.com/HEDPAY",
"gitter": "",
"instagram": "https://www.instagram.com/myhedpay",
"linkedin": "https://www.linkedin.com/company/hedpay-ltd",
"reddit": "https://www.reddit.com/user/HEdpAY",
"slack": "https://hedpay.slack.com",
"telegram": "https://t.me/joinchat/GfkzpkPhHOM6kFZnhGbu2Q",
"twitter": "https://twitter.com/MyHEdpAY",
"youtube": ""
}
},
"DRVH": {
"symbol": "DRVH",
"address": "0x62D4c04644314F35868Ba4c65cc27a77681dE7a9",
"decimals": 18,
"name": "<NAME>",
"ens_address": "",
"website": "https://driveholic.com/",
"logo": {
"src": "https://airdrop.driveholic.com/icon/apple-icon-180x180.png",
"width": "180",
"height": "180",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://airdrop.driveholic.com/"
},
"social": {
"blog": "https://medium.com/@driveholicsite",
"chat": "",
"facebook": "",
"forum": "https://bitcointalk.org/index.php?topic=4613024",
"github": "https://github.com/TeamDriveholic",
"gitter": "",
"instagram": "https://www.instagram.com/driveholicsite/",
"linkedin": "",
"reddit": "https://www.reddit.com/r/driveholic/",
"slack": "",
"telegram": "https://t.me/driveholicairdrop",
"twitter": "https://twitter.com/driveholic",
"youtube": ""
}
},
"AIX": {
"symbol": "AIX",
"address": "0x1063ce524265d5a3A624f4914acd573dD89ce988",
"decimals": 18,
"name": "Aigang",
"ens_address": "",
"website": "https://aigang.network/",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/aigang-network",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/AigangNetwork",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "https://t.me/aigangnetwork",
"twitter": "https://twitter.com/aigangnetwork",
"youtube": ""
}
},
"DCA": {
"symbol": "DCA",
"address": "0x386Faa4703a34a7Fdb19Bec2e14Fd427C9638416",
"decimals": 18,
"name": "DoBetAcceptBet",
"ens_address": "",
"website": "http://www.dobetacceptbet.com",
"logo": {
"src": "https://cdn1.savepice.ru/uploads/2018/2/13/8c1a008b4e617ec75c4febe81f32ced5-full.png",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://www.facebook.com/betcoin.dobetacceptbet",
"forum": "",
"github": "https://github.com/dobetacceptbet",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/user/doBETacceptBET/",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/i/notifications",
"youtube": ""
}
},
"NET": {
"symbol": "NET",
"address": "0xcfb98637bcae43C13323EAa1731cED2B716962fD",
"decimals": 18,
"name": "NIMIQ",
"ens_address": "",
"website": "https://nimiq.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "https://nimiq-slackin.herokuapp.com",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"KC": {
"symbol": "KC",
"address": "0x0D6DD9f68d24EC1d5fE2174f3EC8DAB52B52BaF5",
"decimals": 18,
"name": "KMCC",
"ens_address": "",
"website": "https://www.kmcc.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"FNTB": {
"symbol": "FNTB",
"name": "Fintab",
"type": "ERC20",
"address": "0xbD4B60a138b3fce3584EA01f50c0908c18f9677A",
"ens_address": "",
"decimals": 8,
"website": "https://fintab.io/ico",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://steemit.com/@fintab",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://reddit.com/r/FinTab",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/fintab_io",
"youtube": ""
}
},
"BHPC": {
"symbol": "BHPC",
"name": "BHPCash",
"type": "ERC20",
"address": "0xEE74110fB5A1007b06282e0DE5d73A61bf41d9Cd",
"ens_address": "",
"decimals": 18,
"website": "https://bhpcash.io/bhpc/index.html",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://bhpcash.io/bhpc/community.html",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/bhpfinance",
"youtube": ""
}
},
"XES": {
"symbol": "XES",
"name": "Proxeus",
"type": "ERC20",
"address": "0xA017ac5faC5941f95010b12570B812C974469c2C",
"ens_address": "",
"decimals": 18,
"website": "https://proxeus.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/@proxeusapp_4423",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/proxeus",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/proxeusapp",
"youtube": ""
}
},
"DUBI": {
"symbol": "DUBI",
"address": "0xD4CffeeF10F60eCA581b5E1146B5Aca4194a4C3b",
"decimals": 18,
"name": "Decentralized Universal Basic Income",
"ens_address": "",
"website": "https://prps.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/nionis/purpose",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/PRPS",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/prps_io",
"youtube": ""
}
},
"BEE": {
"symbol": "BEE",
"address": "0x4D8fc1453a0F359e99c9675954e656D80d996FbF",
"decimals": 18,
"name": "Bee Token",
"ens_address": "",
"website": "https://www.beetoken.com",
"logo": {
"src": "https://etherscan.io/token/images/beetoken_28.png",
"width": 28,
"height": 28,
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/@thebeetoken",
"chat": "",
"facebook": "https://www.facebook.com/thebeetoken",
"forum": "",
"github": "https://github.com/thebeetoken",
"gitter": "",
"instagram": "https://www.instagram.com/thebeetoken",
"linkedin": "",
"reddit": "https://www.reddit.com/r/beetoken",
"slack": "",
"telegram": "https://t.me/beetoken",
"twitter": "https://twitter.com/thebeetoken",
"youtube": ""
}
},
"BTU": {
"symbol": "BTU",
"name": "BTU Protocol",
"type": "ERC20",
"address": "0xb683D83a532e2Cb7DFa5275eED3698436371cc9f",
"ens_address": "",
"decimals": 18,
"website": "https://btu-protocol.com",
"logo": {
"src": "https://btu-protocol.com/fr/favicon/favicon-32x32.png",
"width": 32,
"height": 32,
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "btu-protocol.com"
},
"social": {
"blog": "https://medium.com/@BTUProtocolTeam/latest",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/btuprotocol",
"gitter": "",
"instagram": "",
"linkedin": "https://www.linkedin.com/company/btu-protocol/",
"reddit": "",
"slack": "",
"telegram": "https://t.me/btucommunity",
"twitter": "https://twitter.com/BtuProtocol?lang=en",
"youtube": "https://www.youtube.com/channel/UC4TU0cH82u0kLeEomf26Z0g"
}
},
"OTN": {
"symbol": "OTN",
"name": "Open Trading Network",
"type": "ERC20",
"address": "0x881Ef48211982D01E2CB7092C915E647Cd40D85C",
"ens_address": "",
"decimals": 18,
"website": "https://otn.org",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/open_trading_network",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/otncoin",
"youtube": ""
}
},
"EMON": | |
<filename>URMC_CTSI_openbadge_analysis/Preprocessing.py
#For Hublog, import the following packages
from __future__ import absolute_import, division, print_function
import pandas as pd
import re
#for core, import the following packages
import crc16
#for raw, import the following packages
import json
import os
import datetime
#for metadata, import the following packages
import pandas as pd
import json
import io
#for proximity, import the following packages
import pandas as pd
import json
import collections
#from core import mac_address_to_id
def mac_address_to_id(mac):
"""Converts a MAC address to an id used by the badges for the proximity pings.
"""
# Convert the hex MAC string to raw bytes and reverse the byte order
# (str.decode('hex') is Python 2 only; Python 3 would need binascii.unhexlify).
macstr = mac.replace(':', '').decode('hex')[::-1]
crc = crc16.crc16xmodem("b" + macstr, 0xFFFF)
return crc
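# Illustrative usage (a sketch; the resulting CRC value is not verified here, and
# the function relies on Python 2's str.decode('hex')):
#   badge_id = mac_address_to_id('AA:BB:CC:DD:EE:FF')
#   # badge_id is the 16-bit CRC the badge reports as its numeric id in
#   # proximity pings when no member_id field is present in the log.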
#from .raw import split_raw_data_by_day
def split_raw_data_by_day(fileobject, target, kind, log_version=None):
"""Splits the data from a raw data file into a single file for each day.
Parameters
----------
fileobject : object, supporting tell, readline, seek, and iteration.
The raw data to be split, for instance, a file object open in read mode.
target : str
The directory into which the files will be written. This directory must
already exist.
kind : str
The kind of data being extracted, either 'audio' or 'proximity'.
log_version : str
The log version, in case no metadata is present.
"""
# The days fileobjects
# It's a mapping from iso dates (e.g. '2017-07-29') to fileobjects
days = {}
# Extract log version from metadata, if present
log_version = extract_log_version(fileobject) or log_version
if log_version not in ('1.0', '2.0'):
raise Exception('file log version was not set and cannot be identified')
if log_version == '1.0':
raise Exception('file version ' + str(log_version) + ' is no longer supported')
# Read each line
for line in fileobject:
data = json.loads(line)
# Keep only relevant data
if not data['type'] == kind + ' received':
continue
# Extract the day from the timestamp
day = datetime.date.fromtimestamp(data['data']['timestamp']).isoformat()
# If no fileobject exists for that day, create one
if day not in days:
days[day] = open(os.path.join(target, day), 'a')
# Write the data to the corresponding day file
json.dump(data, days[day])
days[day].write('\n')
# Close the per-day file handles
for f in days.values():
f.close()
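# Illustrative usage of split_raw_data_by_day (a sketch; the file name and target
# directory are hypothetical):
#   with open('hub_raw.log') as fileobject:
#       split_raw_data_by_day(fileobject, target='data/proximity_by_day',
#                             kind='proximity', log_version='2.0')
#   # Afterwards data/proximity_by_day/ holds one file per ISO date, e.g.
#   # '2017-07-29', containing that day's 'proximity received' records.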
#from .metadata import id_to_member_mapping
def id_to_member_mapping(mapper, time_bins_size='1min', tz='US/Eastern', fill_gaps=True):
"""Creates a pd.Series mapping member numeric IDs to the string
member key associated with them.
If the 'mapper' provided is a DataFrame, assumes it's metadata and that ID's
do not change mapping throughout the project, and proceeds to create a
Series with only a member index.
If the 'mapper' provided is a file object, assumes the old version of id_map
and creates a Series with a datetime and member index.
Parameters
----------
mapper : file object or pd.DataFrame
Either a proximity data file to read to determine the mapping, or a
metadata dataframe, as downloaded from the server, to map IDs to member keys.
Returns
-------
pd.Series :
The ID to member key mapping.
"""
if isinstance(mapper, io.BufferedIOBase) or isinstance(mapper, io.IOBase):
idmap = legacy_id_to_member_mapping(mapper, time_bins_size=time_bins_size, tz=tz, fill_gaps=fill_gaps)
return idmap
elif isinstance(mapper, pd.DataFrame):
idmap = {row.member_id: row.member for row in mapper.itertuples()}
return pd.DataFrame.from_dict(idmap, orient='index')[0].rename('member')
else:
raise ValueError("You must provide either a fileobject or metadata dataframe as the mapper.")
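# Illustrative usage of id_to_member_mapping (a sketch; variable and file names
# are hypothetical):
#   idmap = id_to_member_mapping(members_metadata)           # metadata DataFrame
#   with open('proximity/2017-07-29') as f:                  # legacy file path
#       idmap = id_to_member_mapping(f, time_bins_size='1min', tz='US/Eastern')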
# from .metadata import legacy_id_to_member_mapping
def legacy_id_to_member_mapping(fileobject, time_bins_size='1min', tz='US/Eastern', fill_gaps=True):
"""Creates a mapping from badge id to member, for each time bin, from proximity data file.
Depending on the version of the logfile (and its content), it will either use the member_id
field to generate the mapping (newer versions), or calculate an ID from the MAC address (the
default behavior of older hubs and badges).
Parameters
----------
fileobject : file or iterable list of str
The proximity data, as an iterable of JSON strings.
time_bins_size : str
The size of the time bins used for resampling. Defaults to '1min'.
tz : str
The time zone used for localization of dates. Defaults to 'US/Eastern'.
fill_gaps : boolean
If True, the code will ensure that a value exists for every time bin by filling the gaps
with the last seen value.
Returns
-------
pd.Series :
A mapping from badge id to member, indexed by datetime and id.
"""
def readfile(fileobject):
no_id_warning = False
for line in fileobject:
data = json.loads(line)['data']
member_id = None
if 'member_id' in data:
member_id = data['member_id']
else:
member_id = mac_address_to_id(data['badge_address'])
if not no_id_warning:
print("Warning - no id provided in data. Calculating id from MAC address")
no_id_warning = True
yield (data['timestamp'],
member_id,
str(data['member']))
df = pd.DataFrame(readfile(fileobject), columns=['timestamp', 'id', 'member'])
# Convert the timestamp to a datetime, localized in UTC
df['datetime'] = pd.to_datetime(df['timestamp'], unit='s', utc=True) \
.dt.tz_convert(tz)
del df['timestamp']
# Group by id and resample
df = df.groupby([
pd.Grouper(freq = time_bins_size, key='datetime'),
'id'
]).first()
# Extract series
s = df.sort_index()['member']
# Fill in gaps, if requested to do so
if fill_gaps:
s = _id_to_member_mapping_fill_gaps(s, time_bins_size=time_bins_size)
return s
# from .metadata import voltages
def voltages(fileobject, time_bins_size='1min', tz='US/Eastern', skip_errors=False):
"""Creates a DataFrame of voltages, for each member and time bin.
Parameters
----------
fileobject : file or iterable list of str
The proximity data, as an iterable of JSON strings.
time_bins_size : str
The size of the time bins used for resampling. Defaults to '1min'.
tz : str
The time zone used for localization of dates. Defaults to 'US/Eastern'.
skip_errors : boolean
If set to True, skip errors in the data file
Returns
-------
pd.Series :
Voltages, indexed by datetime and member.
"""
def readfile(fileobject, skip_errors):
i = 0
for line in fileobject:
i = i + 1
try:
data = json.loads(line)['data']
yield (data['timestamp'],
str(data['member']),
float(data['voltage']))
except Exception:
print("Error in line#:", i, line)
if skip_errors:
continue
else:
raise
df = pd.DataFrame(readfile(fileobject, skip_errors), columns=['timestamp', 'member', 'voltage'])
# Convert the timestamp to a datetime, localized in UTC
df['datetime'] = pd.to_datetime(df['timestamp'], unit='s', utc=True) \
.dt.tz_convert(tz)
del df['timestamp']
# Group by id and resample
df = df.groupby([
pd.Grouper(freq=time_bins_size, key='datetime'),
'member'
]).mean()
df.sort_index(inplace=True)
return df['voltage']
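# Illustrative usage of voltages (a sketch; the file path is hypothetical):
#   with open('proximity/2017-07-29') as f:
#       v = voltages(f, time_bins_size='5min', skip_errors=True)
#   # v is a Series of mean battery voltages indexed by (datetime, member),
#   # handy for spotting badges that need recharging.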
# from .metadata import sample_counts
def sample_counts(fileobject, tz='US/Eastern', keep_type=False, skip_errors=False):
"""Creates a DataFrame of sample counts, for each member and raw record
Parameters
----------
fileobject : file or iterable list of str
The proximity or audio data, as an iterable of JSON strings.
tz : str
The time zone used for localization of dates. Defaults to 'US/Eastern'.
keep_type : boolean
If set to True, the type of the record will be returned as well
skip_errors : boolean
If set to True, skip errors in the data file
Returns
-------
pd.Series :
Counts, indexed by datetime, type and member.
"""
def readfile(fileobject, skip_errors=False):
i = 0
for line in fileobject:
i = i + 1
try:
raw_data = json.loads(line)
data = raw_data['data']
type = raw_data['type']
if type == 'proximity received':
cnt = len(data['rssi_distances'])
elif type == 'audio received':
cnt = len(data['samples'])
else:
cnt = -1
yield (data['timestamp'],
str(type),
str(data['member']),
int(cnt))
except Exception:
print("Error in line#:", i, line)
if skip_errors:
continue
else:
raise
df = pd.DataFrame(readfile(fileobject, skip_errors), columns=['timestamp' ,'type', 'member',
'cnt'])
# Convert the timestamp to a datetime, localized in UTC
df['datetime'] = pd.to_datetime(df['timestamp'], unit='s', utc=True) \
.dt.tz_convert(tz)
del df['timestamp']
if keep_type:
df.set_index(['datetime','type','member'],inplace=True)
else:
del df['type']
df.set_index(['datetime', 'member'], inplace=True)
df.sort_index(inplace=True)
return df
def _id_to_member_mapping_fill_gaps(idmap, time_bins_size='1min'):
""" Fill gaps in a idmap
Parameters
----------
idmap : id mapping object
time_bins_size : str
The size of the time bins used for resampling. Defaults to '1min'.
Returns
-------
pd.DataFrame :
idmap, after filling gaps.
"""
df = idmap.to_frame().reset_index()
df.set_index('datetime', inplace=True)
#df.index = pd.to_datetime(df.index,unit='s')
s = df.groupby(['id'])['member'].resample(time_bins_size).fillna(method='ffill')
s = s.reorder_levels((1,0)).sort_index()
return s
# from .proximity import member_to_badge_proximity
def member_to_badge_proximity(fileobject, time_bins_size='1min', tz='US/Eastern'):
"""Creates a member-to-badge proximity DataFrame from a proximity data file.
Parameters
----------
fileobject : file or iterable list of str
The proximity data, as an iterable of JSON strings.
time_bins_size : str
The size of the time bins used for resampling. Defaults to '1min'.
tz : str
The time zone used for localization of dates. Defaults to 'US/Eastern'.
Returns
-------
pd.DataFrame :
The member-to-badge proximity data.
"""
def readfile(fileobject):
for line in fileobject:
data = json.loads(line)['data']
for (observed_id, distance) in data['rssi_distances'].items():
yield (
data['timestamp'],
str(data['member']),
int(observed_id),
float(distance['rssi']),
| |
<filename>SIR2_DNN.py
"""
@author: LXA
Benchmark Code of SIR model
2020-11-13
"""
import os
import sys
import tensorflow as tf
import numpy as np
import time
import platform
import shutil
import DNN_base
import DNN_tools
import DNN_data
import plotData
import saveData
# Write some of the settings stored in the dictionary R to the log file
def dictionary_out2file(R_dic, log_fileout):
DNN_tools.log_string('Equation name for problem: %s\n' % (R_dic['eqs_name']), log_fileout)
DNN_tools.log_string('Network model of solving problem: %s\n' % str(R_dic['model']), log_fileout)
DNN_tools.log_string('activate function: %s\n' % str(R_dic['act_name']), log_fileout)
DNN_tools.log_string('hidden layers: %s\n' % str(R_dic['hidden_layers']), log_fileout)
DNN_tools.log_string('Init learning rate: %s\n' % str(R_dic['learning_rate']), log_fileout)
DNN_tools.log_string('Decay to learning rate: %s\n' % str(R_dic['lr_decay']), log_fileout)
DNN_tools.log_string('The type for Loss function: %s\n' % str(R_dic['loss_function']), log_fileout)
if (R_dic['optimizer_name']).title() == 'Adam':
DNN_tools.log_string('optimizer:%s\n' % str(R_dic['optimizer_name']), log_fileout)
else:
DNN_tools.log_string('optimizer:%s with momentum=%f\n' % (R_dic['optimizer_name'], R_dic['momentum']), log_fileout)
if R_dic['activate_stop'] != 0:
DNN_tools.log_string('activate the stop_step and given_step= %s\n' % str(R_dic['max_epoch']), log_fileout)
else:
DNN_tools.log_string('no activate the stop_step and given_step = default: %s\n' % str(R_dic['max_epoch']), log_fileout)
DNN_tools.log_string(
'Initial penalty for difference of predict and true: %s\n' % str(R_dic['init_penalty2predict_true']),
log_fileout)
DNN_tools.log_string('The model of regular weights and biases: %s\n' % str(R_dic['regular_weight_model']), log_fileout)
DNN_tools.log_string('Regularization parameter for weights and biases: %s\n' % str(R_dic['regular_weight']),
log_fileout)
DNN_tools.log_string('Size 2 training set: %s\n' % str(R_dic['size2train']), log_fileout)
DNN_tools.log_string('Batch-size 2 training: %s\n' % str(R_dic['batch_size2train']), log_fileout)
DNN_tools.log_string('Batch-size 2 testing: %s\n' % str(R_dic['batch_size2test']), log_fileout)
def print_and_log2train(i_epoch, run_time, tmp_lr, temp_penalty_nt, penalty_wb2s, penalty_wb2i, penalty_wb2r,
loss_s, loss_i, loss_r, loss_n, log_out=None):
print('train epoch: %d, time: %.3f' % (i_epoch, run_time))
print('learning rate: %f' % tmp_lr)
print('penalty for difference of predict and true : %f' % temp_penalty_nt)
print('penalty weights and biases for S: %f' % penalty_wb2s)
print('penalty weights and biases for I: %f' % penalty_wb2i)
print('penalty weights and biases for R: %f' % penalty_wb2r)
print('loss for S: %.16f' % loss_s)
print('loss for I: %.16f' % loss_i)
print('loss for R: %.16f' % loss_r)
print('total loss: %.16f\n' % loss_n)
DNN_tools.log_string('train epoch: %d,time: %.3f' % (i_epoch, run_time), log_out)
DNN_tools.log_string('learning rate: %f' % tmp_lr, log_out)
DNN_tools.log_string('penalty for difference of predict and true : %f' % temp_penalty_nt, log_out)
DNN_tools.log_string('penalty weights and biases for S: %f' % penalty_wb2s, log_out)
DNN_tools.log_string('penalty weights and biases for I: %f' % penalty_wb2i, log_out)
DNN_tools.log_string('penalty weights and biases for R: %.10f' % penalty_wb2r, log_out)
DNN_tools.log_string('loss for S: %.16f' % loss_s, log_out)
DNN_tools.log_string('loss for I: %.16f' % loss_i, log_out)
DNN_tools.log_string('loss for R: %.16f' % loss_r, log_out)
DNN_tools.log_string('total loss: %.16f \n\n' % loss_n, log_out)
def solve_SIR2COVID(R):
log_out_path = R['FolderName']  # extract the output path from the dictionary R
if not os.path.exists(log_out_path):  # check whether the path already exists
os.mkdir(log_out_path)  # if not, create the log_out_path directory
log_fileout = open(os.path.join(log_out_path, 'log_train.txt'), 'w')  # create and open a writable log_train.txt in this path
dictionary_out2file(R, log_fileout)
log2trianSolus = open(os.path.join(log_out_path, 'train_Solus.txt'), 'w')  # writable file for the training solutions
log2testSolus = open(os.path.join(log_out_path, 'test_Solus.txt'), 'w')  # writable file for the test solutions
log2testSolus2 = open(os.path.join(log_out_path, 'test_Solus_temp.txt'), 'w')  # writable file for intermediate test solutions
log2testParas = open(os.path.join(log_out_path, 'test_Paras.txt'), 'w')  # writable file for the test parameters
trainSet_szie = R['size2train']
train_size2batch = R['batch_size2train']
test_size2batch = R['batch_size2test']
pt_penalty_init = R['init_penalty2predict_true'] # Regularization parameter for difference of predict and true
wb_penalty = R['regular_weight'] # Regularization parameter for weights
lr_decay = R['lr_decay']
learning_rate = R['learning_rate']
act_func = R['act_name']
input_dim = R['input_dim']
out_dim = R['output_dim']
flag2S = 'WB2S'
flag2I = 'WB2I'
flag2R = 'WB2R'
flag2beta = 'WB2beta'
flag2gamma = 'WB2gamma'
hidden_layers = R['hidden_layers']
# Initializing with initialize_NN_xavier gave poor results, most likely because the biases are drawn from a uniform distribution
# Weight2S, Bias2S = DNN_base.initialize_NN_xavier(input_dim, out_dim, hidden_layers, flag2S)
# Weight2I, Bias2I = DNN_base.initialize_NN_xavier(input_dim, out_dim, hidden_layers, flag2I)
# Weight2R, Bias2R = DNN_base.initialize_NN_xavier(input_dim, out_dim, hidden_layers, flag2R)
# Weight2beta, Bias2beta = DNN_base.initialize_NN_xavier(input_dim, out_dim, hidden_layers, flag2beta)
# Weight2gamma, Bias2gamma = DNN_base.initialize_NN_xavier(input_dim, out_dim, hidden_layers, flag2gamma)
if R['model'] == 'DNN_Cos_C_Sin_Base':
Weight2S, Bias2S = DNN_base.initialize_NN_random_normal2_CS(input_dim, out_dim, hidden_layers, flag2S)
Weight2I, Bias2I = DNN_base.initialize_NN_random_normal2_CS(input_dim, out_dim, hidden_layers, flag2I)
Weight2R, Bias2R = DNN_base.initialize_NN_random_normal2_CS(input_dim, out_dim, hidden_layers, flag2R)
Weight2beta, Bias2beta = DNN_base.initialize_NN_random_normal2_CS(input_dim, out_dim, hidden_layers, flag2beta)
Weight2gamma, Bias2gamma = DNN_base.initialize_NN_random_normal2_CS(input_dim, out_dim, hidden_layers, flag2gamma)
else:
Weight2S, Bias2S = DNN_base.initialize_NN_random_normal2(input_dim, out_dim, hidden_layers, flag2S)
Weight2I, Bias2I = DNN_base.initialize_NN_random_normal2(input_dim, out_dim, hidden_layers, flag2I)
Weight2R, Bias2R = DNN_base.initialize_NN_random_normal2(input_dim, out_dim, hidden_layers, flag2R)
Weight2beta, Bias2beta = DNN_base.initialize_NN_random_normal2(input_dim, out_dim, hidden_layers, flag2beta)
Weight2gamma, Bias2gamma = DNN_base.initialize_NN_random_normal2(input_dim, out_dim, hidden_layers, flag2gamma)
global_steps = tf.Variable(0, trainable=False)
with tf.device('/gpu:%s' % (R['gpuNo'])):
with tf.variable_scope('vscope', reuse=tf.AUTO_REUSE):
T_it = tf.placeholder(tf.float32, name='T_it', shape=[None, input_dim])
I_observe = tf.placeholder(tf.float32, name='I_observe', shape=[None, input_dim])
N_observe = tf.placeholder(tf.float32, name='N_observe', shape=[None, input_dim])
predict_true_penalty = tf.placeholder_with_default(input=1e3, shape=[], name='bd_p')
in_learning_rate = tf.placeholder_with_default(input=1e-5, shape=[], name='lr')
train_opt = tf.placeholder_with_default(input=True, shape=[], name='train_opt')
if 'PDE_DNN' == str.upper(R['model']):
S_NN_temp = DNN_base.PDE_DNN(T_it, Weight2S, Bias2S, hidden_layers, activate_name=act_func)
I_NN_temp = DNN_base.PDE_DNN(T_it, Weight2I, Bias2I, hidden_layers, activate_name=act_func)
R_NN_temp = DNN_base.PDE_DNN(T_it, Weight2R, Bias2R, hidden_layers, activate_name=act_func)
in_beta = DNN_base.PDE_DNN(T_it, Weight2beta, Bias2beta, hidden_layers, activate_name=act_func)
in_gamma = DNN_base.PDE_DNN(T_it, Weight2gamma, Bias2gamma, hidden_layers, activate_name=act_func)
elif 'PDE_DNN_Fourier' == R['model']:
S_NN_temp = DNN_base.DNN_Fourier_Base(T_it, Weight2S, Bias2S, hidden_layers, activate_name=act_func)
I_NN_temp = DNN_base.DNN_Fourier_Base(T_it, Weight2I, Bias2I, hidden_layers, activate_name=act_func)
R_NN_temp = DNN_base.DNN_Fourier_Base(T_it, Weight2R, Bias2R, hidden_layers, activate_name=act_func)
in_beta = DNN_base.DNN_Fourier_Base(T_it, Weight2beta, Bias2beta, hidden_layers, activate_name=act_func)
in_gamma = DNN_base.DNN_Fourier_Base(T_it, Weight2gamma, Bias2gamma, hidden_layers, activate_name=act_func)
elif 'DNN_Cos_C_Sin_Base' == R['model']:
freq = [1]
S_NN_temp = DNN_base.DNN_Cos_C_Sin_Base(T_it, Weight2S, Bias2S, hidden_layers, freq_frag=freq, activate_name=act_func)
I_NN_temp = DNN_base.DNN_Cos_C_Sin_Base(T_it, Weight2I, Bias2I, hidden_layers, freq_frag=freq, activate_name=act_func)
R_NN_temp = DNN_base.DNN_Cos_C_Sin_Base(T_it, Weight2R, Bias2R, hidden_layers, freq_frag=freq, activate_name=act_func)
in_beta = DNN_base.DNN_Cos_C_Sin_Base(T_it, Weight2beta, Bias2beta, hidden_layers, freq_frag=freq, activate_name=act_func)
in_gamma = DNN_base.DNN_Cos_C_Sin_Base(T_it, Weight2gamma, Bias2gamma, hidden_layers, freq_frag=freq, activate_name=act_func)
elif 'PDE_DNN_BN' == str.upper(R['model']):
S_NN_temp = DNN_base.PDE_DNN_BN(T_it, Weight2S, Bias2S, hidden_layers, activate_name=act_func, is_training=train_opt)
I_NN_temp = DNN_base.PDE_DNN_BN(T_it, Weight2I, Bias2I, hidden_layers, activate_name=act_func, is_training=train_opt)
R_NN_temp = DNN_base.PDE_DNN_BN(T_it, Weight2R, Bias2R, hidden_layers, activate_name=act_func, is_training=train_opt)
in_beta = DNN_base.PDE_DNN_BN(T_it, Weight2beta, Bias2beta, hidden_layers, activate_name=act_func, is_training=train_opt)
in_gamma = DNN_base.PDE_DNN_BN(T_it, Weight2gamma, Bias2gamma, hidden_layers, activate_name=act_func, is_training=train_opt)
elif 'PDE_DNN_SCALEOUT' == str.upper(R['model']):
freq = np.concatenate(([1], np.arange(1, 20)), axis=0)
S_NN_temp = DNN_base.PDE_DNN_scaleOut(T_it, Weight2S, Bias2S, hidden_layers, freq, activate_name=act_func)
I_NN_temp = DNN_base.PDE_DNN_scaleOut(T_it, Weight2I, Bias2I, hidden_layers, freq, activate_name=act_func)
R_NN_temp = DNN_base.PDE_DNN_scaleOut(T_it, Weight2R, Bias2R, hidden_layers, freq, activate_name=act_func)
in_beta = DNN_base.PDE_DNN_scaleOut(T_it, Weight2beta, Bias2beta, hidden_layers, freq, activate_name=act_func)
in_gamma = DNN_base.PDE_DNN_scaleOut(T_it, Weight2gamma, Bias2gamma, hidden_layers, freq, activate_name=act_func)
# Remark: beta, gamma, S_NN, I_NN and R_NN should all be positive. beta lies roughly between 1 and 15 and gamma in (0,1); with normalization, S_NN, I_NN and R_NN all lie in [0,1).
# beta = tf.nn.relu(in_beta)
# gamma = tf.nn.relu(in_gamma)
# beta = tf.exp(in_beta)
# gamma = tf.exp(in_gamma)
# beta = tf.abs(in_beta)
# gamma = tf.abs(in_gamma)
# beta = tf.square(in_beta)
# gamma = tf.square(in_gamma)
# beta = tf.sqrt(tf.square(in_beta))
# gamma = tf.sqrt(tf.square(in_gamma))
# beta = tf.nn.sigmoid(in_beta)
beta = tf.exp(in_beta)
gamma = tf.nn.sigmoid(in_gamma)
# gamma = DNN_base.gauss(in_gamma)
# S_NN = DNN_base.srelu(S_NN_temp)
# I_NN = DNN_base.srelu(I_NN_temp)
# R_NN = DNN_base.srelu(R_NN_temp)
# S_NN = DNN_base.asrelu(S_NN_temp)
# I_NN = DNN_base.asrelu(I_NN_temp)
# R_NN = DNN_base.asrelu(R_NN_temp)
# S_NN = tf.nn.relu(S_NN_temp)
# I_NN = tf.nn.relu(0.1*I_NN_temp)
# R_NN = tf.nn.relu(0.01*R_NN_temp)
# S_NN = tf.abs(S_NN_temp)
# I_NN = tf.abs(I_NN_temp)
# R_NN = tf.abs(R_NN_temp)
# S_NN = DNN_base.gauss(S_NN_temp)
# I_NN = tf.square(I_NN_temp)
# R_NN = tf.square(R_NN_temp)
# S_NN = DNN_base.gauss(S_NN_temp)
# I_NN = tf.square(I_NN_temp)
# R_NN = tf.nn.sigmoid(R_NN_temp)
# S_NN = DNN_base.gauss(S_NN_temp)
# I_NN = tf.nn.sigmoid(I_NN_temp)
# R_NN = tf.square(R_NN_temp)
# S_NN = tf.sqrt(tf.square(S_NN_temp))
# I_NN = tf.sqrt(tf.square(I_NN_temp))
# R_NN = tf.sqrt(tf.square(R_NN_temp))
S_NN = tf.nn.sigmoid(S_NN_temp)
I_NN = tf.nn.sigmoid(I_NN_temp)
R_NN = tf.nn.sigmoid(R_NN_temp)
# S_NN = DNN_base.gauss(S_NN_temp)
# I_NN = DNN_base.gauss(I_NN_temp)
# R_NN = DNN_base.gauss(R_NN_temp)
dS_NN2t = tf.gradients(S_NN, T_it)[0]
dI_NN2t = tf.gradients(I_NN, T_it)[0]
dR_NN2t = tf.gradients(R_NN, T_it)[0]
temp_snn2t = -beta*S_NN*I_NN
temp_inn2t = beta*S_NN*I_NN - gamma * I_NN
temp_rnn2t = gamma * I_NN
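# For reference, the three residual targets above encode the classic SIR system
# with normalized populations, where beta and gamma are the learned (time-dependent)
# infection and recovery rates:
#   dS/dt = -beta * S * I
#   dI/dt =  beta * S * I - gamma * I
#   dR/dt =  gamma * I
# The ODE losses below penalize the mismatch between the network derivatives
# (dS_NN2t, dI_NN2t, dR_NN2t) and these right-hand sides.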
if str.lower(R['loss_function']) == 'l2_loss':
# LossS_Net_obs = tf.reduce_mean(tf.square(S_NN - S_observe))
LossI_Net_obs = tf.reduce_mean(tf.square(I_NN - I_observe))
# LossR_Net_obs = tf.reduce_mean(tf.square(R_NN - R_observe))
Loss2dS = tf.reduce_mean(tf.square(dS_NN2t - temp_snn2t))
Loss2dI = tf.reduce_mean(tf.square(dI_NN2t - temp_inn2t))
Loss2dR = tf.reduce_mean(tf.square(dR_NN2t - temp_rnn2t))
elif str.lower(R['loss_function']) == 'lncosh_loss':
# LossS_Net_obs = tf.reduce_mean(tf.ln(tf.cosh(S_NN - S_observe)))
LossI_Net_obs = tf.reduce_mean(tf.log(tf.cosh(I_NN - I_observe)))
# LossR_Net_obs = tf.reduce_mean(tf.log(tf.cosh(R_NN - R_observe)))
Loss2dS = tf.reduce_mean(tf.log(tf.cosh(dS_NN2t - temp_snn2t)))
Loss2dI = tf.reduce_mean(tf.log(tf.cosh(dI_NN2t - temp_inn2t)))
Loss2dR = tf.reduce_mean(tf.log(tf.cosh(dR_NN2t - temp_rnn2t)))
if R['regular_weight_model'] == 'L1':
regular_WB2S = DNN_base.regular_weights_biases_L1(Weight2S, Bias2S)
regular_WB2I = DNN_base.regular_weights_biases_L1(Weight2I, Bias2I)
regular_WB2R = DNN_base.regular_weights_biases_L1(Weight2R, Bias2R)
elif R['regular_weight_model'] == 'L2':
regular_WB2S = DNN_base.regular_weights_biases_L2(Weight2S, Bias2S)
regular_WB2I = DNN_base.regular_weights_biases_L2(Weight2I, Bias2I)
regular_WB2R = DNN_base.regular_weights_biases_L2(Weight2R, Bias2R)
else:
regular_WB2S = tf.constant(0.0)
regular_WB2I = tf.constant(0.0)
regular_WB2R = tf.constant(0.0)
PWB2S = wb_penalty*regular_WB2S
PWB2I = wb_penalty*regular_WB2I
PWB2R = wb_penalty*regular_WB2R
Loss2S = Loss2dS + PWB2S
Loss2I = predict_true_penalty * LossI_Net_obs + Loss2dI + PWB2I
Loss2R = Loss2dR + PWB2R
my_optimizer = tf.train.AdamOptimizer(in_learning_rate)
train_Loss2S = my_optimizer.minimize(Loss2S, global_step=global_steps)
train_Loss2I = my_optimizer.minimize(Loss2I, global_step=global_steps)
train_Loss2R = my_optimizer.minimize(Loss2R, global_step=global_steps)
train_Loss = tf.group(train_Loss2S, train_Loss2I, train_Loss2R)
t0 = time.time()
loss_s_all, loss_i_all, loss_r_all, loss_n_all = [], [], [], []
test_epoch = []
test_mse2I_all, test_rel2I_all = [], []
# filename = 'data2csv/Italia_data.csv'
filename = 'data2csv/Korea_data.csv'
date, data = DNN_data.load_csvData(filename)
assert(trainSet_szie + test_size2batch <= len(data))
train_date, train_data, test_date, test_data = \
DNN_data.split_csvData2train_test(date, data, size2train=trainSet_szie, normalFactor=R['scale_population'])
if R['scale_population'] != 1:
Have_normal = True
NormalFactor = 1.0
else:
Have_normal = False
NormalFactor = | |
<reponame>hkmoon/ms_deisotope<filename>ms_deisotope/data_source/thermo_raw_net.py
'''Thermo RAW file reading implementation using the pure .NET
RawFileReader library released in 2017.
This module provides :class:`ThermoRawLoader`, a :class:`~.RandomAccessScanSource`
implementation.
Depends upon the ``pythonnet`` project which provides the :mod:`clr`
module, enabling nearly seamless interoperation with the Common Language
Runtime.
The public interface of this module should be identical to
:mod:`ms_deisotope.data_source.thermo_raw`.
.. note::
This interface was largely based upon the APIs that ProteoWizard used, both
in order to understand how the Thermo libraries really worked, and to maintain
parity with it.
'''
import sys
import os
from collections import OrderedDict
import numpy as np
from pyteomics.auxiliary import unitfloat
from six import string_types as basestring
from ms_peak_picker import PeakSet, PeakIndex, simple_peak
from ms_deisotope.data_source.common import (
PrecursorInformation, ChargeNotProvided, Scan,
ActivationInformation, MultipleActivationInformation,
IsolationWindow, ScanDataSource, ScanEventInformation,
ScanAcquisitionInformation, ScanWindow, RandomAccessScanSource)
from ms_deisotope.data_source._thermo_helper import (
_InstrumentMethod, ThermoRawScanPtr, FilterString,
_make_id, _id_template, _RawFileMetadataLoader, analyzer_map)
from ms_deisotope.data_source.metadata.activation import (
supplemental_term_map, dissociation_methods_map)
from ms_deisotope.data_source.metadata.sample import Sample
from ms_deisotope.data_source.metadata.scan_traits import FAIMS_compensation_voltage
def _try_number(string):
try:
x = float(string)
return x
except (TypeError, ValueError):
return string
_DEFAULT_DLL_PATH = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
"_vendor",
"ThermoRawFileReader_3_0_41",
"Libraries")
# late binding imports
Business = None
_RawFileReader = None
clr = None
NullReferenceException = Exception
Marshal = None
IntPtr = None
Int64 = None
def is_thermo_raw_file(path):
'''Detect whether or not the file referenced by ``path``
is a Thermo RAW file.
Parameters
----------
path: :class:`str`
The path to test
Returns
-------
:class:`bool`:
Whether or not the file is a Thermo RAW file.
'''
if not _test_dll_loaded():
try:
register_dll()
except ImportError:
return False
try:
source = _RawFileReader.RawFileReaderAdapter.FileFactory(path)
source.SelectInstrument(Business.Device.MS, 1)
return True
except NullReferenceException: # pylint: disable=broad-except
return False
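# Illustrative usage of the detection helpers (a sketch; the path is hypothetical
# and the ThermoRawLoader constructor arguments are assumed):
#   if is_thermo_raw_file('run01.raw'):
#       reader_cls = infer_reader('run01.raw')   # ThermoRawLoader
#       reader = reader_cls('run01.raw')
#   # is_thermo_raw_file registers the ThermoFisher.CommonCore assemblies on first
#   # use, so they must be locatable via the DLL search paths described below.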
def infer_reader(path):
'''If the file referenced by ``path`` is a Thermo RAW
file, return the callable (:class:`ThermoRawLoader`) to
open it, otherwise raise an exception.
Parameters
----------
path: :class:`str`
The path to test
Returns
-------
:class:`type`:
The type to use to open the file
Raises
------
:class:`ValueError`:
If the file is not a Thermo RAW file
'''
if is_thermo_raw_file(path):
return ThermoRawLoader
raise ValueError("Not Thermo Raw File")
def determine_if_available():
'''Checks whether or not the .NET-based Thermo
RAW file reading feature is available.
Returns
-------
:class:`bool`:
Whether or not the feature is enabled.
'''
try:
return _register_dll([_DEFAULT_DLL_PATH])
except (OSError, ImportError):
return False
def _register_dll(search_paths=None):
'''Start the Common Language Runtime interop service by importing
the :mod:`clr` module from Pythonnet, and then populate the global
names referring to .NET entities, and finally attempt to locate the
ThermoRawFileReader DLLs by searching along ``search_paths``.
Parameters
----------
search_paths: list
The paths to check along for the ThermoRawFileReader DLL bundle.
Returns
-------
:class:`bool`:
Whether or not the .NET library successfully loaded
'''
from ms_deisotope.config import get_config
if search_paths is None:
search_paths = []
search_paths = list(search_paths)
search_paths.append(_DEFAULT_DLL_PATH)
# Take user-specified search paths first.
search_paths = get_config().get('vendor_readers', {}).get('thermo-net', []) + search_paths
global _RawFileReader, Business, clr, NullReferenceException # pylint: disable=global-statement
global Marshal, IntPtr, Int64 # pylint: disable=global-statement
if _test_dll_loaded():
return True
try:
import clr # pylint: disable=redefined-outer-name
from System import NullReferenceException # pylint: disable=redefined-outer-name
clr.AddReference("System.Runtime")
clr.AddReference("System.Runtime.InteropServices")
from System import IntPtr, Int64 # pylint: disable=redefined-outer-name
from System.Runtime.InteropServices import Marshal # pylint: disable=redefined-outer-name
except ImportError:
return False
for path in search_paths:
sys.path.append(path)
try:
clr.AddReference('ThermoFisher.CommonCore.RawFileReader')
clr.AddReference('ThermoFisher.CommonCore.Data')
except OSError:
continue
try:
import ThermoFisher.CommonCore.Data.Business as Business # pylint: disable=redefined-outer-name
import ThermoFisher.CommonCore.RawFileReader as _RawFileReader # pylint: disable=redefined-outer-name
except ImportError:
continue
return _test_dll_loaded()
def register_dll(search_paths=None):
'''Register the location of the Thermo RawFileReader DLL bundle with
the Common Language Runtime interop system and load the .NET symbols
used by this feature.
Parameters
----------
search_paths: list
The paths to check along for the ThermoRawFileReader DLL bundle.
'''
if search_paths is None:
search_paths = []
loaded = _register_dll(search_paths)
if not loaded:
msg = '''The ThermoFisher.CommonCore libraries could not be located and loaded.'''
raise ImportError(msg)
def _test_dll_loaded():
return _RawFileReader is not None
def _copy_double_array(src):
'''A quick and dirty implementation of the fourth technique shown in
https://mail.python.org/pipermail/pythondotnet/2014-May/001525.html for
copying a .NET Array[Double] to a NumPy ndarray[np.float64] via a raw
memory copy.
The pointer to the destination buffer is wrapped in a .NET ``IntPtr`` (via the
``Int64`` overload) so that ``Marshal.Copy`` can write directly into the NumPy array's memory.
'''
# When the input .NET array pointer is None, return an empty array. On Py2
# this would happen automatically, but not on Py3, and perhaps not safely on
# all Py2 because it relies on pythonnet and the .NET runtime properly checking
# for nulls.
if src is None:
return np.array([], dtype=np.float64)
dest = np.empty(len(src), dtype=np.float64)
Marshal.Copy(
src, 0,
IntPtr.__overloads__[Int64](dest.__array_interface__['data'][0]),
len(src))
return dest
class RawReaderInterface(ScanDataSource):
''':class:`~.ScanDataSource` implementation for Thermo's RawFileReader API.
Not intended for direct instantiation.
'''
def _scan_arrays(self, scan):
scan_number = scan.scan_number + 1
stats = self._source.GetScanStatsForScanNumber(scan_number)
segscan = self._source.GetSegmentedScanFromScanNumber(scan_number, stats)
mzs = _copy_double_array(segscan.Positions)
inten = _copy_double_array(segscan.Intensities)
return mzs, inten
def _pick_peaks_vendor(self, scan, *args, **kwargs):
scan_info = Business.Scan.FromFile(self._source, scan.scan_number + 1)
if scan_info.HasCentroidStream:
stream = self._source.GetCentroidStream(scan.scan_number + 1, 0)
mzs = stream.Masses
intens = stream.Intensities
peaks = PeakSet([simple_peak(mzs[i], intens[i], 0.001) for i in range(len(mzs))])
peaks.reindex()
arrays = self._scan_arrays(scan)
return PeakIndex(arrays[0], arrays[1], peaks)
else:
raise NotImplementedError()
def _scan_id(self, scan):
scan_number = scan.scan_number
return _make_id(scan_number + 1)
def _is_profile(self, scan):
return not self._source.IsCentroidScanFromScanNumber(
scan.scan_number + 1)
def _polarity(self, scan):
filter_string = self._filter_string(scan)
return filter_string.data['polarity']
def _scan_title(self, scan):
return "%s %r" % (self._scan_id(scan), self._filter_string(scan))
def _filter_string(self, scan):
if scan.filter_string is None:
scan_number = scan.scan_number
scan.filter_string = FilterString(self._source.GetFilterForScanNumber(scan_number + 1).Filter)
return scan.filter_string
def _scan_index(self, scan):
scan_number = scan.scan_number
return scan_number
def _scan_time(self, scan):
scan_number = scan.scan_number
return self._source.RetentionTimeFromScanNumber(scan_number + 1)
def _ms_level(self, scan):
scan_number = scan.scan_number
f = self._source.GetFilterForScanNumber(scan_number + 1)
return f.MSOrder
def _isolation_window(self, scan):
scan_number = scan.scan_number
ms_level = self._ms_level(scan)
width = 0
trailer = self._trailer_values(scan)
filt = self._source.GetFilterForScanNumber(scan_number + 1)
seq_index = filt.MSOrder - 2
try:
# Fetch the isolation window width from the old location first, which
# will be correct on old files, where the new API won't be right.
width = trailer['MS%d Isolation Width' % ms_level]
except KeyError:
# Fall back to the new API, which is the only option on newer files.
width = filt.GetIsolationWidth(seq_index)
width /= 2.0
offset = filt.GetIsolationWidthOffset(seq_index)
precursor_mz = filt.GetMass(seq_index)
return IsolationWindow(width, precursor_mz + offset, width)
def _trailer_values(self, scan):
if scan.trailer_values is not None:
return scan.trailer_values
scan_number = scan.scan_number
trailers = self._source.GetTrailerExtraInformation(scan_number + 1)
scan.trailer_values = OrderedDict(
zip([label.strip(":") for label in trailers.Labels], map(_try_number, trailers.Values)))
return scan.trailer_values
def _infer_precursor_scan_number(self, scan):
precursor_scan_number = None
last_index = self._scan_index(scan) - 1
current_level = self._ms_level(scan)
i = 0
while last_index >= 0 and i < 100:
prev_scan = self.get_scan_by_index(last_index)
if prev_scan.ms_level >= current_level:
last_index -= 1
else:
precursor_scan_number = prev_scan._data.scan_number
break
i += 1
return precursor_scan_number
def _precursor_information(self, scan):
scan_number = scan.scan_number
filt = self._source.GetFilterForScanNumber(scan_number + 1)
precursor_mz = filt.GetMass(filt.MSOrder - 2)
trailers = self._trailer_values(scan)
_precursor_mz = float(trailers.get("Monoisotopic M/Z", 0))
if _precursor_mz > 0:
precursor_mz = _precursor_mz
# imitate proteowizard's firmware bug correction
isolation_window = self._isolation_window(scan)
if (isolation_window.upper + isolation_window.lower) / 2 <= 2.0:
if (isolation_window.target - 3.0 > precursor_mz) or (isolation_window.target + 2.5 < precursor_mz):
precursor_mz = isolation_window.target
elif precursor_mz not in isolation_window:
precursor_mz = isolation_window.target
charge = int(trailers.get("Charge State", 0))
if charge == 0:
charge = ChargeNotProvided
inten = 0
precursor_scan_number = None
precursor_scan_number = trailers.get('Master Scan Number')
if precursor_scan_number == 0:
precursor_scan_number = None
if precursor_scan_number is not None:
precursor_scan_number = int(precursor_scan_number) - 1
elif precursor_scan_number is None:
# Walk backwards through the preceding scans to find the parent survey scan;
# this duplicates the loop in _infer_precursor_scan_number, so reuse the helper.
precursor_scan_number = self._infer_precursor_scan_number(scan)
if precursor_scan_number is not None:
precursor_scan_id = self.get_scan_by_index(precursor_scan_number).id
else:
import warnings
warnings.warn("Could not resolve precursor scan for %s" % (self._scan_id(scan), ))
precursor_scan_id = None
return PrecursorInformation(
precursor_mz, inten, charge, precursor_scan_id,
source=self, product_scan_id=self._scan_id(scan))
def _get_scan_segment(self, scan):
trailer = self._trailer_values(scan)
try:
return int(trailer['Scan Segment'])
except KeyError:
return 1
def _get_scan_event(self, scan):
trailer = self._trailer_values(scan)
try:
return int(trailer['Scan Event'])
except KeyError:
return 1
def _activation(self, scan):
filter_string = self._filter_string(scan)
tandem_sequence = filter_string.get("tandem_sequence")
# If the tandem sequence exists, the last entry is the most recent tandem acquisition.
# It will list contain one or more activation types. Alternatively, multiple activations
# of the same precursor may exist in the list | |
# Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 <NAME>. All rights reserved.
# Copyright (c) 2014 <NAME>. All rights reserved.
# Copyright (c) 2014 <NAME>. All rights reserved.
# Copyright (c) 2014 <NAME>. All rights reserved.
# Copyright (c) 2014 <NAME>. All rights reserved.
# Copyright (c) 2015 <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp NFS storage.
"""
import os
from oslo_log import log as logging
from oslo_log import versionutils
import six
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder.objects import fields
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import client_7mode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode
from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
@six.add_metaclass(utils.TraceWrapperWithABCMetaclass)
@interface.volumedriver
class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
"""NetApp NFS driver for Data ONTAP (7-mode)."""
# ThirdPartySystems wiki page
CI_WIKI_NAME = "NetApp_CI"
def __init__(self, *args, **kwargs):
super(NetApp7modeNfsDriver, self).__init__(*args, **kwargs)
self.driver_name = 'NetApp_NFS_7mode_direct'
self.driver_mode = '7mode'
self.configuration.append_config_values(na_opts.netapp_7mode_opts)
def do_setup(self, context):
"""Do the customized set up on client if any for 7 mode."""
super(NetApp7modeNfsDriver, self).do_setup(context)
self.zapi_client = client_7mode.Client(
transport_type=self.configuration.netapp_transport_type,
username=self.configuration.netapp_login,
password=self.configuration.netapp_password,
hostname=self.configuration.netapp_server_hostname,
port=self.configuration.netapp_server_port,
vfiler=self.configuration.netapp_vfiler)
self.perf_library = perf_7mode.Performance7modeLibrary(
self.zapi_client)
# This driver has been marked 'deprecated' in the Ocata release and
# can be removed in Queens.
msg = _("The 7-mode Data ONTAP driver is deprecated and will be "
"removed in a future release.")
versionutils.report_deprecated_feature(LOG, msg)
def check_for_setup_error(self):
"""Checks if setup occurred properly."""
api_version = self.zapi_client.get_ontapi_version()
if api_version:
major, minor = api_version
if major == 1 and minor < 9:
msg = _("Unsupported Data ONTAP version."
" Data ONTAP version 7.3.1 and above is supported.")
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _("Data ONTAP API version could not be determined.")
raise exception.VolumeBackendAPIException(data=msg)
self._add_looping_tasks()
super(NetApp7modeNfsDriver, self).check_for_setup_error()
def _add_looping_tasks(self):
"""Add tasks that need to be executed at a fixed interval."""
super(NetApp7modeNfsDriver, self)._add_looping_tasks()
def _handle_ems_logging(self):
"""Log autosupport messages."""
base_ems_message = dot_utils.build_ems_log_message_0(
self.driver_name, self.app_version, self.driver_mode)
self.zapi_client.send_ems_log_message(base_ems_message)
pool_ems_message = dot_utils.build_ems_log_message_1(
self.driver_name, self.app_version, None,
self._get_backing_flexvol_names(), [])
self.zapi_client.send_ems_log_message(pool_ems_message)
def _clone_backing_file_for_volume(self, volume_name, clone_name,
volume_id, share=None,
is_snapshot=False,
source_snapshot=None):
"""Clone backing file for Cinder volume.
:param: is_snapshot Not used, present for method signature consistency
"""
(_host_ip, export_path) = self._get_export_ip_path(volume_id, share)
storage_path = self.zapi_client.get_actual_path_for_export(export_path)
target_path = '%s/%s' % (storage_path, clone_name)
self.zapi_client.clone_file('%s/%s' % (storage_path, volume_name),
target_path, source_snapshot)
def _update_volume_stats(self):
"""Retrieve stats info from vserver."""
self._ensure_shares_mounted()
LOG.debug('Updating volume stats')
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.driver_name
data['vendor_name'] = 'NetApp'
data['driver_version'] = self.VERSION
data['storage_protocol'] = 'nfs'
data['pools'] = self._get_pool_stats(
filter_function=self.get_filter_function(),
goodness_function=self.get_goodness_function())
data['sparse_copy_volume'] = True
self._spawn_clean_cache_job()
self._stats = data
def _get_pool_stats(self, filter_function=None, goodness_function=None):
"""Retrieve pool (i.e. NFS share) stats info from SSC volumes."""
pools = []
self.perf_library.update_performance_cache()
for nfs_share in self._mounted_shares:
capacity = self._get_share_capacity_info(nfs_share)
pool = dict()
pool['pool_name'] = nfs_share
pool['QoS_support'] = False
pool['multiattach'] = False
pool.update(capacity)
thick = not self.configuration.nfs_sparsed_volumes
pool['thick_provisioning_support'] = thick
pool['thin_provisioning_support'] = not thick
utilization = self.perf_library.get_node_utilization()
pool['utilization'] = na_utils.round_down(utilization, '0.01')
pool['filter_function'] = filter_function
pool['goodness_function'] = goodness_function
pool['consistencygroup_support'] = True
pools.append(pool)
return pools
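# For orientation, each entry of the 'pools' list above is a plain dict keyed by
# the NFS share name, roughly of the form (illustrative values only; the share
# address and figures are hypothetical):
#   {'pool_name': '192.0.2.10:/vol/cinder', 'QoS_support': False,
#    'multiattach': False, 'thick_provisioning_support': True,
#    'thin_provisioning_support': False, 'utilization': 42.0,
#    'consistencygroup_support': True, ...capacity keys from
#    _get_share_capacity_info()...}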
def _shortlist_del_eligible_files(self, share, old_files):
"""Prepares list of eligible files to be deleted from cache."""
file_list = []
(_, export_path) = self._get_export_ip_path(share=share)
exported_volume = self.zapi_client.get_actual_path_for_export(
export_path)
for old_file in old_files:
path = os.path.join(exported_volume, old_file)
u_bytes = self.zapi_client.get_file_usage(path)
file_list.append((old_file, u_bytes))
LOG.debug('Shortlisted files eligible for deletion: %s', file_list)
return file_list
def _is_filer_ip(self, ip):
"""Checks whether ip is on the same filer."""
try:
ifconfig = self.zapi_client.get_ifconfig()
if_info = ifconfig.get_child_by_name('interface-config-info')
if if_info:
ifs = if_info.get_children()
for intf in ifs:
v4_addr = intf.get_child_by_name('v4-primary-address')
if v4_addr:
ip_info = v4_addr.get_child_by_name('ip-address-info')
if ip_info:
address = ip_info.get_child_content('address')
if ip == address:
return True
else:
continue
except Exception:
return False
return False
def _share_match_for_ip(self, ip, shares):
"""Returns the share that is served by ip.
Multiple shares can have same dir path but
can be served using different ips. It finds the
share which is served by ip on same nfs server.
"""
if self._is_filer_ip(ip) and shares:
for share in shares:
ip_sh = share.split(':')[0]
if self._is_filer_ip(ip_sh):
LOG.debug('Share match found for ip %s', ip)
return share
LOG.debug('No share match found for ip %s', ip)
return None
def _is_share_clone_compatible(self, volume, share):
"""Checks if share is compatible with volume to host its clone."""
thin = self.configuration.nfs_sparsed_volumes
return self._share_has_space_for_clone(share, volume['size'], thin)
def _check_volume_type(self, volume, share, file_name, extra_specs):
"""Matches a volume type for share file."""
qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
if extra_specs else None
if qos_policy_group:
raise exception.ManageExistingVolumeTypeMismatch(
reason=(_("Setting file qos policy group is not supported"
" on this storage family and ontap version.")))
volume_type = na_utils.get_volume_type_from_volume(volume)
if volume_type and 'qos_spec_id' in volume_type:
raise exception.ManageExistingVolumeTypeMismatch(
reason=_("QoS specs are not supported"
" on this storage family and ONTAP version."))
def _do_qos_for_volume(self, volume, extra_specs, cleanup=False):
"""Set QoS policy on backend from volume type information."""
# 7-mode DOT does not support QoS.
return
def _get_volume_model_update(self, volume):
"""Provide any updates necessary for a volume being created/managed."""
def _get_backing_flexvol_names(self):
"""Returns a list of backing flexvol names."""
flexvol_names = []
for nfs_share in self._mounted_shares:
flexvol_name = nfs_share.rsplit('/', 1)[1]
flexvol_names.append(flexvol_name)
LOG.debug("Found flexvol %s", flexvol_name)
return flexvol_names
def _get_flexvol_names_from_hosts(self, hosts):
"""Returns a set of flexvol names."""
flexvols = set()
for host in hosts:
pool_name = volume_utils.extract_host(host, level='pool')
flexvol_name = pool_name.rsplit('/', 1)[1]
flexvols.add(flexvol_name)
return flexvols
@utils.trace_method
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Delete files backing each snapshot in the cgsnapshot.
:return: An implicit update of snapshot models that the manager will
interpret and subsequently set the model state to deleted.
"""
for snapshot in snapshots:
self._delete_file(snapshot['volume_id'], snapshot['name'])
LOG.debug("Snapshot %s deletion successful", snapshot['name'])
return None, None
@utils.trace_method
def create_consistencygroup(self, context, group):
"""Driver entry point for creating a consistency group.
ONTAP does not maintain an actual CG construct. As a result, no
communication to the backend is necessary for consistency group
creation.
:returns: Hard-coded model update for consistency group model.
"""
model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
return model_update
@utils.trace_method
def delete_consistencygroup(self, context, group, volumes):
"""Driver entry point for deleting a consistency group.
:returns: Updated consistency group model and list of volume models
for the volumes that were deleted.
"""
model_update = {'status': fields.ConsistencyGroupStatus.DELETED}
volumes_model_update = []
for volume in volumes:
try:
self._delete_file(volume['id'], volume['name'])
volumes_model_update.append(
{'id': volume['id'], 'status': 'deleted'})
except Exception:
volumes_model_update.append(
{'id': volume['id'],
'status': 'error_deleting'})
LOG.exception("Volume %(vol)s in the consistency group "
"could not be deleted.", {'vol': volume})
return model_update, volumes_model_update
@utils.trace_method
def update_consistencygroup(self, context, group, add_volumes=None,
remove_volumes=None):
"""Driver entry point for updating a consistency group.
Since no actual CG construct is ever created in ONTAP, it is not
necessary to update any metadata on the backend. Since this is a NO-OP,
there is guaranteed to be no change in any of the volumes' statuses.
"""
return None, None, None
@utils.trace_method
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Creates a Cinder cgsnapshot object.
The Cinder cgsnapshot object is created by making use of an ONTAP CG
snapshot in order to provide write-order consistency for a set of
backing flexvols. First, a list of the flexvols backing the given
Cinder volumes in the CG is determined. An ONTAP CG snapshot of the
flexvols creates a write-order consistent snapshot of each backing
flexvol. For each Cinder volume in the CG, it is then necessary to
clone its volume from the ONTAP CG snapshot. The naming convention
used to create the clones indicates the clone's role as a Cinder
snapshot and its inclusion in a Cinder CG snapshot. The ONTAP CG
snapshots, of each backing flexvol, are deleted after the cloning
operation is completed.
:returns: An implicit update for the cgsnapshot and snapshot models
that is then used by the manager to set the models to
available.
"""
hosts = [snapshot['volume']['host'] for snapshot in snapshots]
flexvols = self._get_flexvol_names_from_hosts(hosts)
# Create | |
# -*- coding: utf-8 -*-
from openerp import models,fields,api
from openerp.tools.translate import _
from openerp.exceptions import Warning
import datetime
import time
import psycopg2
import sys
#TODO :
# - List of deliverable customers before creating the "list to serve"
# - Handling of A and Q stock => awaiting tests on this subject
# - Handling of certificates => awaiting CC's reply
@api.multi
def _acceder_commande(self,id):
dummy, view_id = self.env['ir.model.data'].get_object_reference('sale', 'view_order_form')
return {
'name': "Commande",
'view_mode': 'form',
'view_id': view_id,
'view_type': 'form',
'res_model': 'sale.order',
'type': 'ir.actions.act_window',
'res_id': id,
'domain': '[]',
}
#livrable=[('livrable', u'Livrable'),('toutes', u'Toutes')]
class is_liste_servir_client(models.Model):
_name='is.liste.servir.client'
_order='name'
name = fields.Many2one('res.partner', 'Client')
liste_servir_id = fields.Many2one('is.liste.servir', 'Liste à servir')
zip = fields.Char('Code postal')
city = fields.Char('Ville')
delai_transport = fields.Integer('Délai de transport')
date_debut = fields.Date("Date de début d'expédition")
date_fin = fields.Date("Date de fin d'expédition")
livrable = fields.Boolean("Livrable")
@api.multi
def action_creer_liste_servir(self):
for obj in self:
liste_servir_obj = self.env['is.liste.servir']
if obj.name.is_source_location_id:
is_source_location_id=obj.name.is_source_location_id.id
else:
is_source_location=liste_servir_obj._get_default_location()
is_source_location_id=is_source_location.id
vals={
'partner_id' : obj.name.id,
'transporteur_id' : obj.name.is_transporteur_id.id,
'is_source_location_id': is_source_location_id,
'date_debut' : obj.date_debut,
'date_fin' : obj.date_fin,
'livrable' : obj.livrable,
}
liste_servir=liste_servir_obj.create(vals)
liste_servir.action_importer_commandes()
obj.liste_servir_id=liste_servir.id
return {
'name': "Liste à servir",
'view_mode': 'form',
'view_type': 'form',
'res_model': 'is.liste.servir',
'type': 'ir.actions.act_window',
'res_id': liste_servir.id,
}
@api.multi
def action_voir_liste_servir(self):
for obj in self:
return {
'name': "Liste à servir",
'view_mode': 'form',
'view_type': 'form',
'res_model': 'is.liste.servir',
'type': 'ir.actions.act_window',
'res_id': obj.liste_servir_id.id,
}
class is_liste_servir(models.Model):
_name='is.liste.servir'
_order='name desc'
@api.model
def _get_default_location(self):
company_id = self.env.user.company_id.id
warehouse_obj = self.env['stock.warehouse']
warehouse_id = warehouse_obj.search([('company_id','=',company_id)])
location = warehouse_id.out_type_id and warehouse_id.out_type_id.default_location_src_id
return location and location or False
name = fields.Char("N°", readonly=True)
partner_id = fields.Many2one('res.partner', 'Client', required=True)
date_debut = fields.Date("Date de début d'expédition")
date_fin = fields.Date("Date de fin d'expédition", required=True)
livrable = fields.Boolean("Livrable")
transporteur_id = fields.Many2one('res.partner', 'Transporteur')
message = fields.Text("Message")
commentaire = fields.Text("Commentaire")
state = fields.Selection([('creation', u'Création'),('analyse', u'Analyse'),('traite', u'Traité')], u"État", readonly=True, select=True)
order_ids = fields.One2many('sale.order', 'is_liste_servir_id', 'Commandes générées', readonly=False)
line_ids = fields.One2many('is.liste.servir.line', 'liste_servir_id', u"Lignes")
uc_ids = fields.One2many('is.liste.servir.uc', 'liste_servir_id', u"UCs")
um_ids = fields.One2many('is.liste.servir.um', 'liste_servir_id', u"UMs")
is_source_location_id = fields.Many2one('stock.location', 'Emplacement Source', default=_get_default_location)
poids_brut = fields.Float('Poids brut', compute='_compute', readonly=True, store=False)
poids_net = fields.Float('Poids net' , compute='_compute', readonly=True, store=False)
info_client = fields.Text("Information client complèmentaire")
galia_um_ids = fields.One2many('is.galia.base.um', 'liste_servir_id', u"UMs scannées", readonly=True)
@api.multi
def tableaux(self):
t=[True,False]
return t
def _date_fin():
now = datetime.date.today() # today's date
date_fin = now + datetime.timedelta(days=1) # tomorrow (J+1)
return date_fin.strftime('%Y-%m-%d') # format as YYYY-MM-DD
_defaults = {
'state': 'creation',
'date_fin': _date_fin(),
'livrable': False,
}
@api.depends('line_ids')
def _compute(self):
for obj in self:
poids_brut = 0
poids_net = 0
for line in obj.line_ids:
poids_brut = poids_brut + line.quantite * line.product_id.weight
poids_net = poids_net + line.quantite * line.product_id.weight_net
obj.poids_brut = poids_brut
obj.poids_net = poids_net
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
res = {}
vals = {}
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
if partner.is_source_location_id:
vals.update({'is_source_location_id': partner.is_source_location_id })
if partner.is_transporteur_id:
vals.update({'transporteur_id': partner.is_transporteur_id })
res['value']=vals
return res
def _message(self,partner_id,vals):
if partner_id:
message=""
r=self.env['is.liste.servir.message'].search([['name', '=', partner_id]])
for l in r:
if l.message:
message=message+l.message+'\n'
partner=self.env['res.partner'].browse(partner_id)
if partner.is_certificat_matiere:
message=message+'JOINDRE CERTIFICAT CONFORMITE \n'
vals["message"]=message
return vals
#def create(self, cr, uid, vals, context=None):
@api.model
def create(self, vals):
if "partner_id" in vals:
vals=self._message(vals["partner_id"], vals)
data_obj = self.pool.get('ir.model.data')
sequence_ids = data_obj.search(self._cr, self._uid, [('name','=','is_liste_servir_seq')])
if sequence_ids:
sequence_id = data_obj.browse(self._cr, self._uid, sequence_ids[0]).res_id
vals['name'] = self.pool.get('ir.sequence').get_id(self._cr, self._uid, sequence_id, 'id')
new_id = super(is_liste_servir, self).create(vals)
return new_id
@api.multi
def write(self,vals):
cr = self._cr
if "partner_id" in vals:
vals=self._message(vals["partner_id"], vals)
res=super(is_liste_servir, self).write(vals)
for obj in self:
if 'line_ids' in vals or not obj.uc_ids:
# The stored SQL function below handles the grouping of the UCs (handling units)
SQL="""
CREATE OR REPLACE FUNCTION fmixer(mixer boolean, id integer) RETURNS integer AS $$
BEGIN
IF mixer = True THEN
RETURN 0;
ELSE
RETURN id;
END IF;
END;
$$ LANGUAGE plpgsql;
select uc_id,um_id,fmixer(mixer,id), sum(nb_uc),sum(nb_um)
from is_liste_servir_line
where liste_servir_id="""+str(obj.id)+"""
group by uc_id,um_id,fmixer(mixer,id);
"""
#** Build the table of UCs *************************************
for row in obj.uc_ids:
row.unlink()
cr.execute(SQL)
result = cr.fetchall()
for r in result:
vals={
'liste_servir_id': obj.id,
'uc_id': r[0],
'um_id': r[1],
'nb_uc': r[3],
'nb_um': r[4],
}
self.env['is.liste.servir.uc'].create(vals)
#*******************************************************************
if 'line_ids' in vals or 'uc_ids' in vals or not obj.um_ids:
#** Build the table of UMs *************************************
for row in obj.um_ids:
row.unlink()
r={}
for row in obj.uc_ids:
if row.mixer:
k=1000+row.um_id.id
else:
k=2000+row.id
um_id=row.um_id.id
if k in r:
nb_um=r[k]['nb_um']+row.nb_um
else:
nb_um=row.nb_um
r[k]={'um_id': um_id, 'nb_um': nb_um}
for k in r:
vals={
'liste_servir_id': obj.id,
'um_id': r[k]['um_id'],
'nb_um': r[k]['nb_um'],
}
self.env['is.liste.servir.um'].create(vals)
#*******************************************************************
return res
@api.multi
def _get_sql(self,obj):
SQL="""
select sol.order_id,
sol.product_id,
sol.is_client_order_ref,
sol.is_date_livraison,
sol.is_date_expedition,
sol.product_uom_qty,
sol.price_unit,
sol.is_justification
from sale_order so inner join sale_order_line sol on so.id=sol.order_id
where so.partner_id="""+str(obj.partner_id.id)+"""
and so.state='draft'
and sol.is_date_expedition<='"""+str(obj.date_fin)+"""'
and sol.product_id>0
and sol.is_type_commande='ferme'
and so.is_type_commande!='ls'
"""
#if obj.order_id:
# SQL=SQL+" and so.id!="+str(obj.order_id.id)+" "
if obj.date_debut:
SQL=SQL+" and sol.is_date_expedition>='"+str(obj.date_debut)+"' "
#SQL=SQL+"group by sol.order_id,sol.product_id, sol.is_client_order_ref "
if obj.partner_id.is_caracteristique_bl=='cde_client':
OrderBy="sol.is_client_order_ref"
else:
OrderBy="sol.product_id"
SQL=SQL+"order by "+OrderBy
return SQL
@api.multi
def action_importer_commandes(self):
cr = self._cr
for obj in self:
#** Connect to Dynacase *******************************************
if obj.partner_id.is_certificat_matiere:
uid=self._uid
user=self.env['res.users'].browse(uid)
password=<PASSWORD>
cnx=False
try:
cnx = psycopg2.connect("host='dynacase' port=5432 dbname='freedom' user='dynacaseowner' password='"+password+"'")
except:
raise Warning("Impossible de se connecter à Dynacase")
cursor = cnx.cursor()
#*******************************************************************
for row in obj.line_ids:
row.unlink()
SQL=self._get_sql(obj)
cr.execute(SQL)
result = cr.fetchall()
line_obj = self.env['is.liste.servir.line']
sequence=0
for row in result:
sequence=sequence+1
product_id=row[1]
#** Look up the material certificate ****************************
certificat_matiere=False
if obj.partner_id.is_certificat_matiere:
product=self.env['product.product'].browse(product_id)
SQL="""
select id
from doc69106
where doctype='F' and locked='0' and cmc_codepg='"""+product.is_code+"""' limit 1
"""
cursor.execute(SQL)
for row2 in cursor:
certificat_matiere=row2[0]
#***************************************************************
stock01 = self.env['product.product'].get_stock(product_id,'f', '01')
stocka = self.env['product.product'].get_stock(product_id,'f')
stockq = self.env['product.product'].get_stock(product_id,'t')
qt=row[5]
livrable=qt<=stocka
test=True
if obj.livrable and not livrable:
test=False
if test:
vals={
'liste_servir_id' : obj.id,
'sequence' : sequence,
'order_id' : row[0],
'product_id' : row[1],
'client_order_ref' : row[2],
'date_livraison' : row[3],
'date_expedition' : row[4],
'prix' : row[6],
'justification' : row[7],
'quantite' : qt,
'livrable' : livrable,
'stock01' : stock01,
'stocka' : stocka,
'stockq' : stockq,
'certificat_matiere': certificat_matiere,
}
line_obj.create(vals)
obj.state="analyse"
@api.multi
def action_generer_bl(self):
cr = self._cr
for obj in self:
obj.order_ids.unlink()
SQL=self._get_sql(obj)
cr.execute(SQL)
result = cr.fetchall()
Test=True
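# Re-run the selection and flag anomalies: every list line must still match a draft
# order line, and its quantity may not exceed the quantity currently on order.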
for line in obj.line_ids:
key1=str(line.order_id.id)+"-"+str(line.product_id.id)
anomalie="Commande non trouvée"
for order in result:
key2=str(order[0])+"-"+str(order[1])
if key1==key2:
anomalie=""
if line.quantite>order[3]:
anomalie="Qt en commande = "+ str(order[3])
line.anomalie=anomalie
if anomalie!="":
Test=False
#** Open the list of generated orders **********************
if Test:
self.generer_bl(obj)
obj.state="traite"
ids=[]
for order in obj.order_ids:
ids.append(order.id)
res= {
'domain': "[('id','in',[" + ','.join(map(str, list(ids))) + "])]",
'name': 'Commandes',
'view_mode': 'tree,form',
'view_type': 'form',
'context': {'tree_view_ref': 'sale.view_order_tree'},
'res_model': 'sale.order',
'type': 'ir.actions.act_window',
}
return res
#*******************************************************************
@api.multi
def generer_bl(self,obj):
cr = self._cr
uid = self._uid
ids = self._ids
context = self._context
order_line_obj = self.pool.get('sale.order.line')
order_obj = self.pool.get('sale.order')
vals={}
lines = []
mem=''
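# Build one sale.order per group of lines: the grouping key depends on the partner's
# is_caracteristique_bl setting (single order, one per customer order ref, or one per
# product). A new order is flushed each time the key changes, so the lines must arrive
# sorted by that key, which the ORDER BY in _get_sql guarantees.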
for line in obj.line_ids:
key=''
if obj.partner_id.is_caracteristique_bl=='cde_odoo':
key=''
if obj.partner_id.is_caracteristique_bl=='cde_client':
key=str(line.client_order_ref)
if obj.partner_id.is_caracteristique_bl=='ref_article':
key=str(line.product_id.id)
if mem!=key:
if vals:
new_id = order_obj.create(cr, uid, vals, context=context)
vals={}
lines = []
mem=key
quotation_line = order_line_obj.product_id_change(cr, uid, ids, obj.partner_id.property_product_pricelist.id,
line.product_id.id, 0, False, 0, False, '', obj.partner_id.id,
False, True, False, False, False, False, context=context)['value']
if 'tax_id' in quotation_line:
quotation_line.update({'tax_id': [[6, False, quotation_line['tax_id']]]})
quotation_line.update({
'product_id' : line.product_id.id,
'product_uom_qty' : line.quantite,
'is_date_livraison' : line.date_livraison,
'is_type_commande' : 'ferme',
'is_client_order_ref' : line.client_order_ref,
'price_unit' : line.prix,
'is_justification' : line.justification,
})
lines.append([0,False,quotation_line])
values = {
'partner_id': obj.partner_id.id,
'is_source_location_id': obj.is_source_location_id.id,
'client_order_ref' : obj.name,
'is_liste_servir_id' : obj.id,
'origin' : obj.name,
'order_line' : lines,
'picking_policy' : 'direct',
'order_policy' : 'picking',
'is_transporteur_id' : obj.transporteur_id.id,
'is_type_commande' : 'ls',
'is_info_client' : obj.info_client,
}
vals.update(values)
if vals:
new_id = order_obj.create(cr, uid, vals, context=context)
#** Remove the lines from the original orders ***********************
SQL="""
select sol.order_id, sol.product_id, sol.product_uom_qty, sol.id
from sale_order so inner join sale_order_line sol on so.id=sol.order_id
where so.partner_id="""+str(obj.partner_id.id)+""" and so.state='draft'
and sol.is_date_expedition<='"""+str(obj.date_fin)+"""' and sol.product_id>0
"""
if obj.date_debut:
SQL=SQL+" and sol.is_date_expedition>='"+str(obj.date_debut)+"' "
SQL=SQL+" order by sol.is_date_expedition, sol.is_date_livraison"
cr.execute(SQL)
result = cr.fetchall()
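# Deduct the delivered quantities from the original draft order lines: a line is
# unlinked once fully consumed (and an emptied 'standard' order is deleted), otherwise
# its remaining quantity is reduced.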
for line in obj.line_ids:
key1=str(line.order_id.id)+"-"+str(line.product_id.id)
quantite=line.quantite
for order in result:
key2=str(order[0])+"-"+str(order[1])
if key1==key2 and quantite>=0:
order_line=self.env['sale.order.line'].search([('id', '=', order[3])])
qty=order_line.product_uom_qty
if quantite>=qty:
sale_order=order_line.order_id
order_line.unlink()
#** Delete the order if it is now empty ****************
if len(sale_order.order_line)==0 and sale_order.is_type_commande=='standard':
sale_order.unlink()
#*******************************************************
else:
order_line.product_uom_qty=qty-quantite
quantite=quantite-qty
#***********************************************************************
@api.multi
def get_is_code_rowspan(self,product_id):
cr = self._cr
for obj in self:
SQL="""
select count(*)
from is_galia_base_uc uc inner join is_galia_base_um um on uc.um_id=um.id
inner join is_liste_servir ls on um.liste_servir_id=ls.id
inner join product_product | |
*pSFX, unsigned char loc, int x, int y)")
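# Each pair below first clears the items at an address with del_items and then applies a
# function prototype with SetType (the mangled name is kept in the declaration). This looks
# like a generated disassembler re-typing script; the exact tool is an assumption.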
del_items(0x8003D4B8)
SetType(0x8003D4B8, "void PlayEffect__Fii(int i, int mode)")
del_items(0x8003D604)
SetType(0x8003D604, "int RndSFX__Fi(int psfx)")
del_items(0x8003D6AC)
SetType(0x8003D6AC, "void PlaySFX__Fi(int psfx)")
del_items(0x8003D718)
SetType(0x8003D718, "void PlaySfxLoc__Fiii(int psfx, int x, int y)")
del_items(0x8003D7C4)
SetType(0x8003D7C4, "void sound_stop__Fv()")
del_items(0x8003D85C)
SetType(0x8003D85C, "void sound_update__Fv()")
del_items(0x8003D890)
SetType(0x8003D890, "void priv_sound_init__FUc(unsigned char bLoadMask)")
del_items(0x8003D8D4)
SetType(0x8003D8D4, "void sound_init__Fv()")
del_items(0x8003D97C)
SetType(0x8003D97C, "void stream_fade__Fv()")
del_items(0x8003D9BC)
SetType(0x8003D9BC, "int GetDirection__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x8003DA60)
SetType(0x8003DA60, "void SetRndSeed__Fl(long s)")
del_items(0x8003DA70)
SetType(0x8003DA70, "long GetRndSeed__Fv()")
del_items(0x8003DAB8)
SetType(0x8003DAB8, "long random__Fil(int idx, long v)")
del_items(0x8003DB24)
SetType(0x8003DB24, "unsigned char *DiabloAllocPtr__FUl(unsigned long dwBytes)")
del_items(0x8003DB70)
SetType(0x8003DB70, "void mem_free_dbg__FPv(void *p)")
del_items(0x8003DBC0)
SetType(0x8003DBC0, "unsigned char *LoadFileInMem__FPCcPUl(char *pszName, unsigned long *pdwFileLen)")
del_items(0x8003DBC8)
SetType(0x8003DBC8, "void PlayInGameMovie__FPCc(char *pszMovie)")
del_items(0x8003DBD0)
SetType(0x8003DBD0, "void Enter__9CCritSect(struct CCritSect *this)")
del_items(0x8003DBD8)
SetType(0x8003DBD8, "void InitDiabloMsg__Fc(char e)")
del_items(0x8003DC6C)
SetType(0x8003DC6C, "void ClrDiabloMsg__Fv()")
del_items(0x8003DC98)
SetType(0x8003DC98, "void DrawDiabloMsg__Fv()")
del_items(0x8003DDCC)
SetType(0x8003DDCC, "void interface_msg_pump__Fv()")
del_items(0x8003DDD4)
SetType(0x8003DDD4, "void ShowProgress__FUi(unsigned int uMsg)")
del_items(0x8003E1A8)
SetType(0x8003E1A8, "void InitAllItemsUseable__Fv()")
del_items(0x8003E1E0)
SetType(0x8003E1E0, "void InitItemGFX__Fv()")
del_items(0x8003E20C)
SetType(0x8003E20C, "unsigned char ItemPlace__Fii(int xp, int yp)")
del_items(0x8003E2A8)
SetType(0x8003E2A8, "void AddInitItems__Fv()")
del_items(0x8003E4C4)
SetType(0x8003E4C4, "void InitItems__Fb(bool re_init)")
del_items(0x8003E67C)
SetType(0x8003E67C, "void CalcPlrItemVals__FiUc(int p, unsigned char Loadgfx)")
del_items(0x8003F0F4)
SetType(0x8003F0F4, "void CalcPlrScrolls__Fi(int p)")
del_items(0x8003F474)
SetType(0x8003F474, "void CalcPlrStaff__FP12PlayerStruct(struct PlayerStruct *ptrplr)")
del_items(0x8003F530)
SetType(0x8003F530, "void CalcSelfItems__Fi(int pnum)")
del_items(0x8003F690)
SetType(0x8003F690, "unsigned char ItemMinStats__FPC12PlayerStructPC10ItemStruct(struct PlayerStruct *p, struct ItemStruct *x)")
del_items(0x8003F6DC)
SetType(0x8003F6DC, "void SetItemMinStats__FPC12PlayerStructP10ItemStruct(struct PlayerStruct *p, struct ItemStruct *x)")
del_items(0x8003F708)
SetType(0x8003F708, "void CalcPlrItemMin__Fi(int pnum)")
del_items(0x8003F7E8)
SetType(0x8003F7E8, "void CalcPlrBookVals__Fi(int p)")
del_items(0x8003FACC)
SetType(0x8003FACC, "void CalcPlrInv__FiUc(int p, unsigned char Loadgfx)")
del_items(0x8003FB7C)
SetType(0x8003FB7C, "void SetPlrHandItem__FP10ItemStructi(struct ItemStruct *h, int idata)")
del_items(0x8003FC94)
SetType(0x8003FC94, "void GetPlrHandSeed__FP10ItemStruct(struct ItemStruct *h)")
del_items(0x8003FCC0)
SetType(0x8003FCC0, "void GetGoldSeed__FiP10ItemStruct(int pnum, struct ItemStruct *h)")
del_items(0x8003FE28)
SetType(0x8003FE28, "void SetPlrHandSeed__FP10ItemStructi(struct ItemStruct *h, int iseed)")
del_items(0x8003FE30)
SetType(0x8003FE30, "void SetPlrHandGoldCurs__FP10ItemStruct(struct ItemStruct *h)")
del_items(0x8003FE60)
SetType(0x8003FE60, "void CreatePlrItems__Fi(int p)")
del_items(0x800403C0)
SetType(0x800403C0, "unsigned char ItemSpaceOk__Fii(int i, int j)")
del_items(0x80040640)
SetType(0x80040640, "unsigned char GetItemSpace__Fiic(int x, int y, char inum)")
del_items(0x8004085C)
SetType(0x8004085C, "void GetSuperItemSpace__Fiic(int x, int y, char inum)")
del_items(0x800409B4)
SetType(0x800409B4, "void GetSuperItemLoc__FiiRiT2(int x, int y, int *xx, int *yy)")
del_items(0x80040A7C)
SetType(0x80040A7C, "void CalcItemValue__Fi(int i)")
del_items(0x80040B34)
SetType(0x80040B34, "void GetBookSpell__Fii(int i, int lvl)")
del_items(0x80040D98)
SetType(0x80040D98, "void GetStaffPower__FiiiUc(int i, int lvl, int bs, unsigned char onlygood)")
del_items(0x80040F88)
SetType(0x80040F88, "void GetStaffSpell__FiiUc(int i, int lvl, unsigned char onlygood)")
del_items(0x80041264)
SetType(0x80041264, "void GetItemAttrs__Fiii(int i, int idata, int lvl)")
del_items(0x80041810)
SetType(0x80041810, "int RndPL__Fii(int param1, int param2)")
del_items(0x80041848)
SetType(0x80041848, "int PLVal__Fiiiii(int pv, int p1, int p2, int minv, int maxv)")
del_items(0x800418BC)
SetType(0x800418BC, "void SaveItemPower__Fiiiiiii(int i, int power, int param1, int param2, int minval, int maxval, int multval)")
del_items(0x80042FEC)
SetType(0x80042FEC, "void GetItemPower__FiiilUc(int i, int minlvl, int maxlvl, long flgs, int onlygood)")
del_items(0x80043454)
SetType(0x80043454, "void GetItemBonus__FiiiiUc(int i, int idata, int minlvl, int maxlvl, int onlygood)")
del_items(0x80043550)
SetType(0x80043550, "void SetupItem__Fi(int i)")
del_items(0x80043680)
SetType(0x80043680, "int RndItem__Fi(int m)")
del_items(0x800438C0)
SetType(0x800438C0, "int RndUItem__Fi(int m)")
del_items(0x80043B04)
SetType(0x80043B04, "int RndAllItems__Fv()")
del_items(0x80043C6C)
SetType(0x80043C6C, "int RndTypeItems__Fii(int itype, int imid)")
del_items(0x80043DDC)
SetType(0x80043DDC, "int CheckUnique__FiiiUc(int i, int lvl, int uper, unsigned char recreate)")
del_items(0x80043F8C)
SetType(0x80043F8C, "void GetUniqueItem__Fii(int i, int uid)")
del_items(0x80044244)
SetType(0x80044244, "void SpawnUnique__Fiii(int uid, int x, int y)")
del_items(0x80044384)
SetType(0x80044384, "void ItemRndDur__Fi(int ii)")
del_items(0x80044420)
SetType(0x80044420, "void SetupAllItems__FiiiiiUcUcUc(int ii, int idx, int iseed, int lvl, int uper, int onlygood, int recreate, int pregen)")
del_items(0x80044768)
SetType(0x80044768, "void SpawnItem__FiiiUc(int m, int x, int y, unsigned char sendmsg)")
del_items(0x800449C0)
SetType(0x800449C0, "void CreateItem__Fiii(int uid, int x, int y)")
del_items(0x80044B1C)
SetType(0x80044B1C, "void CreateRndItem__FiiUcUcUc(int x, int y, unsigned char onlygood, unsigned char sendmsg, int delta)")
del_items(0x80044C64)
SetType(0x80044C64, "void SetupAllUseful__Fiii(int ii, int iseed, int lvl)")
del_items(0x80044D50)
SetType(0x80044D50, "void CreateRndUseful__FiiiUc(int pnum, int x, int y, unsigned char sendmsg)")
del_items(0x80044E10)
SetType(0x80044E10, "void CreateTypeItem__FiiUciiUcUc(int x, int y, unsigned char onlygood, int itype, int imisc, int sendmsg, int delta)")
del_items(0x80044F54)
SetType(0x80044F54, "void RecreateEar__FiUsiUciiiiii(int ii, unsigned short ic, int iseed, unsigned char Id, int dur, int mdur, int ch, int mch, int ivalue, int ibuff)")
del_items(0x80045154)
SetType(0x80045154, "void SpawnQuestItem__Fiiiii(int itemid, int x, int y, int randarea, int selflag)")
del_items(0x800453A8)
SetType(0x800453A8, "void SpawnRock__Fv()")
del_items(0x80045554)
SetType(0x80045554, "void RespawnItem__FiUc(int i, unsigned char FlipFlag)")
del_items(0x8004570C)
SetType(0x8004570C, "void DeleteItem__Fii(int ii, int i)")
del_items(0x80045760)
SetType(0x80045760, "void ItemDoppel__Fv()")
del_items(0x80045820)
SetType(0x80045820, "void ProcessItems__Fv()")
del_items(0x80045AC4)
SetType(0x80045AC4, "void FreeItemGFX__Fv()")
del_items(0x80045ACC)
SetType(0x80045ACC, "void GetItemStr__Fi(int i)")
del_items(0x80045C74)
SetType(0x80045C74, "void CheckIdentify__Fii(int pnum, int cii)")
del_items(0x80045D70)
SetType(0x80045D70, "void RepairItem__FP10ItemStructi(struct ItemStruct *i, int lvl)")
del_items(0x80045E64)
SetType(0x80045E64, "void DoRepair__Fii(int pnum, int cii)")
del_items(0x80045F28)
SetType(0x80045F28, "void RechargeItem__FP10ItemStructi(struct ItemStruct *i, int r)")
del_items(0x80045F90)
SetType(0x80045F90, "void DoRecharge__Fii(int pnum, int cii)")
del_items(0x800460B4)
SetType(0x800460B4, "void PrintItemOil__Fc(char IDidx)")
del_items(0x800461B0)
SetType(0x800461B0, "void PrintItemPower__FcPC10ItemStruct(char plidx, struct ItemStruct *x)")
del_items(0x80046854)
SetType(0x80046854, "void PrintItemMisc__FPC10ItemStruct(struct ItemStruct *x)")
del_items(0x80046AB4)
SetType(0x80046AB4, "void PrintItemDetails__FPC10ItemStruct(struct ItemStruct *x)")
del_items(0x80046F18)
SetType(0x80046F18, "void PrintItemDur__FPC10ItemStruct(struct ItemStruct *x)")
del_items(0x80047284)
SetType(0x80047284, "void CastScroll__Fii(int pnum, int Spell)")
del_items(0x800474D4)
SetType(0x800474D4, "void UseItem__Fiii(int p, int Mid, int spl)")
del_items(0x80047AF0)
SetType(0x80047AF0, "unsigned char StoreStatOk__FP10ItemStruct(struct ItemStruct *h)")
del_items(0x80047B84)
SetType(0x80047B84, "unsigned char PremiumItemOk__Fi(int i)")
del_items(0x80047C00)
SetType(0x80047C00, "int RndPremiumItem__Fii(int minlvl, int maxlvl)")
del_items(0x80047D08)
SetType(0x80047D08, "void SpawnOnePremium__Fii(int i, int plvl)")
del_items(0x80047FFC)
SetType(0x80047FFC, "void SpawnPremium__Fi(int lvl)")
del_items(0x8004839C)
SetType(0x8004839C, "void WitchBookLevel__Fi(int ii)")
del_items(0x80048578)
SetType(0x80048578, "void SpawnStoreGold__Fv()")
del_items(0x80048648)
SetType(0x80048648, "void RecalcStoreStats__Fv()")
del_items(0x8004892C)
SetType(0x8004892C, "int ItemNoFlippy__Fv()")
del_items(0x80048990)
SetType(0x80048990, "void CreateSpellBook__FiiiUcUc(int x, int y, int ispell, unsigned char sendmsg, int delta)")
del_items(0x80048B20)
SetType(0x80048B20, "void CreateMagicArmor__FiiiiUcUc(int x, int y, int imisc, int icurs, int sendmsg, int delta)")
del_items(0x80048C9C)
SetType(0x80048C9C, "void CreateMagicWeapon__FiiiiUcUc(int x, int y, int imisc, int icurs, int sendmsg, int delta)")
del_items(0x80048E18)
SetType(0x80048E18, "void DrawUniqueInfo__Fv()")
del_items(0x80048F88)
SetType(0x80048F88, "char *MakeItemStr__FP10ItemStructUsUs(struct ItemStruct *ItemPtr, unsigned short ItemNo, unsigned short MaxLen)")
del_items(0x800493F8)
SetType(0x800493F8, "unsigned char SmithItemOk__Fi(int i)")
del_items(0x8004945C)
SetType(0x8004945C, "int RndSmithItem__Fi(int lvl)")
del_items(0x80049568)
SetType(0x80049568, "unsigned char WitchItemOk__Fi(int i)")
del_items(0x800495F8)
SetType(0x800495F8, "int RndWitchItem__Fi(int lvl)")
del_items(0x800497A8)
SetType(0x800497A8, "void BubbleSwapItem__FP10ItemStructT0(struct ItemStruct *a, struct ItemStruct *b)")
del_items(0x800498B0)
SetType(0x800498B0, "void SortWitch__Fv()")
del_items(0x80049A40)
SetType(0x80049A40, "int RndBoyItem__Fi(int lvl)")
del_items(0x80049B64)
SetType(0x80049B64, "unsigned char HealerItemOk__Fi(int i)")
del_items(0x80049D18)
SetType(0x80049D18, "int RndHealerItem__Fi(int lvl)")
del_items(0x80049E18)
SetType(0x80049E18, "void RecreatePremiumItem__Fiiii(int ii, int idx, int plvl, int iseed)")
del_items(0x80049EF4)
SetType(0x80049EF4, "void RecreateWitchItem__Fiiii(int ii, int idx, int lvl, int iseed)")
del_items(0x8004A060)
SetType(0x8004A060, "void RecreateSmithItem__Fiiii(int ii, int idx, int lvl, int iseed)")
del_items(0x8004A110)
SetType(0x8004A110, "void RecreateHealerItem__Fiiii(int ii, int idx, int lvl, int iseed)")
del_items(0x8004A1E4)
SetType(0x8004A1E4, "void RecreateBoyItem__Fiiii(int ii, int idx, int lvl, int iseed)")
del_items(0x8004A2BC)
SetType(0x8004A2BC, "void RecreateTownItem__FiiUsii(int ii, int idx, unsigned short icreateinfo, int iseed, int ivalue)")
del_items(0x8004A348)
SetType(0x8004A348, "void SpawnSmith__Fi(int lvl)")
del_items(0x8004A678)
SetType(0x8004A678, "void SpawnWitch__Fi(int lvl)")
del_items(0x8004AC70)
SetType(0x8004AC70, "void SpawnHealer__Fi(int lvl)")
del_items(0x8004B214)
SetType(0x8004B214, "void SpawnBoy__Fi(int lvl)")
del_items(0x8004B518)
SetType(0x8004B518, "void SortSmith__Fv()")
del_items(0x8004B69C)
SetType(0x8004B69C, "void SortHealer__Fv()")
del_items(0x8004B82C)
SetType(0x8004B82C, "void RecreateItem__FiiUsiii(int ii, int idx, unsigned short icreateinfo, int iseed, int ivalue, int PlrCreate)")
del_items(0x8004BA70)
SetType(0x8004BA70, "int veclen2__Fii(int ix, int iy)")
del_items(0x8004BAD8)
SetType(0x8004BAD8, "void set_light_bands__Fv()")
del_items(0x8004BB48)
SetType(0x8004BB48, "void SetLightFX__FiisssUcUcUc(int x, int y, short s_r, short s_g, int s_b, int d_r, int d_g, int d_b)")
del_items(0x8004BBB4)
SetType(0x8004BBB4, "void SetWeirdFX__Fv()")
del_items(0x8004BC28)
SetType(0x8004BC28, "void DoLighting__Fiiii(int nXPos, int nYPos, int nRadius, int Lnum)")
del_items(0x8004C8FC)
SetType(0x8004C8FC, "void DoUnLight__Fv()")
del_items(0x8004CB40)
SetType(0x8004CB40, "void DoUnVision__Fiiii(int nXPos, int nYPos, int nRadius, int num)")
del_items(0x8004CC48)
SetType(0x8004CC48, "void DoVision__FiiiUcUc(int nXPos, int nYPos, int nRadius, unsigned char doautomap, int visible)")
del_items(0x8004D070)
SetType(0x8004D070, "void FreeLightTable__Fv()")
del_items(0x8004D078)
SetType(0x8004D078, "void InitLightTable__Fv()")
del_items(0x8004D080)
SetType(0x8004D080, "void MakeLightTable__Fv()")
del_items(0x8004D088)
SetType(0x8004D088, "void InitLightMax__Fv()")
del_items(0x8004D0AC)
SetType(0x8004D0AC, "void InitLighting__Fv()")
del_items(0x8004D0F0)
SetType(0x8004D0F0, "int AddLight__Fiii(int x, int y, int r)")
del_items(0x8004D148)
SetType(0x8004D148, "void AddUnLight__Fi(int i)")
del_items(0x8004D16C)
SetType(0x8004D16C, "void ChangeLightRadius__Fii(int i, int r)")
del_items(0x8004D18C)
SetType(0x8004D18C, "void ChangeLightXY__Fiii(int i, int x, int y)")
del_items(0x8004D1B8)
SetType(0x8004D1B8, "void light_fix__Fi(int i)")
del_items(0x8004D1C0)
SetType(0x8004D1C0, "void ChangeLightOff__Fiii(int i, int x, int y)")
del_items(0x8004D1E8)
SetType(0x8004D1E8, "void ChangeLight__Fiiii(int i, int x, int y, int r)")
del_items(0x8004D214)
SetType(0x8004D214, "void ChangeLightColour__Fii(int i, int c)")
del_items(0x8004D23C)
SetType(0x8004D23C, "void ProcessLightList__Fv()")
del_items(0x8004D354)
SetType(0x8004D354, "void SavePreLighting__Fv()")
del_items(0x8004D35C)
SetType(0x8004D35C, "void InitVision__Fv()")
del_items(0x8004D3B0)
SetType(0x8004D3B0, "int AddVision__FiiiUc(int x, int y, int r, unsigned char mine)")
del_items(0x8004D424)
SetType(0x8004D424, "void ChangeVisionRadius__Fii(int id, int r)")
del_items(0x8004D4D8)
SetType(0x8004D4D8, "void ChangeVisionXY__Fiii(int id, int x, int y)")
del_items(0x8004D55C)
SetType(0x8004D55C, "void ProcessVisionList__Fv()")
del_items(0x8004D764)
SetType(0x8004D764, "void FreeQuestText__Fv()")
del_items(0x8004D76C)
SetType(0x8004D76C, "void InitQuestText__Fv()")
del_items(0x8004D778)
SetType(0x8004D778, "void CalcTextSpeed__FPCc(char *Name)")
del_items(0x8004D934)
SetType(0x8004D934, "void FadeMusicTSK__FP4TASK(struct TASK *T)")
del_items(0x8004DA80)
SetType(0x8004DA80, "void InitQTextMsg__Fi(int m)")
del_items(0x8004DCD4)
SetType(0x8004DCD4, "void DrawQTextBack__Fv()")
del_items(0x8004DE70)
SetType(0x8004DE70, "void DrawQTextTSK__FP4TASK(struct TASK *T)")
del_items(0x8004E158)
SetType(0x8004E158, "int KANJI_strlen__FPc(char *str)")
del_items(0x8004E198)
SetType(0x8004E198, "void DrawQText__Fv()")
del_items(0x8004E744)
SetType(0x8004E744, "void _GLOBAL__D_QBack()")
del_items(0x8004E76C)
SetType(0x8004E76C, "void _GLOBAL__I_QBack()")
del_items(0x8004E794)
SetType(0x8004E794, "void SetRGB__6DialogUcUcUc_addr_8004E794(struct Dialog *this, unsigned char R, unsigned char G, unsigned char B)")
del_items(0x8004E7B4)
SetType(0x8004E7B4, "void SetBorder__6Dialogi_addr_8004E7B4(struct Dialog *this, int Type)")
del_items(0x8004E7BC)
SetType(0x8004E7BC, "void ___6Dialog_addr_8004E7BC(struct Dialog *this, int __in_chrg)")
del_items(0x8004E7E4)
SetType(0x8004E7E4, "struct Dialog *__6Dialog_addr_8004E7E4(struct Dialog *this)")
del_items(0x8004E864)
SetType(0x8004E864, "int GetOverlayOtBase__7CBlocks_addr_8004E864()")
del_items(0x8004E86C)
SetType(0x8004E86C, "unsigned short GetDown__C4CPad_addr_8004E86C(struct CPad *this)")
del_items(0x8004E894)
SetType(0x8004E894, "void nullmissile__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8004E89C)
SetType(0x8004E89C, "void FuncNULL__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x8004E8A4)
SetType(0x8004E8A4, "void delta_init__Fv()")
del_items(0x8004E8FC)
SetType(0x8004E8FC, "void delta_kill_monster__FiUcUcUc(int mi, unsigned char x, unsigned char y, unsigned char bLevel)")
del_items(0x8004E994)
SetType(0x8004E994, "void delta_monster_hp__FilUc(int mi, long hp, unsigned char bLevel)")
del_items(0x8004EA10)
SetType(0x8004EA10, "void delta_leave_sync__FUc(unsigned char bLevel)")
del_items(0x8004ED38)
SetType(0x8004ED38, "void delta_sync_object__FiUcUc(int oi, unsigned char bCmd, unsigned char bLevel)")
del_items(0x8004ED98)
SetType(0x8004ED98, "unsigned char delta_get_item__FPC9TCmdGItemUc(struct TCmdGItem *pI, unsigned char bLevel)")
del_items(0x8004EF64)
SetType(0x8004EF64, "void delta_put_item__FPC9TCmdPItemiiUc(struct TCmdPItem *pI, int x, int y, unsigned char bLevel)")
del_items(0x8004F0F0)
SetType(0x8004F0F0, "unsigned char delta_portal_inited__Fi(int i)")
del_items(0x8004F114)
SetType(0x8004F114, "unsigned char delta_quest_inited__Fi(int i)")
del_items(0x8004F138)
SetType(0x8004F138, "void DeltaAddItem__Fi(int ii)")
del_items(0x8004F360)
SetType(0x8004F360, "int DeltaExportData__FPc(char *Dst)")
del_items(0x8004F38C)
SetType(0x8004F38C, "int DeltaImportData__FPc(char *Src)")
del_items(0x8004F3D4)
SetType(0x8004F3D4, "void DeltaSaveLevel__Fv()")
del_items(0x8004F4D0)
SetType(0x8004F4D0, "void NetSendCmd__FUcUc(unsigned char bHiPri, unsigned char bCmd)")
del_items(0x8004F4F8)
SetType(0x8004F4F8, "void NetSendCmdGolem__FUcUcUcUclUc(unsigned char mx, unsigned char my, unsigned char dir, unsigned char menemy, long hp, int cl)")
del_items(0x8004F544)
SetType(0x8004F544, "void NetSendCmdLoc__FUcUcUcUc(unsigned char bHiPri, unsigned char bCmd, unsigned char x, unsigned char y)")
del_items(0x8004F574)
SetType(0x8004F574, "void NetSendCmdLocParam1__FUcUcUcUcUs(unsigned char bHiPri, unsigned char bCmd, unsigned char x, unsigned char y, int wParam1)")
del_items(0x8004F5AC)
SetType(0x8004F5AC, "void NetSendCmdLocParam2__FUcUcUcUcUsUs(unsigned char bHiPri, unsigned char bCmd, unsigned char x, unsigned char y, int wParam1, int wParam2)")
del_items(0x8004F5EC)
SetType(0x8004F5EC, "void NetSendCmdLocParam3__FUcUcUcUcUsUsUs(unsigned char bHiPri, unsigned char bCmd, unsigned char x, unsigned char y, int wParam1, int wParam2, int wParam3)")
del_items(0x8004F634)
SetType(0x8004F634, "void NetSendCmdParam1__FUcUcUs(unsigned char bHiPri, unsigned char bCmd, unsigned short wParam1)")
del_items(0x8004F660)
SetType(0x8004F660, "void NetSendCmdParam2__FUcUcUsUs(unsigned char bHiPri, unsigned char bCmd, unsigned short wParam1, unsigned short wParam2)")
del_items(0x8004F690)
SetType(0x8004F690, "void NetSendCmdParam3__FUcUcUsUsUs(unsigned char bHiPri, unsigned char bCmd, unsigned short wParam1, unsigned short wParam2, int wParam3)")
del_items(0x8004F6C8)
SetType(0x8004F6C8, "void NetSendCmdQuest__FUcUc(unsigned char bHiPri, unsigned char q)")
del_items(0x8004F73C)
SetType(0x8004F73C, "void NetSendCmdGItem__FUcUcUcUcUc(unsigned char bHiPri, unsigned char bCmd, unsigned char mast, unsigned char pnum, int ii)")
del_items(0x8004F884)
SetType(0x8004F884, "void NetSendCmdGItem2__FUcUcUcUcPC9TCmdGItem(unsigned char usonly, unsigned char bCmd, unsigned char mast, unsigned char pnum, struct TCmdGItem *p)")
del_items(0x8004F908)
SetType(0x8004F908, "unsigned char NetSendCmdReq2__FUcUcUcPC9TCmdGItem(unsigned char bCmd, unsigned char mast, unsigned char pnum, struct TCmdGItem *p)")
del_items(0x8004F968)
SetType(0x8004F968, "void NetSendCmdExtra__FPC9TCmdGItem(struct TCmdGItem *p)")
del_items(0x8004F9D8)
SetType(0x8004F9D8, "void NetSendCmdPItem__FUcUcUcUc(unsigned char bHiPri, unsigned char bCmd, unsigned char x, unsigned char y)")
del_items(0x8004FAF4)
SetType(0x8004FAF4, "void NetSendCmdChItem__FUcUc(unsigned char bHiPri, unsigned char bLoc)")
del_items(0x8004FB98)
SetType(0x8004FB98, "void NetSendCmdDelItem__FUcUc(unsigned char bHiPri, unsigned char bLoc)")
del_items(0x8004FBC8)
SetType(0x8004FBC8, "void NetSendCmdDItem__FUci(unsigned char bHiPri, int ii)")
del_items(0x8004FCF0)
SetType(0x8004FCF0, "unsigned char i_own_level__Fi(int nReqLevel)")
del_items(0x8004FCF8)
SetType(0x8004FCF8, "void NetSendCmdDamage__FUcUcUl(unsigned char bHiPri, unsigned char bPlr, unsigned long dwDam)")
del_items(0x8004FD2C)
SetType(0x8004FD2C, "void delta_close_portal__Fi(int pnum)")
del_items(0x8004FD6C)
SetType(0x8004FD6C, "void check_update_plr__Fi(int pnum)")
del_items(0x8004FD74)
SetType(0x8004FD74, | |
unicode(mkt.ADDON_PREMIUM_TYPES[mkt.ADDON_FREE_INAPP]))
def test_invalid_page(self):
r = self.client.get(self.url, {'page': 999})
eq_(r.status_code, 200)
eq_(r.context['pager'].number, 1)
def test_queue_count(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
links = doc('.tabnav li a')
eq_(links[0].text, u'Apps (2)')
eq_(links[1].text, u'Re-reviews (1)')
eq_(links[2].text, u'Updates (0)')
eq_(links[4].text, u'Homescreens (0)')
def test_homescreen_count(self):
Tag(tag_text='homescreen').save_tag(self.apps[1])
if self.uses_es():
self.reindex(Webapp)
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
links = doc('.tabnav li a')
eq_(links[0].text, u'Apps (1)')
eq_(links[1].text, u'Re-reviews (1)')
eq_(links[2].text, u'Updates (0)')
eq_(links[4].text, u'Homescreens (1)')
def test_queue_count_senior_reviewer(self):
self.login_as_senior_reviewer()
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
links = doc('.tabnav li a')
eq_(links[0].text, u'Apps (2)')
eq_(links[1].text, u'Re-reviews (1)')
eq_(links[2].text, u'Updates (0)')
eq_(links[3].text, u'Escalations (0)')
def test_escalated_not_in_queue(self):
self.login_as_senior_reviewer()
EscalationQueue.objects.create(addon=self.apps[0])
if self.uses_es():
self.reindex(Webapp)
res = self.client.get(self.url)
# self.apps[2] is not pending so doesn't show up either.
eq_([a.app.id for a in res.context['addons']], [self.apps[1].id])
doc = pq(res.content)
links = doc('.tabnav li a')
eq_(links[0].text, u'Apps (1)')
eq_(links[1].text, u'Re-reviews (1)')
eq_(links[2].text, u'Updates (0)')
eq_(links[3].text, u'Escalations (1)')
def test_incomplete_not_in_queue(self):
[app.update(status=mkt.STATUS_NULL) for app in self.apps]
if self.uses_es():
self.reindex(Webapp)
req = req_factory_factory(
self.url,
user=UserProfile.objects.get(email='<EMAIL>'))
doc = pq(queue_apps(req).content)
assert not doc('#addon-queue tbody tr').length
def test_waiting_time(self):
"""Check objects show queue objects' created."""
res = self.client.get(self.url)
waiting_times = [wait.attrib['isotime'] for wait in
pq(res.content)('td time')]
expected_waiting_times = [isotime(app.latest_version.nomination)
for app in self.apps[0:2]]
self.assertSetEqual(expected_waiting_times, waiting_times)
class TestAppQueueES(mkt.site.tests.ESTestCase, TestAppQueue):
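# Re-runs every TestAppQueue test with the 'reviewer-tools-elasticsearch' switch enabled,
# so the same queue views are exercised against the Elasticsearch-backed implementation.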
def setUp(self):
super(TestAppQueueES, self).setUp()
self.create_switch('reviewer-tools-elasticsearch')
self.reindex(Webapp)
class TestRegionQueue(AppReviewerTest, AccessMixin, FlagsMixin, SearchMixin,
XSSMixin):
def setUp(self):
super(TestRegionQueue, self).setUp()
self.apps = [app_factory(name='WWW',
status=mkt.STATUS_PUBLIC),
app_factory(name='XXX',
status=mkt.STATUS_APPROVED),
app_factory(name='YYY',
status=mkt.STATUS_PUBLIC),
app_factory(name='ZZZ',
status=mkt.STATUS_PENDING)]
# WWW and XXX are the only ones actually requested to be public.
self.apps[0].geodata.update(region_cn_status=mkt.STATUS_PENDING,
region_cn_nominated=self.days_ago(2))
self.apps[1].geodata.update(region_cn_status=mkt.STATUS_PENDING,
region_cn_nominated=self.days_ago(1))
self.apps[2].geodata.update(region_cn_status=mkt.STATUS_PUBLIC)
self.grant_permission(self.reviewer_user, 'Apps:ReviewRegionCN')
self.login_as_editor()
self.url = reverse('reviewers.apps.queue_region',
args=[mkt.regions.CHN.slug])
def test_template_links(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
links = pq(r.content)('.regional-queue tbody tr td:first-child a')
apps = Webapp.objects.pending_in_region('cn').order_by(
'_geodata__region_cn_nominated')
src = '?src=queue-region-cn'
expected = [
(unicode(apps[0].name), apps[0].get_url_path() + src),
(unicode(apps[1].name), apps[1].get_url_path() + src),
]
check_links(expected, links, verify=False)
def test_escalated_not_in_queue(self):
self.grant_permission(self.snr_reviewer_user, 'Apps:ReviewRegionCN')
self.login_as_senior_reviewer()
self.apps[0].escalationqueue_set.create()
res = self.client.get(self.url)
eq_([a.app for a in res.context['addons']], [self.apps[1]])
@mock.patch('mkt.versions.models.Version.is_privileged', False)
class TestRereviewQueue(AppReviewerTest, AccessMixin, FlagsMixin, SearchMixin,
XSSMixin):
def setUp(self):
super(TestRereviewQueue, self).setUp()
self.apps = [app_factory(name='XXX'),
app_factory(name='YYY'),
app_factory(name='ZZZ')]
RereviewQueue.objects.create(addon=self.apps[0]).update(
created=self.days_ago(5))
RereviewQueue.objects.create(addon=self.apps[1]).update(
created=self.days_ago(3))
RereviewQueue.objects.create(addon=self.apps[2]).update(
created=self.days_ago(1))
self.apps[0].update(created=self.days_ago(15))
self.apps[1].update(created=self.days_ago(13))
self.apps[2].update(created=self.days_ago(11))
if self.uses_es():
self.reindex(Webapp)
self.url = reverse('reviewers.apps.queue_rereview')
def tearDown(self):
if self.uses_es():
unindex_webapps([app.id for app in self.apps])
super(TestRereviewQueue, self).tearDown()
def review_url(self, app):
return reverse('reviewers.apps.review', args=[app.app_slug])
def test_template_links(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
links = pq(r.content)('#addon-queue tbody')('tr td:nth-of-type(2) a')
apps = [rq.addon for rq in
RereviewQueue.objects.all().order_by('created')]
expected = [
(unicode(apps[0].name), self.review_url(apps[0])),
(unicode(apps[1].name), self.review_url(apps[1])),
(unicode(apps[2].name), self.review_url(apps[2])),
]
check_links(expected, links, verify=False)
def test_waiting_time(self):
"""Check objects show queue objects' created."""
r = self.client.get(self.url)
waiting_times = [wait.attrib['isotime'] for wait in
pq(r.content)('td time')]
expected_waiting_times = [
isotime(app.rereviewqueue_set.all()[0].created)
for app in self.apps]
self.assertSetEqual(expected_waiting_times, waiting_times)
def test_action_buttons_public_senior_reviewer(self):
self.login_as_senior_reviewer()
r = self.client.get(self.review_url(self.apps[0]))
eq_(r.status_code, 200)
actions = pq(r.content)('#review-actions input')
expected = [
(u'Reject', 'reject'),
(u'Ban app', 'disable'),
(u'Clear Re-review', 'clear_rereview'),
(u'Escalate', 'escalate'),
(u'Message developer', 'info'),
(u'Private comment', 'comment'),
]
self.check_actions(expected, actions)
def test_action_buttons_public(self):
r = self.client.get(self.review_url(self.apps[0]))
eq_(r.status_code, 200)
actions = pq(r.content)('#review-actions input')
expected = [
(u'Reject', 'reject'),
(u'Clear Re-review', 'clear_rereview'),
(u'Escalate', 'escalate'),
(u'Message developer', 'info'),
(u'Private comment', 'comment'),
]
self.check_actions(expected, actions)
def test_action_buttons_reject(self):
self.apps[0].update(status=mkt.STATUS_REJECTED)
self.apps[0].latest_version.files.update(status=mkt.STATUS_DISABLED)
r = self.client.get(self.review_url(self.apps[0]))
eq_(r.status_code, 200)
actions = pq(r.content)('#review-actions input')
expected = [
(u'Approve', 'public'),
(u'Clear Re-review', 'clear_rereview'),
(u'Escalate', 'escalate'),
(u'Message developer', 'info'),
(u'Private comment', 'comment'),
]
self.check_actions(expected, actions)
def test_invalid_page(self):
r = self.client.get(self.url, {'page': 999})
eq_(r.status_code, 200)
eq_(r.context['pager'].number, 1)
def test_queue_count(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
links = doc('.tabnav li a')
eq_(links[0].text, u'Apps (0)')
eq_(links[1].text, u'Re-reviews (3)')
eq_(links[2].text, u'Updates (0)')
def test_queue_count_senior_reviewer(self):
self.login_as_senior_reviewer()
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
links = doc('.tabnav li a')
eq_(links[0].text, u'Apps (0)')
eq_(links[1].text, u'Re-reviews (3)')
eq_(links[2].text, u'Updates (0)')
eq_(links[3].text, u'Escalations (0)')
def test_escalated_not_in_queue(self):
self.login_as_senior_reviewer()
EscalationQueue.objects.create(addon=self.apps[0])
if self.uses_es():
self.reindex(Webapp)
res = self.client.get(self.url)
if self.uses_es():
self.assertSetEqual([a.id for a in res.context['addons']],
[a.id for a in self.apps[1:]])
else:
self.assertSetEqual([a.app for a in res.context['addons']],
self.apps[1:])
doc = pq(res.content)
links = doc('.tabnav li a')
eq_(links[0].text, u'Apps (0)')
eq_(links[1].text, u'Re-reviews (2)')
eq_(links[2].text, u'Updates (0)')
eq_(links[3].text, u'Escalations (1)')
def test_addon_deleted(self):
app = self.apps[0]
app.delete()
eq_(RereviewQueue.objects.filter(addon=app).exists(), False)
class TestRereviewQueueES(mkt.site.tests.ESTestCase, TestRereviewQueue):
def setUp(self):
super(TestRereviewQueueES, self).setUp()
self.create_switch('reviewer-tools-elasticsearch')
self.reindex(Webapp)
@mock.patch('mkt.versions.models.Version.is_privileged', False)
class TestUpdateQueue(AppReviewerTest, AccessMixin, FlagsMixin, SearchMixin,
XSSMixin):
def setUp(self):
super(TestUpdateQueue, self).setUp()
app1 = app_factory(is_packaged=True, name='XXX',
version_kw={'version': '1.0',
'created': self.days_ago(2),
'nomination': self.days_ago(2)})
app2 = app_factory(is_packaged=True, name='YYY',
version_kw={'version': '1.0',
'created': self.days_ago(2),
'nomination': self.days_ago(2)})
version_factory(addon=app1, version='1.1', created=self.days_ago(1),
nomination=self.days_ago(1),
file_kw={'status': mkt.STATUS_PENDING})
version_factory(addon=app2, version='1.1', created=self.days_ago(1),
nomination=self.days_ago(1),
file_kw={'status': mkt.STATUS_PENDING})
self.apps = list(Webapp.objects.order_by('id'))
self.url = reverse('reviewers.apps.queue_updates')
def tearDown(self):
if self.uses_es():
unindex_webapps([app.id for app in self.apps])
super(TestUpdateQueue, self).tearDown()
def review_url(self, app):
return reverse('reviewers.apps.review', args=[app.app_slug])
def test_template_links(self):
self.apps[0].versions.latest().update(nomination=self.days_ago(2))
self.apps[1].versions.latest().update(nomination=self.days_ago(1))
if self.uses_es():
self.reindex(Webapp)
r = self.client.get(self.url)
eq_(r.status_code, 200)
links = pq(r.content)('#addon-queue tbody')('tr td:nth-of-type(2) a')
expected = [
(unicode(self.apps[0].name), self.review_url(self.apps[0])),
(unicode(self.apps[1].name), self.review_url(self.apps[1])),
]
check_links(expected, links, verify=False)
def test_action_buttons_public_senior_reviewer(self):
self.apps[0].versions.latest().files.update(status=mkt.STATUS_PUBLIC)
self.login_as_senior_reviewer()
r = self.client.get(self.review_url(self.apps[0]))
eq_(r.status_code, 200)
actions = pq(r.content)('#review-actions input')
expected = [
(u'Reject', 'reject'),
(u'Ban app', 'disable'),
(u'Request Re-review', 'manual_rereview'),
(u'Escalate', 'escalate'),
(u'Message developer', 'info'),
(u'Private comment', 'comment'),
]
self.check_actions(expected, actions)
def test_action_buttons_public(self):
self.apps[0].versions.latest().files.update(status=mkt.STATUS_PUBLIC)
r = self.client.get(self.review_url(self.apps[0]))
eq_(r.status_code, 200)
actions = pq(r.content)('#review-actions input')
expected = [
(u'Reject', 'reject'),
(u'Request Re-review', 'manual_rereview'),
(u'Escalate', 'escalate'),
(u'Message developer', 'info'),
(u'Private comment', 'comment'),
]
self.check_actions(expected, actions)
def test_action_buttons_reject(self):
self.apps[0].versions.latest().files.update(status=mkt.STATUS_DISABLED)
r = self.client.get(self.review_url(self.apps[0]))
eq_(r.status_code, 200)
actions = pq(r.content)('#review-actions input')
expected = [
(u'Approve', 'public'),
(u'Request Re-review', 'manual_rereview'),
(u'Escalate', 'escalate'),
(u'Message developer', 'info'),
(u'Private comment', 'comment'),
]
self.check_actions(expected, actions)
def test_invalid_page(self):
r = self.client.get(self.url, {'page': 999})
eq_(r.status_code, 200)
eq_(r.context['pager'].number, 1)
def test_queue_count(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
links = doc('.tabnav li a')
eq_(links[0].text, u'Apps (0)')
eq_(links[1].text, u'Re-reviews (0)')
eq_(links[2].text, u'Updates (2)')
def test_homescreen(self):
Tag(tag_text='homescreen').save_tag(self.apps[1])
if self.uses_es():
self.reindex(Webapp)
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
links = doc('.tabnav li a')
eq_(links[0].text, u'Apps (0)')
eq_(links[1].text, u'Re-reviews (0)')
eq_(links[2].text, u'Updates (1)')
eq_(links[3].text, u'Reviewing (0)')
eq_(links[4].text, u'Homescreens (1)')
def test_queue_count_senior_reviewer(self):
self.login_as_senior_reviewer()
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
links = doc('.tabnav li a')
eq_(links[0].text, u'Apps (0)')
eq_(links[1].text, u'Re-reviews (0)')
eq_(links[2].text, u'Updates (2)')
eq_(links[3].text, u'Escalations (0)')
def test_escalated_not_in_queue(self):
self.login_as_senior_reviewer()
EscalationQueue.objects.create(addon=self.apps[0])
if self.uses_es():
self.reindex(Webapp)
res = self.client.get(self.url)
eq_([a.app.id for a in res.context['addons']],
[app.id for app in self.apps[1:]])
doc = pq(res.content)
links = doc('.tabnav li a')
eq_(links[0].text, u'Apps (0)')
eq_(links[1].text, u'Re-reviews (0)')
eq_(links[2].text, u'Updates (1)')
eq_(links[3].text, u'Escalations (1)')
def test_order(self):
self.apps[0].update(created=self.days_ago(10))
self.apps[1].update(created=self.days_ago(5))
self.apps[0].versions.latest().update(nomination=self.days_ago(1))
self.apps[1].versions.latest().update(nomination=self.days_ago(4))
if self.uses_es():
self.reindex(Webapp)
res = self.client.get(self.url)
apps = list(res.context['addons'])
eq_(apps[0].app.id, self.apps[1].id)
eq_(apps[1].app.id, self.apps[0].id)
def test_only_updates_in_queue(self):
# Add new packaged app, which should only show up in the pending queue.
app = app_factory(is_packaged=True, name='ZZZ',
status=mkt.STATUS_PENDING,
version_kw={'version': '1.0'},
file_kw={'status': mkt.STATUS_PENDING})
self.apps.append(app)
if self.uses_es():
self.reindex(Webapp)
res = self.client.get(self.url)
apps = [a.app for a in res.context['addons']]
assert app not in apps, (
'Unexpected: Found a new packaged app in the updates queue.')
eq_(pq(res.content)('.tabnav li a')[2].text, u'Updates (2)')
def test_approved_update_in_queue(self):
app = app_factory(is_packaged=True, name='YYY',
status=mkt.STATUS_APPROVED,
version_kw={'version': '1.0',
'created': self.days_ago(2),
'nomination': self.days_ago(2)})
self.apps.append(app)
File.objects.filter(version__addon=app).update(status=app.status)
version_factory(addon=app, version='1.1', created=self.days_ago(1),
nomination=self.days_ago(1),
file_kw={'status': mkt.STATUS_PENDING})
if self.uses_es():
self.reindex(Webapp)
res = self.client.get(self.url)
assert app.id in [a.app.id for a in res.context['addons']]
eq_(pq(res.content)('.tabnav li a')[2].text, u'Updates (3)')
def test_update_queue_with_empty_nomination(self):
app = app_factory(is_packaged=True, name='YYY',
status=mkt.STATUS_NULL,
version_kw={'version': '1.0',
'created': self.days_ago(2),
'nomination': None})
self.apps.append(app)
first_version = app.latest_version
version_factory(addon=app, version='1.1', created=self.days_ago(1),
nomination=None,
file_kw={'status': mkt.STATUS_PENDING})
# Now that we have a version with nomination=None, reset app status.
app.update(status=mkt.STATUS_APPROVED)
File.objects.filter(version=first_version).update(status=app.status)
# Safeguard: we /really/ want to test with nomination=None.
eq_(app.latest_version.reload().nomination, None)
if self.uses_es():
self.reindex(Webapp)
res = self.client.get(self.url)
assert app.id in [a.app.id for a in res.context['addons']]
eq_(pq(res.content)('.tabnav li a')[2].text, u'Updates (3)')
def test_deleted_version_not_in_queue(self):
"""
This tests that an app with a prior pending version that got
deleted doesn't trigger the app to remain in the review queue.
"""
app = self.apps[0]
# File is PENDING and delete current version.
old_ver = app.versions.order_by('id')[0]
old_ver.files.latest().update(status=mkt.STATUS_PENDING)
old_ver.delete()
# "Approve" the app.
app.versions.latest().files.latest().update(status=mkt.STATUS_PUBLIC)
eq_(app.reload().status, mkt.STATUS_PUBLIC)
if self.uses_es():
self.reindex(Webapp)
res = self.client.get(self.url)
eq_(res.status_code, 200)
# Verify that our app has 2 versions.
eq_(Version.with_deleted.filter(addon=app).count(), 2)
# Verify the apps in the context are what we expect.
doc = pq(res.content)
eq_(doc('.tabnav li a')[2].text, u'Updates (1)')
apps = [a.app.id for a in res.context['addons']]
ok_(app.id not in apps)
ok_(self.apps[1].id in apps)
def test_waiting_time(self):
"""Check objects show queue objects' created."""
r = self.client.get(self.url)
waiting_times = [wait.attrib['isotime'] for wait in
pq(r.content)('td time')]
expected_waiting_times = [isotime(app.latest_version.nomination)
for app in self.apps]
self.assertSetEqual(expected_waiting_times, waiting_times)
class TestUpdateQueueES(mkt.site.tests.ESTestCase, TestUpdateQueue):
def setUp(self):
super(TestUpdateQueueES, self).setUp()
| |
str) -> Dict:
"""
Provides details about an entity recognizer including status, S3 buckets containing training data, recognizer metadata, metrics, and so on.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DescribeEntityRecognizer>`_
**Request Syntax**
::
response = client.describe_entity_recognizer(
EntityRecognizerArn='string'
)
**Response Syntax**
::
{
'EntityRecognizerProperties': {
'EntityRecognizerArn': 'string',
'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',
'Status': 'SUBMITTED'|'TRAINING'|'DELETING'|'STOP_REQUESTED'|'STOPPED'|'IN_ERROR'|'TRAINED',
'Message': 'string',
'SubmitTime': datetime(2015, 1, 1),
'EndTime': datetime(2015, 1, 1),
'TrainingStartTime': datetime(2015, 1, 1),
'TrainingEndTime': datetime(2015, 1, 1),
'InputDataConfig': {
'EntityTypes': [
{
'Type': 'string'
},
],
'Documents': {
'S3Uri': 'string'
},
'Annotations': {
'S3Uri': 'string'
},
'EntityList': {
'S3Uri': 'string'
}
},
'RecognizerMetadata': {
'NumberOfTrainedDocuments': 123,
'NumberOfTestDocuments': 123,
'EvaluationMetrics': {
'Precision': 123.0,
'Recall': 123.0,
'F1Score': 123.0
},
'EntityTypes': [
{
'Type': 'string'
},
]
},
'DataAccessRoleArn': 'string',
'VolumeKmsKeyId': 'string'
}
}
**Response Structure**
- *(dict) --*
- **EntityRecognizerProperties** *(dict) --*
Describes information associated with an entity recognizer.
- **EntityRecognizerArn** *(string) --*
The Amazon Resource Name (ARN) that identifies the entity recognizer.
- **LanguageCode** *(string) --*
The language of the input documents. All documents must be in the same language. Only English ("en") is currently supported.
- **Status** *(string) --*
Provides the status of the entity recognizer.
- **Message** *(string) --*
A description of the status of the recognizer.
- **SubmitTime** *(datetime) --*
The time that the recognizer was submitted for processing.
- **EndTime** *(datetime) --*
The time that the recognizer creation completed.
- **TrainingStartTime** *(datetime) --*
The time that training of the entity recognizer started.
- **TrainingEndTime** *(datetime) --*
The time that training of the entity recognizer was completed.
- **InputDataConfig** *(dict) --*
The input data properties of an entity recognizer.
- **EntityTypes** *(list) --*
The entity types in the input data for an entity recognizer.
- *(dict) --*
Information about an individual item on a list of entity types.
- **Type** *(string) --*
Entity type of an item on an entity type list.
- **Documents** *(dict) --*
S3 location of the documents folder for an entity recognizer
- **S3Uri** *(string) --*
Specifies the Amazon S3 location where the training documents for an entity recognizer are located. The URI must be in the same region as the API endpoint that you are calling.
- **Annotations** *(dict) --*
S3 location of the annotations file for an entity recognizer.
- **S3Uri** *(string) --*
Specifies the Amazon S3 location where the annotations for an entity recognizer are located. The URI must be in the same region as the API endpoint that you are calling.
- **EntityList** *(dict) --*
S3 location of the entity list for an entity recognizer.
- **S3Uri** *(string) --*
Specifies the Amazon S3 location where the entity list is located. The URI must be in the same region as the API endpoint that you are calling.
- **RecognizerMetadata** *(dict) --*
Provides information about an entity recognizer.
- **NumberOfTrainedDocuments** *(integer) --*
The number of documents in the input data that were used to train the entity recognizer. Typically this is 80 to 90 percent of the input documents.
- **NumberOfTestDocuments** *(integer) --*
The number of documents in the input data that were used to test the entity recognizer. Typically this is 10 to 20 percent of the input documents.
- **EvaluationMetrics** *(dict) --*
Detailed information about the accuracy of an entity recognizer.
- **Precision** *(float) --*
A measure of the usefulness of the recognizer results in the test data. High precision means that the recognizer returned substantially more relevant results than irrelevant ones.
- **Recall** *(float) --*
A measure of how complete the recognizer results are for the test data. High recall means that the recognizer returned most of the relevant results.
- **F1Score** *(float) --*
A measure of how accurate the recognizer results are for the test data. It is derived from the ``Precision`` and ``Recall`` values. The ``F1Score`` is the harmonic average of the two scores. The highest score is 1, and the worst score is 0.
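For example, with a Precision of 0.8 and a Recall of 0.6, the harmonic average gives F1 = 2 * (0.8 * 0.6) / (0.8 + 0.6) ≈ 0.686.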
- **EntityTypes** *(list) --*
Entity types from the metadata of an entity recognizer.
- *(dict) --*
Individual item from the list of entity types in the metadata of an entity recognizer.
- **Type** *(string) --*
Type of entity from the list of entity types in the metadata of an entity recognizer.
- **DataAccessRoleArn** *(string) --*
The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.
- **VolumeKmsKeyId** *(string) --*
ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats:
* KMS Key ID: ``"<KEY>"``
* Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/<KEY>"``
:type EntityRecognizerArn: string
:param EntityRecognizerArn: **[REQUIRED]**
The Amazon Resource Name (ARN) that identifies the entity recognizer.
:rtype: dict
:returns:
"""
pass
def describe_key_phrases_detection_job(self, JobId: str) -> Dict:
"""
Gets the properties associated with a key phrases detection job. Use this operation to get the status of a detection job.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DescribeKeyPhrasesDetectionJob>`_
**Request Syntax**
::
response = client.describe_key_phrases_detection_job(
JobId='string'
)
**Response Syntax**
::
{
'KeyPhrasesDetectionJobProperties': {
'JobId': 'string',
'JobName': 'string',
'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',
'Message': 'string',
'SubmitTime': datetime(2015, 1, 1),
'EndTime': datetime(2015, 1, 1),
'InputDataConfig': {
'S3Uri': 'string',
'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'
},
'OutputDataConfig': {
'S3Uri': 'string',
'KmsKeyId': 'string'
},
'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',
'DataAccessRoleArn': 'string',
'VolumeKmsKeyId': 'string'
}
}
**Response Structure**
- *(dict) --*
- **KeyPhrasesDetectionJobProperties** *(dict) --*
An object that contains the properties associated with a key phrases detection job.
- **JobId** *(string) --*
The identifier assigned to the key phrases detection job.
- **JobName** *(string) --*
The name that you assigned the key phrases detection job.
- **JobStatus** *(string) --*
The current status of the key phrases detection job. If the status is ``FAILED`` , the ``Message`` field shows the reason for the failure.
- **Message** *(string) --*
A description of the status of a job.
- **SubmitTime** *(datetime) --*
The time that the key phrases detection job was submitted for processing.
- **EndTime** *(datetime) --*
The time that the key phrases detection job completed.
- **InputDataConfig** *(dict) --*
The input data configuration that you supplied when you created the key phrases detection job.
- **S3Uri** *(string) --*
The Amazon S3 URI for the input data. The URI must be in the same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of data files.
For example, if you use the URI ``S3://bucketName/prefix`` and the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.
- **InputFormat** *(string) --*
Specifies how the text in an input file should be processed:
* ``ONE_DOC_PER_FILE`` - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.
* ``ONE_DOC_PER_LINE`` - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.
- **OutputDataConfig** *(dict) --*
The output data configuration that you supplied when you created the key phrases detection job.
- **S3Uri** *(string) --*
When you use the ``OutputDataConfig`` object with asynchronous operations, you specify the Amazon S3 location where you want to write the output | |
"""This module defines the base Optimization Protocol classes. The classes defined herein are not
intended for direct use, but are rather parent classes to those defined in
:mod:`hyperparameter_hunter.optimization`
Related
-------
:mod:`hyperparameter_hunter.optimization`
Defines the optimization classes that are intended for direct use. All classes defined in
:mod:`hyperparameter_hunter.optimization` should be descendants of
:class:`optimization_core.BaseOptimizationProtocol`
:mod:`hyperparameter_hunter.result_reader`
Used to locate result files for Experiments that are similar to the current optimization
constraints, and produce data to learn from in the case of :class:`SKOptimizationProtocol`
:mod:`hyperparameter_hunter.space`
Defines the child classes of `hyperparameter_hunter.space.Dimension`, which are used to define
the hyperparameters to optimize
:mod:`hyperparameter_hunter.utils.optimization_utils`:
Provides utility functions for locating saved Experiments that fit within the constraints
currently being optimized, as well as :class:`AskingOptimizer`, which guides the search of
:class:`optimization_core.SKOptimizationProtocol`"""
##################################################
# Import Own Assets
##################################################
from hyperparameter_hunter.algorithm_handlers import (
identify_algorithm,
identify_algorithm_hyperparameters,
)
from hyperparameter_hunter.exceptions import (
EnvironmentInactiveError,
EnvironmentInvalidError,
RepeatedExperimentError,
)
from hyperparameter_hunter.experiments import CVExperiment
from hyperparameter_hunter.library_helpers.keras_helper import reinitialize_callbacks
from hyperparameter_hunter.library_helpers.keras_optimization_helper import (
keras_prep_workflow,
link_choice_ids,
)
from hyperparameter_hunter.metrics import get_formatted_target_metric
from hyperparameter_hunter.reporting import OptimizationReporter
from hyperparameter_hunter.result_reader import finder_selector
from hyperparameter_hunter.settings import G, TEMP_MODULES_DIR_PATH
from hyperparameter_hunter.space import Space, dimension_subset
from hyperparameter_hunter.utils.boltons_utils import get_path
from hyperparameter_hunter.utils.general_utils import deep_restricted_update
from hyperparameter_hunter.utils.optimization_utils import AskingOptimizer, get_choice_dimensions
##################################################
# Import Miscellaneous Assets
##################################################
from abc import ABCMeta, abstractmethod
from datetime import datetime
from inspect import currentframe, getframeinfo
from os import walk, remove, rmdir
from os.path import abspath
##################################################
# Import Learning Assets
##################################################
from skopt.callbacks import check_callback
# noinspection PyProtectedMember
from skopt.utils import cook_estimator, eval_callbacks
try:
from keras import backend as K
except ImportError:
K = None
class OptimizationProtocolMeta(type):
"""Metaclass to accurately set :attr:`source_script` for its descendants even if the original
call was the product of scripts calling other scripts that eventually instantiated an
optimization protocol"""
@classmethod
def __prepare__(mcs, name, bases, **kwargs):
"""Prepare `namespace` to include :attr:`source_script`"""
namespace = dict(source_script=None)
return namespace
def __call__(cls, *args, **kwargs):
"""Set the instance's :attr:`source_script` to the absolute path of the file that
instantiated the OptimizationProtocol"""
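# currentframe().f_back is the frame of the caller that invoked `SomeProtocol(...)`, so a
# run script such as "/home/user/run_opt.py" (illustrative path, not from the source) is
# what gets recorded, rather than the module in which the protocol class is defined.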
setattr(cls, "source_script", abspath(getframeinfo(currentframe().f_back)[0]))
return super().__call__(*args, **kwargs)
class MergedOptimizationMeta(OptimizationProtocolMeta, ABCMeta):
"""Metaclass to combine :class:`OptimizationProtocolMeta`, and `ABCMeta`"""
pass
class BaseOptimizationProtocol(metaclass=MergedOptimizationMeta):
def __init__(
self,
target_metric=None,
iterations=1,
verbose=1,
read_experiments=True,
reporter_parameters=None,
):
"""Base class for intermediate base optimization protocol classes
Parameters
----------
target_metric: Tuple, default=('oof', <first key in :attr:`environment.Environment.metrics`>)
A path denoting the metric to be used to compare completed Experiments within the
Optimization Protocol. The first value should be one of ['oof', 'holdout', 'in_fold'].
The second value should be the name of a metric being recorded according to the values
supplied in :attr:`environment.Environment.metrics_params`. See the documentation for
:func:`metrics.get_formatted_target_metric` for more info. Any values returned by, or
given as the `target_metric` input to, :func:`metrics.get_formatted_target_metric` are
acceptable values for :attr:`BaseOptimizationProtocol.target_metric`
iterations: Int, default=1
The number of distinct experiments to execute
verbose: Int 0, 1, or 2, default=1
Verbosity mode for console logging. 0: Silent. 1: Show only logs from the Optimization
Protocol. 2: In addition to logs shown when verbose=1, also show the logs from individual
Experiments
read_experiments: Boolean, default=True
If True, all Experiment records that fit in the current :attr:`space` and guidelines,
and match :attr:`algorithm_name`, will be read in and used to fit any optimizers
reporter_parameters: Dict, or None, default={}
Additional parameters passed to :meth:`reporting.OptimizationReporter.__init__`. Note:
Unless provided explicitly, the key "do_maximize" will be added by default to
            `reporter_parameters`, with a value inferred from the `direction` of :attr:`target_metric`
in `G.Env.metrics`. In nearly all cases, the "do_maximize" key should be ignored,
as there are very few reasons to explicitly include it
Notes
-----
By default, 'script_backup' for Experiments is blacklisted when executed within
:class:`BaseOptimizationProtocol` since it would just repeatedly create copies of the same,
unchanged file (this file). So don't expect any script_backup files for Experiments executed
during optimization rounds"""
#################### Optimization Protocol Parameters ####################
self.target_metric = target_metric
self.iterations = iterations
self.verbose = verbose
self.read_experiments = read_experiments
self.reporter_parameters = reporter_parameters or {}
#################### Experiment Guidelines ####################
self.model_initializer = None
self.model_init_params = None
self.model_extra_params = None
self.feature_engineer = None
self.feature_selector = None
self.notes = None
self.do_raise_repeated = True
#################### Search Parameters ####################
self.dimensions = []
self.search_bounds = dict()
self.space = None
self.similar_experiments = []
self.best_experiment = None
self.best_score = None
self.successful_iterations = 0
self.skipped_iterations = 0
self.tested_keys = []
self._search_space_size = None
self.current_init_params = None
self.current_extra_params = None
#################### Identification Attributes ####################
self.algorithm_name = None
self.module_name = None
self.current_experiment = None
self.current_score = None
#################### Keras-Specific Attributes ####################
self.dummy_layers = []
self.dummy_compile_params = dict()
self.init_iter_attrs = []
self.extra_iter_attrs = []
self.logger = None
self._preparation_workflow()
self.do_maximize = G.Env.metrics[self.target_metric[-1]].direction == "max"
##################################################
# Core Methods:
##################################################
# TODO: Add `model` here, with a `TranslateTrace` decorator, and document it below
def set_experiment_guidelines(
self,
model_initializer,
model_init_params,
model_extra_params=None,
feature_engineer=None,
feature_selector=None,
notes=None,
do_raise_repeated=True,
):
"""Provide arguments necessary to instantiate :class:`experiments.CVExperiment`. This method
has the same signature as :meth:`experiments.BaseExperiment.__init__` except where noted
Parameters
----------
model_initializer: Class, or functools.partial, or class instance
The algorithm class being used to initialize a model
model_init_params: Dict, or object
The dictionary of arguments given when creating a model instance with
`model_initializer` via the `__init__` method of :class:`models.Model`. Any kwargs that
are considered valid by the `__init__` method of `model_initializer` are
valid in `model_init_params`
model_extra_params: Dict, or None, default=None
A dictionary of extra parameters passed to :class:`models.Model`. This is used to
provide parameters to models' non-initialization methods (like `fit`, `predict`,
`predict_proba`, etc.), and for neural networks
feature_engineer: `FeatureEngineer`, or None, default=None # TODO: Add documentation
... # TODO: Add documentation
feature_selector: List of str, callable, list of booleans, default=None
The value provided when splitting apart the input data for all provided DataFrames.
`feature_selector` is provided as the second argument for calls to
`pandas.DataFrame.loc` in :meth:`BaseExperiment._initial_preprocessing`. If None,
`feature_selector` is set to all columns in :attr:`train_dataset`, less
:attr:`target_column`, and :attr:`id_column`
notes: String, or None, default=None
Additional information about the Experiment that will be saved with the Experiment's
description result file. This serves no purpose other than to facilitate saving
Experiment details in a more readable format
        do_raise_repeated: Boolean, default=True
If True and this Experiment locates a previous Experiment's results with matching
Environment and Hyperparameter Keys, a RepeatedExperimentError will be raised. Else, a
warning will be logged
Notes
-----
The `auto_start` kwarg is not available here because
:meth:`BaseOptimizationProtocol._execute_experiment` sets it to False in order to check for
duplicated keys before running the whole Experiment. This is the most notable difference
between calling :meth:`set_experiment_guidelines` and instantiating
:class:`experiments.CVExperiment`"""
self.model_initializer = model_initializer
self.model_init_params = identify_algorithm_hyperparameters(self.model_initializer)
try:
self.model_init_params.update(model_init_params)
except TypeError:
self.model_init_params.update(dict(build_fn=model_init_params))
self.model_extra_params = model_extra_params if model_extra_params is not None else {}
self.feature_engineer = feature_engineer if feature_engineer is not None else {}
self.feature_selector = feature_selector if feature_selector is not None else []
self.notes = notes
self.do_raise_repeated = do_raise_repeated
if self.do_raise_repeated is False:
G.warn_("WARNING: Setting `do_raise_repeated`=False allows duplicated Experiments")
self.algorithm_name, self.module_name = identify_algorithm(self.model_initializer)
self._validate_guidelines()
#################### Deal with Keras ####################
if self.module_name == "keras":
reusable_build_fn, reusable_wrapper_params, dummy_layers, dummy_compile_params = keras_prep_workflow(
self.model_initializer,
self.model_init_params["build_fn"],
self.model_extra_params,
self.source_script,
)
self.model_init_params = dict(build_fn=reusable_build_fn)
self.model_extra_params = reusable_wrapper_params
self.dummy_layers = dummy_layers
self.dummy_compile_params = dummy_compile_params
# FLAG: Deal with capitalization conflicts when comparing similar experiments: `optimizer`='Adam' vs 'adam'
self.set_dimensions()
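    # Hedged usage sketch (added commentary; the concrete protocol name,
    # `XGBClassifier`, and `Integer` -- one of the `space.Dimension` child
    # classes -- are illustrative assumptions, not taken from this file):
    #
    #     opt = SomeOptimizationProtocol(iterations=10)
    #     opt.set_experiment_guidelines(
    #         model_initializer=XGBClassifier,
    #         model_init_params=dict(max_depth=Integer(2, 10)),
    #     )
    #     opt.go()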
def set_dimensions(self):
"""Locate given hyperparameters that are `space` choice declarations and add them to
:attr:`dimensions`"""
all_dimension_choices = []
#################### Remap Extra Objects ####################
if self.module_name == "keras":
from keras.initializers import Initializer as KerasInitializer
from keras.callbacks import Callback as KerasCB
self.init_iter_attrs.append(lambda _p, _k, _v: isinstance(_v, KerasInitializer))
self.extra_iter_attrs.append(lambda _p, _k, _v: isinstance(_v, KerasCB))
#################### Collect Choice Dimensions ####################
init_dim_choices = get_choice_dimensions(self.model_init_params, self.init_iter_attrs)
extra_dim_choices = get_choice_dimensions(self.model_extra_params, self.extra_iter_attrs)
for (path, choice) in init_dim_choices:
choice._name = ("model_init_params",) + path
all_dimension_choices.append(choice)
for (path, choice) in extra_dim_choices:
choice._name = ("model_extra_params",) + path
all_dimension_choices.append(choice)
self.dimensions = all_dimension_choices
if self.module_name == "keras":
self.model_extra_params = link_choice_ids(
self.dummy_layers,
self.dummy_compile_params,
self.model_extra_params,
self.dimensions,
)
def go(self):
"""Begin hyperparameter optimization process after experiment guidelines have been set and
search dimensions are in place. This process includes the following: setting the
hyperparameter space; locating similar experiments to be used as learning material for
:class:`SKOptimizationProtocol` s; and executing :meth:`_optimization_loop`, which
actually sets off the Experiment execution process"""
if self.model_initializer is None:
raise ValueError("Experiment guidelines must be set before starting optimization")
_reporter_params = dict(dict(do_maximize=self.do_maximize), **self.reporter_parameters)
self.logger = OptimizationReporter([_.name for _ in self.dimensions], **_reporter_params)
self.tested_keys = []
self._set_hyperparameter_space()
self._find_similar_experiments()
loop_start_time = datetime.now()
self._optimization_loop()
loop_end_time = datetime.now()
G.log_(f"Optimization | |
"""Utility module for creating transformation matrices
Basically this gives you the ability to construct
transformation matrices without needing OpenGL
or similar run-time engines. The result is that
design-time utilities can process files without
trading dependencies on a particular run-time.
This code is originally from the mcf.vrml processing
engine, and has only been cosmetically altered to
fit the new organizational pattern.
Note: to apply these matrices to a particular coordinate,
you would do the following:
p = ones( 4 )
p[:3] = coordinate
return dot( p, matrix)
That is, you use the homogeneous coordinate, and
make it the first item in the dot'ing.
"""
from math import *
from vrml.arrays import *
# Optional C-accelerated matrix helpers; the pure-Python fallbacks below are
# used when they are unavailable. (The import path here is an assumption --
# adjust it to wherever tmatrixaccel is packaged in your installation.)
try:
    from vrml_accelerate import tmatrixaccel
except ImportError:
    tmatrixaccel = None
# used to determine whether angles are non-null
TWOPI = pi * 2.0
RADTODEG = 360./TWOPI
DEGTORAD = TWOPI/360.
# used to determine the center point of a transform
ORIGINPOINT = array([0,0,0,1],'f')
VERY_SMALL = 1e-300
def transformMatrix(
translation = (0,0,0),
center = (0,0,0),
rotation = (0,1,0,0),
scale = (1,1,1),
scaleOrientation = (0,1,0,0),
parentMatrix = None,
):
"""Convert VRML transform values to an overall matrix
Returns 4x4 transformation matrix
Note that this uses VRML standard for rotations
(angle last, and in radians).
This should return matrices which, when applied to
local-space coordinates, give you parent-space
coordinates.
    parentMatrix, if provided, should be the parent's
    transformation matrix, a 4x4 matrix such as is
    returned by this function.
"""
T,T1 = transMatrix( translation )
C,C1 = transMatrix( center )
R,R1 = rotMatrix( rotation )
SO,SO1 = rotMatrix( scaleOrientation )
S,S1 = scaleMatrix( scale )
return compressMatrices( parentMatrix, T,C,R,SO,S,SO1,C1 )
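# Hedged usage sketch (added commentary, not from the original module; the
# values are illustrative): build the forward matrix and apply it to a
# local-space point exactly as the module docstring describes, with the
# homogeneous coordinate as the first operand of dot().
#
#   m = transformMatrix(translation=(1, 0, 0), rotation=(0, 1, 0, pi / 2))
#   p = ones(4)
#   p[:3] = (0, 0, 2)            # local-space coordinate
#   parent_space = dot(p, m)     # homogeneous parent-space coordinate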
def itransformMatrix(
translation = (0,0,0),
center = (0,0,0),
rotation = (0,1,0,0),
scale = (1,1,1),
scaleOrientation = (0,1,0,0),
parentMatrix = None,
):
"""Convert VRML transform values to an inverse transform matrix
Returns 4x4 transformation matrix
Note that this uses VRML standard for rotations
(angle last, and in radians).
This should return matrices which, when applied to
parent-space coordinates, give you local-space
coordinates for the corresponding transform.
Note: this is a substantially un-tested algorithm
though it seems to be properly constructed as far
as I can see. Whether to use dot(x, parentMatrix)
or the reverse is not immediately clear to me.
    parentMatrix, if provided, should be the child's
    transformation matrix, a 4x4 matrix such as is
    returned by this function.
"""
T,T1 = transMatrix( translation )
C,C1 = transMatrix( center )
R,R1 = rotMatrix( rotation )
SO,SO1 = rotMatrix( scaleOrientation )
S,S1 = scaleMatrix( scale )
return compressMatrices( parentMatrix, C,SO, S1, SO1, R1, C1, T1)
def transformMatrices(
translation = (0,0,0),
center = (0,0,0),
rotation = (0,1,0,0),
scale = (1,1,1),
scaleOrientation = (0,1,0,0),
parentMatrix = None,
):
"""Calculate both forward and backward matrices for these parameters"""
T,T1 = transMatrix( translation )
C,C1 = transMatrix( center )
R,R1 = rotMatrix( rotation )
SO,SO1 = rotMatrix( scaleOrientation )
S,S1 = scaleMatrix( scale )
return (
compressMatrices( parentMatrix, T,C,R,SO,S,SO1,C1 ),
compressMatrices( parentMatrix, C,SO, S1, SO1, R1, C1, T1)
)
def localMatrices(
translation = (0,0,0),
center = (0,0,0),
rotation = (0,1,0,0),
scale = (1,1,1),
scaleOrientation = (0,1,0,0),
parentMatrix = None,
):
"""Calculate (forward,inverse) matrices for this transform element"""
T,T1 = transMatrix( translation )
C,C1 = transMatrix( center )
R,R1 = rotMatrix( rotation )
SO,SO1 = rotMatrix( scaleOrientation )
S,S1 = scaleMatrix( scale )
return (
compressMatrices( T,C,R,SO,S,SO1,C1 ),
compressMatrices( C,SO, S1, SO1, R1, C1, T1)
)
def compressMatrices( *matrices ):
"""Compress a set of matrices
Any (or all) of the matrices may be None,
if *all* are None, then the result will be None,
otherwise will be the dot product of all of the
matrices...
"""
if not matrices:
return None
else:
first = matrices[0]
matrices = matrices[1:]
for item in matrices:
if item is not None:
if first is None:
first = item
else:
first = dot( item, first )
return first
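# Hedged examples of the None-handling contract described above (added for
# clarity, not part of the original module):
#   compressMatrices(None, None)  -> None
#   compressMatrices(None, T)     -> T
#   compressMatrices(T, C)        -> dot(C, T)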
def center(
translation = (0,0,0),
center = (0,0,0),
parentMatrix = None,
):
"""Determine the center of rotation for a transform node
Returns the parent-space coordinate of the
node's center of rotation.
"""
if parentMatrix is None:
parentMatrix = identity(4)
T,T1 = transMatrix( translation )
C,C1 = transMatrix( center )
for x in (T,C):
        if x is not None:
parentMatrix = dot( x, parentMatrix)
return dot( ORIGINPOINT, parentMatrix )
if tmatrixaccel:
def rotMatrix( source = None ):
"""Convert a VRML rotation to rotation matrices
Returns (R, R') (R and the inverse of R), with both
being 4x4 transformation matrices.
or
None,None if the angle is an exact multiple of 2pi
x,y,z -- (normalised) rotational vector
a -- angle in radians
"""
if source is None:
return None,None
else:
(x,y,z, a) = source
if a % TWOPI:
return tmatrixaccel.rotMatrix( x,y,z,a ),tmatrixaccel.rotMatrix( x,y,z,-a )
return None,None
def scaleMatrix( source=None ):
"""Convert a VRML scale to scale matrices
Returns (S, S') (S and the inverse of S), with both
being 4x4 transformation matrices.
or
None,None if x == y == z == 1.0
x,y,z -- scale vector
"""
if source is None:
return None,None
else:
(x,y,z) = source[:3]
if x == y == z == 1.0:
return None, None
forward = tmatrixaccel.scaleMatrix( x,y,z )
backward = tmatrixaccel.scaleMatrix( 1.0/(x or VERY_SMALL),1.0/(y or VERY_SMALL), 1.0/(z or VERY_SMALL) )
return forward, backward
def transMatrix( source=None ):
"""Convert a VRML translation to translation matrices
Returns (T, T') (T and the inverse of T), with both
being 4x4 transformation matrices.
or
None,None if x == y == z == 0.0
x,y,z -- scale vector
"""
if source is None:
return None,None
else:
(x,y,z) = source[:3]
if x == y == z == 0.0:
return None, None
return tmatrixaccel.transMatrix( x,y,z ),tmatrixaccel.transMatrix( -x, -y, -z )
perspectiveMatrix = tmatrixaccel.perspectiveMatrix
orthoMatrix = tmatrixaccel.orthoMatrix
else:
def rotMatrix( source=None ):
"""Convert a VRML rotation to rotation matrices
Returns (R, R') (R and the inverse of R), with both
being 4x4 transformation matrices.
or
None,None if the angle is an exact multiple of 2pi
x,y,z -- (normalised) rotational vector
a -- angle in radians
"""
if source is None:
return None,None
else:
(x,y,z, a) = source
if a % TWOPI:
# normalize the rotation vector!
squared = x*x + y*y + z*z
if squared != 1.0:
                    length = squared ** .5
                    x /= length
                    y /= length
                    z /= length
c = cos( a )
c1 = cos( -a )
s = sin( a )
s1 = sin( -a )
t = 1-c
R = array( [
[ t*x*x+c, t*x*y+s*z, t*x*z-s*y, 0],
[ t*x*y-s*z, t*y*y+c, t*y*z+s*x, 0],
[ t*x*z+s*y, t*y*z-s*x, t*z*z+c, 0],
[ 0, 0, 0, 1]
] )
R1 = array( [
[ t*x*x+c1, t*x*y+s1*z, t*x*z-s1*y, 0],
[ t*x*y-s1*z, t*y*y+c1, t*y*z+s1*x, 0],
[ t*x*z+s1*y, t*y*z-s1*x, t*z*z+c1, 0],
[ 0, 0, 0, 1]
] )
return R, R1
else:
return None, None
def scaleMatrix( source=None ):
"""Convert a VRML scale to scale matrices
Returns (S, S') (S and the inverse of S), with both
being 4x4 transformation matrices.
or
None,None if x == y == z == 1.0
x,y,z -- scale vector
"""
if source is None:
return None,None
else:
(x,y,z) = source[:3]
if x == y == z == 1.0:
return None, None
S = array( [ [x,0,0,0], [0,y,0,0], [0,0,z,0], [0,0,0,1] ], 'f' )
S1 = array( [
[1./(x or VERY_SMALL),0,0,0],
[0,1./(y or VERY_SMALL),0,0],
[0,0,1./(z or VERY_SMALL),0],
[0,0,0,1] ], 'f'
)
return S, S1
def transMatrix( source=None ):
"""Convert a VRML translation to translation matrices
Returns (T, T') (T and the inverse of T), with both
being 4x4 transformation matrices.
or
None,None if x == y == z == 0.0
x,y,z -- scale vector
"""
if source is None:
return None,None
else:
(x,y,z) = source[:3]
if x == y == z == 0.0:
return None, None
T = array( [ [1,0,0,0], [0,1,0,0], [0,0,1,0], [x,y,z,1] ], 'f' )
T1 = array( [ [1,0,0,0], [0,1,0,0], [0,0,1,0], [-x,-y,-z,1] ], 'f' )
return T, T1
def perspectiveMatrix( fovy, aspect, zNear, zFar, inverse=False ):
"""Create a perspective matrix from given parameters
        Note that this is
<filename>leavitt/sampler.py
#!/usr/bin/env python
"""SAMPLER.PY - Variable star sampler
"""
from __future__ import print_function
__authors__ = '<NAME> <<EMAIL>>'
__version__ = '20220320' # yyyymmdd
import time
import numpy as np
from dlnpyutils import utils as dln
from astropy.table import Table
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy import stats
import emcee
import corner
def solveone(data,template,ampratios,bandindex,period,offset,totwtdict,totwtydict):
ndata = len(data)
# Calculate phase for each data point
phase = (data['jd']/period + offset) % 1
# Calculate template values for this set of period and phase
tmpl = np.interp(phase,template['phase'],template['mag'])
# -- Find best fitting values for linear parameters ---
# Calculate amplitude
# term1 = Sum of XY
# term2 = Sum of X * Y / W
# term3 = Sum of X^2
# term4 = Sum of X * X / W
# amplitude = (term1 - term2)/(term3 - term4)
term1 = 0.0
term2 = 0.0
term3 = 0.0
term4 = 0.0
totwtxdict = {}
for b in bandindex.keys():
ind = bandindex[b]
totwtx1 = np.sum(data['wt'][ind] * tmpl[ind]*ampratios[b])
totwtxdict[b] = totwtx1
totwtx2 = np.sum(data['wt'][ind] * (tmpl[ind]*ampratios[b])**2)
totwtxy = np.sum(data['wt'][ind] * tmpl[ind]*ampratios[b] * data['mag'][ind])
term1 += totwtxy
term2 += totwtx1 * totwtydict[b] / totwtdict[b]
term3 += totwtx2
term4 += totwtx1**2 / totwtdict[b]
amplitude = (term1-term2)/(term3-term4)
# Calculate best mean magnitudes
# mean mag = (Y - amplitude * X)/W
meanmag = {}
for b in bandindex.keys():
meanmag1 = (totwtydict[b] - amplitude * totwtxdict[b])/totwtdict[b]
meanmag[b] = meanmag1
# Calculate likelihood/chisq
model = np.zeros(ndata,float)
resid = np.zeros(ndata,float)
wtresid = np.zeros(ndata,float)
for b in bandindex.keys():
ind = bandindex[b]
model1 = tmpl[ind]*ampratios[b]*amplitude+meanmag[b]
model[ind] = model1
resid[ind] = data['mag'][ind]-model1
wtresid[ind] = resid[ind]**2 * data['wt'][ind]
lnlkhood = -0.5*np.sum(wtresid + np.log(2*np.pi*data['err']**2))
return amplitude,meanmag,model,lnlkhood
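# Note (added commentary): the closed-form `amplitude` above comes from
# minimizing the weighted chi-square jointly over the amplitude and the
# per-band mean magnitudes -- eliminating each band's mean magnitude as
# meanmag_b = (Y_b - amplitude * X_b) / W_b and then solving
# d(chi2)/d(amplitude) = 0 gives amplitude = (term1 - term2)/(term3 - term4).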
def log_likelihood_variable(theta,x,y,err,data=None,template=None,ampratios=None,bandindex=None,totwtdict=None,totwtydict=None,**kwargs):
try:
dum = data['wt']
except:
data = Table(data)
data['wt'] = 1/data['err']**2
if bandindex is None:
# Get band index
uband = np.unique(data['band'])
nband = len(uband)
bandindex = {}
for i,b in enumerate(uband):
ind, = np.where(data['band']==b)
bandindex[b] = ind
if totwtdict is None or totwtydict is None:
# Pre-calculate some terms that are constant
totwtdict = {}
totwtydict = {}
for b in uband:
ind = bandindex[b]
totwtdict[b] = np.sum(data['wt'][ind])
totwtydict[b] = np.sum(data['wt'][ind] * data['mag'][ind])
period, offset = theta
ndata = len(data)
# Calculate phase for each data point
if period.size > 1:
# Get phase and template points
phase = (data['jd'].reshape(-1,1)/period.reshape(1,-1) + offset.reshape(1,-1)) % 1
tmpl = np.interp(phase.ravel(),template['phase'],template['mag'])
tmpl = tmpl.reshape(ndata,period.size)
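        # (added comment) phase/tmpl are (ndata, nperiods) arrays here: each
        # column holds the phases / template values for one trial period, so
        # the band sums below reduce over axis=0 to give per-period quantities.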
else:
phase = (data['jd']/period + offset) % 1
# Calculate template values for this set of period and phase
tmpl = np.interp(phase,template['phase'],template['mag'])
# -- Find best fitting values for linear parameters ---
# Calculate amplitude
# term1 = Sum of XY
# term2 = Sum of X * Y / W
# term3 = Sum of X^2
# term4 = Sum of X * X / W
# amplitude = (term1 - term2)/(term3 - term4)
term1 = 0.0
term2 = 0.0
term3 = 0.0
term4 = 0.0
totwtxdict = {}
for b in bandindex.keys():
ind = bandindex[b]
if period.size > 1:
totwtx1 = np.sum(data['wt'][ind].reshape(-1,1) * tmpl[ind,:]*ampratios[b],axis=0)
totwtx2 = np.sum(data['wt'][ind].reshape(-1,1) * (tmpl[ind,:]*ampratios[b])**2,axis=0)
totwtxy = np.sum(data['wt'][ind].reshape(-1,1) * tmpl[ind,:]*ampratios[b] * data['mag'][ind].reshape(-1,1),axis=0)
else:
totwtx1 = np.sum(data['wt'][ind] * tmpl[ind]*ampratios[b])
totwtx2 = np.sum(data['wt'][ind] * (tmpl[ind]*ampratios[b])**2)
totwtxy = np.sum(data['wt'][ind] * tmpl[ind]*ampratios[b] * data['mag'][ind])
totwtxdict[b] = totwtx1
term1 += totwtxy
term2 += totwtx1 * totwtydict[b] / totwtdict[b]
term3 += totwtx2
term4 += totwtx1**2 / totwtdict[b]
amplitude = (term1-term2)/(term3-term4)
# Calculate best mean magnitudes
# mean mag = (Y - amplitude * X)/W
meanmag = {}
for b in bandindex.keys():
meanmag1 = (totwtydict[b] - amplitude * totwtxdict[b])/totwtdict[b]
meanmag[b] = meanmag1
# Calculate likelihood/chisq
if period.size > 1:
model = np.zeros((ndata,period.size),float)
resid = np.zeros((ndata,period.size),float)
wtresid = np.zeros((ndata,period.size),float)
        for b in bandindex.keys():
ind = bandindex[b]
model1 = tmpl[ind,:]*ampratios[b]*amplitude+meanmag[b]
model[ind,:] = model1
resid[ind,:] = data['mag'][ind].reshape(-1,1)-model1
wtresid[ind,:] = resid[ind,:]**2 * data['wt'][ind].reshape(-1,1)
lnlikelihood = -0.5*np.sum(wtresid,axis=0)
lnlikelihood += -0.5*np.sum(np.log(2*np.pi*data['err']**2))
else:
model = np.zeros(ndata,float)
resid = np.zeros(ndata,float)
wtresid = np.zeros(ndata,float)
for b in bandindex.keys():
ind = bandindex[b]
model1 = tmpl[ind]*ampratios[b]*amplitude+meanmag[b]
model[ind] = model1
resid[ind] = data['mag'][ind]-model1
wtresid[ind] = resid[ind]**2 * data['wt'][ind]
lnlikelihood = -0.5*np.sum(wtresid + np.log(2*np.pi*data['err']**2))
return lnlikelihood
def model_variable(phase,**kwargs):
""" Generate variable star template model using phase."""
template = kwargs['template']
tmpl = np.interp(phase.ravel(),template['phase'],template['mag'])
if phase.ndim > 1:
tmpl = tmpl.reshape(phase.shape)
return tmpl
#def log_likelihood(theta, x, y, yerr):
# m, b, log_f = theta
# model = m * x + b
# sigma2 = yerr ** 2 + model ** 2 * np.exp(2 * log_f)
# return -0.5 * np.sum((y - model) ** 2 / sigma2 + np.log(sigma2))
def log_prior_variable(theta,prange=None):
period = theta[0]
pmin = np.min(period)
pmax = np.max(period)
lnprior = np.log(1/(1.0*(np.log10(pmax)-np.log10(pmin))))
return lnprior
def log_probability_variable(theta, x, y, yerr, *args, **kwargs):
lp = log_prior_variable(theta)
#if not np.isfinite(lp):
# return -np.inf
return lp + log_likelihood_variable(theta, x, y, yerr, *args, **kwargs)
class VariableSampler:
"""
    Class for doing sampling of variable star lightcurve data.
Parameters
----------
catalog : table
Catalog of data points, just have mag, err, jd, band
template : table
Template information.
ampratios : dict, optional
Amplitude ratios. Keys should be the unique band names
        and values should be the amplitude ratios.
If this is not input, then a ratio of 1.0 is used.
minerror : float, optional
Minimum error to use. Default is 0.02.
"""
def __init__(self,catalog,template,ampratios=None,minerror=0.02):
# Create the sampling for Period (pmin to pmax) and phase offset (0-1)
self._catalog = catalog
self._template = template
# Internal catalog
data = Table(catalog).copy()
data['wt'] = 1/np.maximum(data['err'],minerror)**2
# Only keep bands with 2+ observations
uband = np.unique(data['band'])
badind = np.array([],int)
for i,b in enumerate(uband):
ind, = np.where(data['band']==b)
if len(ind)<2:
print('band '+str(b)+' only has '+str(len(ind))+' observations. Not using')
badind = np.hstack((badind,ind))
if len(badind)>0:
data.remove_rows(badind)
ndata = len(data)
self._data = data
self._ndata = ndata
print(str(ndata)+' data points')
        print('time baseline = %.2f' % (np.max(data['jd'])-np.min(data['jd'])))
# Get band index
uband = np.unique(data['band'])
nband = len(uband)
bandindex = {}
for i,b in enumerate(uband):
ind, = np.where(data['band']==b)
bandindex[b] = ind
self._uband = uband
self._nband = nband
self._bandindex = bandindex
print(str(len(uband))+' bands = ',', '.join(np.char.array(uband).astype(str)))
# No amplitude ratios input
if ampratios is None:
ampratios = {}
for b in uband:
ampratios[b] = 1.0
self._ampratios = ampratios
# Pre-calculate some terms that are constant
totwtdict = {}
totwtydict = {}
for b in uband:
ind = bandindex[b]
totwtdict[b] = np.sum(data['wt'][ind])
totwtydict[b] = np.sum(data['wt'][ind] * data['mag'][ind])
self._totwtdict = totwtdict
self._totwtydict = totwtydict
def run(self,pmin=0.1,pmax=None,minsample=128,npoints=200000):
"""
Run the sampler.
Parameters
----------
pmin : float, optional
Minimum period to search in days. Default is 0.1 days.
pmax : float, optional
Maximum period to search in days. Default is 2 x time baseline.
minsample : int, optional
Mininum number of samples to return. Default is 128.
npoints : int, optional
Number of points to use per loop. Default is 200,000.
"""
data = self._data
ndata = self._ndata
template = self._template
uband = self._uband
nband = self._nband
bandindex = self._bandindex
ampratios = self._ampratios
totwtdict = self._totwtdict
totwtydict = self._totwtydict
self._bestperiod = None
self._bestoffset = None
self._bestamplitude = None
self._bestmeanmag = None
self._bestlnprob = None
self._samples = None
self._trials = None
# Period range
if pmax is None:
pmax = (np.max(data['jd'])-np.min(data['jd']))*2
lgminp = np.log10(pmin)
lgmaxp = np.log10(pmax)
print('Pmin = %.3f' % pmin)
print('Pmax = %.3f' % pmax)
self._pmin = pmin
self._pmax = pmax
# Loop until we have enough samples
nsamples = 0
samplelist = []
count = 0
dtt = [('period',float),('offset',float),('amplitude',float),('lnlikelihood',float),('lnprob',float)]
for b in uband:
dtt += [('mag'+str(b),float)]
trials = None
while (nsamples<minsample):
# Uniformly sample from log(pmin) to log(pmax)
period = np.random.rand(npoints)*(lgmaxp-lgminp)+lgminp
period = 10**period
# Uniformly sample from 0 to 1
offset = np.random.rand(npoints)
# Get phase and template points
phase = (data['jd'].reshape(-1,1)/period.reshape(1,-1) + offset.reshape(1,-1)) % 1
tmpl = np.interp(phase.ravel(),template['phase'],template['mag'])
tmpl = tmpl.reshape(ndata,npoints)
# -- Find best fitting values for linear parameters ---
# Calculate amplitude
# term1 = Sum of XY
            # term2 = Sum of X
from clang.cindex import *
import vim
import time
import re
import threading
def initClangComplete(clang_complete_flags):
global index
index = Index.create()
global translationUnits
translationUnits = dict()
global complete_flags
complete_flags = int(clang_complete_flags)
# Get a tuple (fileName, fileContent) for the file opened in the current
# vim buffer. The fileContent contains the unsaved buffer content.
def getCurrentFile():
file = "\n".join(vim.eval("getline(1, '$')"))
return (vim.current.buffer.name, file)
def getCurrentTranslationUnit(args, currentFile, fileName, update = False):
if fileName in translationUnits:
tu = translationUnits[fileName]
if update:
if debug:
start = time.time()
tu.reparse([currentFile])
if debug:
elapsed = (time.time() - start)
print "LibClang - Reparsing: %.3f" % elapsed
return tu
if debug:
start = time.time()
flags = TranslationUnit.PrecompiledPreamble | TranslationUnit.CXXPrecompiledPreamble # | TranslationUnit.CacheCompletionResults
tu = index.parse(fileName, args, [currentFile], flags)
if debug:
elapsed = (time.time() - start)
print "LibClang - First parse: %.3f" % elapsed
if tu == None:
print "Cannot parse this source file. The following arguments " \
+ "are used for clang: " + " ".join(args)
return None
translationUnits[fileName] = tu
# Reparse to initialize the PCH cache even for auto completion
# This should be done by index.parse(), however it is not.
# So we need to reparse ourselves.
if debug:
start = time.time()
tu.reparse([currentFile])
if debug:
elapsed = (time.time() - start)
print "LibClang - First reparse (generate PCH cache): %.3f" % elapsed
return tu
def splitOptions(options):
optsList = []
opt = ""
quoted = False
for char in options:
if char == ' ' and not quoted:
if opt != "":
optsList += [opt]
opt = ""
continue
elif char == '"':
quoted = not quoted
opt += char
if opt != "":
optsList += [opt]
return optsList
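# Hedged example of the quote-aware splitting implemented above (added for
# clarity; the option string is illustrative):
#   splitOptions('-I "my dir" -DFOO')  ->  ['-I', '"my dir"', '-DFOO']
# i.e. spaces inside double quotes do not split, and the quotes are kept.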
def getQuickFix(diagnostic):
# Some diagnostics have no file, e.g. "too many errors emitted, stopping now"
if diagnostic.location.file:
filename = diagnostic.location.file.name
else:
filename = ""
if diagnostic.severity == diagnostic.Ignored:
type = 'I'
elif diagnostic.severity == diagnostic.Note:
type = 'I'
elif diagnostic.severity == diagnostic.Warning:
type = 'W'
elif diagnostic.severity == diagnostic.Error:
type = 'E'
elif diagnostic.severity == diagnostic.Fatal:
type = 'E'
else:
return None
return dict({ 'bufnr' : int(vim.eval("bufnr('" + filename + "', 1)")),
'lnum' : diagnostic.location.line,
'col' : diagnostic.location.column,
'text' : diagnostic.spelling,
'type' : type})
def getQuickFixList(tu):
return filter (None, map (getQuickFix, tu.diagnostics))
def highlightRange(range, hlGroup):
pattern = '/\%' + str(range.start.line) + 'l' + '\%' \
+ str(range.start.column) + 'c' + '.*' \
+ '\%' + str(range.end.column) + 'c/'
command = "exe 'syntax match' . ' " + hlGroup + ' ' + pattern + "'"
vim.command(command)
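# Hedged example of the command highlightRange() generates (added for
# clarity): for a range spanning line 3, columns 5-9, the pattern becomes
#   /\%3l\%5c.*\%9c/
# i.e. match within line 3 from column 5 up to column 9.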
def highlightDiagnostic(diagnostic):
if diagnostic.severity == diagnostic.Warning:
hlGroup = 'SpellLocal'
elif diagnostic.severity == diagnostic.Error:
hlGroup = 'SpellBad'
else:
return
pattern = '/\%' + str(diagnostic.location.line) + 'l\%' \
+ str(diagnostic.location.column) + 'c./'
command = "exe 'syntax match' . ' " + hlGroup + ' ' + pattern + "'"
vim.command(command)
    # Use this weird kind of iterator as the python clang libraries
    # have a bug in the range iterator that prevents us from using:
#
# | for range in diagnostic.ranges
#
for i in range(len(diagnostic.ranges)):
highlightRange(diagnostic.ranges[i], hlGroup)
def highlightDiagnostics(tu):
map (highlightDiagnostic, tu.diagnostics)
def highlightCurrentDiagnostics():
if vim.current.buffer.name in translationUnits:
highlightDiagnostics(translationUnits[vim.current.buffer.name])
def getCurrentQuickFixList():
if vim.current.buffer.name in translationUnits:
return getQuickFixList(translationUnits[vim.current.buffer.name])
return []
def updateCurrentDiagnostics():
global debug
debug = int(vim.eval("g:clang_debug")) == 1
userOptionsGlobal = splitOptions(vim.eval("g:clang_user_options"))
userOptionsLocal = splitOptions(vim.eval("b:clang_user_options"))
args = userOptionsGlobal + userOptionsLocal
getCurrentTranslationUnit(args, getCurrentFile(),
vim.current.buffer.name, update = True)
def getCurrentCompletionResults(line, column, args, currentFile, fileName):
tu = getCurrentTranslationUnit(args, currentFile, fileName)
if debug:
start = time.time()
cr = tu.codeComplete(fileName, line, column, [currentFile],
complete_flags)
if debug:
elapsed = (time.time() - start)
print "LibClang - Code completion time (library): %.3f" % elapsed
return cr
def formatResult(result):
completion = dict()
returnValue = None
abbr = ""
chunks = filter(lambda x: not x.isKindInformative(), result.string)
args_pos = []
cur_pos = 0
word = ""
for chunk in chunks:
if chunk.isKindResultType():
returnValue = chunk
continue
chunk_spelling = chunk.spelling
if chunk.isKindTypedText():
abbr = chunk_spelling
chunk_len = len(chunk_spelling)
if chunk.isKindPlaceHolder():
args_pos += [[ cur_pos, cur_pos + chunk_len ]]
cur_pos += chunk_len
word += chunk_spelling
menu = word
if returnValue:
menu = returnValue.spelling + " " + menu
completion['word'] = word
completion['abbr'] = abbr
completion['menu'] = menu
completion['info'] = word
completion['args_pos'] = args_pos
completion['dup'] = 0
# Replace the number that represents a specific kind with a better
# textual representation.
completion['kind'] = kinds[result.cursorKind]
return completion
class CompleteThread(threading.Thread):
lock = threading.Lock()
def __init__(self, line, column, currentFile, fileName):
threading.Thread.__init__(self)
self.line = line
self.column = column
self.currentFile = currentFile
self.fileName = fileName
self.result = None
userOptionsGlobal = splitOptions(vim.eval("g:clang_user_options"))
userOptionsLocal = splitOptions(vim.eval("b:clang_user_options"))
self.args = userOptionsGlobal + userOptionsLocal
def run(self):
try:
CompleteThread.lock.acquire()
if self.line == -1:
# Warm up the caches. For this it is sufficient to get the current
# translation unit. No need to retrieve completion results.
# This short pause is necessary to allow vim to initialize itself.
# Otherwise we would get: E293: block was not locked
# The user does not see any delay, as we just pause a background thread.
time.sleep(0.1)
getCurrentTranslationUnit(self.args, self.currentFile, self.fileName)
else:
self.result = getCurrentCompletionResults(self.line, self.column,
self.args, self.currentFile, self.fileName)
except Exception:
pass
CompleteThread.lock.release()
def WarmupCache():
global debug
debug = int(vim.eval("g:clang_debug")) == 1
t = CompleteThread(-1, -1, getCurrentFile(), vim.current.buffer.name)
t.start()
def getCurrentCompletions(base):
global debug
debug = int(vim.eval("g:clang_debug")) == 1
sorting = vim.eval("g:clang_sort_algo")
line = int(vim.eval("line('.')"))
column = int(vim.eval("b:col"))
if debug:
start = time.time()
t = CompleteThread(line, column, getCurrentFile(), vim.current.buffer.name)
t.start()
while t.isAlive():
t.join(0.01)
cancel = int(vim.eval('complete_check()'))
if cancel != 0:
return []
cr = t.result
if cr is None:
return []
results = cr.results
if base != "":
regexp = re.compile("^" + base)
results = filter(lambda x: regexp.match(getAbbr(x.string)), results)
if sorting == 'priority':
getPriority = lambda x: x.string.priority
results = sorted(results, None, getPriority)
if sorting == 'alpha':
getAbbrevation = lambda x: getAbbr(x.string).lower()
results = sorted(results, None, getAbbrevation)
result = map(formatResult, results)
if debug:
elapsed = (time.time() - start)
print "LibClang - Code completion time (library + formatting): %.3f" \
% elapsed
time.sleep(1)
return result
def getAbbr(strings):
tmplst = filter(lambda x: x.isKindTypedText(), strings)
if len(tmplst) == 0:
return ""
else:
return tmplst[0].spelling
kinds = dict({ \
# Declarations \
1 : 't', # CXCursor_UnexposedDecl (A declaration whose specific kind is not \
# exposed via this interface) \
2 : 't', # CXCursor_StructDecl (A C or C++ struct) \
3 : 't', # CXCursor_UnionDecl (A C or C++ union) \
4 : 't', # CXCursor_ClassDecl (A C++ class) \
5 : 't', # CXCursor_EnumDecl (An enumeration) \
6 : 'm', # CXCursor_FieldDecl (A field (in C) or non-static data member \
# (in C++) in a struct, union, or C++ class) \
7 : 'e', # CXCursor_EnumConstantDecl (An enumerator constant) \
8 : 'f', # CXCursor_FunctionDecl (A function) \
9 : 'v', # CXCursor_VarDecl (A variable) \
10 : 'a', # CXCursor_ParmDecl (A function or method parameter) \
11 : '11', # CXCursor_ObjCInterfaceDecl (An Objective-C @interface) \
12 : '12', # CXCursor_ObjCCategoryDecl (An Objective-C @interface for a \
# category) \
13 : '13', # CXCursor_ObjCProtocolDecl (An Objective-C @protocol declaration) \
14 : '14', # CXCursor_ObjCPropertyDecl (An Objective-C @property declaration) \
15 : '15', # CXCursor_ObjCIvarDecl (An Objective-C instance variable) \
16 : '16', # CXCursor_ObjCInstanceMethodDecl (An Objective-C instance method) \
17 : '17', # CXCursor_ObjCClassMethodDecl (An Objective-C class method) \
18 : '18', # CXCursor_ObjCImplementationDecl (An Objective-C @implementation) \
19 : '19', # CXCursor_ObjCCategoryImplDecl (An Objective-C @implementation \
# for a category) \
20 : 't', # CXCursor_TypedefDecl (A typedef) \
21 : 'f', # CXCursor_CXXMethod (A C++ class method) \
22 : 'n', # CXCursor_Namespace (A C++ namespace) \
23 : '23', # CXCursor_LinkageSpec (A linkage specification, e.g. 'extern "C"') \
24 : '+', # CXCursor_Constructor (A C++ constructor) \
25 : '~', # CXCursor_Destructor (A C++ destructor) \
26 : '26', # CXCursor_ConversionFunction (A C++ conversion function) \
27 : 'a', # CXCursor_TemplateTypeParameter (A C++ template type parameter) \
28 : 'a', # CXCursor_NonTypeTemplateParameter (A C++ non-type template \
# parameter) \
29 : 'a', # CXCursor_TemplateTemplateParameter (A C++ template template \
# parameter) \
30 : 'f', # CXCursor_FunctionTemplate (A C++ function template) \
31 : 'p', # CXCursor_ClassTemplate (A C++ class template) \
32 : '32', # CXCursor_ClassTemplatePartialSpecialization
= [t.to_dict() for t in sd['thick']]
sd['thin'] = [t.to_dict() for t in sd['thin']]
return sd
def from_dict(self, sd):
""" Load values from a sarcomere dict. Values read in correspond to
the current output documented in to_dict.
"""
# Warn of possible version mismatches
read, current = sd['version'], self.version
if read != current:
import warnings
warnings.warn("Versioning mismatch, reading %0.1f into %0.1f."
%(read, current))
# Get filaments in right orientations
self.__init__(
lattice_spacing=sd['_initial_lattice_spacing'],
z_line=sd['_initial_z_line'],
poisson=sd['poisson_ratio'],
actin_permissiveness=sd['actin_permissiveness'],
timestep_len=sd['timestep_len'],
time_dependence=sd['time_dependence'],
starts=(sd['_thin_starts'], sd['_thick_starts'])
)
# Local keys
self.current_timestep = sd['current_timestep']
self._z_line = sd['_z_line']
self._lattice_spacing = sd['_lattice_spacing']
self.hiding_line = sd['hiding_line']
if 'last_transitions' in sd.keys():
self.last_transitions = sd['last_transitions']
# Sub-structure keys
for data, thick in zip(sd['thick'], self.thick):
thick.from_dict(data)
for data, thin in zip(sd['thin'], self.thin):
thin.from_dict(data)
def run(self, time_steps=100, callback=None, bar=True):
"""Run the model for the specified number of timesteps
Parameters:
time_steps: number of time steps to run the model for (100)
callback: function to be executed after each time step to
collect data. The callback function takes the sarcomere
in its current state as its only argument. (Defaults to
the axial force at the M-line if not specified.)
bar: progress bar control,False means don't display, True
means give us the basic progress reports, if a function
is passed, it will be called as f(completed_steps,
total_steps, sec_left, sec_passed, process_name).
(Defaults to True)
Returns:
output: the results of the callback after each timestep
"""
# Callback defaults to the axial force at the M-line
if callback is None:
callback = lambda sarc: sarc.axialforce()
# Create a place to store callback information and note the time
output = []
tic = time.time()
# Run through each timestep
for i in range(time_steps):
self.timestep()
output.append(callback(self))
# Update us on how it went
toc = int((time.time()-tic) / (i+1) * (time_steps-i-1))
proc_name = mp.current_process().name
if bar == True:
sys.stdout.write("\n" + proc_name +
" finished timestep %i of %i, %ih%im%is left"\
%(i+1, time_steps, toc/60/60, toc/60%60, toc%60))
sys.stdout.flush()
elif type(bar) == type(lambda x:x):
bar(i, time_steps, toc, time.time()-tic, proc_name)
return output
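    # Hedged usage sketch (added commentary; `hs` stands for however a
    # half-sarcomere instance is constructed elsewhere in this package):
    #
    #     forces = hs.run(time_steps=10,
    #                     callback=lambda sarc: sarc.axialforce(),
    #                     bar=False)
    #     # `forces` is a list with one callback result per timestep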
def timestep(self, current=None):
"""Move the model one step forward in time, allowing the
myosin heads a chance to bind and then balancing forces
"""
# Record our passage through time
if current is not None:
self.current_timestep = current
else:
self.current_timestep += 1
# Update bound states
self.last_transitions = [thick.transition() for thick in self.thick]
# Settle forces
self.settle()
@property
def current_timestep(self):
"""Return the current timestep"""
return self._current_timestep
@current_timestep.setter
def current_timestep(self, new_timestep):
"""Set the current timestep"""
# Update boundary conditions
self.update_hiding_line()
td = self.time_dependence
i = new_timestep
if td is not None:
if 'lattice_spacing' in td:
self.lattice_spacing = td['lattice_spacing'][i]
if 'z_line' in td:
self.z_line = td['z_line'][i]
if 'actin_permissiveness' in td:
self.actin_permissiveness = td['actin_permissiveness'][i]
self._current_timestep = i
return
@property
def actin_permissiveness(self):
"""How active & open to binding, 0 to 1, are binding sites?"""
return [thin.permissiveness for thin in self.thin]
@actin_permissiveness.setter
def actin_permissiveness(self, new_permissiveness):
"""Assign all binding sites the new permissiveness, 0 to 1"""
for thin in self.thin:
thin.permissiveness = new_permissiveness
@property
def z_line(self):
"""Axial location of the z-line, length of the half sarcomere"""
return self._z_line
@z_line.setter
def z_line(self, new_z_line):
"""Set a new z-line, updating the lattice spacing at the same time"""
self._z_line = new_z_line
self.update_ls_from_poisson_ratio()
@property
def lattice_spacing(self):
"""Return the current lattice spacing"""
return self._lattice_spacing
@lattice_spacing.setter
def lattice_spacing(self, new_lattice_spacing):
"""Assign a new lattice spacing"""
self._lattice_spacing = new_lattice_spacing
@staticmethod
def ls_to_d10(face_dist):
"""Convert face-to-face lattice spacing to d10 spacing.
Governing equations:
ls = ftf, the face to face distance
filcenter_dist = face_dist + .5 * dia_actin + .5 * dia_myosin
d10 = 1.5 * filcenter_dist
Values:
dia_actin: 9nm [1]_
dia_myosin: 16nm [2]_
example d10: 37nm for cardiac muscle at 2.2um [3]_
References:
.. [1] Egelman 1985, The structure of F-actin.
J Muscle Res Cell Motil, Pg 130, values from 9 to 10 nm
.. [2] Woodhead et al. 2005, Atomic model of a myosin filament in
the relaxed state. Nature, Pg 1195, in tarantula filament
.. [3] Millman 1998, The filament lattice of striated muscle.
Physiol Rev, Pg 375
Note: Arguably this should be moved to a support class as it really
isn't something the half-sarcomere knows about or does. I'm leaving it
here as a convenience for now.
Parameters:
face_dist: face to face lattice spacing in nm
Returns:
d10: d10 spacing in nm
"""
filcenter_dist = face_dist + 0.5 * 9 + 0.5 * 16
d10 = 1.5* filcenter_dist
return d10
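    # Hedged worked example of the conversion above (numbers illustrative):
    #   face_dist = 12.17 nm
    #   filcenter_dist = 12.17 + 0.5*9 + 0.5*16 = 24.67 nm
    #   d10 = 1.5 * 24.67 ~= 37 nm, matching the cardiac d10 cited in [3]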
@staticmethod
def d10_to_ls(d10):
"""Convert d10 spacing to face-to-face lattice spacing
Governing equations: See ls_to_d10
Values: See ls_to_d10
Parameters:
d10: d10 spacing in nm
Returns:
face_dist: face to face lattice spacing in nm
"""
filcenter_dist = d10 * 2/3
face_dist = filcenter_dist - 0.5 * 9 - 0.5 * 16
return face_dist
def axialforce(self):
"""Sum of each thick filament's axial force on the M-line """
return sum([thick.effective_axial_force() for thick in self.thick])
def radialtension(self):
"""The sum of the thick filaments' radial tensions"""
return sum([t.radialtension() for t in self.thick])
def radialforce(self):
"""The sum of the thick filaments' radial forces, as a (y,z) vector"""
return np.sum([t.radial_force_of_filament() for t in self.thick], 0)
def _single_settle(self, factor=0.95):
"""Settle down now, just a little bit"""
thick = [thick.settle(factor) for thick in self.thick]
thin = [thin.settle(factor) for thin in self.thin]
return np.max((np.max(np.abs(thick)), np.max(np.abs(thin))))
def settle(self):
"""Jiggle those locations around until the residual forces are low
We choose the convergence limit so that 95% of thermal forcing events
result in a deformation that produces more axial force than the
convergence value, 0.12pN.
"""
converge_limit=0.12 # see doc string
converge = self._single_settle()
while converge>converge_limit:
converge = self._single_settle()
def _get_residual(self):
"""Get the residual force at every point in the half-sarcomere"""
thick_f = np.hstack([t.axialforce() for t in self.thick])
thin_f = np.hstack([t.axialforce() for t in self.thin])
mash = np.hstack([thick_f, thin_f])
return mash
def get_frac_in_states(self):
"""Calculate the fraction of cross-bridges in each state"""
nested = [t.get_states() for t in self.thick]
xb_states = [xb for fil in nested for face in fil for xb in face]
num_in_state = [xb_states.count(state) for state in range(3)]
frac_in_state = [n/float(len(xb_states)) for n in num_in_state]
return frac_in_state
def update_ls_from_poisson_ratio(self):
"""Update the lattice spacing consistant with the poisson ratio,
initial lattice spacing, current z-line, and initial z-line
Governing equations
===================
Poisson ratio := ν
ν = dε_r/dε_z = Δr/r_0 / Δz/z_0
From Mathematica derivation
γ := center to center distance between filaments
γ(ν, γ_0, z_0, Δz) = γ_0 (z_0/(z_0+Δz))^ν
And since we want the face-to-face distance, aka ls, we convert with:
γ = ls + 0.5 (dia_actin + dia_myosin)
and
γ_0 = ls_0 + 0.5 (dia_actin + dia_myosin)
and the simplifying
β = 0.5 (dia_actin + dia_myosin)
to get
ls = (ls_0 + β) (z_0/(z_0 + Δz))^ν - β
which is what we implement below.
Note: this is a novel derivation and so there is no current
citation to be invoked.
Values: See ls_to_d10
Parameters:
None
Returns:
None
"""
beta = 0.5 * (9 + 16)
ls_0 = self._initial_lattice_spacing
z_0 = self._initial_z_line
nu = self.poisson_ratio
dz = self.z_line - z_0
ls = (ls_0 + beta) * (z_0/(z_0 + dz))**nu - beta
self.lattice_spacing = ls
return
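    # Hedged sanity checks on the relation above (added commentary, not in
    # the original source):
    #   nu = 0  ->  ls = ls_0 for any dz   (no radial response)
    #   dz = 0  ->  ls = ls_0              (unchanged length, unchanged spacing)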
def update_hiding_line(self):
"""Update the line determining which actin sites are unavailable"""
farthest_actin = min([min(thin.axial) for thin in self.thin])
self.hiding_line = -farthest_actin
def resolve_address(self, address):
"""Give back a link to the object specified in the address
Addresses are formatted as the object type (string) followed by a list
of the indices that the object occupies in each level of organization.
Valid string values are:
thin_fil
thin_face
bs
thick_fil
crown
thick_face
xb
and an example valid address would be ('bs', 1, 14) for the binding
site at index 14 on the thin filament at index 1.
"""
if address[0] == 'thin_fil':
return self.thin[address[1]]
elif address[0] in ['thin_face', 'bs']:
return self.thin[address[1]].resolve_address(address)
elif address[0] == 'thick_fil':
return self.thick[address[1]]
elif address[0] in ['crown', 'thick_face', 'xb']:
return self.thick[address[1]].resolve_address(address)
import warnings
warnings.warn("Unresolvable address: %s"%str(address))
def display_axial_force_end(self):
""" Show an end view with axial forces | |
None
self.fullbcount = None
self.__chain_b()
# For each element x in b, set b2j[x] to a list of the indices in
# b where x appears; the indices are in increasing order; note that
# the number of times x appears in b is len(b2j[x]) ...
# when self.isjunk is defined, junk elements don't show up in this
# map at all, which stops the central find_longest_match method
# from starting any matching block at a junk element ...
# also creates the fast isbjunk function ...
# note that this is only called when b changes; so for cross-product
# kinds of matches, it's best to call set_seq2 once, then set_seq1
# repeatedly
def __chain_b(self):
# Because isjunk is a user-defined (not C) function, and we test
# for junk a LOT, it's important to minimize the number of calls.
# Before the tricks described here, __chain_b was by far the most
# time-consuming routine in the whole module! If anyone sees
# <NAME>, thank him again for profile.py -- I never would
# have guessed that.
# The first trick is to build b2j ignoring the possibility
# of junk. I.e., we don't call isjunk at all yet. Throwing
# out the junk later is much cheaper than building b2j "right"
# from the start.
b = self.b
self.b2j = b2j = {}
self.b2jhas = b2jhas = b2j.has_key
for i in xrange(len(b)):
elt = b[i]
if b2jhas(elt):
b2j[elt].append(i)
else:
b2j[elt] = [i]
# Now b2j.keys() contains elements uniquely, and especially when
# the sequence is a string, that's usually a good deal smaller
# than len(string). The difference is the number of isjunk calls
# saved.
isjunk, junkdict = self.isjunk, {}
if isjunk:
for elt in b2j.keys():
if isjunk(elt):
junkdict[elt] = 1 # value irrelevant; it's a set
del b2j[elt]
# Now for x in b, isjunk(x) == junkdict.has_key(x), but the
# latter is much faster. Note too that while there may be a
# lot of junk in the sequence, the number of *unique* junk
# elements is probably small. So the memory burden of keeping
# this dict alive is likely trivial compared to the size of b2j.
self.isbjunk = junkdict.has_key
def find_longest_match(self, alo, ahi, blo, bhi):
"""Find longest matching block in a[alo:ahi] and b[blo:bhi].
If isjunk is not defined:
Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
alo <= i <= i+k <= ahi
blo <= j <= j+k <= bhi
and for all (i',j',k') meeting those conditions,
k >= k'
i <= i'
and if i == i', j <= j'
In other words, of all maximal matching blocks, return one that
starts earliest in a, and of all those maximal matching blocks that
start earliest in a, return the one that starts earliest in b.
>>> s = SequenceMatcher(None, " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
(0, 4, 5)
If isjunk is defined, first the longest matching block is
determined as above, but with the additional restriction that no
junk element appears in the block. Then that block is extended as
far as possible by matching (only) junk elements on both sides. So
the resulting block never matches on junk except as identical junk
happens to be adjacent to an "interesting" match.
Here's the same example as before, but considering blanks to be
junk. That prevents " abcd" from matching the " abcd" at the tail
end of the second sequence directly. Instead only the "abcd" can
match, and matches the leftmost "abcd" in the second sequence:
>>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
(1, 0, 4)
If no blocks match, return (alo, blo, 0).
>>> s = SequenceMatcher(None, "ab", "c")
>>> s.find_longest_match(0, 2, 0, 1)
(0, 0, 0)
"""
# CAUTION: stripping common prefix or suffix would be incorrect.
# E.g.,
# ab
# acab
# Longest matching block is "ab", but if common prefix is
# stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
# strip, so ends up claiming that ab is changed to acab by
# inserting "ca" in the middle. That's minimal but unintuitive:
# "it's obvious" that someone inserted "ac" at the front.
# Windiff ends up at the same place as diff, but by pairing up
# the unique 'b's and then matching the first two 'a's.
a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.isbjunk
besti, bestj, bestsize = alo, blo, 0
# find longest junk-free match
# during an iteration of the loop, j2len[j] = length of longest
# junk-free match ending with a[i-1] and b[j]
j2len = {}
nothing = []
for i in xrange(alo, ahi):
# look at all instances of a[i] in b; note that because
# b2j has no junk keys, the loop is skipped if a[i] is junk
j2lenget = j2len.get
newj2len = {}
for j in b2j.get(a[i], nothing):
# a[i] matches b[j]
if j < blo:
continue
if j >= bhi:
break
k = newj2len[j] = j2lenget(j-1, 0) + 1
if k > bestsize:
besti, bestj, bestsize = i-k+1, j-k+1, k
j2len = newj2len
# Now that we have a wholly interesting match (albeit possibly
# empty!), we may as well suck up the matching junk on each
# side of it too. Can't think of a good reason not to, and it
# saves post-processing the (possibly considerable) expense of
# figuring out what to do with it. In the case of an empty
# interesting match, this is clearly the right thing to do,
# because no other kind of match is possible in the regions.
while besti > alo and bestj > blo and \
isbjunk(b[bestj-1]) and \
a[besti-1] == b[bestj-1]:
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
while besti+bestsize < ahi and bestj+bestsize < bhi and \
isbjunk(b[bestj+bestsize]) and \
a[besti+bestsize] == b[bestj+bestsize]:
bestsize = bestsize + 1
return besti, bestj, bestsize
def get_matching_blocks(self):
"""Return list of triples describing matching subsequences.
Each triple is of the form (i, j, n), and means that
a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
i and in j.
The last triple is a dummy, (len(a), len(b), 0), and is the only
triple with n==0.
>>> s = SequenceMatcher(None, "abxcd", "abcd")
>>> s.get_matching_blocks()
[(0, 0, 2), (3, 2, 2), (5, 4, 0)]
"""
if self.matching_blocks is not None:
return self.matching_blocks
self.matching_blocks = []
la, lb = len(self.a), len(self.b)
self.__helper(0, la, 0, lb, self.matching_blocks)
self.matching_blocks.append( (la, lb, 0) )
return self.matching_blocks
# builds list of matching blocks covering a[alo:ahi] and
# b[blo:bhi], appending them in increasing order to answer
def __helper(self, alo, ahi, blo, bhi, answer):
i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
# a[alo:i] vs b[blo:j] unknown
# a[i:i+k] same as b[j:j+k]
# a[i+k:ahi] vs b[j+k:bhi] unknown
if k:
if alo < i and blo < j:
self.__helper(alo, i, blo, j, answer)
answer.append(x)
if i+k < ahi and j+k < bhi:
self.__helper(i+k, ahi, j+k, bhi, answer)
def get_opcodes(self):
"""Return list of 5-tuples describing how to turn a into b.
Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
tuple preceding it, and likewise for j1 == the previous j2.
The tags are strings, with these meanings:
'replace': a[i1:i2] should be replaced by b[j1:j2]
'delete': a[i1:i2] should be deleted.
Note that j1==j2 in this case.
'insert': b[j1:j2] should be inserted at a[i1:i1].
Note that i1==i2 in this case.
'equal': a[i1:i2] == b[j1:j2]
>>> a = "qabxcd"
>>> b = "abycdf"
>>> s = SequenceMatcher(None, a, b)
>>> for tag, i1, i2, j1, j2 in s.get_opcodes():
... print ("%7s a[%d:%d] | |
subclass = None
superclass = None
def __init__(self, Function_Name=None, Entry_Point=None, Ordinal=None):
self.Function_Name = Function_Name
self.Entry_Point = Entry_Point
self.Ordinal = Ordinal
def factory(*args_, **kwargs_):
if PEExportedFunctionType.subclass:
return PEExportedFunctionType.subclass(*args_, **kwargs_)
else:
return PEExportedFunctionType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Function_Name(self): return self.Function_Name
def set_Function_Name(self, Function_Name): self.Function_Name = Function_Name
def validate_StringObjectPropertyType(self, value):
# Validate type cybox_common.StringObjectPropertyType, a restriction on None.
pass
def get_Entry_Point(self): return self.Entry_Point
def set_Entry_Point(self, Entry_Point): self.Entry_Point = Entry_Point
def validate_HexBinaryObjectPropertyType(self, value):
# Validate type cybox_common.HexBinaryObjectPropertyType, a restriction on None.
pass
def get_Ordinal(self): return self.Ordinal
def set_Ordinal(self, Ordinal): self.Ordinal = Ordinal
def validate_NonNegativeIntegerObjectPropertyType(self, value):
# Validate type cybox_common.NonNegativeIntegerObjectPropertyType, a restriction on None.
pass
def hasContent_(self):
if (
self.Function_Name is not None or
self.Entry_Point is not None or
self.Ordinal is not None
):
return True
else:
return False
def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEExportedFunctionType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEExportedFunctionType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s%s>%s' % (namespace_, name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEExportedFunctionType'):
pass
def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEExportedFunctionType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Function_Name is not None:
self.Function_Name.export(lwrite, level, 'WinExecutableFileObj:', name_='Function_Name', pretty_print=pretty_print)
if self.Entry_Point is not None:
self.Entry_Point.export(lwrite, level, 'WinExecutableFileObj:', name_='Entry_Point', pretty_print=pretty_print)
if self.Ordinal is not None:
self.Ordinal.export(lwrite, level, 'WinExecutableFileObj:', name_='Ordinal', pretty_print=pretty_print)
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Function_Name':
obj_ = cybox_common.StringObjectPropertyType.factory()
obj_.build(child_)
self.set_Function_Name(obj_)
elif nodeName_ == 'Entry_Point':
obj_ = cybox_common.HexBinaryObjectPropertyType.factory()
obj_.build(child_)
self.set_Entry_Point(obj_)
elif nodeName_ == 'Ordinal':
obj_ = cybox_common.NonNegativeIntegerObjectPropertyType.factory()
obj_.build(child_)
self.set_Ordinal(obj_)
# end class PEExportedFunctionType
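# Hedged usage sketch for the generated binding above. It assumes the usual
# generateDS conventions: the simple cybox_common property types accept a
# `valueOf_` keyword, and any writable callable (e.g. sys.stdout.write) works as
# the `lwrite` target. The helper name and the example values are illustrative.
def _example_export_pe_exported_function():
    import sys
    func = PEExportedFunctionType(
        Function_Name=cybox_common.StringObjectPropertyType(valueOf_='CreateFileW'),
        Ordinal=cybox_common.NonNegativeIntegerObjectPropertyType(valueOf_=17),
    )
    if func.hasContent_():
        # Emits <WinExecutableFileObj:Exported_Function>...</...> on stdout.
        func.export(sys.stdout.write, 0, name_='Exported_Function')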
class PEResourceListType(GeneratedsSuper):
"""PEResourceListType specifies a list of resources found in the PE
file."""
subclass = None
superclass = None
def __init__(self, Resource=None):
if Resource is None:
self.Resource = []
else:
self.Resource = Resource
def factory(*args_, **kwargs_):
if PEResourceListType.subclass:
return PEResourceListType.subclass(*args_, **kwargs_)
else:
return PEResourceListType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Resource(self): return self.Resource
def set_Resource(self, Resource): self.Resource = Resource
def add_Resource(self, value): self.Resource.append(value)
def insert_Resource(self, index, value): self.Resource[index] = value
def hasContent_(self):
if (
self.Resource
):
return True
else:
return False
def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEResourceListType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEResourceListType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s%s>%s' % (namespace_, name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEResourceListType'):
pass
def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEResourceListType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Resource_ in self.Resource:
if isinstance(Resource_, PEVersionInfoResourceType):
Resource_.export(lwrite, level, 'WinExecutableFileObj:', name_='VersionInfoResource', pretty_print=pretty_print)
elif isinstance(Resource_, PEResourceType):
Resource_.export(lwrite, level, 'WinExecutableFileObj:', name_='Resource', pretty_print=pretty_print)
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Resource':
obj_ = PEResourceType.factory()
obj_.build(child_)
self.add_Resource(obj_)
elif nodeName_ == 'VersionInfoResource':
obj_ = PEVersionInfoResourceType.factory()
obj_.build(child_)
self.add_Resource(obj_)
# end class PEResourceListType
class PEImportedFunctionType(GeneratedsSuper):
"""PEImportedFunctionType specifies the type describing imported
functions."""
subclass = None
superclass = None
def __init__(self, Function_Name=None, Hint=None, Ordinal=None, Bound=None, Virtual_Address=None):
self.Function_Name = Function_Name
self.Hint = Hint
self.Ordinal = Ordinal
self.Bound = Bound
self.Virtual_Address = Virtual_Address
def factory(*args_, **kwargs_):
if PEImportedFunctionType.subclass:
return PEImportedFunctionType.subclass(*args_, **kwargs_)
else:
return PEImportedFunctionType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Function_Name(self): return self.Function_Name
def set_Function_Name(self, Function_Name): self.Function_Name = Function_Name
def validate_StringObjectPropertyType(self, value):
# Validate type cybox_common.StringObjectPropertyType, a restriction on None.
pass
def get_Hint(self): return self.Hint
def set_Hint(self, Hint): self.Hint = Hint
def validate_HexBinaryObjectPropertyType(self, value):
# Validate type cybox_common.HexBinaryObjectPropertyType, a restriction on None.
pass
def get_Ordinal(self): return self.Ordinal
def set_Ordinal(self, Ordinal): self.Ordinal = Ordinal
def validate_NonNegativeIntegerObjectPropertyType(self, value):
# Validate type cybox_common.NonNegativeIntegerObjectPropertyType, a restriction on None.
pass
def get_Bound(self): return self.Bound
def set_Bound(self, Bound): self.Bound = Bound
def get_Virtual_Address(self): return self.Virtual_Address
def set_Virtual_Address(self, Virtual_Address): self.Virtual_Address = Virtual_Address
def hasContent_(self):
if (
self.Function_Name is not None or
self.Hint is not None or
self.Ordinal is not None or
self.Bound is not None or
self.Virtual_Address is not None
):
return True
else:
return False
def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEImportedFunctionType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEImportedFunctionType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s%s>%s' % (namespace_, name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEImportedFunctionType'):
pass
def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEImportedFunctionType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Function_Name is not None:
self.Function_Name.export(lwrite, level, 'WinExecutableFileObj:', name_='Function_Name', pretty_print=pretty_print)
if self.Hint is not None:
self.Hint.export(lwrite, level, 'WinExecutableFileObj:', name_='Hint', pretty_print=pretty_print)
if self.Ordinal is not None:
self.Ordinal.export(lwrite, level, 'WinExecutableFileObj:', name_='Ordinal', pretty_print=pretty_print)
if self.Bound is not None:
self.Bound.export(lwrite, level, 'WinExecutableFileObj:', name_='Bound', pretty_print=pretty_print)
if self.Virtual_Address is not None:
self.Virtual_Address.export(lwrite, level, 'WinExecutableFileObj:', name_='Virtual_Address', pretty_print=pretty_print)
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Function_Name':
obj_ = cybox_common.StringObjectPropertyType.factory()
obj_.build(child_)
self.set_Function_Name(obj_)
elif nodeName_ == 'Hint':
obj_ = cybox_common.HexBinaryObjectPropertyType.factory()
obj_.build(child_)
self.set_Hint(obj_)
elif nodeName_ == 'Ordinal':
obj_ = cybox_common.NonNegativeIntegerObjectPropertyType.factory()
obj_.build(child_)
self.set_Ordinal(obj_)
elif nodeName_ == 'Bound':
obj_ = cybox_common.HexBinaryObjectPropertyType.factory()
obj_.build(child_)
self.set_Bound(obj_)
elif nodeName_ == 'Virtual_Address':
obj_ = cybox_common.HexBinaryObjectPropertyType.factory()
obj_.build(child_)
self.set_Virtual_Address(obj_)
# end class PEImportedFunctionType
class PEImportListType(GeneratedsSuper):
"""PEImportListType specifies a list of functions in an import data
section."""
subclass = None
superclass = None
def __init__(self, Import=None):
if Import is None:
self.Import = []
else:
self.Import = Import
def factory(*args_, **kwargs_):
if PEImportListType.subclass:
return PEImportListType.subclass(*args_, **kwargs_)
else:
return PEImportListType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Import(self): return self.Import
def set_Import(self, Import): self.Import = Import
def add_Import(self, value): self.Import.append(value)
def insert_Import(self, index, value): self.Import[index] = value
def hasContent_(self):
if (
self.Import
):
return True
else:
return False
def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEImportListType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEImportListType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s%s>%s' % (namespace_, name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEImportListType'):
pass
def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEImportListType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Import_ in self.Import:
Import_.export(lwrite, level, 'WinExecutableFileObj:', name_='Import', pretty_print=pretty_print)
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Import':
obj_ = PEImportType.factory()
obj_.build(child_)
self.Import.append(obj_)
# end class PEImportListType
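# Hedged parsing sketch for PEImportListType.build. The XML fragment and the use
# of lxml.etree are illustrative assumptions; the binding itself only needs an
# element whose children are tagged 'Import' (see buildChildren above).
def _example_build_pe_import_list():
    from lxml import etree
    node = etree.fromstring(b"<Import_List><Import/><Import/></Import_List>")
    imports = PEImportListType.factory()
    imports.build(node)
    return len(imports.get_Import())  # -> 2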
class PESectionType(GeneratedsSuper):
"""The PESectionType type is intended as container for the properties
relevant to PE binary sections. A PE Section consists of a
header and data. The PESectionType contains properties that
describe the Section Header and metadata computed about the
section (e.g., hashes, entropy)."""
subclass = None
superclass = None
def __init__(self, Section_Header=None, Data_Hashes=None, Entropy=None, Header_Hashes=None):
self.Section_Header = Section_Header
self.Data_Hashes = Data_Hashes
self.Entropy = Entropy
self.Header_Hashes = Header_Hashes
def factory(*args_, **kwargs_):
if PESectionType.subclass:
return PESectionType.subclass(*args_, **kwargs_)
else:
return PESectionType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Section_Header(self): return self.Section_Header
def set_Section_Header(self, Section_Header): self.Section_Header = Section_Header
def get_Data_Hashes(self): return self.Data_Hashes
def set_Data_Hashes(self, Data_Hashes): self.Data_Hashes = Data_Hashes
full_cov = (cov_type == 'full')
if full_cov:
Kss = kernel(X)
Qss = W.matmul(W.t())
cov = Kss - Qss
else:
Kssdiag = kernel(X, diag=True)
Qssdiag = W.pow(2).sum(dim=-1)
# Theoretically, Kss - Qss is non-negative; but due to numerical
# computation, that might not be the case in practice.
cov = (Kssdiag - Qssdiag).clamp(min=0)
if f_scale_tril is not None:
W_S_shape = (X.size(0),) + f_scale_tril.shape[1:]
W_S = W.matmul(S_2D).reshape(W_S_shape)
# convert W_S_shape from M x N x latent_shape to latent_shape x M x N
W_S = W_S.permute(list(range(2, W_S.dim())) + [0, 1])
if full_cov:
St_Wt = W_S.transpose(-2, -1)
K = W_S.matmul(St_Wt)
cov = cov + K
else:
Kdiag = W_S.pow(2).sum(dim=-1)
cov = cov + Kdiag
else:
if full_cov:
cov = cov.expand(out_dims, M, M)
else:
cov = var.expand(out_dims, M)
#loc = loc.view(out_dims, samp, -1).permute(1, 0, 2) # K, N, T
if full_cov:
assert samp == 1
cov = cov[None, ...] # K, N, T, T
else:
cov = cov.view(out_dims, samp, -1).permute(1, 0, 2) # K, N, T
return loc, cov, Lff
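# Minimal numerical sketch (values are illustrative) of why the diagonal
# Kss - Qss is clamped at zero above: in exact arithmetic the difference is
# non-negative, but floating-point round-off can leave tiny negative entries
# that would later break a sqrt or a Normal scale parameter.
def _example_clamp_negative_variance():
    import torch
    kss_diag = torch.tensor([1.0, 1.0])
    qss_diag = torch.tensor([1.0 + 1e-7, 0.5])  # round-off pushed one entry past Kss
    return (kss_diag - qss_diag).clamp(min=0)   # -> tensor([0.0000, 0.5000])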
def cg_batch(A_bmm, B, M_bmm=None, X0=None, rtol=1e-3, atol=0., maxiter=None, verbose=False):
"""
Solves a batch of PD matrix linear systems using the preconditioned CG algorithm.
This function solves a batch of matrix linear systems of the form
A_i X_i = B_i, i=1,...,K,
where A_i is a n x n positive definite matrix and B_i is a n x m matrix,
and X_i is the n x m matrix representing the solution for the ith system.
Args:
A_bmm: A callable that performs a batch matrix multiply of A and a K x n x m matrix.
B: A K x n x m matrix representing the right hand sides.
M_bmm: (optional) A callable that performs a batch matrix multiply of the preconditioning
matrices M and a K x n x m matrix. (default=identity matrix)
X0: (optional) Initial guess for X, defaults to M_bmm(B). (default=None)
rtol: (optional) Relative tolerance for norm of residual. (default=1e-3)
atol: (optional) Absolute tolerance for norm of residual. (default=0)
maxiter: (optional) Maximum number of iterations to perform. (default=5*n)
verbose: (optional) Whether or not to print status messages. (default=False)
"""
K, n, m = B.shape
if M_bmm is None:
M_bmm = lambda x: x
if X0 is None:
X0 = M_bmm(B)
if maxiter is None:
maxiter = 5 * n
assert B.shape == (K, n, m)
assert X0.shape == (K, n, m)
assert rtol > 0 or atol > 0
assert isinstance(maxiter, int)
X_k = X0
R_k = B - A_bmm(X_k)
Z_k = M_bmm(R_k)
P_k = torch.zeros_like(Z_k)
P_k1 = P_k
R_k1 = R_k
R_k2 = R_k
X_k1 = X0
Z_k1 = Z_k
Z_k2 = Z_k
B_norm = torch.norm(B, dim=1)
stopping_matrix = torch.max(rtol*B_norm, atol*torch.ones_like(B_norm))
if verbose:
print("%03s | %010s %06s" % ("it", "dist", "it/s"))
optimal = False
start = time.perf_counter()
for k in range(1, maxiter + 1):
start_iter = time.perf_counter()
Z_k = M_bmm(R_k)
if k == 1:
P_k = Z_k
R_k1 = R_k
X_k1 = X_k
Z_k1 = Z_k
else:
R_k2 = R_k1
Z_k2 = Z_k1
P_k1 = P_k
R_k1 = R_k
Z_k1 = Z_k
X_k1 = X_k
denominator = (R_k2 * Z_k2).sum(1)
denominator[denominator == 0] = 1e-8
beta = (R_k1 * Z_k1).sum(1) / denominator
P_k = Z_k1 + beta.unsqueeze(1) * P_k1
denominator = (P_k * A_bmm(P_k)).sum(1)
denominator[denominator == 0] = 1e-8
alpha = (R_k1 * Z_k1).sum(1) / denominator
X_k = X_k1 + alpha.unsqueeze(1) * P_k
R_k = R_k1 - alpha.unsqueeze(1) * A_bmm(P_k)
end_iter = time.perf_counter()
residual_norm = torch.norm(A_bmm(X_k) - B, dim=1)
if verbose:
print("%03d | %8.4e %4.2f" %
(k, torch.max(residual_norm-stopping_matrix),
1. / (end_iter - start_iter)))
if (residual_norm <= stopping_matrix).all():
optimal = True
break
end = time.perf_counter()
if verbose:
if optimal:
print("Terminated in %d steps (reached maxiter). Took %.3f ms." %
(k, (end - start) * 1000))
else:
print("Terminated in %d steps (optimal). Took %.3f ms." %
(k, (end - start) * 1000))
info = {
"niter": k,
"optimal": optimal
}
return X_k, info
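# Hedged usage sketch for cg_batch; the batch size, the 4*I system matrix and the
# tolerance below are illustrative, not values used anywhere else in this file.
def _example_cg_batch():
    import torch
    K, n, m = 2, 8, 3
    A = torch.eye(n).repeat(K, 1, 1) * 4.0   # batch of (trivially) SPD matrices
    B = torch.randn(K, n, m)
    A_bmm = lambda X: torch.bmm(A, X)        # closure computing A_i @ X_i
    X, info = cg_batch(A_bmm, B, rtol=1e-6)
    # For A = 4*I the exact solution is B / 4; CG recovers it in one iteration.
    assert torch.allclose(X, B / 4.0, atol=1e-4)
    return info["niter"]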
class CG(torch.autograd.Function):
def __init__(self, A_bmm, M_bmm=None, rtol=1e-3, atol=0., maxiter=None, verbose=False):
self.A_bmm = A_bmm
self.M_bmm = M_bmm
self.rtol = rtol
self.atol = atol
self.maxiter = maxiter
self.verbose = verbose
def forward(self, B, X0=None):
X, _ = cg_batch(self.A_bmm, B, M_bmm=self.M_bmm, X0=X0, rtol=self.rtol,
atol=self.atol, maxiter=self.maxiter, verbose=self.verbose)
return X
def backward(self, dX):
dB, _ = cg_batch(self.A_bmm, dX, M_bmm=self.M_bmm, rtol=self.rtol,
atol=self.atol, maxiter=self.maxiter, verbose=self.verbose)
return dB
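# Note on the wrapper above: for a symmetric (self-adjoint) A, the adjoint of the
# solve X = A^{-1} B is dB = A^{-T} dX = A^{-1} dX, which is why backward() can
# reuse cg_batch with the *same* A_bmm closure applied to the incoming gradient.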
"""GPyTorch
import warnings
import torch
from .. import settings
from .deprecation import bool_compat
from .warnings import NumericalWarning
def _default_preconditioner(x):
return x.clone()
@torch.jit.script
def _jit_linear_cg_updates(
result, alpha, residual_inner_prod, eps, beta, residual, precond_residual, mul_storage, is_zero, curr_conjugate_vec
):
# # Update result
# # result_{k} = result_{k-1} + alpha_{k} p_vec_{k-1}
result = torch.addcmul(result, alpha, curr_conjugate_vec, out=result)
# beta_{k} = (precon_residual{k}^T r_vec_{k}) / (precon_residual{k-1}^T r_vec_{k-1})
beta.resize_as_(residual_inner_prod).copy_(residual_inner_prod)
torch.mul(residual, precond_residual, out=mul_storage)
torch.sum(mul_storage, -2, keepdim=True, out=residual_inner_prod)
# Do a safe division here
torch.lt(beta, eps, out=is_zero)
beta.masked_fill_(is_zero, 1)
torch.div(residual_inner_prod, beta, out=beta)
beta.masked_fill_(is_zero, 0)
# Update curr_conjugate_vec
# curr_conjugate_vec_{k} = precon_residual{k} + beta_{k} curr_conjugate_vec_{k-1}
curr_conjugate_vec.mul_(beta).add_(precond_residual)
@torch.jit.script
def _jit_linear_cg_updates_no_precond(
mvms,
result,
has_converged,
alpha,
residual_inner_prod,
eps,
beta,
residual,
precond_residual,
mul_storage,
is_zero,
curr_conjugate_vec,
):
torch.mul(curr_conjugate_vec, mvms, out=mul_storage)
torch.sum(mul_storage, dim=-2, keepdim=True, out=alpha)
# Do a safe division here
torch.lt(alpha, eps, out=is_zero)
alpha.masked_fill_(is_zero, 1)
torch.div(residual_inner_prod, alpha, out=alpha)
alpha.masked_fill_(is_zero, 0)
# We'll cancel out any updates by setting alpha=0 for any vector that has already converged
alpha.masked_fill_(has_converged, 0)
# Update residual
# residual_{k} = residual_{k-1} - alpha_{k} mat p_vec_{k-1}
torch.addcmul(residual, -alpha, mvms, out=residual)
# Update precond_residual
# precon_residual{k} = M^-1 residual_{k}
precond_residual = residual.clone()
_jit_linear_cg_updates(
result,
alpha,
residual_inner_prod,
eps,
beta,
residual,
precond_residual,
mul_storage,
is_zero,
curr_conjugate_vec,
)
def linear_cg(
matmul_closure,
rhs,
n_tridiag=0,
tolerance=None,
eps=1e-10,
stop_updating_after=1e-10,
max_iter=None,
max_tridiag_iter=None,
initial_guess=None,
preconditioner=None,
):
""
Implements the linear conjugate gradients method for (approximately) solving systems of the form
lhs result = rhs
for positive definite and symmetric matrices.
Args:
- matmul_closure - a function which performs a left matrix multiplication with lhs_mat
- rhs - the right-hand side of the equation
- n_tridiag - returns a tridiagonalization of the first n_tridiag columns of rhs
- tolerance - stop the solve when the max residual is less than this
- eps - noise to add to prevent division by zero
- stop_updating_after - will stop updating a vector after this residual norm is reached
- max_iter - the maximum number of CG iterations
- max_tridiag_iter - the maximum size of the tridiagonalization matrix
- initial_guess - an initial guess at the solution `result`
- precondition_closure - a functions which left-preconditions a supplied vector
Returns:
result - a solution to the system (if n_tridiag is 0)
result, tridiags - a solution to the system, and corresponding tridiagonal matrices (if n_tridiag > 0)
""
# Unsqueeze, if necessary
is_vector = rhs.ndimension() == 1
if is_vector:
rhs = rhs.unsqueeze(-1)
# Some default arguments
if max_iter is None:
max_iter = settings.max_cg_iterations.value()
if max_tridiag_iter is None:
max_tridiag_iter = settings.max_lanczos_quadrature_iterations.value()
if initial_guess is None:
initial_guess = torch.zeros_like(rhs)
if tolerance is None:
if settings._use_eval_tolerance.on():
tolerance = settings.eval_cg_tolerance.value()
else:
tolerance = settings.cg_tolerance.value()
if preconditioner is None:
preconditioner = _default_preconditioner
precond = False
else:
precond = True
# If we are running m CG iterations, we obviously can't get more than m Lanczos coefficients
if max_tridiag_iter > max_iter:
raise RuntimeError("Getting a tridiagonalization larger than the number of CG iterations run is not possible!")
# Check matmul_closure object
if torch.is_tensor(matmul_closure):
matmul_closure = matmul_closure.matmul
elif not callable(matmul_closure):
raise RuntimeError("matmul_closure must be a tensor, or a callable object!")
# Get some constants
batch_shape = rhs.shape[:-2]
num_rows = rhs.size(-2)
n_iter = min(max_iter, num_rows) if settings.terminate_cg_by_size.on() else max_iter
n_tridiag_iter = min(max_tridiag_iter, num_rows)
eps = torch.tensor(eps, dtype=rhs.dtype, device=rhs.device)
# Get the norm of the rhs - used for convergence checks
# Here we're going to make almost-zero norms actually be 1 (so we don't get divide-by-zero issues)
# But we'll store which norms were actually close to zero
rhs_norm = rhs.norm(2, dim=-2, keepdim=True)
rhs_is_zero = rhs_norm.lt(eps)
rhs_norm = rhs_norm.masked_fill_(rhs_is_zero, 1)
# Let's normalize. We'll un-normalize afterwards
rhs = rhs.div(rhs_norm)
# residual: residual_{0} = b_vec - lhs x_{0}
residual = rhs - matmul_closure(initial_guess)
# result <- x_{0}
Crystal":0xf0ede0,
"Salt Glaze":0xcfd4ce,
"Salt Island Green":0x757261,
"Salt Lake":0x74c6d3,
"Salt Mountain":0xd7fefe,
"Salt n Pepa":0xdcd9db,
"Salt Pebble":0xf9ecea,
"Salt Spray":0xa7c5ce,
"Salt Steppe":0xeeddbb,
"Salt Water":0x95bbd8,
"Salt Water Taffy":0xd1ab99,
"Saltbox Blue":0x65758a,
"Salted Caramel":0xebb367,
"Salted Caramel Popcorn":0xfdb251,
"Salted Pretzel":0x816b56,
"Saltpan":0xeef3e5,
"Saltwater":0xc2d0de,
"Salty Breeze":0xdde2d7,
"Salty Cracker":0xe2c681,
"Salty Dog":0x234058,
"Salty Ice":0xcce2f3,
"Salty Seeds":0xc1b993,
"Salty Tear":0xcfebed,
"Salty Tears":0xbacad4,
"Salty Thyme":0x96b403,
"Salty Vapour":0xcbdee3,
"Salute":0x282b34,
"Salvation":0x514e5c,
"Salvia":0xa8b59e,
"Salvia Divinorum":0x929752,
"Samantha's Room":0xf2d7e6,
"Samba":0xa2242f,
"<NAME>":0xf0e0d4,
"Sambuca":0x3b2e25,
"Sambucus":0x17182b,
"Samoan Sun":0xfbc85f,
"Samovar Silver":0xb8bebe,
"Samphire Green":0x4db560,
"San Antonio Sage":0xa69474,
"San Carlos Plaza":0xd9bb8e,
"San Felix":0x2c6e31,
"San Francisco Fog":0xc4c2bc,
"San Francisco Mauve":0x936a6d,
"San Gabriel Blue":0x2f6679,
"San Juan":0x445761,
"San Marino":0x4e6c9d,
"San Miguel Blue":0x527585,
"Sanctuary":0xd4c9a6,
"Sanctuary Spa":0x66b2e4,
"Sand":0xe2ca76,
"Sand Blast":0xdecbab,
"Sand Brown":0xcba560,
"Sand Castle":0xe5d9c6,
"Sand Crystal":0xffeeda,
"Sand Dagger":0xe6ddd2,
"Sand Dance":0xe0c8b9,
"Sand Diamond":0xfae8bc,
"Sand Dollar":0xdecdbe,
"Sand Dollar White":0xfae3c9,
"Sand Drift":0xe5e0d3,
"Sand Dune":0xe3d2c0,
"Sand Fossil":0xdecfb3,
"Sand Grain":0xe3e4d9,
"Sand Island":0xf4d1c2,
"Sand Motif":0xddc6a8,
"Sand Paper":0xccbb88,
"Sand Pearl":0xe7d4b6,
"Sand Pebble":0xb09d7f,
"Sand Puffs":0xe6e5d3,
"Sand Pyramid":0xddcc77,
"Sand Ripples":0xc1b7b0,
"Sand Shark":0x5a86ad,
"Sand Trail":0xd0c6a1,
"Sand Trap":0xbba595,
"Sand Verbena":0x9f90c1,
"Sand Yellow":0xfdee73,
"Sandal":0xa3876a,
"Sandalwood":0x615543,
"Sandalwood Beige":0xf2d1b1,
"Sandalwood Grey Blue":0x005160,
"Sandalwood Tan":0x907f68,
"Sandbank":0xe9d5ad,
"Sandbar":0xcbbfad,
"Sandblast":0xf5c9bf,
"Sandcastle":0xe5d7c4,
"Sanderling":0xc8ab96,
"Sandgrass Green":0x93917f,
"Sanding Sugar":0xefebde,
"Sandpaper":0xd7b1a5,
"Sandpiper":0xebdac8,
"Sandpiper Cove":0x717474,
"Sandpit":0x9e7c5e,
"Sandpoint":0xeacdb0,
"Sandrift":0xaf937d,
"Sands of Time":0xbca38b,
"Sandshell":0xd8ccbb,
"Sandstone":0xc9ae74,
"Sandstone Cliff":0xd2c9b7,
"Sandstone Grey":0x857266,
"Sandstone Grey Green":0x88928c,
"Sandstone Palette":0xd9ccb6,
"Sandstone Red Grey":0x886e70,
"Sandstorm":0xecd540,
"Sandwashed Driftwood":0x706859,
"Sandwashed Glassshard":0xdee8e3,
"Sandwisp":0xdecb81,
"Sandworm":0xfce883,
"Sandy":0xf1da7a,
"Sandy Ash":0xe4ded5,
"Sandy Bay":0xfad7b3,
"Sandy Beach":0xf9e2d0,
"Sandy Bluff":0xaca088,
"Sandy Clay":0xdbd0bd,
"Sandy Day":0xd7cfc1,
"Sandy Pail":0xd2c098,
"Sandy Ridge":0xa18e77,
"Sandy Shoes":0x847563,
"Sandy Shore":0xf2e9bb,
"Sandy Tan":0xfdd9b5,
"Sandy Taupe":0x967111,
"Sandy Toes":0xc7b8a4,
"Sang de Boeuf":0x771100,
"Sango Pink":0xf5b1aa,
"Sango Red":0xf8674f,
"Sangoire Red":0x881100,
"Sangria":0xb14566,
"Sanguinary":0xf01a4d,
"Sanguine Brown":0x6c3736,
"Sanskrit":0xe69332,
"Santa Fe":0xb16d52,
"Santa Fe Sunrise":0xcc9469,
"Santa Fe Sunset":0xa75a4c,
"Santa Fe Tan":0xd5ad85,
"Santana Soul":0x714636,
"Santas Grey":0x9fa0b1,
"Santiago Orange":0xe95f24,
"Santo":0xd6d2ce,
"Santolina Blooms":0xe3d0d5,
"Santorini":0x41b0d0,
"Santorini Blue":0x416d83,
"Sap Green":0x5c8b15,
"Sapless Green":0xbebdac,
"Sapling":0xe1d5a6,
"Sappanwood":0x9e3d3f,
"Sappanwood Incense":0xa24f46,
"Sappanwood Perfume":0xa86965,
"Sapphire":0x0f52ba,
"Sapphire Blue":0x0067bc,
"Sapphire Fog":0x99a8c9,
"Sapphire Glitter":0x0033cc,
"Sapphire Lace":0x235c8e,
"Sapphire Light Yellow":0xcdc7b4,
"Sapphire Pink":0x887084,
"Sapphire Shimmer Blue":0x5776af,
"Sapphire Siren":0x662288,
"Sapphire Sparkle":0x135e84,
"Sapphire Stone":0x41495d,
"Sapphireberry":0xc9e5ee,
"Sarah's Garden":0x00aac1,
"Saratoga":0x555b2c,
"Sarawak White Pepper":0xf4eeba,
"Sarcoline":0xffddaa,
"Sardinia Beaches":0x28a4cb,
"Sargasso Sea":0x35435a,
"Sari":0xe47c64,
"Sarsaparilla":0x5b4c44,
"Saruk Grey":0x817265,
"Sashay Sand":0xcfb4a8,
"Sasquatch Socks":0xff4681,
"Sassafras":0x54353b,
"Sassafras Tea":0xdbd8ca,
"Sassy":0xc18862,
"Sassy Grass":0x7a8c31,
"Sassy Green":0xbba86a,
"Sassy Pink":0xf6cefc,
"Sassy Yellow":0xf0c374,
"Satan":0xe63626,
"Satellite":0x9f8d89,
"Satin Black":0x4e5152,
"Satin Blush":0xffe4c6,
"Satin Chocolate":0x773344,
"Satin Cream White":0xfdf3d5,
"Satin Deep Black":0x1c1e21,
"Satin Flower":0xb48fbd,
"Satin Green":0xc7dfb8,
"Satin Latour":0xfad7b0,
"Satin Lime":0x33ee00,
"Satin Linen":0xe6e4d4,
"Satin Pink":0xfbe0dc,
"Satin Purse":0xfff8ee,
"Satin Ribbon":0xffd8dc,
"Satin Sheen Gold":0xcba135,
"Satin Soft Blue":0x9cadc7,
"Satin Soil":0x6b4836,
"Satin Souffle":0xefe0bc,
"Satin Weave":0xf3edd9,
"Satin White":0xcfd5db,
"Satire":0xc4c2cd,
"Sativa":0xb5bf50,
"Satoimo Brown":0x654321,
"Satsuma Imo Red":0x96466a,
"Sattle":0xaa6622,
"Saturated Sky":0x4b4cfc,
"Saturn":0xfae5bf,
"Saturn Grey":0xb8b19f,
"Saturnia":0xdddbce,
"Satyr Brown":0xbca17a,
"Saucisson":0x882c17,
"Saucy Gold":0xb6743b,
"Saudi Sand":0x9e958a,
"Sauna Steam":0xedebe1,
"Sausage Roll":0xebdfcd,
"Sausalito":0xf4e5c5,
"Sausalito Port":0x5d6f85,
"Sausalito Ridge":0x6a5d53,
"Sauteed Mushroom":0xab9378,
"Sauterne":0xc5a253,
"Sauvignon":0xf4eae4,
"Sauvignon Blanc":0xb18276,
"Savanna":0x874c44,
"Savannah":0xd1bd92,
"Savannah Grass":0xbabc72,
"Savannah Sun":0xffb989,
"Saveloy":0xaa2200,
"Savile Row":0xc0b7cf,
"Saving Light":0x550011,
"Savon de Provence":0xeed9b6,
"Savory Salmon":0xd19c97,
"Savoy":0x87b550,
"Savoy Blue":0x4b61d1,
"Sawdust":0xf6e9cf,
"Sawgrass":0xd1cfc0,
"Sawgrass Basket":0xc3b090,
"Sawgrass Cottage":0xd3cda2,
"Sawshark":0xaa7766,
"Sawtooth Aak":0xec956c,
"Saxon":0xabc1a0,
"Saxon Blue":0x435965,
"Saxony Blue":0x1f6680,
"Saxophone Gold":0xceaf81,
"Sayward Pine":0x383739,
"Sazerac":0xf5dec4,
"Scab Red":0x8b0000,
"Scallion":0x6b8e23,
"Scallop Shell":0xfbd8c9,
"Scalloped Oak":0xf2d1a0,
"Scalloped Potato":0xfce3cf,
"Scalloped Shell":0xf3e9e0,
"Scallywag":0xe5d5bd,
"Scaly Green":0x027275,
"Scampi":0x6f63a0,
"Scanda":0x6b8ca9,
"Scandal":0xadd9d1,
"Scandalous Rose":0xdfbdd0,
"Scandinavian Sky":0xc2d3d6,
"Scapa Flow":0x6b6a6c,
"Scarab":0x23312d,
"Scarabaeus Sacer":0x414040,
"Scarabœus Nobilis":0x7d8c55,
"Scarborough":0x809391,
"Scarlet":0xff2400,
"Scarlet Apple":0x922e4a,
"Scarlet Flame":0x993366,
"Scarlet Gum":0x4a2d57,
"Scarlet Ibis":0xf45520,
"Scarlet Past":0xa53b3d,
"Scarlet Red":0xb63e36,
"Scarlet Ribbons":0xa4334a,
"Scarlet Sage":0x9d202f,
"Scarlet Shade":0x7e2530,
"Scarpetta":0x8ca468,
"Scatman Blue":0x647983,
"Scattered Showers":0x7b8285,
"Scenario":0x81a79e,
"Scene Stealer":0xaf6d62,
"Scenic Path":0xcec5b4,
"Scented Clove":0x61524c,
"Scented Frill":0xcaaeb8,
"Scented Valentine":0xf3d9d6,
"Sceptre Blue":0x353542,
"Schabziger Yellow":0xeeeebb,
"Schauss Pink":0xff91af,
"Schiaparelli Pink":0xe84998,
"Schiava Blue":0x192961,
"Schindler Brown":0x8b714c,
"Schist":0x87876f,
"Schnipo":0xdd8855,
"Scholarship":0x586a7d,
"School Bus":0xffd800,
"School Ink":0x31373f,
"Schooner":0x8d8478,
"Sci-fi Petrol":0x006666,
"Sci-Fi Takeout":0x00604b,
"Science Blue":0x0076cc,
"Scintillating Violet":0x764663,
"Sconce":0xae935d,
"Sconce Gold":0x957340,
"Scoop of Dark Matter":0x110055,
"Scooter":0x308ea0,
"Scorched":0x351f19,
"Scorched Brown":0x4d0001,
"Scorched Earth":0x44403d,
"Scorched Metal":0x423d27,
"Scorpion":0x6a6466,
"Scorpion Grass Blue":0x99aac8,
"Scorpion Venom":0x97ea10,
"Scorpy Green":0x8eef15,
"Scorzonera Brown":0x544e03,
"Scotch Blue":0x000077,
"Scotch Bonnet":0xfe9f00,
"Scotch Lassie":0x649d85,
"Scotch Mist":0xeee7c8,
"Scotchtone":0xebccb9,
"Scotland Isle":0x87954f,
"Scotland Road":0x9baa9a,
"Scots Pine":0x5f653b,
"Scott Base":0x66a3c3,
"Scouring Rush":0x3b7960,
"Screamer Pink":0xab0040,
"Screamin' Green":0x66ff66,
"Screaming Bell Metal":0xc16f45,
"Screaming Magenta":0xcc00cc,
"Screaming Skull":0xf0f2d2,
"Screech Owl":0xeae4d8,
"Screed Grey":0x9a908a,
"Screen Gem":0x9d7798,
"Screen Glow":0x66eeaa,
"Screen Test":0x999eb0,
"Scribe":0x9fabb6,
"Script Ink":0x60616b,
"Script White":0xdbdddf,
"Scrofulous Brown":0xdba539,
"Scroll":0xefe0cb,
"Scroll of Wisdom":0xf3e5c0,
"Scrolled Parchment":0xe9ddc9,
"Scrub":0x3d4031,
"Scuba":0x6392b7,
"Scuba Blue":0x00abc0,
"Scud":0xacd7c8,
"Scuff Blue":0x0044ee,
"Sculptor Clay":0xccc3b4,
"Sculptural Silver":0xd1dad5,
"Scurf Green":0x02737a,
"Sè Lèi Orange":0xfc824a,
"Sea":0x3c9992,
"Sea Anemone":0xe8dad6,
"Sea Angel":0x98bfca,
"Sea Beast":0x62777e,
"Sea Bed":0x29848d,
"Sea Blithe":0x41545c,
"Sea Blue":0x006994,
"Sea Breeze":0xa4bfce,
"Sea Breeze Green":0xc9d9e7,
"Sea Buckthorn":0xffbf65,
"Sea Cabbage":0x519d76,
"Sea Caller":0x45868b,
"Sea Cap":0xe4f3df,
"Sea Capture":0x61bddc,
"Sea Cave":0x005986,
"Sea Challenge":0x2c585c,
"Sea Cliff":0xa5c7df,
"Sea Creature":0x00586d,
"Sea Crystal":0x608ba6,
"Sea Current":0x4c959d,
"Sea Deep":0x2d3c44,
"Sea Drifter":0x4b7794,
"Sea Drive":0xc2d2e0,
"Sea Elephant":0x77675c,
"Sea Fantasy":0x1a9597,
"Sea Fern":0x656d54,
"Sea Foam":0x87e0cf,
"Sea Foam Mist":0xcbdce2,
"Sea Fog":0xdfddd6,
"Sea Frost":0xd5dcdc,
"Sea Garden":0x568e88,
"Sea Glass":0xafc1bf,
"Sea Glass Teal":0xa0e5d9,
"Sea Goddess":0x216987,
"Sea Going":0x2a2e44,
"Sea Grape":0x3300aa,
"Sea Grass":0x67ad83,
"Sea Green":0x53fca1,
"Sea Haze Grey":0xcbd9d4,
"Sea Hunter":0x245878,
"Sea Ice":0xd7f2ed,
"Sea Kale":0x30a299,
"Sea Kelp":0x354a55,
"Sea Lavender":0xcfb1d8,
"Sea Lettuce":0x67a181,
"Sea Life":0x5dc6bf,
"Sea Lion":0x7f8793,
"Sea Loch":0x6e99d1,
"Sea Mariner":0x434a54,
"Sea Mark":0x92b6cf,
"Sea Mist":0xdbeee0,
"Sea Monster":0x658c7b,
"Sea Moss":0x254445,
"Sea Nettle":0xf47633,
"Sea Note":0x5482c2,
"Sea Nymph":0x8aaea4,
"Sea of Atlantis":0x2d535a,
"Sea of Tranquility":0x81d1da,
"Sea Paint":0x00507a,
"Sea Palm":0x72897e,
"Sea Pea":0x457973,
"Sea Pearl":0xe0e9e4,
"Sea Pine":0x4c6969,
"Sea Pink":0xdb817e,
"Sea Quest":0x3e7984,
"Sea Radish":0x799781,
"Sea Ridge":0x45a3cb,
"Sea Rover":0xa3d1e2,
"Sea Salt":0xf1e6de,
"Sea Serpent":0x4bc7cf,
"Sea Serpent's Tears":0x5511cc,
"Sea Sight":0x00789b,
"Sea Sparkle":0x469ba7,
"Sea Spray":0xd2ebea,
"Sea Sprite":0xb7ccc7,
"Sea Squash":0xbaa243,
"Sea Star":0x4d939a,
"Sea Swimmer":0x337f86,
"Sea Turtle":0x818a40,
"Sea Urchin":0x367d83,
"Sea Wind":0xaccad5,
"Sea Wonder":0x0f9bc0,
"Seaborne":0x7aa5c9,
"Seabrook":0x4b81af,
"Seabuckthorn Yellow Brown":0xcd7b00,
"Seachange":0x3e8896,
"Seacrest":0xbfd1b3,
"Seafair Green":0xb8f8d8,
"Seafoam Blue":0x78d1b6,
"Seafoam Green":0x99bb88,
"Seafoam Pearl":0xc2ecd8,
"Seafoam Spray":0xdaefce,
"Seaglass":0xd0e6de,
"Seagrass":0xbcc6a2,
"Seagrass Green":0x264e50,
"Seagull":0xe0ded8,
"Seagull Grey":0xd9d9d2,
"Seagull Wail":0xc7bda8,
"Seal Blue":0x475663,
"Seal Brown":0x321414,
"Seal Grey":0x8a9098,
"Seal Pup":0x65869b,
"Sealegs":0x6b8b8b,
"Sealskin":0x48423c,
"Sealskin Shadow":0xe9ece6,
"Seamount":0x15646d,
"Seance":0x69326e,
"Seaplane Grey":0x3a3f41,
"Seaport":0x005e7d,
"Seaport Steam":0xaecac8,
"Searching Blue":0x6c7f9a,
"Searchlight":0xeff0bf,
"Seared Earth":0x9a5633,
"Seared Grey":0x495157,
"Searing Gorge Brown":0x6b3b23,
"Seascape Blue":0xa6bad1,
"Seascape Green":0xb5e4e4,
"Seashell":0xfff5ee,
"Seashell Cove":0x104c77,
"Seashell Peach":0xfff6de,
"Seashell Pink":0xf7c8c2,
"Seashore Dreams":0xb5dcef,
"Seaside Sand":0xf2e9d7,
"Seaside Villa":0xe9d5c9,
"Season Finale":0xbea27b,
"Seasonal Beige":0xe6b99f,
"Seasoned Acorn":0x7f6640,
"Seasoned Apple Green":0x8db600,
"Seasoned Salt":0xcec2a1,
"Seattle Red":0x7d212a,
"Seawashed Glass":0xa9c095,
"Seaweed":0x18d17b,
"Seaweed Green":0x35ad6b,
"Seaweed Salad":0x7d7b55,
"Seaweed Tea":0x5d7759,
"Seaweed Wrap":0x4d473d,
"Seaworld":0x125459,
"Seaworthy":0x314d58,
"Sebright Chicken":0xbd5701,
"Secluded Canyon":0xc6876f,
"Secluded Green":0x6f6d56,
"Secluded Woods":0x495a52,
"Second Nature":0x585642,
"Second Pour":0x887ca4,
"Second Wind":0xdfece9,
"Secrecy":0x50759e,
"Secret Blush":0xe1d2d5,
"Secret Cove":0x68909d,
"Secret Crush":0xd7dfd6,
"Secret Garden":0x11aa66,
"Secret Glade":0xb5b88d,
"Secret Journal":0x7c6055,
"Secret Meadow":0x72754f,
"Secret of Mana":0x4166f5,
"Secret Passageway":0x6d695e,
"Secret Path":0x737054,
"Secret Safari":0xc6bb68,
"Secret Scent":0xe3d7dc,
"Secret Society":0x464e5a,
"Secret Story":0xff1493,
"Secure Blue":0x5389a1,
"Security":0xd6e1c2,
"Sedate Gray":0xd1cdbf,
"Sedge":0xb1a591,
"Sedge Green":0x707a68,
"Sedia":0xb0a67e,
"Sedona":0xe7e0cf,
"Sedona at Sunset":0xbf7c45,
"Sedona Pink":0xd6b8a7,
"Sedona Sage":0x686d6c,
"Sedona Shadow":0x665f70,
"Seduction":0xfbf2bf,
"Seductive Thorns":0xa2c748,
"Seed Pearl":0xe6dac4,
"Seedless Grape":0xd3c3d4,
"Seedling":0xc0cba1,
"Seeress":0xa99ba9,
"Sefid White":0xfff1f1,
"Seiheki Green":0x3a6960,
"Seiji Green":0x819c8b,
"Sekichiku Pink":0xe5abbe,
"Sekkasshoku Brown":0x683f36,
"Selago":0xe6dfe7,
"Selective Yellow":0xffba00,
"Self Powered":0x8c7591,
"Self-Destruct":0xc2b398,
"Seljuk Blue":0x4488ee,
"Sell Gold":0xd4ae5e,
"Sell Out":0x90a2b7,
"Semi Opal":0xab9649,
"Semi Sweet":0x6b5250,
"Semi Sweet Chocolate":0x6b4226,
"Semi-Precious":0x659b97,
"Semolina":0xceb899,
"Semolina Pudding":0xffe8c7,
"Sēn Lín Lǜ Forest":0x4ca973,
"Senate":0x4a515f,
"Sencha Brown":0x824b35,
"Seneca Rock":0x9a927f,
"Senior Moment":0xfdecc7,
"Sensai Brown":0x494a41,
"Sensaicha brown":0x3b3429,
"Sensaimidori Green":0x374231,
"Sensational Sand":0xbfa38d,
"Sensible Hue":0xead7b4,
"Sensitive Scorpion":0xcc2266,
"Sensitive Tint":0xcec9cc,
"Sensitivity":0xa1b0be,
"Sensual Climax":0xda3287,
"Sensual Fumes":0xcd68e2,
"Sensual Peach":0xffd2b6,
"Sensuous":0xb75e6b,
"Sensuous Gray":0x837d7f,
"Sentimental":0xe6d8d2,
"Sentimental Beige":0xe0d8c5,
"Sentimental Lady":0xc4d3dc,
"Sentimental Pink":0xf8eef4,
"Sentinel":0xd2e0d6,
"Sephiroth Grey":0x8c92ac,
"Sepia":0x704214,
"Sepia Black":0x2b0202,
"Sepia Brown":0x4b3526,
"Sepia Filter":0xcbb499,
"Sepia Rose":0xd4bab6,
"Sepia Skin":0x9f5c42,
"Sepia Tint":0x897560,
"Sepia Tone":0xb8a88a,
"Sepia Wash":0x995915,
"Sepia Yellow":0x8c7340,
"September Gold":0x8d7548,
"September Morn":0xede6b3,
"September Morning":0xffe9bb,
"September Song":0xd5d8c8,
"September Sun":0xfdd7a2,
"Sequesta":0xd4d158,
"Sequin":0xe1c28d,
"Sequoia":0x804839,
"Sequoia Dusk":0x795953,
"Sequoia Fog":0xc5bbaf,
"Sequoia Grove":0x935e4e,
"Sequoia Lake":0x506c6b,
"Sequoia Redwood":0x763f3d,
"Serape":0xd88b4d,
"Seraphim Sepia":0xd7824b,
"Seraphinite":0x616f65,
"Serbian Green":0x3e644f,
"Serena":0xcfd0c1,
"Serenade":0xfce9d7,
"Serendibite Black":0x4a4354,
"Serendipity":0xbde1d8,
"Serene":0xdce3e4,
"Serene Blue":0x1199bb,
"Serene Breeze":0xbdd9d0,
"Serene Journey":0xcfd8d1,
"Serene Peach":0xf5d3b7,
"Serene Scene":0xd2c880,
"Serene Sea":0x78a7c3,
"Serene Setting":0xc5d2d9,
"Serene Sky":0xc3e3eb,
"Serene Stream":0x819daa,
"Serene Thought":0xc5c0ac,
"Serenely":0xced7d5,
"Serengeti Dust":0xe7dbc9,
"Serengeti Grass":0xab9579,
"Serengeti Green":0x77cc88,
"Serengeti Sand":0xfce7d0,
"Sereni Teal":0x76baa8,
"Serenity":0x91a8d0,
"Serious Gray":0x7d848b,
"Serious Grey":0xcec9c7,
"Seriously Sand":0xdcccb4,
"Serpent":0x817f6d,
"Serpentine":0x9b8e54,
"Serpentine Green":0xa2b37a,
"Serpentine Shadow":0x003300,
"Serrano Pepper":0x556600,
"Seryi Grey":0x9ca9ad,
"Sesame":0xbaa38b,
"Sesame Crunch":0xc26a35,
"Sesame Seed":0xe1d9b8,
"Sesame Street Green":0x00a870,
"Settlement":0x7e7970,
"Settler":0x8b9cac,
"Seven Days of Rain":0xd3dae1,
"Seven Seas":0x4a5c6a,
"Seven Veils":0xe3b8bd,
"Severe Seal":0xeee7de,
"Seville Scarlet":0x955843,
"Shabby Chic":0xbb8a8e,
"Shabby Chic Pink":0xefddd6,
"Shade of Amber":0xff7e00,
"Shade of Bone Marrow":0x889988,
"Shade of Marigold":0xb88a3d,
"Shade of Mauve":0xae7181,
"Shade of Violet":0x8601af,
"Shade-Grown":0x4e5147,
"Shaded Fern":0x786947,
"Shaded Fuchsia":0x664348,
"Shaded Glen":0x8e824a,
"Shaded Hammock":0x859c9b,
"Shaded Spruce":0x00585e,
"Shaded Sun":0xf3eba5,
"Shades On":0x605f5f,
"Shadow":0x837050,
"Shadow Azalea Pink":0xe96a97,
"Shadow Blue":0x778ba5,
"Shadow Cliff":0x7a6f66,
"Shadow Dance":0x877d83,
"Shadow Effect":0x788788,
"Shadow Gargoyle":0x686767,
"Shadow Green":0x9ac0b6,
"Shadow Grey":0xbba5a0,
"Shadow Leaf":0x395648,
"Shadow Lime":0xcfe09d,
"Shadow Mountain":0x585858,
"Shadow of the Colossus":0xa3a2a1,
"Shadow Planet":0x221144,
"Shadow Purple":0x4e334e,
"Shadow Ridge":0x5b5343,
"Shadow Warrior":0x1a2421,
"Shadow White":0xeef1ea,
"Shadow Wood":0x5e534a,
"Shadow Woods":0x8a795d,
"Shadowdancer":0x111155,
"Shadowed Steel":0x4b4b4b,
"Shadows":0x6b6d6a,
"Shady":0xdbd6cb,
"Shady Blue":0x42808a,
"Shady Character":0x4c4b4c,
"Shady Glade":0x006e5b,
"Shady Green":0x635d4c,
"Shady Grey":0x849292,
"Shady Lady":0x9f9b9d,
"Shady Neon Blue":0x5555ff,
"Shady Oak":0x73694b,
"Shady Pink":0xc4a1af,
"Shady White":0xf0e9df,
"<NAME>":0x939689,
"Shagbark Olive":0x645d41,
"Shaggy Barked":0xb3ab98,
"Shagreen":0xcbc99d,
"Shaker Blue":0x748c96,
"Shaker Grey":0x6c6556,
"Shaker Peg":0x886a3f,
"Shakespeare":0x609ab8,
"Shakker Red":0x7f4340,
"Shakshuka":0xaa3311,
"Shaku-Do Copper":0x752100,
"Shale":0x4a3f41,
"Shale Green":0x739072,
"Shale Grey":0x899da3,
"Shalimar":0xf8f6a8,
"Shallot Bulb":0x7b8d73,
"Shallot Leaf":0x505c3a,
"Shallow End":0xc5f5e8,
"Shallow Sea":0x9ab8c2,
"Shallow Shoal":0x9dd6d4,
"Shallow Shore":0xb0dec8,
"Shallow Water":0x8af1fe,
"Shallow Water Ground":0x8caeac,
"Shamanic Journey":0xcc855a,
"Shampoo":0xffcff1,
"Shamrock":0x009e60,
"Shamrock Field":0x358d52,
"Shamrock Green":0x4ea77d,
"Shān Hú Hóng Coral":0xfa9a85,
"Shandy":0xffe670,
"Shanghai Jade":0xaad9bb,
"Shanghai Peach":0xd79a91,
"Shanghai Silk":0xc8dfc3,
"Shangri La":0xecd4d2,
"Shani Purple":0x4c1050,
"Shank":0xa18b5d,
"Sharbah Fizz":0x9be3d7,
"Sharegaki Persimmon":0xffa26b,
"Shark":0xcadcde,
"Shark Bait":0xee6699,
"Shark Fin":0x969795,
"Shark Tooth":0xe4e1d3,
"Sharknado":0x35524a,
"Sharkskin":0x838487,
"Sharp Blue":0x2b3d54,
"Sharp Green":0xc6ec7a,
"Sharp Grey":0xc9cad1,
"Sharp Pebbles":0xdbd6d8,
"Sharp Yellow":0xecc043,
"Sharp-Rip Drill":0xeae1d6,
"Shasta Lake":0x355c74,
"Shattan Gold":0xbb5577,
"Shattell":0xb5a088,
"Shattered Ice":0xdaeee6,
"Shattered Porcelain":0xeee2e0,
"Shattered Sky":0xd0dde9,
"Shattered White":0xf1f1e5,
"Shaved Chocolate":0x543b35,
"Shaved Ice":0xa9b4ba,
"Shaving Cream":0xe1e5e5,
"Shawarma":0xdd9955,
"She Loves Pink":0xe39b96,
"Shea":0xf8f1eb,
"Shearwater Black":0x5b5b6c,
"Shebang":0x81876f,
"Sheen Green":0x8fd400,
"Sheepskin":0xdab58f,
"Sheepskin Gloves":0xad9e87,
"Sheer Apricot":0xf3c99d,
"Sheer Green":0xb0c69a,
"Sheer Lavender":0xefe2f2,
"Sheer Lilac":0xb793c0,
"Sheer Peach":0xfff7e7,
"Sheer Pink":0xf6e5db,
"Sheer Rosebud":0xffe8e5,
"Sheer Scarf":0xe3d6ca,
"Sheer Sunlight":0xfffedf,
"Sheet Blue":0x52616f,
"Sheet Metal":0x5e6063,
"Sheffield":0x638f7b,
"Sheffield Grey":0x6b7680,
"Sheikh Zayed White":0xe6efef,
"Shell":0xe1cfc6,
"Shell Brook":0xeee7e6,
"Shell Brown":0x56564b,
"Shell Coral":0xea9575,
"Shell Ginger":0xf9e4d6,
"Shell Haven":0xebdfc0,
"Shell Pink":0xf88180,
"Shell Tint":0xfdd7ca,
"Shell White":0xf0ebe0,
"Shelter":0xb8986c,
"Sheltered Bay":0x758f9a,
"Shēn Chéng Orange":0xc03f20,
"Shēn Hóng Red":0xbe0620,
"Shepherd's Warning":0xc06f68,
"Sheraton Sage":0x8f8666,
"Sherbet Fruit":0xf8c8bb,
"Sheriff":0xebcfaa,
"Sheringa Rose":0x735153,
"Sherpa Blue":0x00494e,
"Sherry Cream":0xf9e4db,
"Sherwood Forest":0x555a4c,
"Sherwood Green":0x1b4636,
"Shetland Lace":0xdfd0c0,
"<NAME>":0xe29f31,
"Shiffurple":0x9900aa,
"Shifting Sand":0xd8c0ad,
"Shiitake":0xa5988a,
"Shiitake Mushroom":0x736253,
"Shikon":0x2b2028,
"Shilo":0xe6b2a6,
"Shimmer":0x88c7e9,
"Shimmering Blue":0x82dbcc,
"Shimmering Blush":0xd98695,
"Shimmering Brook":0x64b3d3,
"Shimmering Champagne":0xf3debc,
"Shimmering Expanse Cyan":0x45e9fd,
"Shimmering Glade":0xa4943a,
"Shimmering Love":0xff88cc,
"Shimmering Pool":0xd2efe6,
"Shimmering Sea":0x2b526a,
"Shimmering Sky":0xdbd1e8,
"Shin Godzilla":0x9a373f,
"Shinbashi":0x59b9c6,
"Shinbashi Azure":0x006c7f,
"Shindig":0x00a990,
"Shine Baby Shine":0xa85e6e,
"Shiner":0x773ca7,
"Shingle Fawn":0x745937,
"Shining Armor":0x908b8e,
"Shining Gold":0xbad147,
"Shining Knight":0x989ea7,
"Shining Silver":0xc7c7c9,
"Shinkansen White":0xdacdcd,
"Shinshu":0x8f1d21,
"Shiny Armor":0xa1a9a8,
"Shiny Gold":0xae9f65,
"Shiny Kettle":0xcea190,
"Shiny Luster":0xdbdddb,
"Shiny Nickel":0xccd3d8,
"Shiny Rubber":0x3a363b,
"Shiny Shamrock":0x5fa778,
"Shiny Silk":0xf7ecca,
"Ship Cove":0x7988ab,
"Ship Grey":0x3e3a44,
"Ship Steering Wheel":0x62493b,
"Ship's Harbour":0x4f84af,
"Ship's Officer":0x2d3a49,
"Shipwreck":0x968772,
"Shipyard":0x4f6f85,
"<NAME>":0xc48e69,
"Shiraz":0x842833,
"Shire":0x646b59,
"Shire Green":0x68e52f,
"Shiroi White":0xebf5f0,
"Shironeri Silk":0xfeddcb,
"Shirt Blue":0x6598af,
"Shisha Coal":0x3c3b3c,
"Shishi Pink":0xefab93,
"Shishito Pepper Green":0xbbf90f,
"Shiso Green":0x63a950,
"Shiva Blue":0x99dbfe,
"Shock Jockey":0xbb88aa,
"Shocking":0xe899be,
"Shocking Pink":0xfe02a2,
"Shockwave":0x72c8b8,
"Shoe Wax":0x2b2b2b,
"Shoelace":0xeae4d9,
"Shoelace Beige":0xf6ebd3,
"Shōji":0xded5c7,
"Shoji White":0xe6dfd3,
"Shojo's Blood":0xe2041b,
"Shōjōhi Red":0xdc3023,
"Shooting Star":0xecf0eb,
"Shopping Bag":0x5a4743,
"Shore Water":0x6797a2,
"Shoreland":0xead9cb,
"Shoreline Green":0x58c6ab,
"Shoreline Haze":0xd2cbbc,
"Short and Sweet":0xedd1d3,
"Short Phase":0xbbdfd5,
"Shortbread":0xf5e6d3,
"Shortbread Cookie":0xeaceb0,
"Shortcake":0xeedaac,
"Shortgrass Prairie":0x9e957c,
"Shot Over":0x4a5c69,
"Shot-Put":0x716b63,
"Shovel Knight":0x37c4fa,
"Show Business":0xdd835b,
"Show Stopper":0xa42e37,
"Shower":0x9fadb7,
"Showstopper":0x7f607f,
"Shrimp":0xe29a86,
"Shrimp Boat":0xf5be9d,
"Shrimp Boudin":0xdbbfa3,
"Shrimp Cocktail":0xf4a460,
"Shrimp Toast":0xf7c5a0,
"Shrine of Pleasures":0xcc3388,
"Shrinking Violet":0x5d84b1,
"Shrub Green":0x003636,
"Shrubbery":0xa9c08a,
"Shrubby Lichen":0xb5d1db,
"Shu Red":0xeb6101,
"Shǔi Cǎo Lǜ Green":0x40826d,
"Shui Jiao Dumpling":0xdccca3,
"Shukra Blue":0x2b64ad,
"Shuriken":0x333344,
"Shutter Blue":0x666e7f,
"Shutter Copper":0xbb6622,
"Shutter Grey":0x797f7d,
"Shutterbug":0xbba262,
"Shutters":0x6c705e,
"Shuttle Grey":0x61666b,
"Shy Beige":0xe2ded6,
"Shy Blunt":0xd3d8de,
"Shy Candela":0xd6dde6,
"Shy Cupid":0xf0d6ca,
"Shy Denim":0xd7dadd,
"Shy Girl":0xffd7cf,
"Shy Green":0xe5e8d9,
"Shy Guy Red":0xaa0055,
"Shy Mint":0xe0e4db,
"Shy Moment":0xaaaaff,
"Shy Pink":0xdfd9dc,
"Shy Smile":0xdcbbbe,
"Shy Violet":0xd6c7d6,
"Shylock":0x5ab9a4,
"Shyness":0xf3f3d9,
"Siam":0x686b50,
"Siam Gold":0x896f40,
"Siamese Green":0x9dac79,
"Siamese Kitten":0xefe1d5,
"Siberian Fur":0xeee2d5,
"Siberian Green":0x4e6157,
"<NAME>":0xff44ff,
"Sicilian Villa":0xfcc792,
"S<NAME>":0xc1c6ad,
"Sick Blue":0x502d86,
"Sick Green":0x9db92c,
"Sickly Green":0x94b21c,
"Sickly Yellow":0xd0e429,
"Sidecar":0xe9d9a9,
"Sidekick":0xbfc3ae,
"Sideshow":0xe2c591,
"Sidewalk Chalk Blue":0xdbe9ed,
"Sidewalk Chalk Pink":0xf7ccc4,
"Sidewalk Grey":0x7b8f99,
"Sienna":0xa9561e,
"Sienna Buff":0xcda589,
"<NAME>ust":0xdcc4ac,
"Sienna Ochre":0xde9f83,
"Sienna Red":0xb1635e,
"Sienna Yellow":0xf1d28c,
"Sierra":0x985c41,
"Sierra Foothills":0xa28a67,
"Sierra Madre":0xc2bcae,
"Sierra Redwood":0x924e3c,
"Sierra Sand":0xafa28f,
"Siesta":0xf0c3a7,
"Siesta Dreams":0xc9a480,
"Siesta Rose":0xec7878,
"Siesta Sands":0xf1e6e0,
"Siesta Tan":0xe9d8c8,
"Siesta White":0xcbdadb,
"Sightful":0x76a4a6,
"Sigmarite":0xcaad76,
"Sign of Spring":0xe3ede2,
"Sign of the Crown":0xfce299,
"Signal Green":0x33ff00,
"Signal Grey":0x838684,
"Signal Pink":0xb15384,
"Signal White":0xecece6,
"Signature Blue":0x455371,
"Silence":0xeaede5,
"Silence is Golden":0xc2a06d,
"Silent Breath":0xe9f1ec,
"Silent Breeze":0xc6eaec,
"Silent Delight":0xe5e7e8,
"Silent Film":0x9fa5a5,
"Silent Ivory":0xfef2c7,
"Silent Night":0x526771,
"Silent Ripple":0xabe3de,
"Silent Sage":0x729988,
"Silent Sands":0xa99582,
"Silent Sea":0x3a4a63,
"Silent Smoke":0xdbd7ce,
"Silent Storm":0xc3c7bd,
"Silent Tide":0x7c929a,
"Silent White":0xe5e7e4,
"Silentropae Cloud":0xccbbbb,
"Silhouette":0xcbcdc4,
"Silica Sand":0xede2e0,
"Silicate Green":0x88b2a9,
"Silicate Light Turquoise":0xcddad3,
"Siliceous Red":0x5a3d4a,
"Silicone Seduction":0xebe0ca,
"Silicone Serena":0xdcdccf,
"Silithus Brown":0xd57b65,
"Silk":0xbbada1,
"Silk Chiffon":0xccbfc7,
"Silk Crepe Grey":0x354e4b,
"Silk Crepe Mauve":0x6e7196,
"Silk Dessou":0xeee9dc,
"Silk Elegance":0xf6e8de,
"Silk Gown":0xfceedb,
"Silk Jewel":0x02517a,
"Silk Khimar":0x70939e,
"Silk Lilac":0x9188b5,
"Silk Lining":0xfcefe0,
"Silk Pillow":0xf3f0ea,
"Silk Ribbon":0xc86e8b,
"Silk Road":0x97976f,
"Silk Sails":0xf6eecd,
"Silk Sari":0x009283,
"Silk Sheets":0xefdddf,
"Silk Sox":0xa5b2c7,
"Silk Star":0xf5eec6,
"Silk Stone":0xcc9999,
"Silken Peacock":0x427584,
"Silken Pine":0x495d5a,
"Silken Raspberry":0xaa7d89,
"Silken Tofu":0xfef6d8,
"Silkie Chicken":0xfdefdb,
"Silkworm":0xeeeecc,
"Silky Bamboo":0xeae0cd,
"Silky Green":0xbdc2bb,
"Silky Mint":0xd7ecd9,
"Silky Pink":0xffddf4,
"Silky Tofu":0xfff5e4,
"Silky White":0xeeebe2,
"Silky Yogurt":0xf2f3cd,
"Silly Puddy":0xf4b0bb,
"Silt":0x8a7d72,
"Silt Green":0xa9bdb1,
"Silver":0xc0c0c0,
"Silver Ash":0xdddbd0,
"Silver Bells":0xb8b4b6,
"Silver Birch":0xd2cfc4,
"Silver Bird":0xfbf5f0,
"Silver Blue":0x8a9a9a,
"Silver Blueberry":0x5b7085,
"Silver Bullet":0xb6b5b8,
"Silver Chalice":0xacaea9,
"Silver Charm":0xadb0b4,
"Silver City":0xe2e4e9,
"Silver Cloud":0xbeb7b0,
"Silver Clouds":0xa6aaa2,
"Silver Creek":0xd9dad2,
"Silver Cross":0xcdc5c2,
"Silver Dagger":0xc1c1d1,
"Silver Dollar":0xbdb6ae,
"Silver Drop":0x9ab2a9,
"Silver Dust":0xe8e7e0,
"Silver Feather":0xedebe7,
"Silver Fern":0xe1ddbf,
"Silver Filigree":0x7f7c81,
"Silver Fir Blue":0x7196a2,
"Silver Fox":0xbdbcc4,
"Silver Grass":0xc6cec3,
"Silver Grass Traces":0xdfe4dc,
"Silver Gray":0xb8b2a2,
"Silver Green":0xd7d7c7,
"Silver Grey":0xa8a8a4,
"Silver Hill":0x6d747b,
"Silver Lake":0xdedddd,
"Silver Lake Blue":0x618bb9,
"Silver Laurel":0xd8dcc8,
"Silver Leaf":0x9db7a5,
"Silver Linden Grey":0x859382,
"Silver Lined":0xbbbfc3,
"Silver Lining":0xbdb6ab,
"Silver Lustre":0xa8a798,
"Silver Maple Green":0x71776e,
"Silver Marlin":0xc8c8c0,
"Silver Mauve":0xdbccd3,
"Silver Medal":0xd6d6d6,
"Silver Mine":0xbec2c1,
"Silver Mink":0x9f8d7c,
"Silver Moon":0xd9d7c9,
"Silver Peony":0xe7cfc7,
"Silver Pink":0xdcb1af,
"Silver Polish":0xc6c6c6,
"Silver Rose":0xd29ea6,
"Silver Rust":0xc9a0df,
"Silver Sage":0x938b78,
"Silver Sand":0xbebdb6,
"Silver Sands":0xdadedd,
"Silver Sateen":0xc7c6c0,
"Silver Sconce":0xa19fa5,
"Silver Screen":0xa6aeaa,
"Silver Service":0xb2aaaa,
"Silver Setting":0xd8dadb,
"Silver Shadow":0xd8dad8,
"Silver Skate":0x87a1b1,
"Silver Sky":0xeaece9,
"Silver Snippet":0x8e9090,
"Silver Spoon":0xd3d3d2,
"Silver Springs":0xb7bdc4,
"Silver Spruce":0xcadfdd,
"Silver Star":0x98a0b8,
"Silver Storm":0x8599a8,
"Silver Strand":0xb8c7ce,
"Silver Strand Beach":0xcacdca,
"Silver Strawberry":0xf2c1c0,
"Silver Surfer":0x7e7d88,
"Silver Sweetpea":0xc4c9e2,
"Silver Thistle Beige":0xe7d5c5,
"Silver Tinsel":0xb6b3a9,
"Silver Tipped Sage":0xbfc2bf,
"Silver Tradition":0xd9d9d3,
"Silver Tree":0x67be90,
"Silver Willow Green":0x637c5b,
"Silverado":0x6a6472,
"Silverado Ranch":0xa7a89b,
"Silverado Trail":0xb7bbc6,
"Silverbeet":0x5a6a43,
"Silverberry":0xbebbc9,
"Silverfish":0x8d95aa,
"Silvermist":0xb0b8b2,
"Silverpine":0x4e6866,
"Silverpine Cyan":0x8ae8ff,
"Silverplate":0xc2c0ba,
"Silverpointe":0xd1d2cb,
"Silverstone":0xb1b3b3,
"Silverton":0xbfd9ce,
"Silverware":0xb8b8bf,
"Silvery Moon":0xe6e5dc,
"Silvery Streak":0xd5dbd5,
"Simmered Seaweed":0x4c3d30,
"Simmering Ridge":0xcb9281,
"Simmering Smoke":0xa99f96,
"Simple Pink":0xf9a3aa,
"Simple Serenity":0xc8d9e5,
"Simple Silhouette":0x7a716e,
"Simple Stone":0xcdc7b7,
"Simple White":0xdfd9d2,
"Simplicity":0xced0db,
"Simplify Beige":0xd6c7b9,
"Simply Blue":0xadbbc9,
"Simply Delicious":0xffd2c1,
"Simply Elegant":0xcedde7,
"Simply Green":0x009b75,
"Simply Peachy":0xffc06c,
"Simply Posh":0x8cb9d4,
"Simply Sage":0xa7a996,
"Simply Sparkling":0xb0c5e0,
"Simply Taupe":0xad9f93,
"Simply Violet":0xa6a1d7,
"Simpson Surprise":0x82856d,
"Simpsons Yellow":0xffd90f,
"Sin City":0xcfa236,
"Sinatra":0x4675b7,
"Sinbad":0xa6d5d0,
"Sinful":0x645059,
"Singapore Orchid":0xa020f0,
"Singing Blue":0x0074a4,
"Singing in the Rain":0x8e9c98,
"Singing the Blues":0x2b4d68,
"Sinister":0x12110e,
"Sinister Minister":0x353331,
"Sinister Mood":0xa89c94,
"Siniy Blue":0x4c4dff,
"Sinkhole":0x49716d,
"Sinking Sand":0xd8b778,
"Sinner's City":0xfee5cb,
"Sinoper Red":0xbb1111,
"Sinopia":0xcb410b,
"Sip of Mint":0xdedfc9,
"Sip of Nut Milk":0xeae2df,
"Sir Edmund":0x20415d,
"Siren":0x69293b,
"Sirocco":0x68766e,
"Sis Kebab":0x884411,
"Sisal":0xc5baa0,
"Siskin Green":0xc8c76f,
"Siskin Sprout":0x7a942e,
"Site White":0xdcdedc,
"Sitter Red":0x3c2233,
"Sixteen Million Pink":0xfd02ff,
"Sixties Blue":0x0079a9,
"Siyâh Black":0x1c1b1a,
"Sizzling Hot":0xa36956,
"Sizzling Red":0xff3855,
"Sizzling Sunrise":0xffdb00,
"Sizzling Sunset":0xeb7e4d,
"Skarsnik Green":0x5f9370,
"Skavenblight Dinge":0x47413b,
"Skeleton":0xebdecc,
"Skeleton Bone":0xf4ebbc,
"Skeletor's Cape":0x773399,
"Skeptic":0x9db4aa,
"Ski Patrol":0xbb1237,
"Ski Slope":0xe1e5e3,
"Skilandis":0x41332f,
"Skimmed Milk White":0xfeffe3,
"Skin Tone":0xdecaae,
"Skink Blue":0x5cbfce,
"Skinny Dip":0xf9dbd2,
"Skinny Jeans":0x5588ff,
"Skipper":0x748796,
"Skipper Blue":0x484a72,
"Skipping Rocks":0xd1d0c9,
"Skipping Stone":0xd0cbb6,
"Skirret Green":0x51b73b,
"Skobeloff":0x007474,
"Skrag Brown":0xb04e0f,
"Skull":0xe3dac9,
"Skullcrusher Brass":0xf1c78e,
"Skullfire":0xf9f5da,
"Sky":0x76d6ff,
"Sky Babe":0x88c1d8,
"Sky Blue":0x9fb9e2,
"Sky Blue Pink":0xdcbfe1,
"Sky Bus":0x99c1d6,
"Sky Captain":0x262934,
"Sky Chase":0xa5cad1,
"Sky City":0xa0bdd9,
"Sky Cloud":0xaddee5,
"Sky Dancer":0x4499ff,
"Sky Eyes":0x8eaabd,
"Sky Fall":0x89c6df,
"Sky Glass":0xd1dcd8,
"Sky Grey":0xbcc8c6,
"Sky High":0xa7c2eb,
"Sky Light View":0xcadade,
"Sky Lodge":0x546977,
"Sky Magenta":0xcf71af,
"Sky of Magritte":0x0099ff,
"Sky of Ocean":0x82cde5,
"Sky Pilot":0xa2bad4,
"Sky Splash":0xc9d3d3,
"Sky Wanderer":0xb8dced,
"Sky Watch":0x8acfd6,
"Sky's the Limit":0xbbcee0,
"Skyan":0x66ccff,
"Skydiver":0x83acd3,
"Skydiving":0xc6d6d7,
"Skydome":0x38a3cc,
"Skylark":0xc1e4f0,
"Skylight":0xc8e0e0,
"Skyline":0x959eb7,
"Skyline Steel":0xb9c0c3,
"Skylla":0x1f7cc2,
"Skysail Blue":0x818db3,
"Skyscraper":0xd3dbe2,
"Skywalker":0xc1deea,
"Skyway":0xadbed3,
"Slaanesh Grey":0xdbd5e6,
"Slap Happy":0xc9cc4a,
"Slate":0x516572,
"Slate Black":0x4b3d33,
"Slate Blue":0x5b7c99,
"Slate Brown":0xa0987c,
"Slate Green":0x658d6d,
"Slate Grey":0x59656d,
"Slate Mauve":0x625c63,
"Slate Pebble":0xb4ada9,
"Slate Pink":0xb3586c,
"Slate Rock":0x868988,
"Slate Rose":0xb45865,
"Slate Stone":0xacb4ac,
"Slate Tile":0x606e74,
"Slate Tint":0x7a818d,
"Slate Violet":0x989192,
"Slate Wall":0x40535d,
"Sled":0x4c5055,
"Sleek White":0xfaf6e9,
"Sleep":0x4579ac,
"Sleep Baby Sleep":0xbed1e1,
"Sleeping Easy":0x98bddd,
"Sleeping Giant":0x786d5e,
"Sleepy Blue":0xbccbce,
"Sleepy Hollow":0xb7c9d1,
"Sleepy Owlet":0xb5a78d,
"Sleet":0x92949b,
"Slender Reed":0xdec29f,
"Slice of Heaven":0x0022ee,
"Slice of Watermelon":0xe1697c,
"Sliced Cucumber":0xcccfbf,
"Slices of Happy":0xede5bc,
"Slick Blue":0x73ccd8,
"Slick Green":0x615d4c,
"Slick Mud":0xa66e49,
"Sliding":0x97aeaf,
"Slight Mushroom":0xcfc9c5,
"Slightly Golden":0xcb904e,
"Slightly Peach":0xf1ddd8,
"Slightly Rose":0xe6cfce,
"Slightly Spritzig":0x92d2ed,
"Slightly Zen":0xdce4dd,
"Slime":0xa7c408,
"Slime Lime":0xb8ebc5,
"Slimer Green":0xaadd00,
"Slimy Green":0x7ded17,
"Slipper Satin":0xbfc1cb,
"Slippery Moss":0xbeba82,
"Slippery Salmon":0xf87e63,
"Slippery Shale":0x7b766c,
"Slippery Soap":0xefedd8,
"Slippery Stone":0x8d6a4a,
"Slippery Tub":0xd5f3ec,
"Slopes":0xd2b698,
"Slow Dance":0xdbdcc4,
"Slow Green":0xc6d5c9,
"Slow Perch":0xd5d4ce,
"Slubbed Silk":0xe1c2be,
"Sludge":0xca6b02,
"Slugger":0x42342b,
"Slumber":0x2d517c,
"Slumber Sloth":0xcdc5b5,
"Sly Fox":0x804741,
"Sly Shrimp":0xf8e2d9,
"Smallmouth Bass":0xac9a7e,
"Smalt":0x003399,
"Smalt Blue":0x496267,
"Smaragdine":0x4a9976,
"Smart White":0xf6f3ec,
"Smashed Grape":0x8775a1,
"Smashed Potatoes":0xe2d0b9,
"Smashed Pumpkin":0xff6d3a,
"Smashing Pumpkins":0xff5522,
"Smell of Garlic":0xd9ddcb,
"Smell of Lavender":0xdce0ea,
"Smell the Roses":0xbb7283,
"Smells of Fresh Bread":0xd7cecd,
"Smiley Face":0xffc962,
"Smitten":0xc84186,
"Smock Blue":0x3b646c,
"Smoke":0xbfc8c3,
"Smoke & Ash":0x939789,
"Smoke and Mirrors":0xd9e6e8,
"Smoke Blue":0x6688bb,
"Smoke Bush":0xcc7788,
"Smoke Bush Rose":0xad8177,
"Smoke Cloud":0xadb6b9,
"Smoke Dragon":0xccbbaa,
"Smoke Green":0xa8bba2,
"Smoke Grey":0xcebaa8,
"Smoke Pine":0x3e6257,
"Smoke Screen":0xaeaeae,
"Smoke Tree":0xbb5f34,
"Smoked Amethyst":0x5a4351,
"Smoked Black Coffee":0x3b2f2f,
"Smoked Claret":0x583a39,
"Smoked Flamingo":0x674244,
"Smoked Lavender":0xceb5b3,
"Smoked Mauve":0xa89c97,
"Smoked Mulberry":0x725f6c,
"Smoked Oak Brown":0x573f16,
"Smoked Oyster":0xd9d2cd,
"Smoked Paprika":0x6e362c,
"Smoked Pearl":0x656466,
"Smoked Purple":0x444251,
"Smoked Salmon":0xfa8072,
"Smoked Silver":0xddbbcc,
"Smoked Tan":0xaea494,
"Smoked Umber":0xd0c6bd,
"Smokehouse":0x716354,
"Smokescreen":0x5e5755,
"Smokestack":0xbeb2a5,
"Smokey Blue":0x647b84,
"Smokey Claret":0x88716d,
"Smokey Cream":0xe9dfd5,
"Smokey Lilac":0x9a9da2,
"Smokey Pink":0xcebdb4,
"Smokey Slate":0xa5b5ac,
"Smokey Tan":0x9f8c7c,
"Smokey Topaz":0xa57b5b,
"Smokey Wings":0xa7a5a3,
"Smokin Hot":0x954a3d,
"Smoking Mirror":0xa29587,
"Smoking Night Blue":0x43454c,
"Smoking Red":0x992200,
"Smoky":0x605d6b,
"Smoky Azurite":0x708d9e,
"Smoky Beige":0xb9a796,
"Smoky Black":0x100c08,
"Smoky Blue":0x7196a6,
"Smoky Day":0xa49e93,
"Smoky Emerald":0x4c726b,
"Smoky Forest":0x817d68,
"Smoky Grape":0x9b8fa6,
"Smoky Grey Green":0x939087,
"Smoky Mauve":0x998ba5,
"Smoky Mountain":0xafa8a9,
"Smoky Orchid":0xe1d9dc,
"Smoky Pink":0xbb8d88,
"Smoky Quartz":0x51484f,
"Smoky Salmon":0xe2b6a7,
"Smoky Slate":0xa1a18f,
"Smoky Sunrise":0xaa9793,
"Smoky Tone":0x9d9e9d,
"Smoky Topaz":0x7e7668,
"Smoky Trout":0x857d72,
"Smoky White":0xaeada3,
"Smoky Wings":0xb2aca9,
"Smoldering Copper":0xaa6e4b,
"Smooth As Corn Silk":0xf4e4b3,
"Smooth Beech":0xd3bb96,
"Smooth Coffee":0x5d4e4c,
"Smooth Satin":0xa2d5d3,
"Smooth Silk":0xf6ead2,
"Smooth Stone":0xbcb6b3,
"Smooth-Hound Shark":0x97b2b1,
"Smoothie Green":0x988e01,
"Smudged Lips":0xee4466,
"Snail Trail Silver":0xe9eeeb,
"Snake Eyes":0xe9cb4c,
"Snake Fruit":0xdb2217,
"Snake River":0x45698c,
"Snakebite":0xbb4444,
"Snakebite Leather":0xbaa208,
"Snakes in the Grass":0x889717,
"Snap Pea Green":0x8a8650,
"Snap-Shot":0x2b3e52,
"Snapdragon":0xfed777,
"Snappy Happy":0xeb8239,
"Snappy Violet":0xcc0088,
"Snarky Mint":0x9ae37d,
"Sneaky Sesame":0x896a46,
"Sneezy":0x9d7938,
"Snip of Parsley":0x718854,
"Snip of Tannin":0xdccebb,
"Snobby Shore":0xdd7733,
"Snoop":0x49556c,
"Snorkel Blue":0x034f84,
"Snorkel Sea":0x004f7d,
"Snorlax":0x222277,
"Snot":0xacbb0d,
"Snot Green":0x9dc100,
"Snow":0xfffafa,
"Snow Ballet":0xdef1e7,
"Snow Cloud":0xe5e9eb,
"Snow Crystal Green":0xe4f0e8,
"Snow Day":0xf7f5ed,
"Snow Drift":0xe3e3dc,
"Snow Fall":0xf3f2eb,
"Snow Flurry":0xeaf7c9,
"Snow Globe":0xf4f2e9,
"Snow Goose":0xc3d9cb,
"Snow Green":0xc8dac2,
"Snow Leopard":0xcfdfdb,
"Snow Pea":0x6ccc7b,
"Snow Peak":0xe0dcdb,
"Snow Plum":0xf4eaf0,
"Snow | |
<reponame>haiyangxue/fairseq
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections.abc import Iterable
from fairseq import options, utils
import argparse
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
AudioEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
TransformerDecoderLayer,
TransformerEncoderLayer,
TransformerAudioDecoderLayer,
VGGBlock,
)
import random
from ..data.data_utils import lengths_to_encoder_padding_mask
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
@register_model('audio_transformer')
class AudioTransformerModel(AudioEncoderDecoderModel):
"""
    Transformer model from `"Attention Is All You Need" (Vaswani et al., 2017)
<https://arxiv.org/abs/1706.03762>`_.
Args:
encoder (TransformerEncoder): the encoder
decoder (TransformerDecoder): the decoder
The Transformer model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.transformer_parser
:prog:
"""
@classmethod
def hub_models(cls):
# fmt: off
return {
'transformer.wmt14.en-fr': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2',
'transformer.wmt16.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2',
'transformer.wmt18.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz',
'transformer.wmt19.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz',
'transformer.wmt19.en-ru': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz',
'transformer.wmt19.de-en': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz',
'transformer.wmt19.ru-en': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz',
'transformer.wmt19.en-de.single_model': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.single_model.tar.gz',
'transformer.wmt19.en-ru.single_model': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.single_model.tar.gz',
'transformer.wmt19.de-en.single_model': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.single_model.tar.gz',
'transformer.wmt19.ru-en.single_model': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.single_model.tar.gz',
}
# fmt: on
    def __init__(self, encoder, audio_encoder, decoder):
        super().__init__(encoder, audio_encoder, decoder)
self.supports_align_args = True
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion'),
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
# args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019)
parser.add_argument('--no-cross-attention', default=False, action='store_true',
help='do not perform cross-attention')
parser.add_argument('--cross-self-attention', default=False, action='store_true',
help='perform cross+self-attention')
parser.add_argument('--layer-wise-attention', default=False, action='store_true',
help='perform layer-wise attention (cross-attention or cross+self-attention)')
# args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for encoder')
parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for decoder')
parser.add_argument('--encoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
parser.add_argument('--decoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if args.encoder_layers_to_keep:
args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if not hasattr(args, 'max_source_positions'):
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if not hasattr(args, 'max_target_positions'):
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError('--share-all-embeddings requires a joined dictionary')
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
'--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path):
raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = build_embedding(
tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
audio_encoder = cls.build_audio_encoder(args)
# encoder=None
# return cls(encoder, decoder)
#
        return cls(encoder, audio_encoder, decoder)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoder(args, src_dict, embed_tokens)
@classmethod
def build_audio_encoder(cls, args):
return VGGTransformerEncoder(
input_feat_per_channel=args.input_feat_per_channel,
vggblock_config=eval(args.vggblock_enc_config),
transformer_config=eval(args.transformer_enc_config),
encoder_output_dim=args.enc_output_dim,
in_channels=args.in_channels,
)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoder(
args,
tgt_dict,
embed_tokens,
no_encoder_attn=getattr(args, 'no_cross_attention', False),
)
def prepare_transformer_encoder_params(input_dim, num_heads, ffn_dim,
                                        normalize_before, dropout,
                                        attention_dropout, relu_dropout):
args = argparse.Namespace()
args.encoder_embed_dim = input_dim
args.encoder_attention_heads = num_heads
# args.attention_dropout = attention_dropout
# args.dropout = dropout
# args.activation_dropout = relu_dropout
args.attention_dropout = 0
args.dropout = 0
args.activation_dropout = 0
args.encoder_normalize_before = normalize_before
args.encoder_ffn_embed_dim = ffn_dim
return args
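# Illustrative sketch (not part of the original file): the namespace returned by
# prepare_transformer_encoder_params() sets exactly the attributes that
# TransformerEncoderLayer reads, so it is presumably used to build the audio
# encoder's transformer layers. The concrete dimensions below are placeholders.
def _example_audio_transformer_layer():
    layer_args = prepare_transformer_encoder_params(
        input_dim=512, num_heads=8, ffn_dim=2048,
        normalize_before=True, dropout=0.1,
        attention_dropout=0.1, relu_dropout=0.1,
    )
    # Build a single encoder layer from the synthesized argument namespace.
    return TransformerEncoderLayer(layer_args)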
class TransformerEncoder(FairseqEncoder):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, args, dictionary, embed_tokens):
super().__init__(dictionary)
self.register_buffer('version', torch.Tensor([3]))
self.dropout = args.dropout
self.encoder_layerdrop = args.encoder_layerdrop
self.src_dic=dictionary
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = PositionalEmbedding(
args.max_source_positions, embed_dim, self.padding_idx,
learned=args.encoder_learned_pos,
) if not args.no_token_positional_embeddings else None
self.layer_wise_attention = getattr(args, 'layer_wise_attention', False)
self.layers = nn.ModuleList([])
self.layers.extend([
TransformerEncoderLayer(args)
for i in range(args.encoder_layers)
])
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
    def freeze_module_params(self):
        # Freeze all encoder parameters so they are excluded from training.
        for p in self.parameters():
            p.requires_grad = False
def forward_embedding(self, src_tokens):
# embed tokens and positions
embed = self.embed_scale * self.embed_tokens(src_tokens)
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
x = F.dropout(x, p=self.dropout, training=self.training)
return x, embed
def forward(self, src_tokens, src_lengths, cls_input=None, return_all_hiddens=False):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False)
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
if self.layer_wise_attention:
return_all_hiddens = True
x, encoder_embedding = self.forward_embedding(src_tokens)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
if not encoder_padding_mask.any():
encoder_padding_mask = None
encoder_states = [] if return_all_hiddens else None
# encoder layers
for layer in self.layers:
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if not self.training or (dropout_probability > self.encoder_layerdrop):
x = layer(x, encoder_padding_mask)
if return_all_hiddens:
encoder_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
if return_all_hiddens:
encoder_states[-1] = x
# print(encoder_padding_mask)
# exit()
return {
'encoder_out': x, # T x B x C
'encoder_padding_mask': encoder_padding_mask, # B x T
'encoder_embedding': encoder_embedding, # B x T x C
'encoder_states': encoder_states, # List[T x B x C]
}
def reorder_encoder_out(self, encoder_out, new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if encoder_out['encoder_out'] is not None:
encoder_out['encoder_out'] = \
encoder_out['encoder_out'].index_select(1, new_order)
if encoder_out['encoder_padding_mask'] is not None:
encoder_out['encoder_padding_mask'] = \
encoder_out['encoder_padding_mask'].index_select(0, new_order)
if encoder_out.get('encoder_states', None) is not None:
for idx, state in enumerate(encoder_out['encoder_states']):
encoder_out['encoder_states'][idx] = state.index_select(1, new_order)
return encoder_out
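    # Illustrative note (not from the original source): during beam search the
    # sequence generator typically calls reorder_encoder_out() with an index
    # tensor that repeats each batch element once per beam, e.g.
    #
    #     new_order = torch.tensor([0, 0, 1, 1])   # batch of 2, beam size 2
    #     encoder_out = encoder.reorder_encoder_out(encoder_out, new_order)
    #
    # so the encoder states stay aligned with the expanded decoder batch.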
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions())
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:
self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
if self._future_mask.size(0) < dim:
self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = '{}.embed_positions.weights'.format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
for i in range(len(self.layers)):
# update layer norms
self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i))
version_key = '{}.version'.format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
        no_encoder_attn (bool, optional): whether to attend to encoder
<reponame>x22x22/python-ceph-cfg
# Import Python Libs
from __future__ import absolute_import
import logging
import stat
import os.path
import os
import subprocess
# local modules
from . import util_which
from . import utils
from . import model
from . import mdl_updater
from . import presenter
from . import mdl_query
from . import osd
from . import mon
from . import rgw
from . import mds
from . import purger
from . import ops_pool
from . import ops_cephfs
from . import ops_auth
from . import ops_cluster
from . import keyring_use
from . import ops_osd
log = logging.getLogger(__name__)
class Error(Exception):
"""
Error
"""
def __str__(self):
doc = self.__doc__.strip()
return ': '.join([doc] + [str(a) for a in self.args])
def partition_list():
'''
List partitions by disk
'''
m = model.model()
u = mdl_updater.model_updater(m)
u.symlinks_refresh()
u.partitions_all_refresh()
p = presenter.mdl_presentor(m)
return p.partitions_all()
def partition_list_osd():
'''
List all OSD data partitions by partition
'''
m = model.model()
u = mdl_updater.model_updater(m)
u.symlinks_refresh()
u.partitions_all_refresh()
u.discover_partitions_refresh()
p = presenter.mdl_presentor(m)
return p.discover_osd_partitions()
def partition_list_journal():
'''
List all OSD journal partitions by partition
'''
m = model.model()
u = mdl_updater.model_updater(m)
u.symlinks_refresh()
u.partitions_all_refresh()
u.discover_partitions_refresh()
p = presenter.mdl_presentor(m)
return p.discover_journal_partitions()
def osd_discover():
"""
List all OSD by cluster
"""
m = model.model()
u = mdl_updater.model_updater(m)
u.symlinks_refresh()
u.partitions_all_refresh()
u.discover_partitions_refresh()
p = presenter.mdl_presentor(m)
return p.discover_osd()
def partition_is(dev):
"""
Check whether a given device path is a partition or a full disk.
Args:
dev : Block device to test.
"""
mdl = model.model()
osdc = osd.osd_ctrl(mdl)
return osdc.is_partition(dev)
def _update_partition(action, dev, description):
# try to make sure the kernel refreshes the table. note
# that if this gets ebusy, we are probably racing with
# udev because it already updated it.. ignore failure here.
# On RHEL and CentOS distros, calling partprobe forces a reboot of the
    # server. Since we are not resizing partitions, we rely on calling
# partx
utils.execute_local_command(
[
util_which.which_partprobe.path,
dev,
],
)
def zap(dev = None, **kwargs):
"""
Destroy the partition table and content of a given disk.
"""
if dev is not None:
log.warning("Depricated use of function, use kwargs")
dev = kwargs.get("dev", dev)
    if dev is None:
raise Error('Cannot find', dev)
if not os.path.exists(dev):
raise Error('Cannot find', dev)
dmode = os.stat(dev).st_mode
mdl = model.model(**kwargs)
osdc = osd.osd_ctrl(mdl)
if not stat.S_ISBLK(dmode) or osdc.is_partition(dev):
raise Error('not full block device; cannot zap', dev)
try:
log.debug('Zapping partition table on %s', dev)
# try to wipe out any GPT partition table backups. sgdisk
# isn't too thorough.
lba_size = 4096
size = 33 * lba_size
with open(dev, 'wb') as dev_file:
dev_file.seek(-size, os.SEEK_END)
            dev_file.write(size * b'\0')
utils.execute_local_command(
[
util_which.which_sgdisk.path,
'--zap-all',
'--',
dev,
],
)
utils.execute_local_command(
[
util_which.which_sgdisk.path,
'--clear',
'--mbrtogpt',
'--',
dev,
],
)
_update_partition('-d', dev, 'zapped')
except subprocess.CalledProcessError as e:
raise Error(e)
return True
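# Hedged usage sketch (added for illustration; not part of the original
# module). The device path below is a placeholder -- zap() refuses to run on
# partitions or missing paths, so it must point at a whole block device.
def _example_zap_whole_disk():
    return zap(dev='/dev/sdb')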
def osd_prepare(**kwargs):
"""
prepare an OSD
Args:
**kwargs: Arbitrary keyword arguments.
            cluster_uuid : Set the cluster UUID to which the OSD data will be
                added. Defaults to the value found in the local config.
            journal_dev : Set the journal device. Defaults to osd_dev.
            cluster_name : Set the cluster name. Defaults to "ceph".
osd_fs_type : Set the file system to store OSD data with. Defaults
to "xfs".
osd_uuid : Set the OSD data UUID. If set will return if OSD with
data UUID already exists.
journal_uuid : Set the OSD journal UUID. If set will return if OSD
with journal UUID already exists.
"""
return osd.osd_prepare(**kwargs)
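# Hedged usage sketch (not part of the original module): preparing an OSD on a
# whole disk with a separate journal device. Parameter names follow the
# docstring above (osd_dev is implied by the journal_dev default); both device
# paths and the UUID are made-up placeholders.
def _example_osd_prepare():
    return osd_prepare(
        osd_dev='/dev/sdb',
        journal_dev='/dev/sdc',
        cluster_name='ceph',
        cluster_uuid='00000000-0000-0000-0000-000000000000',
    )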
def osd_activate(**kwargs):
"""
Activate an OSD
Args:
**kwargs: Arbitrary keyword arguments.
            cluster_uuid : Set the cluster UUID to which the OSD data will be
                added. Defaults to the value found in the local config.
            journal_dev : Set the journal device. Defaults to osd_dev.
            cluster_name : Set the cluster name. Defaults to "ceph".
osd_fs_type : Set the file system to store OSD data with. Defaults
to "xfs".
osd_uuid : Set the OSD data UUID. If set will return if OSD with
data UUID already exists.
journal_uuid : Set the OSD journal UUID. If set will return if OSD
with journal UUID already exists.
"""
return osd.osd_activate(**kwargs)
def osd_reweight(**kwargs):
"""
Reweight an OSD, or OSD's on node.
Args:
**kwargs: Arbitrary keyword arguments.
cluster_name : Set the cluster name. Defaults to "ceph".
            cluster_uuid : Set the cluster UUID to which the data will be added.
                Defaults to the value found in the local config.
osd_number : OSD number to reweight.
weight : The new weight for the node. weight is a float, and must be
in the range 0 to 1.
Note:
Setting the weight to 0 will drain an OSD.
"""
return ops_osd.reweight(**kwargs)
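# Hedged usage sketch (not part of the original module): per the docstring
# above, a weight of 0 drains an OSD. The OSD number is a placeholder.
def _example_drain_osd():
    return osd_reweight(osd_number=2, weight=0.0, cluster_name='ceph')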
def keyring_create(**kwargs):
"""
Create keyring for cluster
Args:
**kwargs: Arbitrary keyword arguments.
keyring_type : Required parameter. Can be set to: admin, mon, osd,
rgw, mds.
cluster_uuid : Set the cluster UUID. Defaults to value found in
ceph config file.
cluster_name : Set the cluster name. Defaults to "ceph".
"""
return keyring_use.keyring_create_type(**kwargs)
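# Hedged usage sketch (not part of the original module): creating a monitor
# keyring through the generic entry point; keyring_mon_create() further below
# is the equivalent convenience wrapper.
def _example_keyring_create_mon():
    return keyring_create(keyring_type='mon', cluster_name='ceph')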
def keyring_save(**kwargs):
"""
    Save a keyring locally
Args:
**kwargs: Arbitrary keyword arguments.
keyring_type: Required parameter. Can be set to: admin, mon, osd,
rgw, mds
secret: The shared secret in the key
key_content : The complete key including capabilities.
cluster_uuid : Set the cluster UUID. Defaults to value found in
ceph config file.
cluster_name : Set the cluster name. Defaults to "ceph".
"""
return keyring_use.keyring_save_type(**kwargs)
def keyring_purge(**kwargs):
"""
Delete keyring for cluster
Args:
**kwargs: Arbitrary keyword arguments.
keyring_type : Required parameter. Can be set to: admin, mon, osd,
rgw, mds
cluster_uuid : Set the cluster UUID. Defaults to value found in
ceph config file.
cluster_name : Set the cluster name. Defaults to "ceph".
"""
return keyring_use.keyring_purge_type(**kwargs)
def keyring_present(**kwargs):
"""
Is keyring on disk
Args:
**kwargs: Arbitrary keyword arguments.
keyring_type : Required parameter. Can be set to: admin, mon, osd,
rgw, mds.
cluster_uuid : Set the cluster UUID. Defaults to value found in
ceph config file.
cluster_name : Set the cluster name. Defaults to "ceph".
"""
return keyring_use.keyring_present_type(**kwargs)
def keyring_auth_add(**kwargs):
"""
Add keyring to authorised list
Args:
**kwargs: Arbitrary keyword arguments.
keyring_type : Required parameter. Can be set to: admin, mon, osd,
rgw, mds.
cluster_uuid : Set the cluster UUID. Defaults to value found in
ceph config file.
cluster_name : Set the cluster name. Defaults to "ceph".
"""
return keyring_use.keyring_auth_add_type(**kwargs)
def keyring_auth_del(**kwargs):
"""
Remove keyring from authorised list
Args:
**kwargs: Arbitrary keyword arguments.
keyring_type
Required parameter. Can be set to: admin, mon, osd, rgw, mds
cluster_uuid
Set the cluster UUID. Defaults to value found in ceph config file.
cluster_name
Set the cluster name. Defaults to "ceph".
"""
return keyring_use.keyring_auth_del_type(**kwargs)
def keyring_admin_create(**kwargs):
"""
Create admin keyring for cluster
Args:
**kwargs: Arbitrary keyword arguments.
cluster_uuid
Set the cluster UUID. Defaults to value found in ceph config file.
cluster_name
Set the cluster name. Defaults to "ceph".
"""
params = dict(kwargs)
params["keyring_type"] = "admin"
return keyring_create(**params)
def keyring_admin_save(key_content=None, **kwargs):
"""
Write admin keyring for cluster
Args:
**kwargs: Arbitrary keyword arguments.
secret: The shared secret in the key
key_content : The complete key including capabilities.
cluster_uuid : Set the cluster UUID. Defaults to value found in
ceph config file.
cluster_name : Set the cluster name. Defaults to "ceph".
"""
params = dict(kwargs)
params["keyring_type"] = "admin"
if key_content is None:
return keyring_save(**params)
log.warning("keyring_admin_save using legacy argument call")
params["key_content"] = str(key_content)
return keyring_save(**params)
def keyring_admin_purge(**kwargs):
"""
    Delete admin keyring for cluster
Args:
**kwargs: Arbitrary keyword arguments.
cluster_uuid : Set the cluster UUID. Defaults to value found in
ceph config file.
cluster_name : Set the cluster name. Defaults to "ceph".
"""
params = dict(kwargs)
params["keyring_type"] = "admin"
return keyring_purge(**params)
def keyring_mon_create(**kwargs):
"""
Create mon keyring for cluster
Args:
**kwargs: Arbitrary keyword arguments.
cluster_uuid : Set the cluster UUID. Defaults to value found in
ceph config file.
cluster_name : Set the cluster name. Defaults to "ceph".
"""
params = dict(kwargs)
params["keyring_type"] = "mon"
return keyring_create(**params)
def keyring_mon_save(key_content=None, **kwargs):
"""
Write mon keyring for cluster
Args:
key_content : The complete key including capabilities.
**kwargs: Arbitrary keyword arguments.
secret: The shared secret in the key
key_content : The complete key including capabilities.
cluster_uuid : Set the cluster UUID. Defaults to value found in
ceph config file.
cluster_name : Set the cluster name. Defaults to "ceph".
"""
params = dict(kwargs)
params["keyring_type"] = "mon"
    if
:return: a dictionary containing the following keys:
* ``messageid``: id of the message (int)
* ``title``: message's title
* ``body``: message's text
* ``timestamp``: UNIX timestamp (long integer) that specifies when
the message was created.
* ``replyto``: The id of the parent message. String with the format
msg-{id}. Its value can be None.
* ``sender``: The nickname of the message's creator.
* ``editor``: The nickname of the message's editor.
Note that all values in the returned dictionary are string unless
otherwise stated.
'''
message_id = 'msg-' + str(row['message_id'])
message_replyto = 'msg-' + str(row['reply_to']) \
if row['reply_to'] is not None else None
message_sender = row['user_nickname']
message_editor = row['editor_nickname']
message_title = row['title']
message_body = row['body']
message_timestamp = row['timestamp']
message = {'messageid': message_id, 'title': message_title,
'timestamp': message_timestamp, 'replyto': message_replyto,
'body': message_body, 'sender': message_sender,
'editor': message_editor}
return message
def _create_message_list_object(self, row):
'''
Same as :py:meth:`_create_message_object`. However, the resulting
dictionary is targeted to build messages in a list.
:param row: The row obtained from the database.
:type row: sqlite3.Row
:return: a dictionary with the keys ``messageid``, ``title``,
``timestamp`` and ``sender``.
'''
message_id = 'msg-' + str(row['message_id'])
message_sender = row['user_nickname']
message_title = row['title']
message_timestamp = row['timestamp']
message = {'messageid': message_id, 'title': message_title,
'timestamp': message_timestamp, 'sender': message_sender}
return message
#Helpers for users
def _create_user_object(self, row):
'''
It takes a database Row and transform it into a python dictionary.
:param row: The row obtained from the database.
:type row: sqlite3.Row
:return: a dictionary with the following format:
.. code-block:: javascript
{'public_profile':{'registrationdate':,'nickname':'',
'signature':'','avatar':''},
'restricted_profile':{'firstname':'','lastname':'','email':'',
'website':'','mobile':'','skype':'',
'age':'','residence':'','gender':'',
'picture':''}
}
where:
* ``registrationdate``: UNIX timestamp when the user registered in
the system (long integer)
* ``nickname``: nickname of the user
* ``signature``: text chosen by the user for signature
* ``avatar``: name of the image file used as avatar
            * ``firstname``: given name of the user
* ``lastname``: family name of the user
* ``email``: current email of the user.
* ``website``: url with the user's personal page. Can be None
* ``mobile``: string showing the user's phone number. Can be None.
* ``skype``: user's nickname in skype. Can be None.
* ``residence``: complete user's home address.
* ``picture``: file which contains an image of the user.
* ``gender``: User's gender ('male' or 'female').
* ``age``: integer containing the age of the user.
Note that all values are string if they are not otherwise indicated.
'''
reg_date = row['regDate']
return {'public_profile': {'registrationdate': reg_date,
'nickname': row['nickname'],
'signature': row['signature'],
'avatar': row['avatar']},
'restricted_profile': {'firstname': row['firstname'],
'lastname': row['lastname'],
'email': row['email'],
'website': row['website'],
'mobile': row['mobile'],
'skype': row['skype'],
'age': row['age'],
'residence': row['residence'],
'gender': row['gender'],
'picture': row['picture']}
}
def _create_user_list_object(self, row):
'''
Same as :py:meth:`_create_message_object`. However, the resulting
dictionary is targeted to build messages in a list.
:param row: The row obtained from the database.
:type row: sqlite3.Row
:return: a dictionary with the keys ``registrationdate`` and
``nickname``
'''
return {'registrationdate': row['regDate'], 'nickname': row['nickname']}
#API ITSELF
#Message Table API.
def get_message(self, messageid):
'''
Extracts a message from the database.
:param messageid: The id of the message. Note that messageid is a
string with format ``msg-\d{1,3}``.
:return: A dictionary with the format provided in
:py:meth:`_create_message_object` or None if the message with target
id does not exist.
:raises ValueError: when ``messageid`` is not well formed
'''
#Extracts the int which is the id for a message in the database
match = re.match(r'msg-(\d{1,3})', messageid)
if match is None:
raise ValueError("The messageid is malformed")
messageid = int(match.group(1))
#Activate foreign key support
self.set_foreign_keys_support()
#Create the SQL Query
query = 'SELECT * FROM messages WHERE message_id = ?'
#Cursor and row initialization
self.con.row_factory = sqlite3.Row
cur = self.con.cursor()
#Execute main SQL Statement
pvalue = (messageid,)
cur.execute(query, pvalue)
#Process the response.
#Just one row is expected
row = cur.fetchone()
if row is None:
return None
#Build the return object
return self._create_message_object(row)
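    # Hedged usage sketch (not part of the original file), assuming `con` is an
    # instance of the connection class this method belongs to:
    #
    #     message = con.get_message('msg-1')
    #     if message is not None:
    #         print(message['title'], message['sender'])
    #
    # A malformed id such as 'message-1' raises ValueError instead.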
def get_messages(self, nickname=None, number_of_messages=-1,
before=-1, after=-1):
'''
Return a list of all the messages in the database filtered by the
conditions provided in the parameters.
:param nickname: default None. Search messages of a user with the given
nickname. If this parameter is None, it returns the messages of
any user in the system.
:type nickname: str
:param number_of_messages: default -1. Sets the maximum number of
            messages returned in the list. If set to -1, there is no limit.
:type number_of_messages: int
:param before: All timestamps > ``before`` (UNIX timestamp) are removed.
If set to -1, this condition is not applied.
:type before: long
:param after: All timestamps < ``after`` (UNIX timestamp) are removed.
If set to -1, this condition is not applied.
:type after: long
:return: A list of messages. Each message is a dictionary containing
the following keys:
* ``messageid``: string with the format msg-\d{1,3}.Id of the
message.
* ``sender``: nickname of the message's author.
* ``title``: string containing the title of the message.
* ``timestamp``: UNIX timestamp (long int) that specifies when the
message was created.
Note that all values in the returned dictionary are string unless
otherwise stated.
:raises ValueError: if ``before`` or ``after`` are not valid UNIX
timestamps
'''
        #Create the SQL Statement: build the string depending on the existence
        #of the nickname, number_of_messages, before and after arguments.
query = 'SELECT * FROM messages'
#Nickname restriction
if nickname is not None or before != -1 or after != -1:
query += ' WHERE'
if nickname is not None:
query += " user_nickname = '%s'" % nickname
#Before restriction
if before != -1:
if nickname is not None:
query += ' AND'
query += " timestamp < %s" % str(before)
#After restriction
if after != -1:
if nickname is not None or before != -1:
query += ' AND'
query += " timestamp > %s" % str(after)
#Order of results
query += ' ORDER BY timestamp DESC'
        #Limit the number of results returned
if number_of_messages > -1:
query += ' LIMIT ' + str(number_of_messages)
#Activate foreign key support
self.set_foreign_keys_support()
#Cursor and row initialization
self.con.row_factory = sqlite3.Row
cur = self.con.cursor()
#Execute main SQL Statement
cur.execute(query)
#Get results
rows = cur.fetchall()
if rows is None:
return None
#Build the return object
messages = []
for row in rows:
message = self._create_message_list_object(row)
messages.append(message)
return messages
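    # Hedged usage sketch (not part of the original file): fetching at most ten
    # messages written by a given user before a given UNIX timestamp, again
    # assuming `con` is an instance of this connection class and the nickname
    # and timestamp are placeholders:
    #
    #     recent = con.get_messages(nickname='Mystery',
    #                               number_of_messages=10,
    #                               before=1362017481)
    #     for m in recent:
    #         print(m['messageid'], m['title'])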
def delete_message(self, messageid):
'''
Delete the message with id given as parameter.
        :param str messageid: id of the message to remove. Note that messageid
is a string with format ``msg-\d{1,3}``
:return: True if the message has been deleted, False otherwise
:raises ValueError: if the messageId has a wrong format.
'''
#Extracts the int which is the id for a message in the database
match = re.match(r'msg-(\d{1,3})', messageid)
if match is None:
raise ValueError("The messageid is malformed")
messageid = int(match.group(1))
'''
#TASK5 TODO:#
* Implement this method.
* HINTS:
* To remove a message use the DELETE sql command
* To check if the message has been previously deleted you can check
the size of the rows returned in the cursor. You can check it from
the attribute cursor.rowcount. If the rowcount is < 1 means that
no row has been deleted and hence you should return False.
Otherwise return True.
* Be sure that you commit the current transaction
* HOW TO TEST: Use the database_api_tests_message. The following tests
must pass without failure or error:
* test_delete_message
* test_delete_message_malformed_id
* test_delete_message_noexisting_id
'''
keys_on = 'PRAGMA foreign_keys = ON'
stmnt = 'DELETE FROM messages WHERE message_id = ?'
#Connects to the database. Gets a connection object
#con = sqlite3.connect(self.db_path)
cur = self.con.cursor()
try:
cur.execute(keys_on)
#execute the statement
pvalue = (messageid,)
cur.execute(stmnt, pvalue)
            if cur.rowcount < 1:
                # No row matched the given id, so nothing was deleted.
                return False
            self.con.commit()
            return True
        except sqlite3.Error as excp:
            print("Error %s:" % excp.args[0])
            return False
def modify_message(self, messageid, title, body, editor="Anonymous"):
'''
Modify the title, the body and the editor of the message with id
``messageid``
:param str messageid: The id of the message to remove. Note that
            messageid
from animal_avatar.utils.colors import darken
MUZZLES = (
lambda color:
'<path fill="#fff" d="M299.9 307.2c0 35-22.3 63.3-49.9 '
'63.3s-49.9-28.3-49.9-63.3 22.4-46.7 49.9-46.7 49.9 11.8 49.9 46.7z"/>'
'<path fill="#f3252f" d="M250 315l12.3 7.8c0 23-24.2 23.2-24.4.4v-.4l12.1-7.7z"/>'
'<path fill="#15212a" d="M285.2 316c-.8-.3-1.6.2-1.8 1-.9 2.9-3.1 4.8-6.7 5.7A26.5 '
'26.5 0 01254 316c-.5-.5-1-1-1.3-1.6v-7.2c15-1 26.7-11 26.7-22.9 0-12.7-13.2-11.2-29.4-11.2s-29.4-1.5-29.4 '
'11.2c0 12 11.8 21.9 26.8 22.9v7c-.4.7-.9 1.3-1.4 1.8a26.6 26.6 0 '
'01-22.7 6.6c-3.6-1-5.9-2.8-6.7-5.7-.2-.8-1-1.3-1.8-1-.8.2-1.3 1-1 1.8 1 4 '
'4.1 6.6 8.7 7.8a29.9 29.9 0 0025.6-7.3c.8-.8 1.4-1.6 1.9-2.4.5.8 1.1 1.6 1.9 '
'2.3a29.9 29.9 0 0025.6 7.4c4.6-1.2 7.6-3.9 8.8-7.8.2-.8-.3-1.6-1-1.9z"/>',
lambda color:
'<path fill="#fff" d="M324.7 343.4c0 34.5-33.4 29.8-74.7 29.8s-74.7 '
'4.7-74.7-29.8 33.4-62.2 74.7-62.2 74.7 27.8 74.7 62.3z"/>'
'<path fill="#15212a" d="M274.4 312.3c-.9 0-1.5.7-1.5 1.5 0 6-5.6 8.7-10.7 8.7-4.9 '
'0-10.1-2.5-10.6-7.8v-14.3c13-.1 23.3-1.5 23.3-10.1 0-9-11.2-16.2-24.9-16.2s-24.9 7.2-24.9 '
'16.2c0 8.6 10.3 10 23.3 10v14.5c-.5 5.3-5.7 7.7-10.6 7.7-5.1 0-10.6-2.7-10.6-8.7a1.5 1.5 0 '
'10-3 0c0 3.6 1.5 6.7 4.4 8.8 2.4 1.9 5.7 2.9 9.2 2.9 4.9 0 9.8-2.1 12.2-6.1.7 1.2 1.7 2.3 '
'3 3.2 2.4 1.8 5.7 2.8 9.2 2.8 6.8 0 13.7-4 13.7-11.6 0-.8-.7-1.5-1.5-1.5z"/>',
lambda color:
f'<ellipse cx="250" cy="299.3" fill="{darken(color, 30)}" rx="39.7" ry="27.6"/>'
f'<path fill="{darken(color, 60)}" '
f'd="M229.1 315.4c-1.2 1.4-3.3 1.6-5.4.9a8.2 8.2 0 01-4.7-4.3c-1-2-.9-4 .3-5.3 '
f'1.4-1.5 3.7-1.8 5.9-.8a8.3 8.3 0 014.1 3.7c1.1 2 1.1 4.3-.2 5.8zM281 312a8.5 '
f'8.5 0 01-4.7 4.3c-2 .7-4.1.4-5.4-1-1.2-1.4-1.2-3.6-.2-5.6a8.5 8.5 0 014.2-3.8c2.2-1 '
f'4.5-.7 5.8.8 1.2 1.3 1.3 3.4.4 5.4z"/>'
'<path fill="#15212a" d="M281 312a26.6 26.6 0 00-6.1-6.1c-.8.3-1.6.8-2.3 1.4-.8.7-1.4 '
'1.5-1.9 2.4a3 3 0 001 1.5 19 19 0 014.6 5c1.3 2.2 2 4.5 2 7 0 10.2-12.7 18.6-28.3 '
'18.6s-28.3-8.4-28.3-18.7c0-2.4.7-4.7 2-6.8a19 19 0 014.5-5.1c.6-.4 1-1 1-1.6a8.3 8.3 0 '
'00-4-3.7c-.3 0-.6.2-.9.4-2 1.7-3.9 3.6-5.3 5.7a19.5 19.5 0 00-3.6 11.1c0 13.8 15.5 25 '
'34.6 25s34.6-11.2 34.6-25c0-4-1.3-7.7-3.5-11z"/>',
lambda color:
'<ellipse cx="250" cy="315" fill="#fff" rx="73.7" ry="48.3"/>'
'<path fill="#15212a" d="M270 272a20 20 0 01-40.1 0c0-11.1 9-13.7 20-13.7 11.2 0 20.2 2.6 20.2 13.7z"/>'
'<path fill="#f3252f" d="M240.2 306c0 21.3 19.6 21.3 19.6 0l-7.3-3.6-3.4-3.5-3.3 6-5.6 1.1z"/>'
'<path fill="#15212a" d="M270.2 292c-.8-.1-1.5.5-1.6 1.3-.3 5-2.4 8.7-5.6 10.2a7.3 7.3 '
'0 01-7.3-.5c-2.6-1.7-4-4.8-4.2-8.7v-1-1.1c0-.8-.6-1.6-1.4-1.6h-.1-.1c-.9 0-1.5.8-1.4 '
'1.6v2.1c-.2 3.9-1.7 7-4.2 8.7a7.3 7.3 0 01-7.4.5c-3.2-1.5-5.2-5.3-5.5-10.2-.1-.8-.8-1.4-1.6-1.4-.9 '
'0-1.5.8-1.4 1.6.4 6 3 10.7 7.2 12.7a10.4 10.4 0 0010.3-.7c1.8-1.1 3.1-2.8 4-4.8 1 2 2.3 3.7 4 4.8a10.3 '
'10.3 0 0010.4.7c4.2-2 6.9-6.7 7.3-12.7 0-.8-.6-1.5-1.4-1.6z"/>',
lambda color:
'<path fill="#15212a" d="M278.3 286.2a2 2 0 00-2 2c0 9.5-12.2 14.6-24.6 '
'15V290h-3.4v13.2c-12.5-.4-24.6-5.5-24.6-15 0-1-.8-2-2-2s-2 1-2 2c0 6 3.5 11.1 10 14.6a44 44 0 0020.3 4.5c14.6 0 '
'30.3-6 30.3-19 0-1.2-1-2-2-2z"/> '
'<path fill="#15212a" d="M272.4 275c0-6.4-10.1-11.6-22.5-11.6s-22.5 5.2-22.5 11.5c0 2 1.1 4 3 5.7 1 .8 2 1.7 3.2 '
'2.4l8.7 5.8a13.5 13.5 0 0015.2 0l9.7-6.6 1-.7c2.7-1.9 4.2-4.1 4.2-6.6z"/>',
lambda color:
'<path fill="#fff" d="M250 258.3c-33 0-59.9 19.1-59.9 42.7 0 7.6 2.8 14.7 7.7 20.9a53 '
'53 0 007.5 7.4c4.7 3.4 9.5 5 14.2 5 15.2 0 29-15.6 30.5-33.3 2 23.3 25.1 42.7 44.7 '
'28.3 2.6-2.1 5-4.4 7-6.9l.5-.5a33.5 33.5 0 007.6-21c0-23.5-26.8-42.6-59.8-42.6z"/>'
'<path fill="#15212a" d="M282.5 299.6c-.8 0-1.5.7-1.5 1.5 0 2-1.1 3.5-3.4 4.8-5.8 '
'3.3-17 3.3-22.7 0-2.3-1.4-3.4-3-3.4-5v-.1l.4-.4 13.8-10.8c2.3-1.8 1-5.6-2-5.6h-27.4c-3 '
'0-4.3 3.8-2 5.6l13.8 10.8.4.3v.2c0 2-1.1 3.6-3.4 5-5.7 3.3-17 3.3-22.7 0-2.3-1.3-3.4-2.9-3.4-4.8 '
'0-.8-.7-1.5-1.5-1.5-.9 0-1.5.7-1.5 1.5 0 3 1.7 5.6 5 7.5 3.2 1.9 8 2.8 12.6 2.8 4.8 0 9.6-1 '
'13-3 1.5-.8 2.6-1.8 3.4-3 .8 1.2 2 2.2 3.4 3 3.4 2 8.2 3 13 3 4.7 0 9.4-1 12.7-2.8 3.2-1.9 5-4.5 '
'5-7.5 0-.8-.7-1.5-1.6-1.5z"/>',
lambda color:
'<path fill="#15212a" d="M288.6 308.7a2.8 2.8 0 00-3.5-1.4l-18.5 '
'7c0-.2-.1-.3-.3-.3-6.6-3.8-11.5-11.6-14.8-23.8 9.6 0 17 .7 17-9a18.5 18.5 0 '
'10-37 0c0 9.7 7.5 9 17 9v.3c3.3 12.3 8 20.5 14.7 25-12 4.5-21.4 8.3-19 8.5 0 '
'0 30.7-5.2 39.8-8.7 5.4-2.1 5.4-4.8 4.6-6.6z"/>',
lambda color:
'<path fill="#fff" d="M250 328v25.9c-8.2.6-16.5 1-25 .4a89 89 0 '
'01-14.1-2c-4.8-1-9.5-2.6-14.1-5a34.6 34.6 0 01-18.4-23.7c-1-4.7-1.2-9.4-1-13.9.3-4.5 '
'1-8.8 2-13a2.7 2.7 0 015.3.6c-.2 8.2.6 16.5 3.6 23.2 1.5 3.3 3.5 6.1 6 8.2 2.5 2 '
'5.3 3.5 8.6 4.4 6.5 1.8 14 1.7 21.6.6 7.6-1 15.3-3 23-5.3.8-.2 1.7-.4 2.5-.4z"/>'
'<path fill="#fff" d="M250 328v25.9c8.2.6 16.5 1 25 .4a89 89 0 0014.1-2c4.8-1 9.6-2.6 '
'14.1-5a34.6 34.6 0 0018.4-23.7c1-4.7 1.2-9.4 1-13.9-.3-4.5-1-8.8-2-13a2.7 2.7 0 '
'00-5.3.6c.2 8.2-.6 16.5-3.6 23.2a22.8 22.8 0 01-6 8.2c-2.5 2-5.3 3.5-8.6 4.4-6.5 1.8-14 '
'1.7-21.6.6-7.6-1-15.3-3-23-5.3-.8-.2-1.7-.4-2.5-.4z"/>'
f'<path fill="{darken(color, 30)}" d="M292.8 330.6c0 1.2 0 2.3-.2 3.4-2.2 16.6-20.4 '
f'24.4-42.6 24.4-22 0-40.3-7.7-42.6-24.2a25.2 25.2 0 01-.2-3.6v-2c1.4-17.2 20-44.2 '
f'42.8-44.2 22.9 0 41.6 27.2 42.8 44.5v1.7z"/>'
'<circle fill="#15212a" cx="270.6" cy="335.6" r="8.1"/>'
'<circle fill="#15212a" cx="229.4" cy="335.6" r="8.1"/>',
lambda color:
'<path fill="#fff" d="M210.3 362L193 359a3.2 3.2 0 01-2.6-3.4l1.1-18c.1-1.9 2-3.3 3.8-3l22 4c2 .4 3.2 2.5 2.5 '
'4.3l-6 17.1c-.4 1.5-2 2.4-3.6 2.1zM289.8 362L307 359a3.2 3.2 0 002.6-3.4l-1.1-18c-.1-1.9-2-3.3-3.8-3l-22 4c-2 '
'.4-3.2 2.5-2.5 4.3l6 17.1c.4 1.5 2 2.4 3.6 2.1z"/> '
f'<path fill="{darken(color, 30)}" d="M275.7 273.1c-9.4 0-18.2-1.5-25.7 2-7.5-3.5-16.3-2-25.7-2-27.9 0-50.5 '
'17.4-50.5 39 0 21.4 22.6 38.8 50.5 38.8 9.4 0 18.2-6.8 25.7-10.2 7.5 3.4 16.3 10.2 25.7 10.2 27.9 0 50.5-17.4 '
'50.5-38.9s-22.6-38.9-50.5-38.9z"/> '
'<path fill="#15212a" d="M219.9 307.1c-.9 0-1.7 0-2.6-.2a4 4 0 01.5-8h2.1c2.6 0 4.6-1 5.5-5.8a4 4 0 014.7-3.2 4 '
'4 0 013.2 4.7c-1.5 8-6.3 12.5-13.4 12.5zM281.1 307.1c.9 0 1.7 0 2.6-.2a4 4 0 00-.5-8h-2.1c-2.6 0-4.6-1-5.5-5.8a4 '
'4 0 00-4.7-3.2 4 4 0 00-3.2 4.7c1.5 8 6.3 12.5 13.4 12.5z"/> ',
lambda color:
'<path fill="#15212a" d="M286 305.4c3-.6 5.7-.6 8.2-.1-5 17.3-18.5 26.5-42 28.8V318c0-8.2 4.9-15.7 12.5-18.6 '
'10-3.8 16.6-11.6 17.3-21.3.5-7.8-5.3-14.8-13.2-15a13.5 13.5 0 00-13.6 11.1c-2.6 1-7.8 1-10.4 0a13.5 13.5 0 '
'00-13.7-11.1c-7.6.2-13.5 7-13 14.6.3 8.1 4.8 15.2 12.5 19.5 1.3.8 2.8 1.5 4.3 2.1a20 20 0 0112.9 '
'18.7v20.4c27.7-1.6 44.5-12 50.3-32 2.3 1 4.5 2.5 6.6 4.6-4-8.2-10.4-9.4-18.7-5.6z"/> ',
lambda color:
'<path fill="#15212a" d="M249.8 294.6s-16.6-.3-16.6 4.9 11.6 18.9 16.6 18.9 16.7-14 16.7-18.9c0-5-16.7-5-16.7-5z"/>'
'<path fill="#15212a" d="M296.2 323l-.1.4a24 24 0 01-3.5 7.6 21.3 21.3 0 01-6 5.6 25 25 0 '
'01-30.3-3.2c-1.8-2-3.1-4.4-3.8-7-.8-2.5-.9-5.2-.6-7.8l.2-1.5-1.5-.2v-.2l-.8.1h-.7v.1l-1.5.2.1 1.5c.4 2.6.3 '
'5.3-.5 7.9-.7 2.5-2 5-3.8 6.9a25 25 0 01-30.3 3.3 21.2 21.2 0 01-6-5.7 24 24 0 01-3.5-7.6l-.1-.5-1 .3.2.4a25 25 '
'0 003.4 8c1.6 2.5 3.7 4.6 6.2 6.3 4.9 3.3 11 4.7 16.8 4.3a25 25 0 0016.1-7.1c2.1-2.2 3.7-4.9 4.6-7.8 1 3 2.6 5.6 '
'4.7 7.8a25 25 0 0016 7c6 .5 12-1 17-4.2a22.6 22.6 0 009.5-14.3l.1-.5-.9-.2z"/> '
'<path fill="#15212a" d="M148.9 311.9l.2-2 50.2 6.4-.2 2zM149.9 332.7l49.1-6.2.2 2-49 6.1zM160 357.1l40-21.8.9 '
'1.8-40 21.8zM304.2 316.3l50.2-6.4.2 2-50.2 6.4zM304.2 328.5l.3-2 49 '
'6.2-.2 2zM302.5 337l1-1.7 39.9 21.8-1 1.8z"/> ',
lambda color:
'<path fill="#15212a" d="M250 278.7c-15.5 0-21.2 4.1-21.2 12.9 0 8.7 21.2 21.8 21.2 21.8s21.2-12.5 '
'21.2-21.8-5.7-12.9-21.2-12.9z"/> '
'<path fill="#15212a" d="M319.2 321c-.4-.9-1.4-1.3-2.4-1-21 9-42.8 13.5-65 13.5v-25.8a1.8 1.8 0 10-3.6 '
'0v25.8c-21.3-.4-43-5-65.1-13.4-1-.4-2 0-2.3 1-.4.9 0 2 1 2.3a193.4 193.4 0 0068.7 13.7h1c22.8 0 45.1-4.6 '
'66.7-13.7 1-.4 1.4-1.5 1-2.4z"/> ',
lambda color:
'<path fill="#15212a" d="M277.6 291.26c0 9.56-12.36 21.7-27.6 21.7s-27.6-12.14-27.6-21.7 12.36-14.83 27.6-14.83 '
'27.6 5.28 27.6 14.83zM219.24 307.93c-4.98-1.74-10.12-3-15.35-3.67a69.16 69.16 0 0 0-15.82-.22c-10.56 1.04-20.83 '
'4.51-30.4 9.27l-.35-.68c9.45-5.11 19.73-9.01 30.53-10.46 5.39-.73 10.87-.85 16.3-.34 5.44.51 10.81 1.63 16.03 '
'3.25l-.94 2.85zM219.98 312.52c-9.35 1.47-18.46 4.57-26.91 8.99-8.47 4.42-16.28 10.1-23.44 '
'16.51l-.36-.39c6.9-6.73 14.53-12.8 22.95-17.67 8.4-4.87 17.62-8.46 27.23-10.4l.53 2.96zM222.41 318.16a45.94 '
'45.94 0 0 0-10.58 6.51c-3.22 2.65-6.07 5.75-8.52 9.17-4.91 6.84-8.2 14.81-10.48 23.01l-.64-.16c1.92-8.34 '
'4.86-16.58 9.61-23.88 2.37-3.64 5.19-7.02 8.45-9.96 3.25-2.94 6.93-5.43 10.87-7.4l1.29 2.71zM232.69 317.9c-6.32 '
'5.69-11.5 12.7-14.8 20.62-3.35 7.91-4.8 16.59-4.95 25.25h-.47c-.27-8.71.76-17.58 3.8-25.89 1.51-4.15 3.5-8.14 '
'5.93-11.86 2.42-3.72 5.27-7.17 8.42-10.31l2.07 2.19z"/> '
'<path fill="#15212a" d="M280.76 307.93c4.98-1.74 10.12-3 15.35-3.67a69.16 69.16 0 0 1 15.82-.22c10.56 1.04 20.83 '
'4.51 30.4 9.27l.35-.68c-9.45-5.11-19.73-9.01-30.53-10.46-5.39-.73-10.87-.85-16.3-.34-5.44.51-10.81 1.63-16.03 '
'3.25l.94 2.85zM280.02 312.52c9.35 1.47 18.46 4.57 26.91 8.99 8.47 4.42 16.28 10.1 23.44 '
'16.51l.36-.39c-6.9-6.73-14.53-12.8-22.95-17.67-8.4-4.87-17.62-8.46-27.23-10.4l-.53 2.96zM277.59 318.16a45.94 '
'45.94 0 0 1 10.58 6.51c3.22 2.65 6.07 5.75 8.52 9.17 4.91 6.84 8.2 14.81 10.48 '
'23.01l.64-.16c-1.92-8.34-4.86-16.58-9.61-23.88-2.37-3.64-5.19-7.02-8.45-9.96a48.654 48.654 0 0 0-10.87-7.4l-1.29 '
'2.71zM267.31 317.9c6.32 5.69 11.5 12.7 14.8 20.62 3.35 7.91 4.8 16.59 4.95 '
'25.25h.47c.27-8.71-.76-17.58-3.8-25.89-1.51-4.15-3.5-8.14-5.93-11.86-2.42-3.72-5.27-7.17-8.42-10.31l-2.07 '
'2.19z"/> ',
lambda color:
'<path fill="#15212a" d="M249.23 293.56c-24.54-7.26-40.9-32.68 1.34-33.81 40.65 1.14 25.14 26.93 1.34 '
'33.82-.87.25-1.81.24-2.68-.01z"/> '
'<path fill="#15212a" d="M288.25 294.99c-7.94 7.07-15.74 12.03-23.51 14.78a42.65 42.65 0 0 1-15.01 2.74 41.63 '
'41.63 0 0 1-14.75-2.68c-7.73-2.74-15.47-7.7-23.24-14.88a1.62 1.62 0 0 1-.1-2.31 1.67 1.67 0 0 1 2.35-.1c7.78 '
'7.15 15.41 12.03 23.02 14.58 1.19.4 2.37.74 3.56 1.03a38.47 38.47 0 0 0 18.35 0c1.15-.28 2.3-.6 3.46-.99 '
'7.74-2.54 15.59-7.43 23.65-14.65a1.7 1.7 0 0 1 2.35.13c.59.69.56 1.75-.13 2.35z"/> ',
lambda color:
f'<path fill="{darken(color, 30)}" d="M316.5 341.28c-6.67 6.12-14.33 9.99-21.58 11.43l-.36.24-1.63 | |
<reponame>JHP4911/JINA
import os
import numpy as np
import pytest
from jina import Flow
from jina.enums import SocketType, FlowBuildLevel
from jina.excepts import RuntimeFailToStart
from jina.executors import BaseExecutor
from jina.helper import random_identity
from jina.proto.jina_pb2 import DocumentProto
from jina.types.request import Response
from tests import random_docs, rm_files
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_flow_with_jump():
def _validate(f):
node = f._pod_nodes['gateway']
assert node.head_args.socket_in == SocketType.PULL_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r1']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUB_BIND
node = f._pod_nodes['r2']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r3']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r4']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r5']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r6']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r8']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r9']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r10']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_BIND
for name, node in f._pod_nodes.items():
assert node.peas_args['peas'][0] == node.head_args
assert node.peas_args['peas'][0] == node.tail_args
f = (Flow().add(name='r1')
.add(name='r2')
.add(name='r3', needs='r1')
.add(name='r4', needs='r2')
.add(name='r5', needs='r3')
.add(name='r6', needs='r4')
.add(name='r8', needs='r6')
.add(name='r9', needs='r5')
.add(name='r10', needs=['r9', 'r8']))
with f:
_validate(f)
f.save_config('tmp.yml')
Flow.load_config('tmp.yml')
with Flow.load_config('tmp.yml') as f:
_validate(f)
rm_files(['tmp.yml'])
@pytest.mark.parametrize('restful', [False, True])
def test_simple_flow(restful):
bytes_gen = (b'aaa' for _ in range(10))
def bytes_fn():
for _ in range(100):
yield b'aaa'
f = Flow(restful=restful).add()
with f:
f.index(input_fn=bytes_gen)
with f:
f.index(input_fn=bytes_fn)
with f:
f.index(input_fn=bytes_fn)
f.index(input_fn=bytes_fn)
node = f._pod_nodes['gateway']
assert node.head_args.socket_in == SocketType.PULL_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['pod0']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_BIND
for name, node in f._pod_nodes.items():
assert node.peas_args['peas'][0] == node.head_args
assert node.peas_args['peas'][0] == node.tail_args
def test_flow_identical():
with open(os.path.join(cur_dir, '../yaml/test-flow.yml')) as fp:
a = Flow.load_config(fp)
b = (Flow()
.add(name='chunk_seg', parallel=3)
.add(name='wqncode1', parallel=2)
.add(name='encode2', parallel=2, needs='chunk_seg')
.join(['wqncode1', 'encode2']))
a.save_config('test2.yml')
c = Flow.load_config('test2.yml')
assert a == b
assert a == c
with a as f:
node = f._pod_nodes['gateway']
assert node.head_args.socket_in == SocketType.PULL_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['chunk_seg']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.head_args.socket_out == SocketType.ROUTER_BIND
for arg in node.peas_args['peas']:
assert arg.socket_in == SocketType.DEALER_CONNECT
assert arg.socket_out == SocketType.PUSH_CONNECT
assert node.tail_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUB_BIND
node = f._pod_nodes['wqncode1']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.head_args.socket_out == SocketType.ROUTER_BIND
for arg in node.peas_args['peas']:
assert arg.socket_in == SocketType.DEALER_CONNECT
assert arg.socket_out == SocketType.PUSH_CONNECT
assert node.tail_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['encode2']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.head_args.socket_out == SocketType.ROUTER_BIND
for arg in node.peas_args['peas']:
assert arg.socket_in == SocketType.DEALER_CONNECT
assert arg.socket_out == SocketType.PUSH_CONNECT
assert node.tail_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
rm_files(['test2.yml'])
@pytest.mark.parametrize('restful', [False, True])
def test_flow_no_container(restful):
f = (Flow(restful=restful)
.add(name='dummyEncoder', uses=os.path.join(cur_dir, '../mwu-encoder/mwu_encoder.yml')))
with f:
f.index(input_fn=random_docs(10))
def test_shards():
f = Flow().add(name='doc_pb', uses=os.path.join(cur_dir, '../yaml/test-docpb.yml'), parallel=3)
with f:
f.index(input_fn=random_docs(1000), random_doc_id=False)
with f:
pass
rm_files(['test-docshard-tmp'])
def test_py_client():
f = (Flow().add(name='r1')
.add(name='r2')
.add(name='r3', needs='r1')
.add(name='r4', needs='r2')
.add(name='r5', needs='r3')
.add(name='r6', needs='r4')
.add(name='r8', needs='r6')
.add(name='r9', needs='r5')
.add(name='r10', needs=['r9', 'r8']))
with f:
node = f._pod_nodes['gateway']
assert node.head_args.socket_in == SocketType.PULL_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r1']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUB_BIND
node = f._pod_nodes['r2']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r3']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r4']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r5']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r6']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r8']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r9']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r10']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_BIND
for name, node in f._pod_nodes.items():
assert node.peas_args['peas'][0] == node.head_args
assert node.peas_args['peas'][0] == node.tail_args
def test_dry_run_with_two_pathways_diverging_at_gateway():
f = (Flow()
.add(name='r2')
.add(name='r3', needs='gateway')
.join(['r2', 'r3']))
with f:
node = f._pod_nodes['gateway']
assert node.head_args.socket_in == SocketType.PULL_CONNECT
assert node.tail_args.socket_out == SocketType.PUB_BIND
node = f._pod_nodes['r2']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r3']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
for name, node in f._pod_nodes.items():
assert node.peas_args['peas'][0] == node.head_args
assert node.peas_args['peas'][0] == node.tail_args
def test_dry_run_with_two_pathways_diverging_at_non_gateway():
f = (Flow()
.add(name='r1')
.add(name='r2')
.add(name='r3', needs='r1')
.join(['r2', 'r3']))
with f:
node = f._pod_nodes['gateway']
assert node.head_args.socket_in == SocketType.PULL_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r1']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUB_BIND
node = f._pod_nodes['r2']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r3']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
for name, node in f._pod_nodes.items():
assert node.peas_args['peas'][0] == node.head_args
assert node.peas_args['peas'][0] == node.tail_args
def test_refactor_num_part():
f = (Flow()
.add(name='r1', uses='_logforward', needs='gateway')
.add(name='r2', uses='_logforward', needs='gateway')
.join(['r1', 'r2']))
with f:
node = f._pod_nodes['gateway']
assert node.head_args.socket_in == SocketType.PULL_CONNECT
assert node.tail_args.socket_out == SocketType.PUB_BIND
node = f._pod_nodes['r1']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r2']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
for name, node in f._pod_nodes.items():
assert node.peas_args['peas'][0] == node.head_args
assert node.peas_args['peas'][0] == node.tail_args
def test_refactor_num_part_proxy():
f = (Flow()
.add(name='r1', uses='_logforward')
.add(name='r2', uses='_logforward', needs='r1')
.add(name='r3', uses='_logforward', needs='r1')
.join(['r2', 'r3']))
with f:
node = f._pod_nodes['gateway']
assert node.head_args.socket_in == SocketType.PULL_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r1']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUB_BIND
node = f._pod_nodes['r2']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r3']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
for name, node in f._pod_nodes.items():
assert node.peas_args['peas'][0] == node.head_args
assert node.peas_args['peas'][0] == node.tail_args
@pytest.mark.parametrize('restful', [False, True])
def test_refactor_num_part_proxy_2(restful):
f = (Flow(restful=restful)
.add(name='r1', uses='_logforward')
.add(name='r2', uses='_logforward', needs='r1', parallel=2)
.add(name='r3', uses='_logforward', needs='r1', parallel=3, polling='ALL')
.needs(['r2', 'r3']))
with f:
f.index_lines(lines=['abbcs', 'efgh'])
@pytest.mark.parametrize('restful', [False, True])
def test_refactor_num_part_2(restful):
f = (Flow(restful=restful)
.add(name='r1', uses='_logforward', needs='gateway', parallel=3, polling='ALL'))
with f:
f.index_lines(lines=['abbcs', 'efgh'])
f = (Flow(restful=restful)
.add(name='r1', uses='_logforward', needs='gateway', parallel=3))
with f:
f.index_lines(lines=['abbcs', 'efgh'])
@pytest.mark.parametrize('restful', [False, True])
def test_index_text_files(mocker, restful):
def validate(req):
assert len(req.docs) > 0
for d in req.docs:
assert d.text
response_mock = mocker.Mock(wrap=validate)
f = (Flow(restful=restful, read_only=True)
.add(uses=os.path.join(cur_dir, '../yaml/datauriindex.yml'), timeout_ready=-1))
with f:
f.index_files('*.py', on_done=response_mock, callback_on='body')
rm_files(['doc.gzip'])
response_mock.assert_called()
# TODO(Deepankar): Gets stuck when `restful: True` - issues with `needs='gateway'`
@pytest.mark.parametrize('restful', [False])
def test_flow_with_publish_driver(mocker, restful):
def validate(req):
for d in req.docs:
assert d.embedding is not None
response_mock = mocker.Mock(wrap=validate)
f = (Flow(restful=restful)
.add(name='r2', uses='!OneHotTextEncoder')
.add(name='r3', uses='!OneHotTextEncoder', needs='gateway')
.join(needs=['r2', 'r3']))
with f:
f.index_lines(lines=['text_1', 'text_2'], on_done=response_mock)
response_mock.assert_called()
@pytest.mark.parametrize('restful', [False, True])
def test_flow_with_modalitys_simple(mocker, restful):
def validate(req):
for d in req.index.docs:
assert d.modality in ['mode1', 'mode2']
def input_fn():
doc1 = DocumentProto()
doc1.modality = 'mode1'
doc2 = DocumentProto()
doc2.modality = 'mode2'
doc3 = DocumentProto()
doc3.modality = 'mode1'
return [doc1, doc2, doc3]
response_mock = mocker.Mock(wrap=validate)
flow = (Flow(restful=restful)
.add(name='chunk_seg', parallel=3)
.add(name='encoder12', parallel=2,
uses='- !FilterQL | {lookups: {modality__in: [mode1, mode2]}, traversal_paths: [c]}'))
with flow:
flow.index(input_fn=input_fn, on_done=response_mock)
response_mock.assert_called()
def test_flow_arguments_priorities():
f = Flow(port_expose=12345).add(name='test', port_expose=23456)
assert f._pod_nodes['test'].args.port_expose == 23456
f = Flow(port_expose=12345).add(name='test')
assert f._pod_nodes['test'].args.port_expose == 12345
@pytest.mark.parametrize('restful', [False])
def test_flow_arbitrary_needs(restful):
f = (Flow(restful=restful)
.add(name='p1').add(name='p2', needs='gateway')
.add(name='p3', needs='gateway')
.add(name='p4', needs='gateway')
.add(name='p5', needs='gateway')
.needs(['p2', 'p4'], name='r1')
.needs(['p3', 'p5'], name='r2')
.needs(['p1', 'r1'], name='r3')
.needs(['r2', 'r3'], name='r4'))
with f:
f.index_lines(['abc', 'def'])
@pytest.mark.parametrize('restful', [False])
def test_flow_needs_all(restful):
f = (Flow(restful=restful)
.add(name='p1', needs='gateway')
.needs_all(name='r1'))
assert f._pod_nodes['r1'].needs == {'p1'}
f = (Flow(restful=restful)
.add(name='p1', needs='gateway')
.add(name='p2', needs='gateway')
.add(name='p3', needs='gateway')
.needs(needs=['p1', 'p2'], name='r1')
.needs_all(name='r2'))
assert f._pod_nodes['r2'].needs == {'p3', 'r1'}
with f:
f.index_ndarray(np.random.random([10, 10]))
f = (Flow(restful=restful)
.add(name='p1', needs='gateway')
.add(name='p2', needs='gateway')
.add(name='p3', needs='gateway')
.needs(needs=['p1', 'p2'], name='r1')
.needs_all(name='r2')
.add(name='p4', needs='r2'))
assert f._pod_nodes['r2'].needs == {'p3', 'r1'}
assert f._pod_nodes['p4'].needs == {'r2'}
with f:
f.index_ndarray(np.random.random([10, 10]))
def test_flow_with_pod_envs():
f = Flow.load_config('yaml/flow-with-envs.yml')
class EnvChecker1(BaseExecutor):
"""Class used in Flow YAML"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# pea/pod-specific
assert os.environ['key1'] == 'value1'
assert os.environ['key2'] == 'value2'
# inherit from parent process
assert os.environ['key_parent'] == 'value3'
class EnvChecker2(BaseExecutor):
"""Class used in Flow YAML"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# pea/pod-specific
assert 'key1' not in os.environ
assert 'key2' not in os.environ
# inherit from parent process
assert os.environ['key_parent'] == 'value3'
with f:
pass
@pytest.mark.parametrize('return_results', [False, True])
@pytest.mark.parametrize('restful', [False, True])
def test_return_results_sync_flow(return_results, restful):
with Flow(restful=restful, return_results=return_results).add() as f:
r = f.index_ndarray(np.random.random([10, 2]))
if return_results:
assert isinstance(r, list)
assert isinstance(r[0], Response)
else:
assert r is None
@pytest.mark.parametrize('input, expect_host, expect_port',
[('0.0.0.0', '0.0.0.0', None),
('0.0.0.0:12345', '0.0.0.0', 12345),
('123.456.789.0:45678', '123.456.789.0', 45678),
('api.jina.ai:45678', 'api.jina.ai', 45678)])
def test_flow_host_expose_shortcut(input, expect_host, expect_port):
f = Flow().add(host=input).build()
assert f['pod0'].args.host == expect_host
if expect_port is not None:
assert f['pod0'].args.port_expose == expect_port
def test_flow_workspace_id():
f = Flow().add().add().add().build()
assert len(f.workspace_id) == 3
assert len(set(f.workspace_id.values())) == 3
with pytest.raises(ValueError):
f.workspace_id = 'hello'
new_id = random_identity()
    f.workspace_id = new_id
<filename>yt_dlp/extractor/npo.py<gh_stars>10-100
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
)
from ..utils import (
determine_ext,
ExtractorError,
fix_xml_ampersands,
int_or_none,
merge_dicts,
orderedSet,
parse_duration,
qualities,
str_or_none,
strip_jsonp,
unified_strdate,
unified_timestamp,
url_or_none,
urlencode_postdata,
)
class NPOBaseIE(InfoExtractor):
def _get_token(self, video_id):
return self._download_json(
'http://ida.omroep.nl/app.php/auth', video_id,
note='Downloading token')['token']
class NPOIE(NPOBaseIE):
IE_NAME = 'npo'
IE_DESC = 'npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl'
_VALID_URL = r'''(?x)
(?:
npo:|
https?://
(?:www\.)?
(?:
npo\.nl/(?:[^/]+/)*|
(?:ntr|npostart)\.nl/(?:[^/]+/){2,}|
omroepwnl\.nl/video/fragment/[^/]+__|
(?:zapp|npo3)\.nl/(?:[^/]+/){2,}
)
)
(?P<id>[^/?#]+)
'''
_TESTS = [{
'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719',
'md5': '4b3f9c429157ec4775f2c9cb7b911016',
'info_dict': {
'id': 'VPWON_1220719',
'ext': 'm4v',
'title': 'Nieuwsuur',
'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.',
'upload_date': '20140622',
},
}, {
'url': 'http://www.npo.nl/de-mega-mike-mega-thomas-show/27-02-2009/VARA_101191800',
'md5': 'da50a5787dbfc1603c4ad80f31c5120b',
'info_dict': {
'id': 'VARA_101191800',
'ext': 'm4v',
'title': 'De Mega Mike & Mega Thomas show: The best of.',
'description': 'md5:3b74c97fc9d6901d5a665aac0e5400f4',
'upload_date': '20090227',
'duration': 2400,
},
}, {
'url': 'http://www.npo.nl/tegenlicht/25-02-2013/VPWON_1169289',
'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
'info_dict': {
'id': 'VPWON_1169289',
'ext': 'm4v',
'title': 'Tegenlicht: Zwart geld. De toekomst komt uit Afrika',
'description': 'md5:52cf4eefbc96fffcbdc06d024147abea',
'upload_date': '20130225',
'duration': 3000,
},
}, {
'url': 'http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706',
'info_dict': {
'id': 'WO_VPRO_043706',
'ext': 'm4v',
'title': 'De nieuwe mens - Deel 1',
'description': 'md5:518ae51ba1293ffb80d8d8ce90b74e4b',
'duration': 4680,
},
'params': {
'skip_download': True,
}
}, {
# non asf in streams
'url': 'http://www.npo.nl/hoe-gaat-europa-verder-na-parijs/10-01-2015/WO_NOS_762771',
'info_dict': {
'id': 'WO_NOS_762771',
'ext': 'mp4',
'title': 'Hoe gaat Europa verder na Parijs?',
},
'params': {
'skip_download': True,
}
}, {
'url': 'http://www.ntr.nl/Aap-Poot-Pies/27/detail/Aap-poot-pies/VPWON_1233944#content',
'info_dict': {
'id': 'VPWON_1233944',
'ext': 'm4v',
'title': 'Aap, poot, pies',
'description': 'md5:c9c8005d1869ae65b858e82c01a91fde',
'upload_date': '20150508',
'duration': 599,
},
'params': {
'skip_download': True,
}
}, {
'url': 'http://www.omroepwnl.nl/video/fragment/vandaag-de-dag-verkiezingen__POMS_WNL_853698',
'info_dict': {
'id': 'POW_00996502',
'ext': 'm4v',
'title': '''"Dit is wel een 'landslide'..."''',
'description': 'md5:f8d66d537dfb641380226e31ca57b8e8',
'upload_date': '20150508',
'duration': 462,
},
'params': {
'skip_download': True,
}
}, {
# audio
'url': 'http://www.npo.nl/jouw-stad-rotterdam/29-01-2017/RBX_FUNX_6683215/RBX_FUNX_7601437',
'info_dict': {
'id': 'RBX_FUNX_6683215',
'ext': 'mp3',
'title': 'J<NAME> Rotterdam',
'description': 'md5:db251505244f097717ec59fabc372d9f',
},
'params': {
'skip_download': True,
}
}, {
'url': 'http://www.zapp.nl/de-bzt-show/gemist/KN_1687547',
'only_matching': True,
}, {
'url': 'http://www.zapp.nl/de-bzt-show/filmpjes/POMS_KN_7315118',
'only_matching': True,
}, {
'url': 'http://www.zapp.nl/beste-vrienden-quiz/extra-video-s/WO_NTR_1067990',
'only_matching': True,
}, {
'url': 'https://www.npo3.nl/3onderzoekt/16-09-2015/VPWON_1239870',
'only_matching': True,
}, {
# live stream
'url': 'npo:LI_NL1_4188102',
'only_matching': True,
}, {
'url': 'http://www.npo.nl/radio-gaga/13-06-2017/BNN_101383373',
'only_matching': True,
}, {
'url': 'https://www.zapp.nl/1803-skelterlab/instructie-video-s/740-instructievideo-s/POMS_AT_11736927',
'only_matching': True,
}, {
'url': 'https://www.npostart.nl/broodje-gezond-ei/28-05-2018/KN_1698996',
'only_matching': True,
}, {
'url': 'https://npo.nl/KN_1698996',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return (False if any(ie.suitable(url)
for ie in (NPOLiveIE, NPORadioIE, NPORadioFragmentIE))
else super(NPOIE, cls).suitable(url))
def _real_extract(self, url):
video_id = self._match_id(url)
return self._get_info(url, video_id) or self._get_old_info(video_id)
def _get_info(self, url, video_id):
token = self._download_json(
'https://www.npostart.nl/api/token', video_id,
'Downloading token', headers={
'Referer': url,
'X-Requested-With': 'XMLHttpRequest',
})['token']
player = self._download_json(
'https://www.npostart.nl/player/%s' % video_id, video_id,
'Downloading player JSON', data=urlencode_postdata({
'autoplay': 0,
'share': 1,
'pageUrl': url,
'hasAdConsent': 0,
'_token': token,
}))
player_token = player['token']
drm = False
format_urls = set()
formats = []
for profile in ('hls', 'dash-widevine', 'dash-playready', 'smooth'):
streams = self._download_json(
'https://start-player.npo.nl/video/%s/streams' % video_id,
video_id, 'Downloading %s profile JSON' % profile, fatal=False,
query={
'profile': profile,
'quality': 'npo',
'tokenId': player_token,
'streamType': 'broadcast',
})
if not streams:
continue
stream = streams.get('stream')
if not isinstance(stream, dict):
continue
stream_url = url_or_none(stream.get('src'))
if not stream_url or stream_url in format_urls:
continue
format_urls.add(stream_url)
if stream.get('protection') is not None or stream.get('keySystemOptions') is not None:
drm = True
continue
stream_type = stream.get('type')
stream_ext = determine_ext(stream_url)
if stream_type == 'application/dash+xml' or stream_ext == 'mpd':
formats.extend(self._extract_mpd_formats(
stream_url, video_id, mpd_id='dash', fatal=False))
elif stream_type == 'application/vnd.apple.mpegurl' or stream_ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
stream_url, video_id, ext='mp4',
entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
elif re.search(r'\.isml?/Manifest', stream_url):
formats.extend(self._extract_ism_formats(
stream_url, video_id, ism_id='mss', fatal=False))
else:
formats.append({
'url': stream_url,
})
if not formats:
if not self.get_param('allow_unplayable_formats') and drm:
self.report_drm(video_id)
self._sort_formats(formats)
info = {
'id': video_id,
'title': video_id,
'formats': formats,
}
embed_url = url_or_none(player.get('embedUrl'))
if embed_url:
webpage = self._download_webpage(
embed_url, video_id, 'Downloading embed page', fatal=False)
if webpage:
video = self._parse_json(
self._search_regex(
r'\bvideo\s*=\s*({.+?})\s*;', webpage, 'video',
default='{}'), video_id)
if video:
title = video.get('episodeTitle')
subtitles = {}
subtitles_list = video.get('subtitles')
if isinstance(subtitles_list, list):
for cc in subtitles_list:
cc_url = url_or_none(cc.get('src'))
if not cc_url:
continue
lang = str_or_none(cc.get('language')) or 'nl'
subtitles.setdefault(lang, []).append({
'url': cc_url,
})
return merge_dicts({
'title': title,
'description': video.get('description'),
'thumbnail': url_or_none(
video.get('still_image_url') or video.get('orig_image_url')),
'duration': int_or_none(video.get('duration')),
'timestamp': unified_timestamp(video.get('broadcastDate')),
'creator': video.get('channel'),
'series': video.get('title'),
'episode': title,
'episode_number': int_or_none(video.get('episodeNumber')),
'subtitles': subtitles,
}, info)
return info
def _get_old_info(self, video_id):
metadata = self._download_json(
'http://e.omroep.nl/metadata/%s' % video_id,
video_id,
# We have to remove the javascript callback
transform_source=strip_jsonp,
)
error = metadata.get('error')
if error:
raise ExtractorError(error, expected=True)
# For some videos actual video id (prid) is different (e.g. for
# http://www.omroepwnl.nl/video/fragment/vandaag-de-dag-verkiezingen__POMS_WNL_853698
# video id is POMS_WNL_853698 but prid is POW_00996502)
video_id = metadata.get('prid') or video_id
# titel is too generic in some cases so utilize aflevering_titel as well
# when available (e.g. http://tegenlicht.vpro.nl/afleveringen/2014-2015/access-to-africa.html)
title = metadata['titel']
sub_title = metadata.get('aflevering_titel')
if sub_title and sub_title != title:
title += ': %s' % sub_title
token = self._get_token(video_id)
formats = []
urls = set()
def is_legal_url(format_url):
return format_url and format_url not in urls and re.match(
r'^(?:https?:)?//', format_url)
QUALITY_LABELS = ('Laag', 'Normaal', 'Hoog')
QUALITY_FORMATS = ('adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std')
quality_from_label = qualities(QUALITY_LABELS)
quality_from_format_id = qualities(QUALITY_FORMATS)
items = self._download_json(
'http://ida.omroep.nl/app.php/%s' % video_id, video_id,
'Downloading formats JSON', query={
'adaptive': 'yes',
'token': token,
})['items'][0]
for num, item in enumerate(items):
item_url = item.get('url')
if not is_legal_url(item_url):
continue
urls.add(item_url)
format_id = self._search_regex(
r'video/ida/([^/]+)', item_url, 'format id',
default=None)
item_label = item.get('label')
def add_format_url(format_url):
width = int_or_none(self._search_regex(
r'(\d+)[xX]\d+', format_url, 'width', default=None))
height = int_or_none(self._search_regex(
r'\d+[xX](\d+)', format_url, 'height', default=None))
if item_label in QUALITY_LABELS:
quality = quality_from_label(item_label)
f_id = item_label
elif item_label in QUALITY_FORMATS:
quality = quality_from_format_id(format_id)
f_id = format_id
else:
quality, f_id = [None] * 2
formats.append({
'url': format_url,
'format_id': f_id,
'width': width,
'height': height,
'quality': quality,
})
# Example: http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706
if item.get('contentType') in ('url', 'audio'):
add_format_url(item_url)
continue
try:
stream_info = self._download_json(
item_url + '&type=json', video_id,
'Downloading %s stream JSON'
                    % (item_label or item.get('format') or format_id or num))
except ExtractorError as ee:
if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404:
error = (self._parse_json(
ee.cause.read().decode(), video_id,
fatal=False) or {}).get('errorstring')
if error:
raise ExtractorError(error, expected=True)
raise
# Stream URL instead of JSON, example: npo:LI_NL1_4188102
if isinstance(stream_info, compat_str):
if not stream_info.startswith('http'):
continue
video_url = stream_info
# JSON
else:
video_url = stream_info.get('url')
if not video_url or 'vodnotavailable.' in video_url or video_url in urls:
continue
urls.add(video_url)
if determine_ext(video_url) == 'm3u8':
formats.extend(self._extract_m3u8_formats(
video_url, video_id, ext='mp4',
entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
else:
add_format_url(video_url)
is_live = metadata.get('medium') == 'live'
if not is_live:
for num, stream in enumerate(metadata.get('streams', [])):
stream_url = stream.get('url')
if not is_legal_url(stream_url):
continue
urls.add(stream_url)
# smooth streaming is not supported
stream_type = stream.get('type', '').lower()
if stream_type in ['ss', 'ms']:
continue
if stream_type == 'hds':
f4m_formats = self._extract_f4m_formats(
stream_url, video_id, fatal=False)
# f4m downloader downloads only piece of live stream
for f4m_format in f4m_formats:
f4m_format['preference'] = -5
formats.extend(f4m_formats)
elif stream_type == 'hls':
formats.extend(self._extract_m3u8_formats(
stream_url, video_id, ext='mp4', fatal=False))
# Example: http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706
elif '.asf' in stream_url:
asx = self._download_xml(
stream_url, video_id,
'Downloading stream %d ASX playlist' % num,
transform_source=fix_xml_ampersands, fatal=False)
if not asx:
continue
ref = asx.find('./ENTRY/Ref')
if ref is None:
continue
video_url = ref.get('href')
if not video_url or video_url in urls:
continue
urls.add(video_url)
formats.append({
'url': video_url,
'ext': stream.get('formaat', 'asf'),
'quality': stream.get('kwaliteit'),
'preference': -10,
})
else:
formats.append({
'url': stream_url,
'quality': stream.get('kwaliteit'),
})
self._sort_formats(formats)
subtitles = {}
if metadata.get('tt888') == 'ja':
subtitles['nl'] = [{
'ext': 'vtt',
'url': 'http://tt888.omroep.nl/tt888/%s' % video_id,
}]
return {
'id': video_id,
'title': self._live_title(title) if is_live else title,
'description': metadata.get('info'),
'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
'upload_date': unified_strdate(metadata.get('gidsdatum')),
'duration': parse_duration(metadata.get('tijdsduur')),
'formats': formats,
'subtitles': subtitles,
'is_live': is_live,
}
class NPOLiveIE(NPOBaseIE):
IE_NAME = 'npo.nl:live'
_VALID_URL = r'https?://(?:www\.)?npo(?:start)?\.nl/live(?:/(?P<id>[^/?#&]+))?'
_TESTS = [{
'url': 'http://www.npo.nl/live/npo-1',
'info_dict': {
'id': 'LI_NL1_4188102',
'display_id': 'npo-1',
'ext': 'mp4',
'title': 're:^NPO 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'is_live': True,
},
'params': {
'skip_download': True,
}
}, {
'url': 'http://www.npo.nl/live',
'only_matching': True,
}, {
'url': 'https://www.npostart.nl/live/npo-1',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url) or 'npo-1'
webpage = self._download_webpage(url, display_id)
live_id = self._search_regex(
[r'media-id="([^"]+)"', r'data-prid="([^"]+)"'], webpage, 'live id')
return {
'_type': 'url_transparent',
'url': 'npo:%s' % live_id,
'ie_key': NPOIE.ie_key(),
'id': live_id,
'display_id': display_id,
}
class NPORadioIE(InfoExtractor):
IE_NAME = 'npo.nl:radio'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/(?P<id>[^/]+)'
_TEST = {
'url': 'http://www.npo.nl/radio/radio-1',
'info_dict': {
'id': 'radio-1',
'ext': 'mp3',
'title': 're:^NPO Radio 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'is_live': True,
},
'params': {
'skip_download': True,
}
}
@classmethod
    def suitable(cls, url):
        return False if NPORadioFragmentIE.suitable(url) else super(NPORadioIE, cls).suitable(url)
"iconst_5" ],
0x6c : [ "idiv" ],
0xa5 : [ "if_acmpeq", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0xa6 : [ "if_acmpne", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0x9f : [ "if_icmpeq", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0xa0 : [ "if_icmpne", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0xa1 : [ "if_icmplt", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0xa2 : [ "if_icmpge", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0xa3 : [ "if_icmpgt", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0xa4 : [ "if_icmple", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0x99 : [ "ifeq", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0x9a : [ "ifne", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0x9b : [ "iflt", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0x9c : [ "ifge", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0x9d : [ "ifgt", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0x9e : [ "ifle", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0xc7 : [ "ifnonnull", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0xc6 : [ "ifnull", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0x84 : [ "iinc", "index:B const:B", special_F0, special_F0, None ],
0x15 : [ "iload", "index:B", special_F0, special_F0, None ],
0x1a : [ "iload_0" ],
0x1b : [ "iload_1" ],
0x1c : [ "iload_2" ],
0x1d : [ "iload_3" ],
0x68 : [ "imul" ],
0x74 : [ "ineg" ],
0xc1 : [ "instanceof", "indexbyte1:B indexbyte2:B", special_F2, special_F2R, None ],
0xb9 : [ "invokeinterface", "indexbyte1:B indexbyte2:B count:B null:B", special_F1, special_F1R, "get_interface", "get_interface_index" ],
0xb7 : [ "invokespecial", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_method", "get_method_index" ],
0xb8 : [ "invokestatic", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_method", "get_method_index" ],
0xb6 : [ "invokevirtual", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_method", "get_method_index" ],
0x80 : [ "ior" ],
0x70 : [ "irem" ],
0xac : [ "ireturn" ],
0x78 : [ "ishl" ],
0x7a : [ "ishr" ],
0x36 : [ "istore", "index:B", special_F0, special_F0, None ],
0x3b : [ "istore_0" ],
0x3c : [ "istore_1" ],
0x3d : [ "istore_2" ],
0x3e : [ "istore_3" ],
0x64 : [ "isub" ],
0x7c : [ "iushr" ],
0x82 : [ "ixor" ],
0xa8 : [ "jsr", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0xc9 : [ "jsr_w", "branchbyte1:B branchbyte2:B branchbyte3:B branchbyte4:B", special_F3, special_F3R, None ],
0x8a : [ "l2d" ],
0x89 : [ "l2f" ],
0x88 : [ "l2i" ],
0x61 : [ "ladd" ],
0x2f : [ "laload" ],
0x7f : [ "land" ],
0x50 : [ "lastore" ],
0x94 : [ "lcmp" ],
0x9 : [ "lconst_0" ],
0xa : [ "lconst_1" ],
0x12 : [ "ldc", "index:B", special_F0, special_F0R, "get_value" ],
0x13 : [ "ldc_w", "indexbyte1:B indexbyte2:B", special_F2, special_F2R, None ],
0x14 : [ "ldc2_w", "indexbyte1:B indexbyte2:B", special_F2, special_F2R, None ],
0x6d : [ "ldiv" ],
0x16 : [ "lload", "index:B", special_F0, special_F0, None ],
0x1e : [ "lload_0" ],
0x1f : [ "lload_1" ],
0x20 : [ "lload_2" ],
0x21 : [ "lload_3" ],
0x69 : [ "lmul" ],
0x75 : [ "lneg" ],
0xab : [ "lookupswitch", LookupSwitch ],
0x81 : [ "lor" ],
0x71 : [ "lrem" ],
0xad : [ "lreturn" ],
0x79 : [ "lshl" ],
0x7b : [ "lshr" ],
0x37 : [ "lstore", "index:B", special_F0, special_F0, None ],
0x3f : [ "lstore_0" ],
0x40 : [ "lstore_1" ],
0x41 : [ "lstore_2" ],
0x42 : [ "lstore_3" ],
0x65 : [ "lsub" ],
0x7d : [ "lushr" ],
0x83 : [ "lxor" ],
0xc2 : [ "monitorenter" ],
0xc3 : [ "monitorexit" ],
0xc5 : [ "multianewarray", "indexbyte1:B indexbyte2:B dimensions:B", special_F4, special_F4R, None ],
0xbb : [ "new", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_class", "get_class_index2" ],
0xbc : [ "newarray", "atype:B", special_F0, special_F0, "get_array_type" ],
0x0 : [ "nop" ],
0x57 : [ "pop" ],
0x58 : [ "pop2" ],
0xb5 : [ "putfield", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_field", "get_field_index" ],
0xb3 : [ "putstatic", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_field", "get_field_index" ],
0xa9 : [ "ret", "index:B", special_F0, special_F0, None ],
0xb1 : [ "return" ],
0x35 : [ "saload" ],
0x56 : [ "sastore" ],
0x11 : [ "sipush", "byte1:B byte2:B", special_F1, special_F1R, None ],
0x5f : [ "swap" ],
0xaa : [ "tableswitch", TableSwitch ],
0xc4 : [ "wide" ], # FIXME
}
# Invert the value and the name of the bytecode
INVERT_JAVA_OPCODES = dict([( JAVA_OPCODES[k][0], k ) for k in JAVA_OPCODES])
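# Illustrative note (not part of the original module): INVERT_JAVA_OPCODES maps a
# mnemonic back to its opcode value, e.g. INVERT_JAVA_OPCODES['iload'] == 0x15,
# which is what an assembler-style lookup needs.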
# List of java bytecodes which can modify the control flow
BRANCH_JVM_OPCODES = [ "goto", "goto_w", "if_acmpeq", "if_icmpeq", "if_icmpne", "if_icmplt", "if_icmpge", "if_icmpgt", "if_icmple", "ifeq", "ifne", "iflt", "ifge", "ifgt", "ifle", "ifnonnull", "ifnull", "jsr", "jsr_w" ]
BRANCH2_JVM_OPCODES = [ "goto", "goto.", "jsr", "jsr.", "if.", "return", ".return", "tableswitch", "lookupswitch" ]
MATH_JVM_OPCODES = { ".and" : '&',
".add" : '+',
".sub" : '-',
".mul" : '*',
".div" : '/',
".shl" : '<<',
".shr" : '>>',
".xor" : '^',
".or" : '|',
}
MATH_JVM_RE = []
for i in MATH_JVM_OPCODES :
MATH_JVM_RE.append( (re.compile( i ), MATH_JVM_OPCODES[i]) )
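# Illustrative sketch (not part of the original module): MATH_JVM_RE pairs a compiled
# pattern such as ".add" with the source-level operator it stands for, so an
# instruction mnemonic can be mapped back to the arithmetic it performs.
def _example_math_operator(ins_name):
    # e.g. _example_math_operator("iadd") -> '+', _example_math_operator("lxor") -> '^'
    for pattern, op in MATH_JVM_RE:
        if pattern.match(ins_name):
            return op
    return None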
INVOKE_JVM_OPCODES = [ "invoke." ]
FIELD_READ_JVM_OPCODES = [ "get." ]
FIELD_WRITE_JVM_OPCODES = [ "put." ]
BREAK_JVM_OPCODES = [ "invoke.", "put.", ".store", "iinc", "pop", ".return", "if." ]
INTEGER_INSTRUCTIONS = [ "bipush", "sipush" ]
def EXTRACT_INFORMATION_SIMPLE(op_value) :
"""Extract information (special functions) about a bytecode"""
r_function = JAVA_OPCODES[ op_value ][2]
v_function = JAVA_OPCODES[ op_value ][3]
f_function = JAVA_OPCODES[ op_value ][4]
r_format = ">"
r_buff = []
format = JAVA_OPCODES[ op_value ][1]
l = format.split(" ")
for j in l :
operands = j.split(":")
name = operands[0] + " "
val = operands[1]
r_buff.append( name.replace(' ', '') )
r_format += val
return ( r_function, v_function, r_buff, r_format, f_function )
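# Illustrative sketch (not part of the original module): resolving the operand layout
# of a simple bytecode. For "iload" (0x15) the declared format "index:B" is split into
# operand names plus a big-endian struct format string that the disassembler unpacks.
def _example_extract_iload():
    r_fun, v_fun, names, fmt, f_fun = EXTRACT_INFORMATION_SIMPLE(0x15)
    # names == ['index'] and fmt == '>B': a single unsigned-byte operand.
    return names, fmt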
def EXTRACT_INFORMATION_VARIABLE(idx, op_value, raw_format) :
r_function, v_function, r_buff, r_format, f_function = JAVA_OPCODES[ op_value ][1]( idx, raw_format )
return ( r_function, v_function, r_buff, r_format, f_function )
def determineNext(i, end, m) :
#if "invoke" in i.get_name() :
# self.childs.append( self.end, -1, ExternalMethod( i.get_operands()[0], i.get_operands()[1], i.get_operands()[2] ) )
# self.childs.append( self.end, self.end, self.__context.get_basic_block( self.end + 1 ) )
if "return" in i.get_name() :
return [ -1 ]
elif "goto" in i.get_name() :
return [ i.get_operands() + end ]
elif "jsr" in i.get_name() :
return [ i.get_operands() + end ]
elif "if" in i.get_name() :
return [ end + i.get_length(), i.get_operands() + end ]
elif "tableswitch" in i.get_name() :
x = []
x.append( i.get_operands().default + end )
for idx in range(0, (i.get_operands().high - i.get_operands().low) + 1) :
off = getattr(i.get_operands(), "offset%d" % idx)
x.append( off + end )
return x
elif "lookupswitch" in i.get_name() :
x = []
x.append( i.get_operands().default + end )
for idx in range(0, i.get_operands().npairs) :
off = getattr(i.get_operands(), "offset%d" % idx)
x.append( off + end )
return x
return []
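# Illustrative note (not part of the original module): for a conditional branch such
# as "ifeq" at offset `end`, determineNext returns two successors, the fall-through
# address (end + instruction length) and the branch target (end + signed operand),
# which is what a basic-block/CFG builder links together; "return" yields [-1] as a
# terminal marker.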
def determineException(vm, m) :
return []
def classToJclass(x) :
return "L%s;" % x
METHOD_INFO = [ '>HHHH', namedtuple("MethodInfo", "access_flags name_index descriptor_index attributes_count") ]
ATTRIBUTE_INFO = [ '>HL', namedtuple("AttributeInfo", "attribute_name_index attribute_length") ]
FIELD_INFO = [ '>HHHH', namedtuple("FieldInfo", "access_flags name_index descriptor_index attributes_count") ]
LINE_NUMBER_TABLE = [ '>HH', namedtuple("LineNumberTable", "start_pc line_number") ]
EXCEPTION_TABLE = [ '>HHHH', namedtuple("ExceptionTable", "start_pc end_pc handler_pc catch_type") ]
LOCAL_VARIABLE_TABLE = [ '>HHHHH', namedtuple("LocalVariableTable", "start_pc length name_index descriptor_index index") ]
LOCAL_VARIABLE_TYPE_TABLE = [ '>HHHHH', namedtuple("LocalVariableTypeTable", "start_pc length name_index signature_index index") ]
CODE_LOW_STRUCT = [ '>HHL', namedtuple( "LOW", "max_stack max_locals code_length" ) ]
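# Illustrative sketch (not part of the original module; it assumes `struct` is
# imported elsewhere in this file): each (format, namedtuple) pair above is consumed
# together when parsing the class file, e.g.
#   fmt, ntuple = METHOD_INFO
#   info = ntuple._make(struct.unpack(fmt, raw[:struct.calcsize(fmt)]))
# giving named access to access_flags, name_index, descriptor_index, attributes_count.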
ARRAY_TYPE = {
4 : "T_BOOLEAN",
5 : "T_CHAR",
6 : "T_FLOAT",
7 : "T_DOUBLE",
8 : "T_BYTE",
9 : "T_SHORT",
10 : "T_INT",
11 : "T_LONG",
}
INVERT_ARRAY_TYPE = dict([( ARRAY_TYPE[k], k ) for k in ARRAY_TYPE])
ACC_CLASS_FLAGS = {
0x0001 : [ "ACC_PUBLIC", "Declared public; may be accessed from outside its package." ],
0x0010 : [ "ACC_FINAL", "Declared final; no subclasses allowed." ],
0x0020 : [ "ACC_SUPER", "Treat superclass methods specially when invoked by the invokespecial instruction." ],
0x0200 : [ "ACC_INTERFACE", "Is an interface, not a class." ],
0x0400 : [ "ACC_ABSTRACT", "Declared abstract; may not be instantiated." ],
}
INVERT_ACC_CLASS_FLAGS = dict([( ACC_CLASS_FLAGS[k][0], k ) for k in ACC_CLASS_FLAGS])
ACC_FIELD_FLAGS = {
0x0001 : [ "ACC_PUBLIC", "Declared public; may be accessed from outside its package." ],
0x0002 : [ "ACC_PRIVATE", "Declared private; usable only within | |
ORPORT=0")
return d
def test_multiline_plus(self):
"""
"""
d = self.protocol.get_info("FOO")
d.addCallback(CallbackChecker({"FOO": "\na\nb\nc"}))
self.send(b"250+FOO=")
self.send(b"a")
self.send(b"b")
self.send(b"c")
self.send(b".")
self.send(b"250 OK")
return d
def test_multiline_plus_embedded_equals(self):
"""
"""
d = self.protocol.get_info("FOO")
d.addCallback(CallbackChecker({"FOO": "\na="}))
self.send(b"250+FOO=")
self.send(b"a=")
self.send(b".")
self.send(b"250 OK")
return d
def incremental_check(self, expected, actual):
if '=' in actual:
return
self.assertEqual(expected, actual)
def test_getinfo_incremental(self):
d = self.protocol.get_info_incremental(
"FOO",
functools.partial(self.incremental_check, "bar")
)
self.send(b"250+FOO=")
self.send(b"bar")
self.send(b"bar")
self.send(b".")
self.send(b"250 OK")
return d
def test_getinfo_incremental_continuation(self):
d = self.protocol.get_info_incremental(
"FOO",
functools.partial(self.incremental_check, "bar")
)
self.send(b"250-FOO=")
self.send(b"250-bar")
self.send(b"250-bar")
self.send(b"250 OK")
return d
def test_getinfo_one_line(self):
d = self.protocol.get_info(
"foo",
)
self.send(b'250 foo=bar')
d.addCallback(lambda _: functools.partial(self.incremental_check, "bar"))
return d
def test_getconf(self):
d = self.protocol.get_conf("SOCKSPORT ORPORT")
d.addCallback(CallbackChecker({'SocksPort': '9050', 'ORPort': '0'}))
self.send(b"250-SocksPort=9050")
self.send(b"250 ORPort=0")
return d
def test_getconf_raw(self):
d = self.protocol.get_conf_raw("SOCKSPORT ORPORT")
d.addCallback(CallbackChecker('SocksPort=9050\nORPort=0'))
self.send(b"250-SocksPort=9050")
self.send(b"250 ORPort=0")
return d
def test_getconf_single(self):
d = self.protocol.get_conf_single("SOCKSPORT")
d.addCallback(CallbackChecker('9050'))
self.send(b"250 SocksPort=9050")
return d
def response_ok(self, v):
self.assertEqual(v, '')
def test_setconf(self):
d = self.protocol.set_conf("foo", "bar").addCallback(
functools.partial(self.response_ok)
)
self.send(b"250 OK")
self._wait(d)
self.assertEqual(self.transport.value(), b"SETCONF foo=bar\r\n")
def test_setconf_with_space(self):
d = self.protocol.set_conf("foo", "a value with a space")
d.addCallback(functools.partial(self.response_ok))
self.send(b"250 OK")
self._wait(d)
self.assertEqual(
self.transport.value(),
b'SETCONF foo="a value with a space"\r\n'
)
def test_setconf_multi(self):
d = self.protocol.set_conf("foo", "bar", "baz", 1)
self.send(b"250 OK")
self._wait(d)
self.assertEqual(
self.transport.value(),
b"SETCONF foo=bar baz=1\r\n",
)
def test_quit(self):
d = self.protocol.quit()
self.send(b"250 OK")
self._wait(d)
self.assertEqual(
self.transport.value(),
b"QUIT\r\n",
)
def test_dot(self):
        # just checking we don't explode
self.protocol.graphviz_data()
def test_debug(self):
self.protocol.start_debug()
self.assertTrue(exists('txtorcon-debug.log'))
def error(self, failure):
print("ERROR", failure)
self.assertTrue(False)
def test_twocommands(self):
"Two commands on the wire before first response."
d1 = self.protocol.get_conf("FOO")
ht = {"a": "one", "b": "two"}
d1.addCallback(CallbackChecker(ht)).addErrback(log.err)
d2 = self.protocol.get_info_raw("BAR")
d2.addCallback(CallbackChecker("bar")).addErrback(log.err)
self.send(b"250-a=one")
self.send(b"250-b=two")
self.send(b"250 OK")
self.send(b"250 bar")
return d2
def test_signal_error(self):
try:
self.protocol.signal('FOO')
self.fail()
except Exception as e:
self.assertTrue('Invalid signal' in str(e))
def test_signal(self):
self.protocol.valid_signals = ['NEWNYM']
self.protocol.signal('NEWNYM')
self.assertEqual(
self.transport.value(),
b'SIGNAL NEWNYM\r\n',
)
def test_650_after_authenticate(self):
self.protocol._set_valid_events('CONF_CHANGED')
self.protocol.add_event_listener(
'CONF_CHANGED',
CallbackChecker("Foo=bar")
)
self.send(b"250 OK")
self.send(b"650-CONF_CHANGED")
self.send(b"650-Foo=bar")
def test_notify_after_getinfo(self):
self.protocol._set_valid_events('CIRC')
self.protocol.add_event_listener(
'CIRC',
CallbackChecker("1000 EXTENDED moria1,moria2")
)
self.send(b"250 OK")
d = self.protocol.get_info("a")
d.addCallback(CallbackChecker({'a': 'one'})).addErrback(self.fail)
self.send(b"250-a=one")
self.send(b"250 OK")
self.send(b"650 CIRC 1000 EXTENDED moria1,moria2")
return d
def test_notify_error(self):
self.protocol._set_valid_events('CIRC')
self.send(b"650 CIRC 1000 EXTENDED moria1,moria2")
def test_getinfo(self):
d = self.protocol.get_info("version")
d.addCallback(CallbackChecker({'version': '0.2.2.34'}))
d.addErrback(self.fail)
self.send(b"250-version=0.2.2.34")
self.send(b"250 OK")
self.assertEqual(
self.transport.value(),
b"GETINFO version\r\n",
)
return d
def test_getinfo_single(self):
d = self.protocol.get_info_single("version")
d.addCallback(CallbackChecker('0.2.2.34'))
d.addErrback(self.fail)
self.send(b"250-version=0.2.2.34")
self.send(b"250 OK")
self.assertEqual(
self.transport.value(),
b"GETINFO version\r\n",
)
return d
def test_getinfo_for_descriptor(self):
descriptor_info = b"""250+desc/name/moria1=
router moria1 172.16.31.10 9101 0 9131
platform Tor 0.2.5.0-alpha-dev on Linux
protocols Link 1 2 Circuit 1
published 2013-07-05 23:48:52
fingerprint 9695 DFC3 5FFE B861 329B 9F1A B04C 4639 7020 CE31
uptime 1818933
bandwidth 512000 62914560 1307929
extra-info-digest 17D0142F6EBCDF60160EB1794FA6C9717D581F8C
caches-extra-info
onion-key
-----BEGIN RSA PUBLIC KEY-----
<KEY>
-----END RSA PUBLIC KEY-----
signing-key
-----BEGIN RSA PUBLIC KEY-----
<KEY>
-----END RSA PUBLIC KEY-----
hidden-service-dir
contact 1024D/28988BF5 arma mit edu
ntor-onion-key 9ZVjNkf/iLEnD685SpC5kcDytQ7u5ViiI9JOftdbE0k=
reject *:*
router-signature
-----BEGIN SIGNATURE-----
Y8Tj2e7mPbFJbguulkPEBVYzyO57p4btpWEXvRMD6vxIh/eyn25pehg5dUVBtZlL
iO3EUE0AEYah2W9gdz8t+i3Dtr0zgqLS841GC/TyDKCm+MKmN8d098qnwK0NGF9q
01NZPuSqXM1b6hnl2espFzL7XL8XEGRU+aeg+f/ukw4=
-----END SIGNATURE-----
.
250 OK"""
d = self.protocol.get_info("desc/name/moria1")
d.addCallback(CallbackChecker({'desc/name/moria1': '\n' + '\n'.join(descriptor_info.decode('ascii').split('\n')[1:-2])}))
d.addErrback(self.fail)
for line in descriptor_info.split(b'\n'):
self.send(line)
return d
def test_getinfo_multiline(self):
descriptor_info = b"""250+desc/name/moria1=
router moria1 172.16.31.10 9101 0 9131
platform Tor 0.2.5.0-alpha-dev on Linux
.
250 OK"""
d = self.protocol.get_info("desc/name/moria1")
gold = "\nrouter moria1 172.16.31.10 9101 0 9131\nplatform Tor 0.2.5.0-alpha-dev on Linux"
d.addCallback(CallbackChecker({'desc/name/moria1': gold}))
d.addErrback(self.fail)
for line in descriptor_info.split(b'\n'):
self.send(line)
return d
def test_addevent(self):
self.protocol._set_valid_events('FOO BAR')
self.protocol.add_event_listener('FOO', lambda _: None)
# is it dangerous/ill-advised to depend on internal state of
# class under test?
d = self.protocol.defer
self.send(b"250 OK")
self._wait(d)
self.assertEqual(
self.transport.value().split(b'\r\n')[-2],
b"SETEVENTS FOO"
)
self.transport.clear()
self.protocol.add_event_listener('BAR', lambda _: None)
d = self.protocol.defer
self.send(b"250 OK")
self.assertTrue(
self.transport.value() == b"SETEVENTS FOO BAR\r\n" or
self.transport.value() == b"SETEVENTS BAR FOO\r\n"
)
self._wait(d)
try:
self.protocol.add_event_listener(
'SOMETHING_INVALID', lambda _: None
)
self.assertTrue(False)
except Exception:
pass
def test_eventlistener(self):
self.protocol._set_valid_events('STREAM')
class EventListener(object):
stream_events = 0
def __call__(self, data):
self.stream_events += 1
listener = EventListener()
self.protocol.add_event_listener('STREAM', listener)
d = self.protocol.defer
self.send(b"250 OK")
self._wait(d)
self.send(b"650 STREAM 1234 NEW 4321 1.2.3.4:555 REASON=MISC")
self.send(b"650 STREAM 2345 NEW 4321 2.3.4.5:666 REASON=MISC")
self.assertEqual(listener.stream_events, 2)
def test_eventlistener_error(self):
self.protocol._set_valid_events('STREAM')
class EventListener(object):
stream_events = 0
do_error = False
def __call__(self, data):
self.stream_events += 1
if self.do_error:
raise Exception("the bad thing happened")
# we make sure the first listener has the errors to prove the
# second one still gets called.
listener0 = EventListener()
listener0.do_error = True
listener1 = EventListener()
self.protocol.add_event_listener('STREAM', listener0)
self.protocol.add_event_listener('STREAM', listener1)
d = self.protocol.defer
self.send(b"250 OK")
self._wait(d)
self.send(b"650 STREAM 1234 NEW 4321 1.2.3.4:555 REASON=MISC")
self.send(b"650 STREAM 2345 NEW 4321 2.3.4.5:666 REASON=MISC")
self.assertEqual(listener0.stream_events, 2)
self.assertEqual(listener1.stream_events, 2)
# should have logged the two errors
logged = self.flushLoggedErrors()
self.assertEqual(2, len(logged))
self.assertTrue("the bad thing happened" in str(logged[0]))
self.assertTrue("the bad thing happened" in str(logged[1]))
def test_remove_eventlistener(self):
self.protocol._set_valid_events('STREAM')
class EventListener(object):
stream_events = 0
def __call__(self, data):
self.stream_events += 1
listener = EventListener()
self.protocol.add_event_listener('STREAM', listener)
self.assertEqual(self.transport.value(), b'SETEVENTS STREAM\r\n')
self.protocol.lineReceived(b"250 OK")
self.transport.clear()
self.protocol.remove_event_listener('STREAM', listener)
self.assertEqual(self.transport.value(), b'SETEVENTS \r\n')
def test_remove_eventlistener_multiple(self):
self.protocol._set_valid_events('STREAM')
class EventListener(object):
stream_events = 0
def __call__(self, data):
self.stream_events += 1
listener0 = EventListener()
listener1 = EventListener()
self.protocol.add_event_listener('STREAM', listener0)
self.assertEqual(self.transport.value(), b'SETEVENTS STREAM\r\n')
self.protocol.lineReceived(b"250 OK")
self.transport.clear()
# add another one, shouldn't issue a tor command
self.protocol.add_event_listener('STREAM', listener1)
self.assertEqual(self.transport.value(), b'')
# remove one, should still not issue a tor command
self.protocol.remove_event_listener('STREAM', listener0)
self.assertEqual(self.transport.value(), b'')
# remove the other one, NOW should issue a command
self.protocol.remove_event_listener('STREAM', listener1)
self.assertEqual(self.transport.value(), b'SETEVENTS \r\n')
# try removing invalid event
try:
self.protocol.remove_event_listener('FOO', listener0)
self.fail()
except Exception as e:
self.assertTrue('FOO' in str(e))
def test_continuation_line(self):
d = self.protocol.get_info_raw("key")
def check_continuation(v):
self.assertEqual(v, "key=\nvalue0\nvalue1")
d.addCallback(check_continuation)
self.send(b"250+key=")
self.send(b"value0")
self.send(b"value1")
self.send(b".")
self.send(b"250 OK")
return d
def test_newdesc(self):
"""
FIXME: this test is now maybe a little silly, it's just testing
multiline GETINFO... (Real test is in
TorStateTests.test_newdesc_parse)
"""
self.protocol.get_info_raw('ns/id/624926802351575FF7E4E3D60EFA3BFB56E67E8A')
d = self.protocol.defer
d.addCallback(CallbackChecker("""ns/id/624926802351575FF7E4E3D60EFA3BFB56E67E8A=
r fake YkkmgCNRV1/35OPWDvo7+1bmfoo tanLV/4ZfzpYQW0xtGFqAa46foo 2011-12-12 16:29:16 192.168.127.12 443 80
s Exit Fast Guard HSDir Named Running Stable V2Dir Valid
w Bandwidth=518000
p accept 43,53,79-81,110,143,194,220,443,953,989-990,993,995,1194,1293,1723,1863,2082-2083,2086-2087,2095-2096,3128,4321,5050,5190,5222-5223,6679,6697,7771,8000,8008,8080-8081,8090,8118,8123,8181,8300,8443,8888"""))
self.send(b"250+ns/id/624926802351575FF7E4E3D60EFA3BFB56E67E8A=")
self.send(b"r fake YkkmgCNRV1/35OPWDvo7+1bmfoo tanLV/4ZfzpYQW0xtGFqAa46foo 2011-12-12 16:29:16 192.168.127.12 443 80")
self.send(b"s Exit Fast Guard HSDir Named Running Stable V2Dir Valid")
self.send(b"w Bandwidth=518000")
self.send(b"p accept 43,53,79-81,110,143,194,220,443,953,989-990,993,995,1194,1293,1723,1863,2082-2083,2086-2087,2095-2096,3128,4321,5050,5190,5222-5223,6679,6697,7771,8000,8008,8080-8081,8090,8118,8123,8181,8300,8443,8888")
self.send(b".")
self.send(b"250 OK")
return d
def test_plus_line_no_command(self):
self.protocol.lineReceived(b"650+NS\r\n")
self.protocol.lineReceived(b"r Gabor gFpAHsFOHGATy12ZUswRf0ZrqAU GG6GDp40cQfR3ODvkBT0r+Q09kw 2012-05-12 16:54:56 172.16.58.3 443 80\r\n")
def test_minus_line_no_command(self):
"""
        haven't seen 600's use "-" in the wild, but don't see why it's not
possible
"""
self.protocol._set_valid_events('NS')
self.protocol.add_event_listener('NS', lambda _: None)
self.protocol.lineReceived(b"650-NS\r\n")
self.protocol.lineReceived(b"650 OK\r\n")
class ParseTests(unittest.TestCase):
def setUp(self):
self.controller = TorState(TorControlProtocol())
self.controller.connectionMade = lambda _: None
def test_keywords(self):
x = parse_keywords('events/names=CIRC STREAM ORCONN BW DEBUG INFO NOTICE WARN ERR NEWDESC ADDRMAP AUTHDIR_NEWDESCS DESCCHANGED NS STATUS_GENERAL STATUS_CLIENT STATUS_SERVER GUARD STREAM_BW CLIENTS_SEEN NEWCONSENSUS BUILDTIMEOUT_SET')
self.assertTrue('events/names' in x)
self.assertEqual(x['events/names'], 'CIRC STREAM ORCONN BW DEBUG INFO NOTICE WARN ERR NEWDESC ADDRMAP AUTHDIR_NEWDESCS DESCCHANGED NS STATUS_GENERAL STATUS_CLIENT STATUS_SERVER GUARD STREAM_BW CLIENTS_SEEN NEWCONSENSUS BUILDTIMEOUT_SET')
self.assertEqual(len(x.keys()), 1)
    def test_keywords_multi_equals(self):
x = parse_keywords('foo=something subvalue="foo"')
self.assertEqual(len(x), 1)
self.assertTrue('foo' in x)
self.assertEqual(x['foo'], 'something subvalue="foo"')
def test_default_keywords(self):
x = parse_keywords('foo')
self.assertEqual(len(x), 1)
self.assertTrue('foo' in x)
self.assertEqual(x['foo'], DEFAULT_VALUE)
def test_multientry_keywords_2(self):
x = parse_keywords('foo=bar\nfoo=zarimba')
self.assertEqual(len(x), 1)
self.assertTrue(isinstance(x['foo'], list))
self.assertEqual(len(x['foo']), 2)
self.assertEqual(x['foo'][0], 'bar')
self.assertEqual(x['foo'][1], 'zarimba')
def test_multientry_keywords_3(self):
x = parse_keywords('foo=bar\nfoo=baz\nfoo=zarimba')
self.assertEqual(len(x), 1)
self.assertTrue(isinstance(x['foo'], list))
self.assertEqual(len(x['foo']), 3)
self.assertEqual(x['foo'][0], 'bar')
self.assertEqual(x['foo'][1], 'baz')
self.assertEqual(x['foo'][2], 'zarimba')
def test_multientry_keywords_4(self):
x = parse_keywords('foo=bar\nfoo=baz\nfoo=zarimba\nfoo=foo')
self.assertEqual(len(x), 1)
self.assertTrue(isinstance(x['foo'], list))
self.assertEqual(len(x['foo']), 4)
self.assertEqual(x['foo'][0], 'bar')
self.assertEqual(x['foo'][1], 'baz')
self.assertEqual(x['foo'][2], 'zarimba')
self.assertEqual(x['foo'][3], 'foo')
def test_multiline_keywords_with_spaces(self):
x = parse_keywords('''ns/name/foo=
r foo aaaam7E7h1vY5Prk8v9/nSRCydY BBBBOfum4CtAYuOgf/D33Qq5+rk 2013-10-27 06:22:18 1.2.3.4 9001 9030
s Fast Guard HSDir Running Stable V2Dir Valid
w Bandwidth=1234
ns/name/bar=
r bar aaaaHgNYtTVPw5hHTO28J4je5i8 BBBBBUaJaBFSU/HDrTxnSh+D3+fY 2013-10-27 07:48:56 1.2.4.5 9001 9030
s Exit Fast Guard HSDir Named Running Stable V2Dir Valid
w Bandwidth=1234
OK
''')
self.assertEqual(2, len(x))
keys = sorted(x.keys())
self.assertEqual(keys, ['ns/name/bar', 'ns/name/foo'])
def test_multiline_keywords(self):
x = parse_keywords('''Foo=bar\nBar''')
self.assertEqual(x, {'Foo': 'bar\nBar'})
x = parse_keywords('''Foo=bar\nBar''', multiline_values=False)
self.assertEqual(x, {'Foo': 'bar',
'Bar': DEFAULT_VALUE})
def test_unquoted_keywords(self):
x = parse_keywords('''Tor="0.1.2.3.4-rc44"''')
self.assertEqual(x, {'Tor': '0.1.2.3.4-rc44'})
def test_unquoted_keywords_singlequote(self):
x = parse_keywords("Tor='0.1.2.3.4-rc44'")
self.assertEqual(x, {'Tor': '0.1.2.3.4-rc44'})
def test_unquoted_keywords_empty(self):
x = parse_keywords('foo=')
self.assertEqual(x, {'foo': ''})
def test_network_status(self):
self.controller._update_network_status("""ns/all=
r right2privassy3 ADQ6gCT3DiFHKPDFr3rODBUI8HM JehnjB8l4Js47dyjLCEmE8VJqao 2011-12-02 03:36:40 172.16.58.3 9023 0
s Exit Fast Named Running Stable Valid
w Bandwidth=53
p accept 80,1194,1220,1293,1500,1533,1677,1723,1863,2082-2083,2086-2087,2095-2096,2102-2104,3128,3389,3690,4321,4643,5050,5190,5222-5223,5228,5900,6660-6669,6679,6697,8000,8008,8074,8080,8087-8088,8443,8888,9418,9999-10000,19294,19638
r Unnamed AHe2V2pmj4Yfn0H9+Np3lci7htU T/g7ZLzG/ooqCn+gdLd9Jjh+AEI 2011-12-02 15:52:09 192.168.3.11 443 9030
s Exit Fast Running V2Dir Valid
w Bandwidth=33
p reject 25,119,135-139,445,563,1214,4661-4666,6346-6429,6699,6881-6999""")
# the routers list is always keyed with both name and hash
self.assertEqual(len(self.controller.routers_by_name), 2)
self.assertEqual(len(self.controller.routers_by_hash), 2)
self.assertTrue('right2privassy3' in self.controller.routers)
self.assertTrue('Unnamed' in self.controller.routers)
self.controller.routers.clear()
self.controller.routers_by_name.clear()
self.controller.routers_by_hash.clear()
def test_circuit_status(self):
self.controller._update_network_status("""ns/all=
r wildnl f+Ty/+B6lgYr0Ntbf67O/L2M8ZI c1iK/kPPXKGZZvwXRWbvL9eCfSc 2011-12-02 19:07:05 192.168.127.12 9001 0
s Exit Fast Named Running Stable Valid
w Bandwidth=1900
p reject 25,119,135-139,445,563,1214,4661-4666,6346-6429,6699,6881-6999
r l0l wYXUpLBpzVWfzVSMgGO0dThdd38 KIJC+W1SHeaFOj/BVsEAgxbtQNM 2011-12-02 13:43:39 172.16.58.3 443 80
s Fast Named Running Stable V2Dir Valid
w Bandwidth=22800
p reject 1-65535
r Tecumseh /xAD0tFLS50Dkz+O37xGyVLoKlk yJHbad7MFl1VW2/23RxrPKBTOIE 2011-12-02 09:44:10 192.168.127.12 22 9030
s Fast Guard HSDir Named Running Stable V2Dir Valid
w Bandwidth=18700
p
<filename>program/dnn-desktop-demo/preprocess.py<gh_stars>10-100
import sys
import os
APP_CONF_FILE = 'app.conf'
CAFFE_ENGINE = 'caffe'
TF_ENGINE = 'tf'
def ck_preprocess(i):
if sys.version_info[0]>2:
import configparser as cp
else:
import ConfigParser as cp
# read existing desktop app config, if any
conf = cp.ConfigParser()
conf.read(APP_CONF_FILE)
ck = i['ck_kernel']
r = fill_general(ck, conf, i.get('params', {}))
if r['return'] > 0: return r
r = fill_models(ck, conf, 'Models', tags='caffemodel', exclude_tags=['ssd'], engine=CAFFE_ENGINE)
if r['return'] > 0: return r
r = fill_models(ck, conf, 'DetectionModels', 'caffemodel,ssd', engine=CAFFE_ENGINE)
if r['return'] > 0: return r
r = fill_models(ck, conf, 'DetectionModels', 'model,tensorflow,squeezedetmodel', engine=TF_ENGINE, start_count=len(r['lst']))
if r['return'] > 0: return r
host_os_dict = i.get('host_os_dict', {})
host_os = host_os_dict.get('ck_name', '')
exe_extension = ''
if 'win' == host_os:
exe_extension = '.exe'
r = fill_programs(ck, conf, exe_extension, 'Programs', 'caffe-classification,continuous')
if r['return'] > 0: return r
r = fill_programs(ck, conf, exe_extension, 'DetectionPrograms', 'caffe-detection,continuous')
if r['return'] > 0: return r
r = fill_squeezedet(ck, conf, 'DetectionPrograms', start_count=len(r['lst']))
if r['return'] > 0: return r
r = fill_aux(ck, conf)
if r['return'] > 0: return r
r = fill_val(ck, conf, 'VAL', 'imagenet,val')
if r['return'] > 0: return r
r = fill_val(ck, conf, 'DetectionDatasets', 'object-detection,images')
if r['return'] > 0: return r
with open(APP_CONF_FILE, 'w') as f:
conf.write(f)
bat = ''
if 'win' != host_os:
misc = i.get('misc', {})
path = misc.get('path', '')
tmp_dir = misc.get('tmp_dir', '')
if '' != path:
ld_path = os.path.join(path, tmp_dir)
bat ='export ' + host_os_dict.get('env_ld_library_path', 'LD_LIBRARY_PATH') +'="' + ld_path + '"'
return {'return':0, 'bat': bat, 'new_env': i['env']}
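# Illustrative sketch of the app.conf layout this produces (key names taken from the
# fill_* helpers below; values are double-quoted by setstr so the Qt side reads them
# as strings):
#   [General]
#   ck_bin_path = "..."
#   ck_exe_name = "ck"
#   [Models]
#   count = 2
#   0_uoa = "..."
#   0_name = "..."
#   0_engine = "caffe"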
def setstr(conf, section, key, value):
# string values must be enquoted for Qt to read them correctly
conf.set(section, key, '"' + value.replace('\\', '\\\\') + '"')
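# Illustrative note: the extra quoting matters because the desktop app reads app.conf
# with a Qt settings parser; without the surrounding double quotes and the doubled
# backslashes, Windows paths such as C:\CK would be mangled on the Qt side.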
def ensure_section(conf, section, clean=False):
if clean:
conf.remove_section(section)
if not conf.has_section(section):
conf.add_section(section)
def conf_set_from_params(conf, section, params, param_names):
for param_name in param_names:
if param_name in params:
conf.set(section, param_name, str(params[param_name]))
def fill_general(ck, conf, params):
section = 'General'
ensure_section(conf, section)
try:
bin_path, bin_name = os.path.split(which('ck'))
setstr(conf, section, 'ck_bin_path', bin_path)
setstr(conf, section, 'ck_exe_name', bin_name)
except WhichError:
return {'return':1, 'error': 'Path to ck not found'}
r = ck.access({'action': 'where', 'module_uoa': 'repo', 'data_uoa': 'local'})
if r['return'] > 0: return r
setstr(conf, section, 'ck_repos_path', os.path.dirname(r['path']))
conf_set_from_params(conf, section, params, [
'fps_update_interval_ms',
'recognition_update_interval_ms',
'footer_right_text',
'footer_right_url',
'recognition_auto_restart'
])
return {'return':0}
def meta_contain_tag(u, tags_to_check):
utags = u.get('meta', {}).get('tags', [])
for t in tags_to_check:
if t in utags:
return True
return False
def find_by_tags(ck, tags, module='', exclude_tags=[]):
search_dict = {'action': 'search', 'tags': tags, 'add_meta': 'yes'}
if module != '':
search_dict['module_uoa'] = module
r = ck.access(search_dict)
if r['return'] > 0: return r
lst = [x for x in r['lst'] if not meta_contain_tag(x, exclude_tags)]
for i, u in enumerate(lst):
module_uoa = u['module_uoa']
data_uoa = u['data_uoa']
r = ck.access({'action': 'load', 'module_uoa': module_uoa, 'data_uoa': data_uoa})
if r['return'] > 0: return r
u['meta'] = r['dict']
u['data_name'] = r['data_name']
return {'return':0, 'lst': lst}
def fill_section(ck, conf, section, tags, module='', exclude_tags=[], start_count=0):
r = find_by_tags(ck, tags=tags, module=module, exclude_tags=exclude_tags)
if r['return'] > 0: return r
ensure_section(conf, section, 0 == start_count)
lst = r['lst']
conf.set(section, 'count', str(len(lst) + start_count))
for i, u in enumerate(lst):
setstr(conf, section, str(i + start_count) + '_uoa', u['data_uoa'])
setstr(conf, section, str(i + start_count) + '_name', u['data_name'])
return {'return':0, 'lst': lst}
def fill_models(ck, conf, section, tags, exclude_tags=[], engine='', start_count=0):
r = fill_section(ck, conf, section=section, tags=tags, module='env', exclude_tags=exclude_tags, start_count=start_count)
if r['return'] > 0: return r
lst = r['lst']
for i, u in enumerate(lst):
i = i + start_count
setstr(conf, section, str(i) + '_engine', engine)
return {'return':0, 'lst': lst}
def fill_programs(ck, conf, exe_extension, section, tags):
import glob
r = fill_section(ck, conf, section=section, tags=tags)
if r['return'] > 0: return r
lst = r['lst']
for i, u in enumerate(lst):
output_file = ck.get_by_flat_key({'dict': u, 'key': '##meta#run_cmds#use_continuous#run_time#run_cmd_out1'}).get('value', None)
if not output_file:
print('! Could not find output file for ' + u['data_uoa'])
continue
target_file = ck.get_by_flat_key({'dict': u, 'key': '##meta#target_file'}).get('value', None)
if not target_file:
print('! Could not find target file for ' + u['data_uoa'])
continue
if not target_file.endswith(exe_extension):
target_file = target_file + exe_extension
r = ck.access(['find', '--module_uoa=' + u['module_uoa'], '--data_uoa=' + u['data_uoa']])
if r['return'] != 0:
print('! Could not load program ' + u['data_uoa'] + ': ' + r['error'])
continue
program_path = r['path']
is_webcam = 'webcam' in u.get('meta', {}).get('tags', [])
target_dirs = glob.glob(os.path.join(program_path, 'tmp*'))
if not target_dirs:
print('! Program "' + u['data_uoa'] + '" is not compiled. For use it in desktop demo, please compile it first')
continue
target_paths = []
target_names = []
target_uoas = []
for target_path in target_dirs:
full_target_path = os.path.join(program_path, target_path)
r = ck.load_json_file({'json_file': os.path.join(full_target_path, 'tmp-deps.json')})
if r['return'] != 0:
print('! Failed to load tmp-deps.json from ' + full_target_path + ': ' + r['error'])
continue
target_uoa = ck.get_by_flat_key({'dict': r['dict'], 'key': '##lib-caffe#uoa'}).get('value', None)
if not target_uoa:
print('! Not found Caffe lib env UOA for ' + full_target_path)
continue
target_caffe_name = ck.get_by_flat_key({'dict': r['dict'], 'key': '##lib-caffe#dict#data_name'}).get('value', None)
if not target_caffe_name:
print('! Not found Caffe lib data_name for ' + full_target_path)
continue
if target_caffe_name in target_names:
print('! Duplicate Caffe lib "' + target_caffe_name + '", skipping directory ' + full_target_path)
continue
target_names.append(target_caffe_name)
target_paths.append(os.path.basename(target_path))
target_uoas.append(target_uoa)
if not target_paths:
print('! Program "' + u['data_uoa'] + '" is not compiled. For use it in desktop demo, please compile it first')
continue
setstr(conf, section, str(i) + '_path', program_path)
setstr(conf, section, str(i) + '_output_file', output_file)
setstr(conf, section, str(i) + '_exe', target_file)
setstr(conf, section, str(i) + '_engine', CAFFE_ENGINE)
conf.set(section, str(i) + '_webcam', str(1 if is_webcam else 0))
conf.set(section, str(i) + '_target_count', str(len(target_paths)))
for j, target_path in enumerate(target_paths):
k = str(i) + '_target_' + str(j)
setstr(conf, section, k + '_path', target_path)
setstr(conf, section, k + '_name', target_names[j])
setstr(conf, section, k + '_uoa', target_uoas[j])
return {'return': 0, 'lst': lst}
def fill_aux(ck, conf):
section = 'AUX'
r = fill_section(ck, conf, section=section, tags='imagenet,aux', module='env')
if r['return'] > 0: return r
lst = r['lst']
for i, u in enumerate(lst):
package_uoa = u.get('meta', {}).get('package_uoa', '')
if package_uoa == '':
print('! There is no package_uoa for AUX env entry ' + u['data_uoa'])
setstr(conf, section, str(i) + '_package_uoa', package_uoa)
return {'return': 0}
def fill_val(ck, conf, section, tags):
r = fill_section(ck, conf, section=section, tags=tags, module='env')
if r['return'] > 0: return r
lst = r['lst']
for i, u in enumerate(lst):
package_uoa = u.get('meta', {}).get('package_uoa', '')
r = {}
if package_uoa == '':
print('! There is no package_uoa for VAL env entry ' + u['data_uoa'])
else:
r = ck.access({'action': 'load', 'module_uoa': 'package', 'data_uoa': package_uoa})
setstr(conf, section, str(i) + '_name', r.get('data_name', ''))
setstr(conf, section, str(i) + '_aux_package_uoa', r.get('dict', {}).get('aux_uoa', ''))
return {'return': 0}
def fill_squeezedet(ck, conf, section, start_count):
r = fill_section(ck, conf, section=section, tags='tensorflow,squeezedet,continuous', start_count=start_count)
if r['return'] > 0: return r
lst = r['lst']
for i, u in enumerate(lst):
i = i + start_count
output_file = ck.get_by_flat_key({'dict': u, 'key': '##meta#run_cmds#use_continuous#run_time#run_cmd_out1'}).get('value', None)
        if output_file is None:
print('! Could not find output file for ' + u['data_uoa'])
else:
setstr(conf, section, str(i) + '_output_file', output_file)
setstr(conf, section, str(i) + '_exe', 'continuous.sh')
r = ck.access(['find', '--module_uoa=' + u['module_uoa'], '--data_uoa=' + u['data_uoa']])
if r['return'] != 0:
print('! Could not load program ' + u['data_uoa'] + ': ' + r['error'])
continue
program_path = r['path']
is_webcam = 'webcam' in u.get('meta', {}).get('tags', [])
setstr(conf, section, str(i) + '_path', program_path)
setstr(conf, section, str(i) + '_engine', TF_ENGINE)
conf.set(section, str(i) + '_webcam', str(1 if is_webcam else 0))
r = find_by_tags(ck, tags='lib,tensorflow', module='env')
if r['return'] > 0: return r
lst = r['lst']
conf.set(section, str(i) + '_target_count', str(len(lst)))
for j, u in enumerate(lst):
k = str(i) + '_target_' + str(j)
target = lst[j]
setstr(conf, section, k + '_path', 'tmp')
setstr(conf, section, k + '_name', target['data_name'])
setstr(conf, section, k + '_uoa', target['data_uoa'])
return {'return': 0}
#
# =============================================================================
#
# Copyright (c) 2002-2007 ActiveState Software Inc.
# Author:
# <NAME> (<EMAIL>)
# Home:
# http://trentm.com/projects/which/
#
# LICENSE: MIT
#
# Copyright (c) 2002-2005 ActiveState Corp.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# This is modified from Upenn MEAM 620 course:
# https://alliance.seas.upenn.edu/~meam620/wiki/index.php
import numpy as np
import torch
import matplotlib.pyplot as plt
class qd_object:
"""
Struct to hold qd information
"""
def __init__(self):
self.pos = 0
self.vel = 0
self.euler = 0
self.omega = 0
class state_object:
"""
Struct to hold state information
"""
def __init__(self):
self.pos = np.zeros(3)
self.vel = np.zeros(3)
self.acc = np.zeros(3)
self.yaw = 0
self.yawdot = 0
def init_state(s_start):
"""
Initialize 13 x 1 state vector
"""
s = np.zeros(13)
phi0 = 0.0
theta0 = 0.0
psi0 = s_start.yaw
Rot0 = RPYtoRot_ZXY(phi0, theta0, psi0)
Quat0 = RotToQuat(Rot0)
s[0] = s_start.pos[0] #x
s[1] = s_start.pos[1] #y
s[2] = s_start.pos[2] #z
s[3] = s_start.vel[0] #xdot
s[4] = s_start.vel[1] #ydot
s[5] = s_start.vel[2] #zdot
s[6] = Quat0[0] #qw
s[7] = Quat0[1] #qx
s[8] = Quat0[2] #qy
s[9] = Quat0[3] #qz
s[10] = 0 #p
s[11] = 0 #q
s[12] = 0 #r
return s
def QuatToRot(q):
"""
QuatToRot Converts a Quaternion to Rotation matrix written by Daniel Mellinger
"""
# normalize q
q = q / np.sqrt(np.sum(q**2))
qahat = np.zeros([3, 3] )
qahat[0, 1] = -q[3]
qahat[0, 2] = q[2]
qahat[1, 2] = -q[1]
qahat[1, 0] = q[3]
qahat[2, 0] = -q[2]
qahat[2, 1] = q[1]
R = np.identity(3) + 2 * qahat @ qahat + 2 * q[0] * qahat
return R
def RotToQuat(R):
"""
ROTTOQUAT Converts a Rotation matrix into a Quaternion written by <NAME> from the following website,
deals with the case when tr<0 http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/index.htm
"""
tr = np.sum(np.trace(R))
if (tr > 0):
S = np.sqrt(tr + 1.0) * 2 # S=4*qw
qw = 0.25 * S
qx = (R[2, 1] - R[1, 2]) / S
qy = (R[0, 2] - R[2, 0]) / S
qz = (R[1, 0] - R[0, 1]) / S
elif (R[0, 0] > R[1, 1]) and (R[0, 0] > R[2, 2]):
        S = np.sqrt(1.0 + R[0, 0] - R[1, 1] - R[2, 2]) * 2  # S=4*qx
qw = (R[2, 1] - R[1, 2]) / S
qx = 0.25 * S
qy = (R[0, 1] + R[1, 0]) / S
qz = (R[0, 2] + R[2, 0]) / S
elif R[1, 1] > R[2, 2] :
S = np.sqrt(1.0 + R[1, 1] - R[0, 0] - R[2, 2] ) * 2 # S=4*qy
qw = (R[0, 2] - R[2, 0] ) / S
qx = (R[0, 1] + R[1, 0] ) / S
qy = 0.25 * S
qz = (R[1, 2] + R[2, 1] ) / S
else:
S = np.sqrt(1.0 + R[2, 2] - R[0, 0] - R[1, 1] ) * 2 # S=4*qz
qw = (R[1, 0] - R[0, 1] ) / S
qx = (R[0, 2] + R[2, 0] ) / S
qy = (R[1, 2] + R[2, 1] ) / S
qz = 0.25 * S
    q = np.array([qw, qx, qy, qz])  # flat (4,) vector so it can be assigned into a state slice
    q = q * np.sign(qw)
return q
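# Quick round-trip sketch for the two quaternion helpers above (editorial illustration,
# not part of the original course code): a unit quaternion with positive scalar part
# should survive QuatToRot followed by RotToQuat.
def _demo_quat_roundtrip():
    q = np.array([0.9, 0.1, 0.2, 0.1])
    q = q / np.linalg.norm(q)
    R = QuatToRot(q)
    q_back = np.ravel(RotToQuat(R))
    assert np.allclose(q, q_back, atol=1e-6), (q, q_back)
    return q_back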
def RPYtoRot_ZXY(phi, theta, psi):
"""
RPYtoRot_ZXY Converts roll, pitch, yaw to a body-to-world Rotation matrix.
The rotation matrix in this function is world to body [bRw] you will need to transpose this matrix to get the body
to world [wRb] such that [wP] = [wRb] * [bP], where [bP] is a point in the body frame and [wP] is a point in the
world frame written by <NAME>
"""
R = np.array([[np.cos(psi) * np.cos(theta) - np.sin(phi) * np.sin(psi) * np.sin(theta),
np.cos(theta)*np.sin(psi) + np.cos(psi)*np.sin(phi)*np.sin(theta), -np.cos(phi)*np.sin(theta)],
[-np.cos(phi)*np.sin(psi), np.cos(phi)*np.cos(psi), np.sin(phi)],
[np.cos(psi)*np.sin(theta) + np.cos(theta)*np.sin(phi)*np.sin(psi),
np.sin(psi)*np.sin(theta) - np.cos(psi)*np.cos(theta)*np.sin(phi), np.cos(phi)*np.cos(theta)]])
return R
def RotToRPY_ZXY(R):
"""
RotToRPY_ZXY Extract Roll, Pitch, Yaw from a world-to-body Rotation Matrix
The rotation matrix in this function is world to body [bRw] you will need to transpose the matrix if you have a
body to world [wRb] such that [wP] = [wRb] * [bP], where [bP] is a point in the body frame and [wP] is a point in
the world frame written by <NAME>
bRw = [ cos(psi)*cos(theta) - sin(phi)*sin(psi)*sin(theta),
cos(theta)*sin(psi) + cos(psi)*sin(phi)*sin(theta),
-cos(phi)*sin(theta)]
[-cos(phi)*sin(psi), cos(phi)*cos(psi), sin(phi)]
[ cos(psi)*sin(theta) + cos(theta)*sin(phi)*sin(psi),
sin(psi)*sin(theta) - cos(psi)*cos(theta)*sin(phi),
cos(phi)*cos(theta)]
"""
phi = np.arcsin(R[1, 2])
psi = np.arctan2(-R[1, 0] / np.cos(phi), R[1, 1] / np.cos(phi))
theta = np.arctan2(-R[0, 2] / np.cos(phi), R[2, 2] / np.cos(phi))
return phi, theta, psi
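# Analogous round-trip sketch for the Euler-angle helpers above (editorial illustration
# only): small roll/pitch/yaw angles are recovered from the ZXY rotation matrix.
def _demo_rpy_roundtrip():
    phi, theta, psi = 0.1, -0.2, 0.3
    R = RPYtoRot_ZXY(phi, theta, psi)
    phi2, theta2, psi2 = RotToRPY_ZXY(R)
    assert np.allclose([phi, theta, psi], [phi2, theta2, psi2], atol=1e-9)
    return phi2, theta2, psi2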
def qdToState(qd):
"""
    Converts a qd struct used in hardware to the 1 x 13 state vector used in simulation.
x is 1 x 13 vector of state variables [pos vel quat omega]
qd is a struct including the fields pos, vel, euler, and omega
"""
x = np.zeros(13) #initialize dimensions
x[0:3] = qd.pos
x[3:6] = qd.vel
Rot = RPYtoRot_ZXY(qd.euler[0], qd.euler[1], qd.euler[2])
quat = RotToQuat(Rot)
x[6:10] = quat
    x[10:13] = qd.omega
return x
def stateToQd(x):
"""
    Converts the 1 x 13 state vector x used in simulation to a qd struct used in hardware
x is 1 x 13 vector of state variables [pos vel quat omega]
qd is a struct including the fields pos, vel, euler, and omega
"""
qd = qd_object()
# current state
qd.pos = x[0:3]
qd.vel = x[3:6]
qd.Rot = QuatToRot(x[6:10])
#print("Rot:\n", Rot)
print("rotmat in my qd struct:\n", qd.Rot)
[phi, theta, yaw] = RotToRPY_ZXY(qd.Rot)
qd.euler = np.array([phi, theta, yaw])
qd.omega = x[10:13]
return qd
def diamond(t):
"""
Desired diamond trajectory
"""
T = 15
if t < 0:
pos = np.array([0, 0, 0])
vel = np.array([0, 0, 0])
acc = np.array([0, 0, 0])
elif t < T / 4:
pos = np.array([0, np.sqrt(2), np.sqrt(2)]) * t / (T / 4)
vel = np.array([0, np.sqrt(2), np.sqrt(2)]) / (T / 4)
acc = np.array([0, 0, 0])
elif t < T / 2:
pos = np.array([0, np.sqrt(2), np.sqrt(2)]) * (2 - 4 * t / T) + np.array([0, 0, 2 * np.sqrt(2)]) * (
4 * t / T - 1)
vel = np.array([0, np.sqrt(2), np.sqrt(2)]) * (-4 / T) + np.array([0, 0, 2 * np.sqrt(2)]) * (4 / T)
acc = np.array([0, 0, 0])
elif t < 3 * T / 4:
pos = np.array([0, 0, 2 * np.sqrt(2)]) * (3 - 4 * t / T) + np.array([0, -np.sqrt(2), np.sqrt(2)]) * (
4 * t / T - 2)
vel = np.array([0, 0, 2 * np.sqrt(2)]) * (-4 / T) + np.array([0, -np.sqrt(2), np.sqrt(2)]) * (4 / T)
acc = np.array([0, 0, 0])
elif t < T:
pos = np.array([0, -np.sqrt(2), np.sqrt(2)]) * (4 - 4 * t / T) + np.array([1, 0, 0.5]) * (4 * t / T - 3)
        vel = np.array([0, -np.sqrt(2), np.sqrt(2)]) * (-4 / T) + np.array([1, 0, 0.5]) * (4 / T)
acc = np.array([0, 0, 0])
else:
pos = np.array([1, 0, 0.5])
vel = np.array([0, 0, 0])
acc = np.array([0, 0, 0])
yaw = 0
yawdot = 0
desired_state = state_object()
desired_state.pos = pos
desired_state.vel = vel
desired_state.acc = acc
desired_state.yaw = yaw
desired_state.yawdot = yawdot
return desired_state
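# A small sketch of how the trajectory above is meant to be sampled (editorial
# illustration only): the corners of the diamond are reached at t = T/4, T/2 and 3T/4,
# and the endpoint [1, 0, 0.5] is held for t >= T (T = 15 s in diamond()).
def _demo_diamond_waypoints():
    for t in (0.0, 15 / 4, 15 / 2, 3 * 15 / 4, 15.0):
        st = diamond(t)
        print(t, st.pos, st.vel)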
#############################################################
def vee_map(R):
"""
Performs the vee mapping from a rotation matrix to a vector
"""
arr_out = np.zeros(3)
arr_out[0] = -R[1, 2]
arr_out[1] = R[0, 2]
arr_out[2] = -R[0, 1]
return arr_out
def hat_map(a, mode = "torch"):
if mode is "torch":
a_hat = torch.tensor([[0, -a[2], a[1]],
[a[2], 0, -a[0]],
[-a[1], a[0], 0]], device=device, dtype=torch.float32)
else:
a_hat = np.array([[0, -a[2], a[1]],
[a[2], 0, -a[0]],
[-a[1], a[0], 0]])
return a_hat
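# Consistency sketch for the two maps above (editorial illustration only): vee_map
# undoes hat_map, so vee_map(hat_map(a)) returns a for any 3-vector a. The numpy
# branch is used here to sidestep the torch device handling.
def _demo_hat_vee_roundtrip():
    a = np.array([0.3, -1.2, 2.0])
    assert np.allclose(vee_map(hat_map(a, mode="numpy")), a)
    return a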
##############################################################
def plot_states1D(s_traj, s_plan, fig_num=None):
"""
Plot position and velocity with each X, Y, Z dimension on a separate axis
"""
plt.figure(fig_num, figsize=(10,7.5))
ax_px = plt.subplot(421)
ax_py = plt.subplot(423)
ax_pz = plt.subplot(425)
ax_yaw = plt.subplot(427)
ax_vx = plt.subplot(422)
ax_vy = plt.subplot(424)
ax_vz = plt.subplot(426)
ax_w = plt.subplot(428)
ax_px.plot(s_traj[:, -1], s_traj[:, 0])
ax_px.plot(s_plan[:, -1], s_plan[:, 0])
ax_px.set_ylabel('x (m)')
ax_py.plot(s_traj[:, -1], s_traj[:, 1])
ax_py.plot(s_plan[:, -1], s_plan[:, 1])
ax_py.set_ylabel('y (m)')
ax_pz.plot(s_traj[:, -1], s_traj[:, 2])
ax_pz.plot(s_plan[:, -1], s_plan[:, 2])
ax_pz.set_ylabel('z (m)')
ax_vx.plot(s_traj[:, -1], s_traj[:, 3])
ax_vx.plot(s_plan[:, -1], s_plan[:, 3])
ax_vx.set_ylabel('x (m/s)')
ax_vy.plot(s_traj[:, -1], s_traj[:, 4])
ax_vy.plot(s_plan[:, -1], s_plan[:, 4])
ax_vy.set_ylabel('y (m/s)')
ax_vz.plot(s_traj[:, -1], s_traj[:, 5])
ax_vz.plot(s_plan[:, -1], s_plan[:, 5])
ax_vz.set_ylabel('z (m/s)')
ax_yaw.plot(s_traj[:, -1], s_traj[:, 9])
ax_yaw.plot(s_plan[:, -1], s_plan[:, 9])
ax_yaw.set_ylabel('yaw (rad)')
ax_w.plot(s_traj[:, -1], s_traj[:, 10])
# <Copyright 2022, Argo AI, LLC. Released under the MIT license.>
"""API for loading and Argoverse 2.0 maps.
These include left and right lane boundaries, instead of only lane centerlines,
as was the case in Argoverse 1.0 and 1.1.
Separate map data (files) is provided for each log/scenario. This local map data represents
map entities that fall within some distance according to l-infinity norm from the trajectory
of the egovehicle (AV).
"""
from __future__ import annotations
import copy
import logging
import math
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Dict, Final, List, Optional, Tuple, Union
import numpy as np
import av2.geometry.interpolate as interp_utils
import av2.utils.dilation_utils as dilation_utils
import av2.utils.io as io_utils
import av2.utils.raster as raster_utils
from av2.geometry.sim2 import Sim2
from av2.map.drivable_area import DrivableArea
from av2.map.lane_segment import LaneSegment
from av2.map.pedestrian_crossing import PedestrianCrossing
from av2.utils.typing import NDArrayBool, NDArrayByte, NDArrayFloat, NDArrayInt
# 1 meter resolution is insufficient for the online-generated drivable area and ROI raster grids
# these grids can be generated at an arbitrary resolution, from vector (polygon) objects.
ONLINE_RASTER_RESOLUTION_M: Final[float] = 0.1 # 10 cm resolution
ONLINE_RASTER_RESOLUTION_SCALE: Final[float] = 1 / ONLINE_RASTER_RESOLUTION_M
GROUND_HEIGHT_THRESHOLD_M: Final[float] = 0.3 # 30 centimeters
ROI_ISOCONTOUR_M: Final[float] = 5.0 # in meters
ROI_ISOCONTOUR_GRID: Final[float] = ROI_ISOCONTOUR_M * ONLINE_RASTER_RESOLUTION_SCALE
WPT_INFINITY_NORM_INTERP_NUM: Final[int] = 50
logger = logging.getLogger(__name__)
class RasterLayerType(str, Enum):
"""Raster layer types."""
ROI = "ROI"
DRIVABLE_AREA = "DRIVABLE_AREA"
GROUND_HEIGHT = "GROUND_HEIGHT"
@dataclass(frozen=True)
class RasterMapLayer:
"""Data sampled at points along a regular grid, and a mapping from city coordinates to grid array coordinates."""
array: Union[NDArrayByte, NDArrayFloat]
array_Sim2_city: Sim2
def get_raster_values_at_coords(
self, points_xyz: NDArrayFloat, fill_value: Union[float, int]
) -> Union[NDArrayFloat, NDArrayInt]:
"""Index into a raster grid and extract values corresponding to city coordinates.
Note: a conversion is required between city coordinates and raster grid coordinates, via Sim(2).
Args:
points_xyz: array of shape (N,2) or (N,3) representing coordinates in the city coordinate frame.
fill_value: float representing default "raster" return value for out-of-bounds queries.
Returns:
raster_values: array of shape (N,) representing raster values at the N query coordinates.
"""
# Note: we do NOT round here, because we need to enforce scaled discretization.
city_coords = points_xyz[:, :2]
npyimage_coords = self.array_Sim2_city.transform_point_cloud(city_coords)
npyimage_coords = npyimage_coords.astype(np.int64)
# out of bounds values will default to the fill value, and will not be indexed into the array.
# index in at (x,y) locations, which are (y,x) in the image
raster_values = np.full((npyimage_coords.shape[0]), fill_value)
# generate boolean array indicating whether the value at each index represents a valid coordinate.
ind_valid_pts = (
(npyimage_coords[:, 1] >= 0)
* (npyimage_coords[:, 1] < self.array.shape[0])
* (npyimage_coords[:, 0] >= 0)
* (npyimage_coords[:, 0] < self.array.shape[1])
)
raster_values[ind_valid_pts] = self.array[npyimage_coords[ind_valid_pts, 1], npyimage_coords[ind_valid_pts, 0]]
return raster_values
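# Minimal usage sketch for RasterMapLayer (editorial illustration only; the 4x4 grid
# and the identity-like Sim(2) below are made-up stand-ins for real map data, using
# Sim2 as it is used elsewhere in this module): a query inside the grid returns the
# stored value, an out-of-range query falls back to the fill value.
def _demo_raster_layer_query() -> None:
    toy_layer = RasterMapLayer(
        array=np.arange(16, dtype=float).reshape(4, 4),
        array_Sim2_city=Sim2(R=np.eye(2), t=np.zeros(2), s=1.0),
    )
    points_xyz = np.array([[1.0, 2.0, 0.0], [10.0, 10.0, 0.0]])
    values = toy_layer.get_raster_values_at_coords(points_xyz, fill_value=-1.0)
    # first point maps to array[2, 1] == 9.0, second point is out of bounds -> -1.0
    print(values)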
@dataclass(frozen=True)
class GroundHeightLayer(RasterMapLayer):
"""Rasterized ground height map layer.
Stores the "ground_height_matrix" and also the array_Sim2_city: Sim(2) that produces takes point in city
coordinates to numpy image/matrix coordinates, e.g. p_npyimage = array_Transformation_city * p_city
"""
@classmethod
def from_file(cls, log_map_dirpath: Path) -> GroundHeightLayer:
"""Load ground height values (w/ values at 30 cm resolution) from .npy file, and associated Sim(2) mapping.
Note: ground height values are stored on disk as a float16 2d-array, but cast to float32 once loaded for
compatibility with matplotlib.
Args:
log_map_dirpath: path to directory which contains map files associated with one specific log/scenario.
Returns:
The ground height map layer.
Raises:
RuntimeError: If raster ground height layer file is missing or Sim(2) mapping from city to image coordinates
is missing.
"""
ground_height_npy_fpaths = sorted(log_map_dirpath.glob("*_ground_height_surface____*.npy"))
if not len(ground_height_npy_fpaths) == 1:
raise RuntimeError("Raster ground height layer file is missing")
Sim2_json_fpaths = sorted(log_map_dirpath.glob("*___img_Sim2_city.json"))
if not len(Sim2_json_fpaths) == 1:
raise RuntimeError("Sim(2) mapping from city to image coordinates is missing")
# load the file with rasterized values
ground_height_array: NDArrayFloat = np.load(ground_height_npy_fpaths[0]) # type: ignore
array_Sim2_city = Sim2.from_json(Sim2_json_fpaths[0])
return cls(array=ground_height_array.astype(np.float32), array_Sim2_city=array_Sim2_city)
def get_ground_points_boolean(self, points_xyz: NDArrayFloat) -> NDArrayBool:
"""Check whether each 3d point is likely to be from the ground surface.
Args:
points_xyz: Numpy array of shape (N,3) representing 3d coordinates of N query locations.
Returns:
Numpy array of shape (N,) where ith entry is True if the 3d point (e.g. a LiDAR return) is likely
located on the ground surface.
Raises:
ValueError: If `points_xyz` aren't 3d.
"""
if points_xyz.shape[1] != 3:
raise ValueError("3-dimensional points must be provided to classify them as `ground` with the map.")
ground_height_values = self.get_ground_height_at_xy(points_xyz)
z = points_xyz[:, 2]
near_ground: NDArrayBool = np.absolute(z - ground_height_values) <= GROUND_HEIGHT_THRESHOLD_M
underground: NDArrayBool = z < ground_height_values
is_ground_boolean_arr: NDArrayBool = near_ground | underground
return is_ground_boolean_arr
def get_rasterized_ground_height(self) -> Tuple[NDArrayFloat, Sim2]:
"""Get ground height matrix along with Sim(2) that maps matrix coordinates to city coordinates.
Returns:
ground_height_matrix:
        array_Sim2_city: Sim(2) that takes a point in city coordinates to image coordinates, e.g.
            p_image = image_Transformation_city * p_city
"""
ground_height_matrix: NDArrayFloat = self.array.astype(float)
return ground_height_matrix, self.array_Sim2_city
def get_ground_height_at_xy(self, points_xyz: NDArrayFloat) -> NDArrayFloat:
"""Get ground height for each of the xy locations for all points {(x,y,z)} in a point cloud.
Args:
points_xyz: Numpy array of shape (K,2) or (K,3)
Returns:
Numpy array of shape (K,)
"""
ground_height_values: NDArrayFloat = self.get_raster_values_at_coords(points_xyz, fill_value=np.nan).astype(
float
)
return ground_height_values
@dataclass(frozen=True)
class DrivableAreaMapLayer(RasterMapLayer):
"""Rasterized drivable area map layer.
This provides the "drivable area" as a binary segmentation mask in the bird's eye view.
"""
@classmethod
def from_vector_data(cls, drivable_areas: List[DrivableArea]) -> DrivableAreaMapLayer:
"""Return a drivable area map from vector data.
NOTE: This function provides "drivable area" as a binary segmentation mask in the bird's eye view.
Args:
drivable_areas: List of drivable areas.
Returns:
Driveable area map layer.
"""
# We compute scene boundaries on the fly, based on the vertices of all drivable area polygons.
# These scene boundaries are used to define the raster grid extents.
x_min, y_min, x_max, y_max = compute_data_bounds(drivable_areas)
# The resolution of the rasterization will affect image dimensions.
array_s_city = ONLINE_RASTER_RESOLUTION_SCALE
img_h = int((y_max - y_min + 1) * array_s_city)
img_w = int((x_max - x_min + 1) * array_s_city)
# scale determines the resolution of the raster DA layer.
array_Sim2_city = Sim2(R=np.eye(2), t=np.array([-x_min, -y_min]), s=array_s_city)
# convert vertices for each polygon from a 3d array in city coordinates, to a 2d array
# in image/array coordinates.
da_polygons_img = []
for da_polygon_city in drivable_areas:
da_polygon_img = array_Sim2_city.transform_from(da_polygon_city.xyz[:, :2])
da_polygon_img = np.round(da_polygon_img).astype(np.int32) # type: ignore
da_polygons_img.append(da_polygon_img)
da_mask = raster_utils.get_mask_from_polygons(da_polygons_img, img_h, img_w)
return cls(array=da_mask, array_Sim2_city=array_Sim2_city)
@dataclass(frozen=True)
class RoiMapLayer(RasterMapLayer):
"""Rasterized Region of Interest (RoI) map layer.
This layer provides the "region of interest" as a binary segmentation mask in the bird's eye view.
"""
@classmethod
def from_drivable_area_layer(cls, drivable_area_layer: DrivableAreaMapLayer) -> RoiMapLayer:
"""Rasterize and return 3d vector drivable area as a 2d array, and dilate it by 5 meters, to return a ROI mask.
Args:
drivable_area_layer: Drivable map layer.
Returns:
ROI Layer, containing a (M,N) matrix representing a binary segmentation for the region of interest,
and `array_Sim2_city`, Similarity(2) transformation that transforms point in the city coordinates to
2d array coordinates:
p_array = array_Sim2_city * p_city
"""
        # initialize the ROI as the zero-level isocontour of the drivable area, then dilate it to the 5-meter isocontour
roi_mat_init: NDArrayByte = copy.deepcopy(drivable_area_layer.array).astype(np.uint8)
roi_mask = dilation_utils.dilate_by_l2(roi_mat_init, dilation_thresh=ROI_ISOCONTOUR_GRID)
return cls(array=roi_mask, array_Sim2_city=drivable_area_layer.array_Sim2_city)
def compute_data_bounds(drivable_areas: List[DrivableArea]) -> Tuple[int, int, int, int]:
"""Find the minimum and maximum coordinates along the x and y axes for a set of drivable areas.
Args:
drivable_areas: list of drivable area objects, defined in the city coordinate frame.
Returns:
        xmin: int representing minimum x-coordinate of any vertex of any provided drivable area.
        ymin: int representing minimum y-coordinate, as above.
        xmax: int representing maximum x-coordinate, as above.
        ymax: int representing maximum y-coordinate, as above.
"""
xmin = math.floor(min([da.xyz[:, 0].min() for da in drivable_areas]))
ymin = math.floor(min([da.xyz[:, 1].min() for da in drivable_areas]))
xmax = math.ceil(max([da.xyz[:, 0].max() for da in drivable_areas]))
ymax = math.ceil(max([da.xyz[:, 1].max() for da in drivable_areas]))
return xmin, ymin, xmax, ymax
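# Small sketch of compute_data_bounds (editorial illustration only): any object
# exposing an `xyz` vertex array works, so SimpleNamespace stands in for DrivableArea.
def _demo_compute_data_bounds() -> None:
    from types import SimpleNamespace

    toy_areas = [
        SimpleNamespace(xyz=np.array([[0.2, 1.7, 0.0], [3.4, 2.1, 0.0]])),
        SimpleNamespace(xyz=np.array([[-1.6, 0.3, 0.0], [2.0, 5.9, 0.0]])),
    ]
    print(compute_data_bounds(toy_areas))  # -> (-2, 0, 4, 6)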
@dataclass
class ArgoverseStaticMap:
"""API to interact with a local map for a single log (within a single city).
    Nodes in the lane graph are lane segments. Edges in the lane graph provide the lane segment connectivity, via
left and right neighbors and successors.
Lane segments are parameterized by 3d waypoints representing their left and right boundaries.
Note: predecessors are implicit and available by reversing the directed graph dictated by successors.
found
"""
if hasattr(dll, 'libvlc_media_list_count'):
prototype=ctypes.CFUNCTYPE(ctypes.c_int, MediaList)
paramflags=( (1, ), )
libvlc_media_list_count = prototype( ("libvlc_media_list_count", dll), paramflags )
libvlc_media_list_count.__doc__ = """Get count on media list items
The libvlc_media_list_lock should be held upon entering this function.
\param p_ml a media list instance
\return number of items in media list
"""
if hasattr(dll, 'libvlc_media_list_item_at_index'):
prototype=ctypes.CFUNCTYPE(Media, MediaList, ctypes.c_int)
paramflags=(1,), (1,)
libvlc_media_list_item_at_index = prototype( ("libvlc_media_list_item_at_index", dll), paramflags )
libvlc_media_list_item_at_index.__doc__ = """List media instance in media list at a position
The libvlc_media_list_lock should be held upon entering this function.
\param p_ml a media list instance
\param i_pos position in array where to insert
\return media instance at position i_pos, or NULL if not found.
In case of success, libvlc_media_retain() is called to increase the refcount
on the media.
"""
if hasattr(dll, 'libvlc_media_list_index_of_item'):
prototype=ctypes.CFUNCTYPE(ctypes.c_int, MediaList, Media)
paramflags=(1,), (1,)
libvlc_media_list_index_of_item = prototype( ("libvlc_media_list_index_of_item", dll), paramflags )
libvlc_media_list_index_of_item.__doc__ = """Find index position of List media instance in media list.
Warning: the function will return the first matched position.
The libvlc_media_list_lock should be held upon entering this function.
\param p_ml a media list instance
\param p_md media list instance
\return position of media instance
"""
if hasattr(dll, 'libvlc_media_list_is_readonly'):
prototype=ctypes.CFUNCTYPE(ctypes.c_int, MediaList)
paramflags=( (1, ), )
libvlc_media_list_is_readonly = prototype( ("libvlc_media_list_is_readonly", dll), paramflags )
libvlc_media_list_is_readonly.__doc__ = """This indicates if this media list is read-only from a user point of view
\param p_ml media list instance
\return 0 on readonly, 1 on readwrite
"""
if hasattr(dll, 'libvlc_media_list_lock'):
prototype=ctypes.CFUNCTYPE(None, MediaList)
paramflags=( (1, ), )
libvlc_media_list_lock = prototype( ("libvlc_media_list_lock", dll), paramflags )
libvlc_media_list_lock.__doc__ = """Get lock on media list items
\param p_ml a media list instance
"""
if hasattr(dll, 'libvlc_media_list_unlock'):
prototype=ctypes.CFUNCTYPE(None, MediaList)
paramflags=( (1, ), )
libvlc_media_list_unlock = prototype( ("libvlc_media_list_unlock", dll), paramflags )
libvlc_media_list_unlock.__doc__ = """Release lock on media list items
The libvlc_media_list_lock should be held upon entering this function.
\param p_ml a media list instance
"""
if hasattr(dll, 'libvlc_media_list_event_manager'):
prototype=ctypes.CFUNCTYPE(EventManager, MediaList)
paramflags=( (1, ), )
libvlc_media_list_event_manager = prototype( ("libvlc_media_list_event_manager", dll), paramflags )
libvlc_media_list_event_manager.__doc__ = """Get libvlc_event_manager from this media list instance.
The p_event_manager is immutable, so you don't have to hold the lock
\param p_ml a media list instance
\return libvlc_event_manager
"""
if hasattr(dll, 'libvlc_media_list_player_new'):
prototype=ctypes.CFUNCTYPE(MediaListPlayer, Instance)
paramflags=( (1, ), )
libvlc_media_list_player_new = prototype( ("libvlc_media_list_player_new", dll), paramflags )
libvlc_media_list_player_new.__doc__ = """Create new media_list_player.
\param p_instance libvlc instance
\return media list player instance or NULL on error
"""
if hasattr(dll, 'libvlc_media_list_player_release'):
prototype=ctypes.CFUNCTYPE(None, MediaListPlayer)
paramflags=( (1, ), )
libvlc_media_list_player_release = prototype( ("libvlc_media_list_player_release", dll), paramflags )
libvlc_media_list_player_release.__doc__ = """Release media_list_player.
\param p_mlp media list player instance
"""
if hasattr(dll, 'libvlc_media_list_player_event_manager'):
prototype=ctypes.CFUNCTYPE(EventManager, MediaListPlayer)
paramflags=( (1, ), )
libvlc_media_list_player_event_manager = prototype( ("libvlc_media_list_player_event_manager", dll), paramflags )
libvlc_media_list_player_event_manager.__doc__ = """Return the event manager of this media_list_player.
\param p_mlp media list player instance
\return the event manager
"""
if hasattr(dll, 'libvlc_media_list_player_set_media_player'):
prototype=ctypes.CFUNCTYPE(None, MediaListPlayer, MediaPlayer)
paramflags=(1,), (1,)
libvlc_media_list_player_set_media_player = prototype( ("libvlc_media_list_player_set_media_player", dll), paramflags )
libvlc_media_list_player_set_media_player.__doc__ = """Replace media player in media_list_player with this instance.
\param p_mlp media list player instance
\param p_mi media player instance
"""
if hasattr(dll, 'libvlc_media_list_player_set_media_list'):
prototype=ctypes.CFUNCTYPE(None, MediaListPlayer, MediaList)
paramflags=(1,), (1,)
libvlc_media_list_player_set_media_list = prototype( ("libvlc_media_list_player_set_media_list", dll), paramflags )
libvlc_media_list_player_set_media_list.__doc__ = """Set the media list associated with the player
\param p_mlp media list player instance
\param p_mlist list of media
"""
if hasattr(dll, 'libvlc_media_list_player_play'):
prototype=ctypes.CFUNCTYPE(None, MediaListPlayer)
paramflags=( (1, ), )
libvlc_media_list_player_play = prototype( ("libvlc_media_list_player_play", dll), paramflags )
libvlc_media_list_player_play.__doc__ = """Play media list
\param p_mlp media list player instance
"""
if hasattr(dll, 'libvlc_media_list_player_pause'):
prototype=ctypes.CFUNCTYPE(None, MediaListPlayer)
paramflags=( (1, ), )
libvlc_media_list_player_pause = prototype( ("libvlc_media_list_player_pause", dll), paramflags )
libvlc_media_list_player_pause.__doc__ = """Pause media list
\param p_mlp media list player instance
"""
if hasattr(dll, 'libvlc_media_list_player_is_playing'):
prototype=ctypes.CFUNCTYPE(ctypes.c_int, MediaListPlayer)
paramflags=( (1, ), )
libvlc_media_list_player_is_playing = prototype( ("libvlc_media_list_player_is_playing", dll), paramflags )
libvlc_media_list_player_is_playing.__doc__ = """Is media list playing?
\param p_mlp media list player instance
\return true for playing and false for not playing
"""
if hasattr(dll, 'libvlc_media_list_player_get_state'):
prototype=ctypes.CFUNCTYPE(State, MediaListPlayer)
paramflags=( (1, ), )
libvlc_media_list_player_get_state = prototype( ("libvlc_media_list_player_get_state", dll), paramflags )
libvlc_media_list_player_get_state.__doc__ = """Get current libvlc_state of media list player
\param p_mlp media list player instance
\return libvlc_state_t for media list player
"""
if hasattr(dll, 'libvlc_media_list_player_play_item_at_index'):
prototype=ctypes.CFUNCTYPE(ctypes.c_int, MediaListPlayer, ctypes.c_int)
paramflags=(1,), (1,)
libvlc_media_list_player_play_item_at_index = prototype( ("libvlc_media_list_player_play_item_at_index", dll), paramflags )
libvlc_media_list_player_play_item_at_index.__doc__ = """Play media list item at position index
\param p_mlp media list player instance
\param i_index index in media list to play
\return 0 upon success -1 if the item wasn't found
"""
if hasattr(dll, 'libvlc_media_list_player_play_item'):
prototype=ctypes.CFUNCTYPE(ctypes.c_int, MediaListPlayer, Media)
paramflags=(1,), (1,)
libvlc_media_list_player_play_item = prototype( ("libvlc_media_list_player_play_item", dll), paramflags )
libvlc_media_list_player_play_item.__doc__ = """Play the given media item
\param p_mlp media list player instance
\param p_md the media instance
\return 0 upon success, -1 if the media is not part of the media list
"""
if hasattr(dll, 'libvlc_media_list_player_stop'):
prototype=ctypes.CFUNCTYPE(None, MediaListPlayer)
paramflags=( (1, ), )
libvlc_media_list_player_stop = prototype( ("libvlc_media_list_player_stop", dll), paramflags )
libvlc_media_list_player_stop.__doc__ = """Stop playing media list
\param p_mlp media list player instance
"""
if hasattr(dll, 'libvlc_media_list_player_next'):
prototype=ctypes.CFUNCTYPE(ctypes.c_int, MediaListPlayer)
paramflags=( (1, ), )
libvlc_media_list_player_next = prototype( ("libvlc_media_list_player_next", dll), paramflags )
libvlc_media_list_player_next.__doc__ = """Play next item from media list
\param p_mlp media list player instance
\return 0 upon success -1 if there is no next item
"""
if hasattr(dll, 'libvlc_media_list_player_previous'):
prototype=ctypes.CFUNCTYPE(ctypes.c_int, MediaListPlayer)
paramflags=( (1, ), )
libvlc_media_list_player_previous = prototype( ("libvlc_media_list_player_previous", dll), paramflags )
libvlc_media_list_player_previous.__doc__ = """Play previous item from media list
\param p_mlp media list player instance
\return 0 upon success -1 if there is no previous item
"""
if hasattr(dll, 'libvlc_media_list_player_set_playback_mode'):
prototype=ctypes.CFUNCTYPE(None, MediaListPlayer, PlaybackMode)
paramflags=(1,), (1,)
libvlc_media_list_player_set_playback_mode = prototype( ("libvlc_media_list_player_set_playback_mode", dll), paramflags )
libvlc_media_list_player_set_playback_mode.__doc__ = """Sets the playback mode for the playlist
\param p_mlp media list player instance
\param e_mode playback mode specification
"""
if hasattr(dll, 'libvlc_media_player_new'):
prototype=ctypes.CFUNCTYPE(MediaPlayer, Instance)
paramflags=( (1, ), )
libvlc_media_player_new = prototype( ("libvlc_media_player_new", dll), paramflags )
libvlc_media_player_new.__doc__ = """Create an empty Media Player object
\param p_libvlc_instance the libvlc instance in which the Media Player
should be created.
\return a new media player object, or NULL on error.
"""
if hasattr(dll, 'libvlc_media_player_new_from_media'):
prototype=ctypes.CFUNCTYPE(MediaPlayer, Media)
paramflags=( (1, ), )
libvlc_media_player_new_from_media = prototype( ("libvlc_media_player_new_from_media", dll), paramflags )
libvlc_media_player_new_from_media.__doc__ = """Create a Media Player object from a Media
\param p_md the media. Afterwards the p_md can be safely
destroyed.
\return a new media player object, or NULL on error.
"""
if hasattr(dll, 'libvlc_media_player_release'):
prototype=ctypes.CFUNCTYPE(None, MediaPlayer)
paramflags=( (1, ), )
libvlc_media_player_release = prototype( ("libvlc_media_player_release", dll), paramflags )
libvlc_media_player_release.__doc__ = """Release a media_player after use
Decrement the reference count of a media player object. If the
reference count is 0, then libvlc_media_player_release() will
release the media player object. If the media player object
has been released, then it should not be used again.
\param p_mi the Media Player to free
"""
if hasattr(dll, 'libvlc_media_player_retain'):
prototype=ctypes.CFUNCTYPE(None, MediaPlayer)
paramflags=( (1, ), )
libvlc_media_player_retain = prototype( ("libvlc_media_player_retain", dll), paramflags )
libvlc_media_player_retain.__doc__ = """Retain a reference to a media player object. Use
libvlc_media_player_release() to decrement reference count.
\param p_mi media player object
"""
if hasattr(dll, 'libvlc_media_player_set_media'):
prototype=ctypes.CFUNCTYPE(None, MediaPlayer, Media)
paramflags=(1,), (1,)
libvlc_media_player_set_media = prototype( ("libvlc_media_player_set_media", dll), paramflags )
libvlc_media_player_set_media.__doc__ = """Set the media that will be used by the media_player. If any,
previous md will be released.
\param p_mi the Media Player
\param p_md the Media. Afterwards the p_md can be safely
destroyed.
"""
if hasattr(dll, 'libvlc_media_player_get_media'):
prototype=ctypes.CFUNCTYPE(Media, MediaPlayer)
paramflags=( (1, ), )
libvlc_media_player_get_media = prototype( ("libvlc_media_player_get_media", dll), paramflags )
libvlc_media_player_get_media.__doc__ = """Get the media used by the media_player.
\param p_mi the Media Player
\return the media associated with p_mi, or NULL if no
media is associated
"""
if hasattr(dll, 'libvlc_media_player_event_manager'):
prototype=ctypes.CFUNCTYPE(EventManager, MediaPlayer)
paramflags=( (1, ), )
libvlc_media_player_event_manager = prototype( ("libvlc_media_player_event_manager", dll), paramflags )
libvlc_media_player_event_manager.__doc__ = """Get the Event Manager from which the media player send event.
\param p_mi the Media Player
\return the event manager associated with p_mi
"""
if hasattr(dll, 'libvlc_media_player_is_playing'):
prototype=ctypes.CFUNCTYPE(ctypes.c_int, MediaPlayer)
paramflags=( (1, ), )
libvlc_media_player_is_playing = prototype( ("libvlc_media_player_is_playing", dll), paramflags )
libvlc_media_player_is_playing.__doc__ = """is_playing
\param p_mi the Media Player
\return 1 if the media player is playing, 0 otherwise
"""
if hasattr(dll, 'libvlc_media_player_play'):
prototype=ctypes.CFUNCTYPE(ctypes.c_int, MediaPlayer)
paramflags=( (1, ), )
libvlc_media_player_play = prototype( ("libvlc_media_player_play", dll), paramflags )
libvlc_media_player_play.__doc__ = """Play
\param p_mi the Media Player
\return 0 if playback started (and was already started), or -1 on error.
"""
if hasattr(dll, 'libvlc_media_player_set_pause'):
prototype=ctypes.CFUNCTYPE(None, MediaPlayer, ctypes.c_int)
paramflags=(1,), (1,)
libvlc_media_player_set_pause = prototype( ("libvlc_media_player_set_pause", dll), paramflags )
libvlc_media_player_set_pause.__doc__ = """Pause or resume (no effect if there is no media)
\param mp the Media Player
\param do_pause play/resume if zero, pause if non-zero
\version LibVLC 1.1.1 or later
"""
if hasattr(dll, 'libvlc_media_player_pause'):
prototype=ctypes.CFUNCTYPE(None, MediaPlayer)
paramflags=( (1, ), )
libvlc_media_player_pause = prototype( ("libvlc_media_player_pause", dll), paramflags )
libvlc_media_player_pause.__doc__ = """Toggle pause (no effect if there is no media)
\param p_mi the Media Player
"""
if hasattr(dll, 'libvlc_media_player_stop'):
prototype=ctypes.CFUNCTYPE(None, MediaPlayer)
paramflags=( (1, ), )
libvlc_media_player_stop = prototype( ("libvlc_media_player_stop", dll), paramflags )
libvlc_media_player_stop.__doc__ = """Stop (no effect if there is no media)
\param p_mi the Media Player
"""
if hasattr(dll, 'libvlc_video_set_format'):
    prototype=ctypes.CFUNCTYPE(None,
# Library for the dynamics of a lumen network
# The lumen are 2 dimensional and symmetric and connected with 1 dimensional tubes
#
# Created by <NAME>, 2018
# Modified by <NAME>--Serandour on 8/04/2019
"""
network.py conf.init
Defines the class network and associated functions
Imports
-------
Libraries : numpy, os, math
Created by <NAME>
Modified by <NAME> on 8/06/2018
Modified by <NAME>--Serandour on 8/04/2019
"""
import numpy as np
import math
import os
class network:
def __init__(self, network_folder, out_path, t_step, tube_radius = 0.01, friction = 1, swelling = False, swelling_rate=0., save_area_dat=False):
"""
Initialization of the object network
All properties needed for the simulation are read and initialized
Input
-----
network_folder : str
out_path : str, path-like
t_step : float
Time step of the simulation. Note that if the simulation is adaptative, this time step will change.
tube_radius : float, optional, default = 0.01
Radius of the tube connecting lumens. Define the condition for empty lumens.
friction : float, optional, default = 1
Friction constant for the fluid circulating through pipes.
swelling : bool, optional, default = False
Swelling option for the simulation. True if swelling is included, False otherwise.
swelling_rate : float, optional, default = 0.
            Swelling rate value in case the swelling is considered. Make sure the rate is not too big, to avoid non-converging simulations.
save_area_dat : bool, optional, default = False
Save area option. True if areas are saved in area.dat, False otherwise.
"""
self.network_folder = network_folder
# Reading properties of the lumen
self.gamma_lumen, self.gamma_contact, self.area = np.loadtxt(os.path.join(network_folder, 'lumen.dat'), dtype = float, usecols = [0,2,3], unpack = True)
# Reading links between two lumen
self.lumen_lumen = self.read_lumen_lumen(os.path.join(network_folder, 'lumen_lumen.dat'))
# Reading links between bridge and lumen
self.bridge_lumen, self.num_bridges = self.read_bridge_lumen(os.path.join(network_folder, 'bridge_lumen.dat'))
# Reading links between two bridges
self.bridge_bridge, self.num_bridges = self.read_bridge_bridge(os.path.join(network_folder, 'bridge_bridge.dat'), self.num_bridges)
# Surface tension ratio
self.alpha = self.gamma_contact/(2*self.gamma_lumen)
self.delta = np.full(len(self.alpha), 1) # Possibility of asymmetric lumen is not included
# Resistances
self.tube_radius = tube_radius # Radius of the tube connecting the lumen and the bridges
self.friction = friction # Friction coefficient; friction * length = resistance
# Opening angle of the lumen (angle between curvature and tube)
self.theta = self.set_theta()
# Area factor for expressing the pressure in terms of the area instead of the radius
self.area_factor = self.set_area_factor()
# Ending time: time at which only one lumen is remaining
self.end_time = 0
# Time step for the output of the area evolution
self.time_step = t_step
# Creating output file for the area evolution, events, error messages
self.save_area(start = True, out_path = out_path)
self.save_event('', start = True, out_path = out_path)
self.save_error('', start = True, out_path = out_path)
# Area distribution after only one lumen is remaining
self.final_area = []
# Current time step of the simulation
self.current_time = 0
# List of empty lumen (area < tube_radius **2)
self.empty_list = np.zeros(len(self.alpha))
# Swelling
self.swelling_bool = swelling
self.swelling_rate = swelling_rate
# Save area
self.save_area_dat = save_area_dat
############################################################################################################################
########################################################## Dynamics ########################################################
############################################################################################################################
def flux(self, t, state):
"""
Determines the flux/ area change for each lumen of the network, main function of network.py
Input
-----
self : network object
Needs to be called by a class object
t : float
            Actual time step (not needed for the calculation of the flux, but required by the integration method used in network_simulation.py)
state : float array
The current area of the lumens
Returns
-------
flux : float array
Contains the area change for each lumen in dt
"""
# Initialization of the array containing the area change (index == lumen ID)
flux = []
self.current_time = t
for i in range(len(self.alpha)):
flux.append(0)
        # If only one lumen remains -> end of simulation, flux is zero (needed because the integration method used does not allow a dynamic stop)
if(np.sum(self.empty_list) >= len(self.alpha) - 1):
if(self.end_time == 0):
# Setting the end time for the output file area.log
self.end_time = t
# more than one lumen remaining: calculation of the flux
else:
# Adapting network to new state: Empty lumen are removed and graph is reconnected
self.area = state
self.remove_empty_lumen()
# Area change between directly connected lumen
flux = self.flux_lumen(flux)
# Calculating artificial pressure at each bridge; linear system of equations, with flux(bridge) = 0, the bridge does not gain or loose area
pressure_bridges = self.pressure_bridges()
# Area change between lumen-bridges
flux = self.flux_bridges(flux, pressure_bridges)
# Area change due to swelling
if self.swelling_bool :
flux = self.flux_swelling(flux)
# Saving area for the time step given in the configuration file
if self.save_area_dat :
self.save_area()
self.t_old = t
if(np.abs(np.sum(flux)) > self.tube_radius ** 2):
error = 'total flux is non-zero: total flux = %f' % (np.sum(flux))
self.save_error(error)
return flux
def flux_lumen(self,flux):
"""
Determines the flux/ area change for each lumen due to the connection between lumen and lumen
Input
-----
self network object
needs to be called by a class object
flux float array
vector containing the area change for each lumen; index = lumen ID
Returns
-------
flux float array
area changes due to lumen-lumen connection added to the vector passed
"""
# for each connection between two lumen
for line in range(len(self.lumen_lumen)):
lumen_1 = int (self.lumen_lumen[line][0]) # first lumen
lumen_2 = int (self.lumen_lumen[line][1]) # second lumen
# flux from lumen 2 to lumen 1
fl = (self.pressure(lumen_2) - self.pressure(lumen_1))*self.friction/self.lumen_lumen[line][2]
flux[lumen_1] += fl
flux[lumen_2] -= fl
return flux
def pressure_bridges(self):
"""
Determines the pressure at each bridge
        for each bridge the total flux is 0, meaning that the bridge does not gain or lose area
this gives a linear equation system, which can be solved
The connections are taken from the files bridge_lumen.dat and bridge_bridge.dat
For Information about the equations see the documentation to the code
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
pressure_bridges : float array
Pressure at each bridge
"""
        R_sum = np.zeros(self.num_bridges, dtype = float) # sum of the resistances around one bridge
P_over_R_sum = np.zeros(self.num_bridges, dtype = float) # sum of pressure over resistance between one bridge and all directly connected lumen
matrix_bridges = np.zeros([self.num_bridges, self.num_bridges], dtype= float) # matrix to calculate the pressure at each bridge
# For each connection between bridge and lumen
for line in self.bridge_lumen:
bridge = int(line[0])
lumen = int(line[1])
R_sum[bridge] += 1./line[2]*self.friction
P_over_R_sum[bridge] += self.pressure(lumen)/line[2]*self.friction
# For each connection between bridge and bridge
for line in self.bridge_bridge:
bridge1 = int(line[0])
bridge2 = int(line[1])
matrix_bridges[bridge1][bridge2] = 1./line[2]*self.friction
matrix_bridges[bridge2][bridge1] = 1./line[2]*self.friction
R_sum[bridge1] += 1./line[2]*self.friction
R_sum[bridge2] += 1./line[2]*self.friction
for line in range(self.num_bridges):
matrix_bridges[line][line] = -R_sum[line]
# Solving linear problem with the pressure at each bridge as solution
pressure_bridges = np.linalg.solve(matrix_bridges, -P_over_R_sum)
        return pressure_bridges
def flux_bridges(self, flux, pressure_bridges):
"""
Determines the flux/ area change for each lumen due to the connection between lumen and bridge
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
flux : float array
Area changes due to bridge-lumen connection added to the vector passed
"""
# Area change in one bridge; should be 0; calculated as control value
flux_bridge = np.zeros(self.num_bridges, dtype = float)
# For each connection between bridge and bridge
for line in self.bridge_bridge:
bridge1 = int(line[0])
bridge2 = int(line[1])
fb = (pressure_bridges[bridge2] - pressure_bridges[bridge1])*self.friction/line[2]
flux_bridge[bridge1] += fb
flux_bridge[bridge2] -= fb
# For each connection between bridge and lumen
for line in self.bridge_lumen:
bridge = int(line[0])
lumen = int(line[1])
fl = (pressure_bridges[bridge] - self.pressure(lumen))*self.friction/line[2]
flux[lumen] += fl
flux_bridge[bridge] -= fl
for i in range(len(flux_bridge)):
if (np.abs(flux_bridge[i]) > self.tube_radius ** 2):
error = 'total flux of bridge %d is non-zero: total flux = %f' % (i,flux_bridge[i])
self.save_error(error)
return flux
def flux_swelling(self, flux) :
"""
type 'int' or 'float'")
else:
return (-a)
# mulinv() - returns the multiplicative inverse of the given input
# input - an integer other than zero
# output - multiplicative inverse of the given input
def mulinv(a):
if (isinstance(a,int) or isinstance(a,float))==False:
raise TypeError("Invalid inputs for type 'int' or 'float'")
else:
return (1/a)
# isprime() - checks whether the given input is prime or not
# input - an integer
# output - True/False. Returns True if the given number is prime. False otherwise
def isprime(a):
if isinstance(a,int)==False:
raise TypeError("Invalid inputs for type 'int'")
else:
if a==0 or a==1:
return False
for i in range(2,a):
if a%i==0:
return False
else:
return True
# iscomposite() - checks whether the given input is composite or not
# input - an integer
# output - True/False. Returns True if the given number is composite. False otherwise
def iscomposite(a):
if isinstance(a,int)==False:
raise TypeError("Invalid inputs for type 'int'")
else:
if a==0 or a==1:
return False
for i in range(2,a):
if a%i==0:
return True
else:
return False
# prime() - returns the sequence of prime numbers till the upper limit
# input - an integer(upper limit)
# output - a sequence of prime numbers till the upper limit
def prime(a):
if isinstance(a,int)==False:
raise TypeError("Invalid inputs for type 'int'")
else:
p=[]
for i in range(2,a):
for j in range(2,i):
if i%j==0:
break
else:
p.append(i)
return p
# composite() - returns the sequence of composite numbers till the upper limit
# input - an integer(upper limit)
# output - a sequence of composite numbers till the upper limit
def composite(a):
if isinstance(a,int)==False:
raise TypeError("Invalid inputs for type 'int'")
else:
p,u=[],{x for x in range(2,a+1)}
for i in range(2,a):
for j in range(2,i):
if i%j==0:
break
else:
p.append(i)
return list(u-set(p))
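# Quick sanity check for the two sequence helpers above (editorial illustration only);
# note that composite() builds its universe up to the limit inclusive, so the upper
# limit itself can appear in its output while prime() stops just below it.
def _demo_prime_composite():
    print(prime(20))             # expected: [2, 3, 5, 7, 11, 13, 17, 19]
    print(sorted(composite(20))) # expected: [4, 6, 8, 9, 10, 12, 14, 15, 16, 18, 20]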
# perimeter_sq() - gives the perimeter of the square
# input - an integer(side of the square)
# output - perimeter using the given side
def perimeter_sq(s):
if (isinstance(s,int) or isinstance(s,float))==False:
raise TypeError("Invalid inputs for type 'int' or 'float'")
else:
if s<0:
raise ValueError('Inputs for measurement should be positive')
else:
return 4*s
# perimeter_rec() - gives the perimeter of the rectangle
# inputs - two integers(length and breadth of the rectangle)
# output - perimeter using the given length and breadth
def perimeter_rec(l,b):
a=[]
a.extend([l,b])
for i in a:
if isinstance(i,int) or isinstance(i,float):
break
else:
raise TypeError("Invalid inputs for type 'int' or 'float'")
if l<0 or b<0:
raise ValueError('Inputs for measurement should be positive')
else:
return 2*(l+b)
# perimeter_parallelogram() - gives the perimeter of the parallelogram
# inputs - two integers(length and width of the parallelogram)
# output - perimeter using the given length and width
def perimeter_parallelogram(l,w):
a=[]
a.extend([l,w])
for i in a:
if isinstance(i,int) or isinstance(i,float):
break
else:
raise TypeError("Invalid inputs for type 'int' or 'float'")
if (l<0 or w<0):
raise ValueError('Inputs for measurement should be positive')
else:
return 2*(l+w)
# perimeter_tri() - gives the perimeter of the triangle
# inputs - three integers(sides of the triangle)
# output - perimeter using the given sides
def perimeter_tri(a,b,c):
d=[]
d.extend([a,b,c])
for i in d:
if isinstance(i,int) or isinstance(i,float):
break
else:
raise TypeError("Invalid inputs for type 'int' or 'float'")
if (a<0 or b<0) or c<0:
raise ValueError('Inputs for measurement should be positive')
else:
return a+b+c
# perimeter_trapezium() - gives the perimeter of the trapezium
# inputs - four integers(the four sides of the trapezium)
# output - perimeter using the given height, base1 and base2
def perimeter_trapezium(a,b,c,d):
a1=[]
a1.extend([a,b,c,d])
for i in a1:
if isinstance(i,int) or isinstance(i,float):
break
else:
raise TypeError("Invalid inputs for type 'int' or 'float'")
if (a<0 or b<0) or (c<0 or d<0):
raise ValueError('Inputs for measurement should be positive')
else:
return a+b+c+d
# perimeter_cir() - gives the perimeter of the circle
# inputs - an integer(radius of the circle)
# output - perimeter using the given radius
def perimeter_cir(r):
if (isinstance(r,int) or isinstance(r,float))==False:
raise TypeError("Invalid inputs for type 'int' or 'float'")
else:
if r<0:
raise ValueError('Inputs for measurement should be positive')
else:
pi=22/7
return 2*pi*r
# area_tri() - gives the area of the triangle
# inputs - two integers(base and height of the triangle)
# output - area using the given base and height
def area_tri(b,h):
a=[]
a.extend([b,h])
for i in a:
if isinstance(i,int) or isinstance(i,float):
break
else:
raise TypeError("Invalid inputs for type 'int' or 'float'")
if (b<0 or h<0):
raise ValueError('Inputs for measurement should be positive')
else:
return 0.5*b*h
# area_sq() - gives the area of the square
# inputs - an integer(side of the square)
# output - area using the given side
def area_sq(a):
if (isinstance(a,int) or isinstance(a,float))==False:
raise TypeError("Invalid inputs for type 'int' or 'float'")
else:
if a<0:
raise ValueError('Inputs for measurement should be positive')
else:
return a**2
# area_rec() - gives the area of the rectangle
# inputs - two integers(length and breadth of the rectangle)
# output - area using the given length and breadth
def area_rec(l,b):
a=[]
a.extend([l,b])
for i in a:
if isinstance(i,int) or isinstance(i,float):
break
else:
raise TypeError("Invalid inputs for type 'int' or 'float'")
if (l<0 or b<0):
raise ValueError('Inputs for measurement should be positive')
else:
return l*b
# area_cir() - gives the area of the circle
# inputs - an integer(radius of the circle)
# output - area using the given radius
def area_cir(r):
if (isinstance(r,int) or isinstance(r,float))==False:
raise TypeError("Invalid inputs for type 'int' or 'float'")
else:
if r<0:
raise ValueError('Inputs for measurement should be positive')
else:
pi=22/7
return pi*(r**2)
# area_sector() - gives the area of the sector
# inputs - two integers(theta(angle) and radius of the sector)
# output - area using the given angle and radius
def area_sector(theta,r):
a=[]
a.extend([theta,r])
for i in a:
if isinstance(i,int) or isinstance(i,float):
break
else:
raise TypeError("Invalid inputs for type 'int' or 'float'")
if (theta<0 or r<0):
raise ValueError('Inputs for measurement should be positive')
else:
pi=22/7
t=theta/360
return pi*(r**2)*t
# area_parallelogram() - gives the area of the parallelogram
# inputs - two integers(base and width of the parallelogram)
# output - area using the given base and width
def area_parallelogram(b,w):
a=[]
a.extend([b,w])
for i in a:
if isinstance(i,int) or isinstance(i,float):
break
else:
raise TypeError("Invalid inputs for type 'int' or 'float'")
if (b<0 or w<0):
raise ValueError('Inputs for measurement should be positive')
else:
return b*w
# area_trapezium() - gives the area of the trapezium
# inputs - three integers(height, base1 and base2 of the trapezium)
# output - area using the given height, base1 and base2
def area_trapezium(h,b1,b2):
a=[]
a.extend([h,b1,b2])
for i in a:
if isinstance(i,int) or isinstance(i,float):
break
else:
raise TypeError("Invalid inputs for type 'int' or 'float'")
if (h<0 or b1<0) or b2<0:
raise ValueError('Inputs for measurement should be positive')
else:
t=b1+b2
return h*(t/2)
# area_ellipse() - gives the area of the ellipse
# inputs - two integers(radius1 and radius2 of the ellipse)
# output - area using the given radius1 and radius2
def area_ellipse(a,b):
d=[]
d.extend([a,b])
for i in d:
if isinstance(i,int) or isinstance(i,float):
break
else:
raise TypeError("Invalid inputs for type 'int' or 'float'")
if (a<0 or b<0):
raise ValueError('Inputs for measurement should be positive')
else:
pi=22/7
return pi*a*b
# area_cube() - gives the area of the cube
# inputs - an integer(side of the cube)
# output - area using the given side
def area_cube(a):
if (isinstance(a,int) or isinstance(a,float))==False:
raise TypeError("Invalid inputs for type 'int' or 'float'")
else:
if a<0:
raise ValueError('Inputs for measurement should be positive')
else:
return 6*(a**2)
# area_rectangular_prism() - gives the area of the rectangular_prism
# inputs - three integers(width, length and height of the rectangular_prism)
# output - area using the given width, length and height
def area_rectangular_prism(w,l,h):
a=[]
a.extend([w,l,h])
for i in a:
if isinstance(i,int) or isinstance(i,float):
break
else:
raise TypeError("Invalid inputs for type 'int' or 'float'")
if (w<0 or l<0) or h<0:
raise ValueError('Inputs for measurement should be positive')
else:
return 2*((w*l)+(h*l)+(h*w))
# area_torus() - gives the area of the torus
# inputs - two integers(radius1 and radius2 of the torus)
# output - area using the given radius1 and radius2
def area_torus(r1,r2):
a=[]
a.extend([r1,r2])
for i in a:
if isinstance(i,int) or isinstance(i,float):
break
else:
raise TypeError("Invalid inputs for type 'int' or 'float'")
if (r1<0 or r2<0):
raise ValueError('Inputs for measurement should be positive')
else:
pi=22/7
return (pi**2)*((r2**2)-(r1**2))
# vol_cube() - gives the volume of the cube
# inputs - an integer(side of a cube)
# output - volume using the given side
+= r / gamma
return np.reciprocal(np.sqrt(virtual_distance))
def reverse_beamspread_2d_for_path(ray_geometry):
"""
Reverse beamspread for a path.
Uses the same angles as in beamspread_2d_for_path for consistency.
For a ray (i, ..., j), the reverse beamspread is obtained by considering the point
    j is the source and i is the endpoint. The direct beamspread considers the
    opposite.
This gives the same result as beamspread_2d_for_path(reversed_ray_geometry)
assuming the rays perfectly follow Snell laws. Because of errors in the ray tracing,
there is a small difference.
Parameters
----------
ray_geometry : arim.ray.RayGeometry
Returns
-------
rev_beamspread : ndarray
Shape: (numelements, numgridpoints)
"""
velocities = ray_geometry.rays.fermat_path.velocities
# import pdb; pdb.set_trace()
# Using notations from forward model, this function computes the beamspread at A_n
# where n = ray_geometry.numinterfaces - 1
# Case n=0: undefined
# Case n=1: beamspread = 1/sqrt(r)
# Precompute gamma (coefficient of conversion between actual source
# and virtual source)
n = ray_geometry.numinterfaces - 1
gamma_list = []
for k in range(1, n):
# k varies in [1, n-1] (included)
theta_out = ray_geometry.conventional_inc_angle(n - k)
nu = velocities[n - k] / velocities[n - k - 1]
sin_theta = np.sin(theta_out)
cos_theta = np.cos(theta_out)
# gamma expressed with theta_out instead of theta_in
gamma_list.append(
(nu * cos_theta * cos_theta) / (1 - nu * nu * sin_theta * sin_theta)
)
# Between the probe and the first interface, beamspread of an unbounded medium.
    # Use a copy because the original may be a cached value and we don't want
# to change it by accident.
virtual_distance = ray_geometry.inc_leg_size(n).copy()
for k in range(1, n):
# distance A_k A_{k+1}:
r = ray_geometry.inc_leg_size(n - k)
gamma = 1.0
for i in range(k):
gamma *= gamma_list[i]
virtual_distance += r / gamma
return np.reciprocal(np.sqrt(virtual_distance))
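# Standalone numeric sketch of the virtual-source accumulation used above (editorial
# illustration only, with made-up leg lengths, velocities and angle): for a two-leg
# ray the virtual distance is the last leg plus the first leg divided by gamma, and
# the beamspread is its inverse square root.
def _demo_two_leg_beamspread():
    c_in, c_out = 6300.0, 3130.0    # made-up velocities on either side of the interface
    theta_out = np.deg2rad(20.0)    # refracted (outgoing) angle at the interface
    r_first, r_last = 30e-3, 15e-3  # made-up leg lengths in metres
    nu = c_out / c_in
    gamma = (nu * np.cos(theta_out) ** 2) / (1.0 - (nu * np.sin(theta_out)) ** 2)
    virtual_distance = r_last + r_first / gamma
    return 1.0 / np.sqrt(virtual_distance)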
def material_attenuation_for_path(path, ray_geometry, frequency):
r"""
Return material attenuation for each ray (between 0 and 1)
.. math::
M(\omega) = \exp(- \sum_i a_i(\omega) d_i)
If no attenuation is provided, ignore silently.
Reference: Schmerr chapter 9
Parameters
----------
path : Path
ray_geometry : arim.ray.RayGeometry
Returns
-------
attenuation : ndarray
Shape: (numelements, numgridpoints)
"""
log_att = np.zeros(
((path.interfaces[0].points.numpoints, path.interfaces[-1].points.numpoints))
)
for k, (material, mode) in enumerate(zip(path.materials, path.modes), start=1):
att_obj = material.attenuation(mode)
if att_obj is None:
continue
else:
att_coeff = att_obj(frequency)
log_att -= att_coeff * ray_geometry.inc_leg_size(k)
return np.exp(log_att)
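# Worked numeric example of the attenuation formula above (editorial illustration only,
# with made-up coefficients): two legs with attenuation 5 Np/m over 20 mm and 2 Np/m
# over 10 mm give M = exp(-(5 * 0.020 + 2 * 0.010)) = exp(-0.12) ~= 0.887.
def _demo_material_attenuation():
    att_coeffs = np.array([5.0, 2.0])        # Np/m per leg, made-up values
    leg_lengths = np.array([0.020, 0.010])   # metres, made-up values
    return np.exp(-np.sum(att_coeffs * leg_lengths))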
def _nested_dict_to_flat_list(dictlike):
if dictlike is None:
return []
else:
try:
values = dictlike.values()
except AttributeError:
# dictlike is a leaf:
return [dictlike]
# dictlike is not a leaf:
all_values = []
for value in values:
            # accumulate the leaves from every branch:
            all_values += _nested_dict_to_flat_list(value)
return all_values
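# Small sketch of the flattening helper above (editorial illustration only): nested
# dicts of arrays are reduced to a flat list of the leaf arrays, and None is ignored.
def _demo_nested_dict_flattening():
    a, b = np.zeros(3), np.ones(2)
    nested = {"tx": {"path_a": a, "path_b": b}, "rx": None}
    return _nested_dict_to_flat_list(nested)  # -> [a, b]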
class RayWeights(
namedtuple(
"RayWeights",
[
"tx_ray_weights_dict",
"rx_ray_weights_dict",
"tx_ray_weights_debug_dict",
"rx_ray_weights_debug_dict",
"scattering_angles_dict",
],
)
):
"""
Data container for ray weights.
Attributes
----------
tx_ray_weights_dict : dict[arim.Path, ndarray]
Each value has a shape of (numelements, numgridpoints)
rx_ray_weights_dict : dict[arim.Path, ndarray]
Each value has a shape of (numelements, numgridpoints)
tx_ray_weights_debug_dict : dict
See function tx_ray_weights
rx_ray_weights_debug_dict : dict
See function rx_ray_weights
scattering_angles_dict : dict[arim.Path, ndarray]
Each value has a shape of (numelements, numgridpoints)
"""
@property
def nbytes(self):
all_arrays = []
all_arrays += _nested_dict_to_flat_list(self.tx_ray_weights_dict)
all_arrays += _nested_dict_to_flat_list(self.rx_ray_weights_dict)
all_arrays += _nested_dict_to_flat_list(self.tx_ray_weights_debug_dict)
all_arrays += _nested_dict_to_flat_list(self.rx_ray_weights_debug_dict)
all_arrays += _nested_dict_to_flat_list(self.scattering_angles_dict)
# an array is not hashable so we cheat a bit to get unique arrays
unique_ids = set(id(x) for x in all_arrays)
nbytes = 0
for arr in all_arrays:
if id(arr) in unique_ids:
nbytes += arr.nbytes
unique_ids.remove(id(arr))
return nbytes
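# Small sketch of the nbytes bookkeeping above (editorial illustration only; the real
# keys are arim.Path objects, a plain string is used here): an array shared between
# several dicts is counted only once.
def _demo_ray_weights_nbytes():
    shared = np.zeros((4, 3))  # 96 bytes of float64
    weights = RayWeights(
        tx_ray_weights_dict={"path": shared},
        rx_ray_weights_dict={"path": shared},
        tx_ray_weights_debug_dict={},
        rx_ray_weights_debug_dict={},
        scattering_angles_dict={"path": shared},
    )
    return weights.nbytes  # -> 96, not 288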
def model_amplitudes_factory(tx, rx, view, ray_weights, scattering, scat_angle=0.0):
"""
Calculates the model coefficients once the ray weights are known.
The effective scattering is ``scattering(inc_theta - scat_angle, out_theta - scat_angle)``
Parameters
----------
tx : ndarray
rx : ndarray
view : View
ray_weights : RayWeights
scattering : dict
Dict of functions (slow but precise) or matrices(fast but precision depends on
the angle sampling).
scat_angle : float
Returns
------
model_amplitudes : ModelAmplitudes
Object that is indexable with a grid point index or a slice of grid points. The
values are computed on the fly.
        Indexing it with a slice of grid points returns an ndarray of shape
        (blocksize, numtimetraces); slices can be taken until all grid points are processed.
Examples
--------
>>> model_amplitudes = model_amplitudes_factory(tx, rx, view, ray_weights, scattering)
>>> model_amplitudes[0]
# returns the 'numtimetraces' amplitudes at the grid point 0
>>> model_amplitudes[:10] # returns the amplitudes for the first 10 grid points
array([ 0.27764253, 0.78863332, 0.83998295, 0.96811351, 0.57929045, 0.00935137, 0.8905348 , 0.46976061, 0.08101099, 0.57615469])
>>> model_amplitudes[...] # returns the amplitudes for all points. Warning: you may
... # run out of memory!
array([...])
"""
# Pick the right scattering matrix/function.
# scat_key is LL, LT, TL or TT
scattering_obj = scattering[view.scat_key()]
try:
scattering_obj.shape
except AttributeError:
is_scattering_func = True
else:
is_scattering_func = False
tx_ray_weights = ray_weights.tx_ray_weights_dict[view.tx_path]
rx_ray_weights = ray_weights.rx_ray_weights_dict[view.rx_path]
tx_scattering_angles = ray_weights.scattering_angles_dict[view.tx_path]
rx_scattering_angles = ray_weights.scattering_angles_dict[view.rx_path]
assert (
tx_ray_weights.shape
== rx_ray_weights.shape
== tx_scattering_angles.shape
== rx_scattering_angles.shape
)
# the great transposition
tx_ray_weights = tx_ray_weights.T
rx_ray_weights = rx_ray_weights.T
tx_scattering_angles = tx_scattering_angles.T
rx_scattering_angles = rx_scattering_angles.T
if is_scattering_func:
return _ModelAmplitudesWithScatFunction(
tx,
rx,
scattering_obj,
tx_ray_weights,
rx_ray_weights,
tx_scattering_angles,
rx_scattering_angles,
scat_angle,
)
else:
return _ModelAmplitudesWithScatMatrix(
tx,
rx,
scattering_obj,
tx_ray_weights,
rx_ray_weights,
tx_scattering_angles,
rx_scattering_angles,
scat_angle,
)
class ModelAmplitudes(abc.ABC):
"""
Class for on-the-fly calculation of model amplitudes.
Pseudo-array of coefficients P_ij = Q_i Q'_j S_ij. Shape: (numpoints, numtimetraces)
This object can be indexed almost like a regular Numpy array.
When indexed, the values are computed on the fly.
Otherwise an array of this size would be too large.
.. warning::
        Only the first dimension may be indexed. See examples below.
Examples
--------
>>> model_amplitudes = model_amplitudes_factory(tx, rx, view, ray_weights, scattering_dict)
This object is not an array:
>>> type(model_amplitudes)
__main__.ModelAmplitudes
But when indexed, it returns an array:
>>> type(model_amplitudes[0])
numpy.ndarray
Get the P_ij for the first grid point (returns an array of size (numtimetraces,)):
>>> model_amplitudes[0]
Get the P_ij for the first ten grid points (returns an array of size
(10, numtimetraces,)):
>>> model_amplitudes[:10]
    Get all P_ij (may run out of memory):
    >>> model_amplitudes[...]
    Indexing the second dimension will fail. For example, to get the model
    amplitude of the fourth point and the eighth timetrace, use:
>>> model_amplitudes[3][7] # valid usage
>>> model_amplitudes[3, 7] # invalid usage, raise an IndexError
"""
@abc.abstractmethod
def __getitem__(self, grid_slice):
...
@property
def shape(self):
return (self.numpoints, self.numtimetraces)
def sensitivity_uniform_tfm(self, timetrace_weights, **kwargs):
# wrapper in general case, inherit and write a faster implementation if possible
return sensitivity_uniform_tfm(self, timetrace_weights, **kwargs)
def sensitivity_model_assisted_tfm(self, timetrace_weights, **kwargs):
# wrapper in general case, inherit and write a faster implementation if possible
return sensitivity_model_assisted_tfm(self, timetrace_weights, **kwargs)
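def _example_blockwise_model_amplitudes(model_amplitudes, blocksize=1000):
    # Illustrative sketch (not part of the original module): iterate over the
    # pseudo-array returned by model_amplitudes_factory in blocks of grid points.
    # Only the first dimension is indexed, so each block is an ndarray of shape
    # (blocksize, numtimetraces) computed on the fly, which avoids materialising
    # the full array as model_amplitudes[...] would.
    numpoints, _numtimetraces = model_amplitudes.shape
    for start in range(0, numpoints, blocksize):
        yield model_amplitudes[start:start + blocksize]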
class _ModelAmplitudesWithScatFunction(ModelAmplitudes):
def __init__(
self,
tx,
rx,
scattering_fn,
tx_ray_weights,
rx_ray_weights,
tx_scattering_angles,
rx_scattering_angles,
scat_angle=0.0,
):
self.tx = tx
self.rx = rx
self.scattering_fn = scattering_fn
self.tx_ray_weights = tx_ray_weights
self.rx_ray_weights = rx_ray_weights
self.tx_scattering_angles = tx_scattering_angles
self.rx_scattering_angles = rx_scattering_angles
self.numpoints, self.numelements = tx_ray_weights.shape
self.numtimetraces = self.tx.shape[0]
self.scat_angle = scat_angle
self.dtype = np.complex_
def __getitem__(self, grid_slice):
        # Nota bene: arrays' shape is (numpoints, numtimetraces), i.e. the transpose
# of RayWeights. They are contiguous.
if np.empty(self.numpoints)[grid_slice].ndim > 1:
raise IndexError("Only the first dimension of the object is indexable.")
scat_angle = self.scat_angle
scattering_amplitudes = self.scattering_fn(
np.take(self.tx_scattering_angles[grid_slice], self.tx, axis=-1)
- scat_angle,
np.take(self.rx_scattering_angles[grid_slice], self.rx, axis=-1)
- scat_angle,
)
model_amplitudes = (
scattering_amplitudes
* np.take(self.tx_ray_weights[grid_slice], self.tx, axis=-1)
* np.take(self.rx_ray_weights[grid_slice], self.rx, axis=-1)
)
return model_amplitudes
@numba.guvectorize(
"void(int32[:], int32[:], complex128[:,:], complex128[:], complex128[:], float64[:], float64[:], float64[:], complex128[:])",
"(n),(n),(s,s),(e),(e),(e),(e),()->(n)",
nopython=True,
target="parallel",
)
def _model_amplitudes_with_scat_matrix(
tx,
rx,
scattering_matrix,
tx_ray_weights,
rx_ray_weights,
tx_scattering_angles,
rx_scattering_angles,
scat_angle,
res,
):
# This is a kernel on a grid point.
numtimetraces = tx.shape[0]
# assert res.shape[0] == tx_ray_weights.shape[0]
# assert tx_ray_weights.shape == rx_ray_weights.shape == tx_scattering_angles.shape == rx_scattering_angles.shape
for scan in range(numtimetraces):
inc_theta = tx_scattering_angles[tx[scan]] - scat_angle[0]
out_theta = rx_scattering_angles[rx[scan]] - scat_angle[0]
scattering_amp = _scat._interpolate_scattering_matrix_kernel(
scattering_matrix, inc_theta, out_theta
)
res[scan] = scattering_amp * tx_ray_weights[tx[scan]] * rx_ray_weights[rx[scan]]
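# Layout of the guvectorize signature used by the kernel above (inferred from
# the kernel body; each kernel invocation handles one grid point):
#   (n)    -> tx, rx and res: one entry per timetrace
#   (s, s) -> scattering_matrix: square matrix sampled over incident/outgoing angles
#   (e)    -> ray weights and scattering angles: one entry per probe element
#   ()     -> scat_angle: scalar broadcast to every grid point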
class _ModelAmplitudesWithScatMatrix(ModelAmplitudes):
def __init__(
self,
tx,
rx,
scattering_mat,
tx_ray_weights,
rx_ray_weights,
tx_scattering_angles,
rx_scattering_angles,
scat_angle=0.0,
):
self.tx = tx
self.rx = rx
self.scattering_mat = scattering_mat
self.tx_ray_weights = tx_ray_weights
self.rx_ray_weights = rx_ray_weights
self.tx_scattering_angles = tx_scattering_angles
self.rx_scattering_angles = rx_scattering_angles
self.numpoints, self.numelements = tx_ray_weights.shape
self.numtimetraces = self.tx.shape[0]
STRRJcontrol = False
self.ids.Beta4B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta4A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1A_Button.background_color = (1, 1, 1, 1)
self.ids.PSARJ_Button.background_color = (0, 0, 1, 1)
self.ids.SSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.PTRRJ_Button.background_color = (1, 1, 1, 1)
self.ids.STRRJ_Button.background_color = (1, 1, 1, 1)
if str(args[0])=="SSARJ":
SSARJcontrol = True
Beta4Bcontrol = False
Beta4Acontrol = False
Beta3Bcontrol = False
Beta3Acontrol = False
Beta2Bcontrol = False
Beta2Acontrol = False
Beta1Bcontrol = False
Beta1Acontrol = False
PSARJcontrol = False
PTRRJcontrol = False
STRRJcontrol = False
self.ids.Beta4B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta4A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1A_Button.background_color = (1, 1, 1, 1)
self.ids.PSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.SSARJ_Button.background_color = (0, 0, 1, 1)
self.ids.PTRRJ_Button.background_color = (1, 1, 1, 1)
self.ids.STRRJ_Button.background_color = (1, 1, 1, 1)
def incrementActive(self, *args):
global Beta4Bcontrol, Beta3Bcontrol, Beta2Bcontrol, Beta1Bcontrol, Beta4Acontrol, Beta3Acontrol, Beta2Acontrol, Beta1Acontrol, PSARJcontrol, SSARJcontrol, PTRRJcontrol, STRRJcontrol
if Beta4Bcontrol:
self.incrementBeta4B(float(args[0]))
if Beta3Bcontrol:
self.incrementBeta3B(float(args[0]))
if Beta2Bcontrol:
self.incrementBeta2B(float(args[0]))
if Beta1Bcontrol:
self.incrementBeta1B(float(args[0]))
if Beta4Acontrol:
self.incrementBeta4A(float(args[0]))
if Beta3Acontrol:
self.incrementBeta3A(float(args[0]))
if Beta2Acontrol:
self.incrementBeta2A(float(args[0]))
if Beta1Acontrol:
self.incrementBeta1A(float(args[0]))
if PTRRJcontrol:
self.incrementPTRRJ(float(args[0]))
if STRRJcontrol:
self.incrementSTRRJ(float(args[0]))
if PSARJcontrol:
self.incrementPSARJ(float(args[0]))
if SSARJcontrol:
self.incrementSSARJ(float(args[0]))
self.callback()
def incrementPSARJ(self, *args):
global psarjmc
psarjmc += args[0]
serialWrite("PSARJ=" + str(psarjmc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'psarj'",(psarjmc,))
self.ids.statusbar.text = "PSARJ Value Sent: " + str(psarjmc)
def incrementSSARJ(self, *args):
global ssarjmc
ssarjmc += args[0]
serialWrite("SSARJ=" + str(ssarjmc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'ssarj'",(ssarjmc,))
self.ids.statusbar.text = "SSARJ Value Sent: " + str(ssarjmc)
def incrementPTRRJ(self, *args):
global ptrrjmc
ptrrjmc += args[0]
serialWrite("PTRRJ=" + str(ptrrjmc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'ptrrj'",(ptrrjmc,))
self.ids.statusbar.text = "PTRRJ Value Sent: " + str(ptrrjmc)
def incrementSTRRJ(self, *args):
global strrjmc
strrjmc += args[0]
serialWrite("STRRJ=" + str(strrjmc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'strrj'",(strrjmc,))
self.ids.statusbar.text = "STRRJ Value Sent: " + str(strrjmc)
def incrementBeta1B(self, *args):
global beta1bmc
beta1bmc += args[0]
serialWrite("B1B=" + str(beta1bmc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta1b'",(beta1bmc,))
self.ids.statusbar.text = "Beta1B Value Sent: " + str(beta1bmc)
def incrementBeta1A(self, *args):
global beta1amc
beta1amc += args[0]
serialWrite("B1A=" + str(beta1amc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta1a'",(beta1amc,))
self.ids.statusbar.text = "Beta1A Value Sent: " + str(beta1amc)
def incrementBeta2B(self, *args):
global beta2bmc
beta2bmc += args[0]
serialWrite("B2B=" + str(beta2bmc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta2b'",(beta2bmc,))
self.ids.statusbar.text = "Beta2B Value Sent: " + str(beta2bmc)
def incrementBeta2A(self, *args):
global beta2amc
beta2amc += args[0]
serialWrite("B2A=" + str(beta2amc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta2a'",(beta2amc,))
self.ids.statusbar.text = "Beta2A Value Sent: " + str(beta2amc)
def incrementBeta3B(self, *args):
global beta3bmc
beta3bmc += args[0]
serialWrite("B3B=" + str(beta3bmc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta3b'",(beta3bmc,))
self.ids.statusbar.text = "Beta3B Value Sent: " + str(beta3bmc)
def incrementBeta3A(self, *args):
global beta3amc
beta3amc += args[0]
serialWrite("B3A=" + str(beta3amc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta3a'",(beta3amc,))
self.ids.statusbar.text = "Beta3A Value Sent: " + str(beta3amc)
def incrementBeta4B(self, *args):
global beta4bmc
beta4bmc += args[0]
serialWrite("B4B=" + str(beta4bmc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta4b'",(beta4bmc,))
self.ids.statusbar.text = "Beta4B Value Sent: " + str(beta4bmc)
def incrementBeta4A(self, *args):
global beta4amc
beta4amc += args[0]
serialWrite("B4A=" + str(beta4amc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta4a'",(beta4amc,))
self.ids.statusbar.text = "Beta4A Value Sent: " + str(beta4amc)
def changeBoolean(self, *args):
global manualcontrol
manualcontrol = args[0]
def sendActive(self, *args):
if Beta4Bcontrol:
try:
self.sendBeta4B(float(args[0]))
except Exception as e:
logWrite(e)
if Beta3Bcontrol:
try:
self.sendBeta3B(float(args[0]))
except Exception as e:
logWrite(e)
if Beta2Bcontrol:
try:
self.sendBeta2B(float(args[0]))
except Exception as e:
logWrite(e)
if Beta1Bcontrol:
try:
self.sendBeta1B(float(args[0]))
except Exception as e:
logWrite(e)
if Beta4Acontrol:
try:
self.sendBeta4A(float(args[0]))
except Exception as e:
logWrite(e)
if Beta3Acontrol:
try:
self.sendBeta3A(float(args[0]))
except Exception as e:
logWrite(e)
if Beta2Acontrol:
try:
self.sendBeta2A(float(args[0]))
except Exception as e:
logWrite(e)
if Beta1Acontrol:
try:
self.sendBeta1A(float(args[0]))
except Exception as e:
logWrite(e)
if PTRRJcontrol:
try:
self.sendPTRRJ(float(args[0]))
except Exception as e:
logWrite(e)
if STRRJcontrol:
try:
self.sendSTRRJ(float(args[0]))
except Exception as e:
logWrite(e)
if PSARJcontrol:
try:
self.sendPSARJ(float(args[0]))
except Exception as e:
logWrite(e)
if SSARJcontrol:
try:
self.sendSSARJ(float(args[0]))
except Exception as e:
logWrite(e)
def sendPSARJ(self, *args):
global psarjmc
psarjmc = args[0]
serialWrite("PSARJ=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'psarj'",(args[0],))
self.ids.statusbar.text = "PSARJ Value Sent: " + str(args[0])
def sendSSARJ(self, *args):
global ssarjmc
ssarjmc = args[0]
serialWrite("SSARJ=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'ssarj'",(args[0],))
self.ids.statusbar.text = "SSARJ Value Sent: " + str(args[0])
def sendPTRRJ(self, *args):
global ptrrjmc
ptrrjmc = args[0]
serialWrite("PTRRJ=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'ptrrj'",(args[0],))
self.ids.statusbar.text = "PTRRJ Value Sent: " + str(args[0])
def sendSTRRJ(self, *args):
global strrjmc
strrjmc = args[0]
serialWrite("STRRJ=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'strrj'",(args[0],))
self.ids.statusbar.text = "STRRJ Value Sent: " + str(args[0])
def sendBeta1B(self, *args):
global beta1bmc
beta1bmc = args[0]
serialWrite("B1B=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta1b'",(args[0],))
self.ids.statusbar.text = "Beta1B Value Sent: " + str(args[0])
def sendBeta1A(self, *args):
global beta1amc
beta1amc = args[0]
serialWrite("B1A=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta1a'",(args[0],))
self.ids.statusbar.text = "Beta1A Value Sent: " + str(args[0])
def sendBeta2B(self, *args):
global beta2bmc
beta2bmc = args[0]
serialWrite("B2B=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta2b'",(args[0],))
self.ids.statusbar.text = "Beta2B Value Sent: " + str(args[0])
def sendBeta2A(self, *args):
global beta2amc
beta2amc = args[0]
serialWrite("B2A=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta2a'",(args[0],))
self.ids.statusbar.text = "Beta2A Value Sent: " + str(args[0])
def sendBeta3B(self, *args):
global beta3bmc
beta3bmc = args[0]
serialWrite("B3B=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta3b'",(args[0],))
self.ids.statusbar.text = "Beta3B Value Sent: " + str(args[0])
def sendBeta3A(self, *args):
global beta3amc
beta3amc = args[0]
serialWrite("B3A=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta3a'",(args[0],))
self.ids.statusbar.text = "Beta3A Value Sent: " + str(args[0])
def sendBeta4B(self, *args):
global beta4bmc
beta4bmc = args[0]
serialWrite("B4B=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta4b'",(args[0],))
self.ids.statusbar.text = "Beta4B Value Sent: " + str(args[0])
def sendBeta4A(self, *args):
global beta4amc
beta4amc = args[0]
serialWrite("B4A=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta4a'",(args[0],))
self.ids.statusbar.text = "Beta4A Value Sent: " + str(args[0])
def send0(self, *args):
global psarjmc,ssarjmc,ptrrjmc,strrjmc,beta1amc,beta1bmc,beta2amc,beta2bmc,beta3amc,beta3bmc,beta4amc,beta4bmc
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta1a'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta1b'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta2a'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta2b'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta3a'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta3b'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta4a'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta4b'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'psarj'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'ssarj'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'ptrrj'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'strrj'")
strrjmc = 0
ptrrjmc = 0
ssarjmc = 0
psarjmc = 0
beta1bmc = 0
beta1amc = 0
beta2bmc = 0
beta2amc = 0
beta3bmc = 0
beta3amc = 0
None)[0]['def_plugs']
@client_api('getter', True)
def get_def_c_plugs(self, ns_key):
'''
        Gets the client default plugins in a specific namespace.
:parameters:
ns_key: EMUNamespaceKey
see :class:`trex.emu.trex_emu_profile.EMUNamespaceKey`
'''
ver_args = [{'name': 'ns_key', 'arg': ns_key, 't': EMUNamespaceKey},]
EMUValidator.verify(ver_args)
data = ns_key.conv_to_dict(True)
return self._send_chunks(cmd = 'ctx_client_get_def_plugins', data = data)[0]['def_plugs']
############################ EMU #############################
############################ API #############################
############################ #############################
# Emu Profile
@client_api('command', True)
def load_profile(self, profile, max_rate = None, tunables = None, dry = False, verbose = False):
"""
.. code-block:: python
### Creating a profile and loading it ###
# creating a namespace key with no vlans
ns_key = EMUNamespaceKey(vport = 0)
# plugs for namespace
plugs = {'igmp': {}}
# default plugins for each client in the namespace
def_c_plugs = {'arp': {}}
# creating a simple client
            mac = Mac('00:00:00:70:00:01')  # creating Mac obj
kwargs = {
'ipv4': [1, 1, 1, 3], 'ipv4_dg': [1, 1, 1, 1],
'plugs': {'icmp': {}}
}
client = EMUClientObj(mac.V(), **kwargs) # converting mac to list of bytes
# creating the namespace with 1 client
ns = EMUNamespaceObj(ns_key = ns_key, clients = [client], plugs = plugs, def_c_plugs = def_c_plugs)
# every ns in the profile will have by default ipv6 plugin
def_ns_plugs = {'ipv6': {}}
# creating the profile and send it
profile = EMUProfile(ns = ns, def_ns_plugs = def_ns_plugs)
emu_client.load_profile(profile = profile) # using EMUClient
### loading from a file ###
# loading profile from a file, creating 10K clients, sending 1K clients per second.
emu_client.load_profile(profile = 'emu/simple_emu.py', max_rate = 1000, tunables = ['--clients', '10000'])
        Load an EMU profile, which may be an EMUProfile object or a path to a valid profile file. The only supported file type for now is .py.
        **Pay attention**: sending many clients with no `max_rate` may cause the router to crash. If you are going to send more than 10K clients, consider limiting `max_rate` with the policer to about 1000 clients/sec.
:parameters:
profile : string or EMUProfile
Filename (with path) of the profile or a valid EMUProfile object.
max_rate : int
Max clients rate to send (clients/sec), "None" means with no policer interference. see :class:`trex.emu.trex_emu_profile.EMUProfile`
tunables : list of strings
Tunables line as list of strings. i.e: ['--ns', '1', '--clients', '10'].
dry: bool
True will not send the profile, only print as JSON.
verbose: bool
True will print timings for converting and sending profile.
:raises:
+ :exc:`TRexError` In any case of invalid profile.
"""
if tunables is None:
tunables = []
ver_args =[
{'name': 'profile', 'arg': profile, 't': [EMUProfile, str]},
{'name': 'max_rate', 'arg': max_rate, 't': int, 'must': False},
{'name': 'tunables', 'arg': tunables, 't': 'tunables'},
{'name': 'dry', 'arg': dry, 't': bool},
{'name': 'verbose', 'arg': verbose, 't': bool},
]
EMUValidator.verify(ver_args)
help_flags = ('-h', '--help')
if any(h in tunables for h in help_flags):
# don't print external messages on help
profile = EMUProfile.load(profile, tunables)
return
s = time.time()
self.ctx.logger.pre_cmd("Converting file to profile")
if type(profile) is str:
profile = EMUProfile.load(profile, tunables)
if profile is None:
self.ctx.logger.post_cmd(False)
self._err('Failed to convert EMU profile')
self.ctx.logger.post_cmd(True)
if verbose:
print("Converting profile took: %s" % (format_time(time.time() - s)))
if not dry:
self._start_profile(profile, max_rate, verbose = verbose)
else:
dump_json_yaml(profile.to_json(), to_json = True)
def _start_profile(self, profile, max_rate, verbose = False):
"""
Start EMU profile with all the required commands to the server.
:parameters:
profile: EMUProfile
Emu profile, define all namespaces and clients. see :class:`trex.emu.trex_emu_profile.EMUProfile`
max_rate : int
max clients rate to send (clients/sec)
verbose: bool
True will print more messages and a progress bar.
:raises:
+ :exc:`TRexError`
"""
if not isinstance(profile, EMUProfile):
self._err('Profile must be from `EMUProfile` type, got: %s' % type(profile))
s = time.time()
try:
# make sure there are no clients and ns in emu server
self.remove_profile(max_rate)
self.ctx.logger.pre_cmd('Sending emu profile')
# set the default ns plugins
self.set_def_ns_plugs(profile.def_ns_plugs)
# adding all the namespaces in profile
self.add_ns(profile.ns_list)
# adding all the clients for each namespace
for ns in profile.ns_list:
# set client default plugins
ns_key = ns.key
self.set_def_c_plugs(ns_key, ns.def_c_plugs)
clients_list = list(ns.c_map.values())
self.add_clients(ns_key, clients_list, max_rate = max_rate, verbose = verbose)
except Exception as e:
self.ctx.logger.post_cmd(False)
raise TRexError('Could not load profile, error: %s' % str(e))
self.ctx.logger.post_cmd(True)
if verbose:
print("Sending profile took: %s" % (format_time(time.time() - s)))
@client_api('command', True)
def remove_profile(self, max_rate = None):
"""
        Remove the current profile from the EMU server; all namespaces and clients will be removed.
:parameters:
max_rate: int
Max rate of client / sec for removing clients, defaults to None.
"""
self.ctx.logger.pre_cmd('Removing old emu profile')
self.remove_all_clients_and_ns(max_rate)
self.ctx.logger.post_cmd(True)
return RC_OK()
@client_api('command', True)
def add_ns(self, ns_list):
"""
Add namespaces to EMU server.
:parameters:
ns_list: list of EMUNamespaceObj
see :class:`trex.emu.trex_emu_profile.EMUNamespaceObj`
:raises:
+ :exc:`TRexError`
"""
ver_args = [{'name': 'ns_list', 'arg': ns_list, 't': EMUNamespaceObj, 'allow_list': True},]
EMUValidator.verify(ver_args)
ns_list = listify(ns_list)
namespaces_fields = [ns.get_fields() for ns in ns_list]
data = {'tunnels': namespaces_fields}
self._send_chunks('ctx_add', data = data)
return RC_OK()
@client_api('command', True)
def remove_ns(self, ns_keys):
"""
Remove namespaces from EMU server.
:parameters:
ns_keys: list of EMUNamespaceKey
see :class:`trex.emu.trex_emu_profile.EMUNamespaceKey`
:raises:
+ :exc:`TRexError`
"""
ver_args = [{'name': 'ns_keys', 'arg': ns_keys, 't': EMUNamespaceKey, 'allow_list': True},]
EMUValidator.verify(ver_args)
ns_keys = listify(ns_keys)
# tear down all plugins
for ns_key in ns_keys:
for pl_obj in self.registered_plugs.values():
pl_obj.tear_down_ns(ns_key)
namespaces_keys = [k.conv_to_dict(False) for k in ns_keys]
self._send_chunks('ctx_remove', data = {'tunnels': namespaces_keys})
return RC_OK()
@client_api('command', True)
def add_clients(self, ns_key, clients, max_rate = None, verbose = False):
"""
Add client to EMU server.
:parameters:
ns_key: EMUNamespaceKey
see :class:`trex.emu.trex_emu_profile.EMUNamespaceKey`
clients: list of EMUClientObj
see :class:`trex.emu.trex_emu_profile.EMUClientObj`
max_rate: int
Max clients rate to send (clients/sec), "None" means with no policer interference.
verbose: bool
True will print messages to screen as well as a progress bar.
:raises:
+ :exc:`TRexError`
"""
ver_args = [{'name': 'ns_key', 'arg': ns_key, 't': EMUNamespaceKey},
{'name': 'clients', 'arg': clients, 't': EMUClientObj, 'allow_list': True},
{'name': 'max_rate', 'arg': max_rate, 't': int, 'must': False},]
EMUValidator.verify(ver_args)
clients = listify(clients)
clients_fields = [c.get_fields(to_bytes = True) for c in clients]
data = {'clients': clients_fields}
data.update(ns_key.conv_to_dict(add_tunnel_key = True))
self._send_chunks('ctx_client_add', data = data, max_data_rate = max_rate, track_progress = verbose)
return RC_OK()
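    # Illustrative usage sketch (not part of the original client). It assumes an
    # `emu_client` instance of this class and the EMUNamespaceKey / EMUNamespaceObj /
    # EMUClientObj / Mac helpers shown in the load_profile docstring:
    #
    #   ns_key = EMUNamespaceKey(vport = 0)
    #   emu_client.add_ns(EMUNamespaceObj(ns_key = ns_key))
    #   mac = Mac('00:00:00:70:00:01')
    #   client = EMUClientObj(mac.V(), ipv4 = [1, 1, 1, 3], ipv4_dg = [1, 1, 1, 1])
    #   emu_client.add_clients(ns_key, [client], max_rate = 1000)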
@client_api('command', True)
def remove_clients(self, c_keys, max_rate):
"""
Remove clients from a specific namespace.
:parameters:
c_keys: list of EMUClientKey
see :class:`trex.emu.trex_emu_profile.EMUClientKey`
max_rate: int
Max clients rate to send (clients/sec), "None" means with no policer interference.
:raises:
+ :exc:`TRexError`
"""
ver_args = [{'name': 'c_keys', 'arg': c_keys, 't': EMUClientKey, 'allow_list': True},
{'name': 'max_rate', 'arg': max_rate, 't': int, 'must': False}]
EMUValidator.verify(ver_args)
c_keys = listify(c_keys)
if len(c_keys) == 0:
return RC_OK()
data = self._conv_macs_and_validate_ns(c_keys)
self._send_chunks('ctx_client_remove', data = data, max_data_rate = max_rate, track_progress = True)
return RC_OK()
@client_api('command', True)
def remove_all_clients_and_ns(self, max_rate = None):
"""
Remove all current namespaces and their clients from emu server
:parameters:
max_rate: int
Max clients rate to send (clients/sec), "None" means with no policer interference.
"""
ver_args = [{'name': 'max_rate', 'arg': max_rate, 't': int, 'must': False}]
EMUValidator.verify(ver_args)
ns_keys_gen = self._get_n_ns()
for ns_chunk in ns_keys_gen:
for ns_key in ns_chunk:
c_keys = self.get_all_clients_for_ns(ns_key)
self.remove_clients(c_keys, max_rate)
self.remove_ns(ns_chunk)
return RC_OK()
# Default Plugins
@client_api('command', True)
def set_def_ns_plugs(self, def_plugs):
"""
Set the namespace default plugins. Every new namespace in that profile will have that plugin.
:parameters:
def_plugs: dictionary
Map plugin_name -> plugin_data, each plugin here will be added to every new namespace.
                If a new namespace provides a plugin, it will override the default one.
"""
ver_args = [{'name': 'def_plugs', 'arg': def_plugs, 't': dict, 'must': False},]
EMUValidator.verify(ver_args)
self._send_chunks('ctx_set_def_plugins', data = {'def_plugs': def_plugs})
return RC_OK()
@client_api('command', True)
def set_def_c_plugs(self, ns_key, def_plugs):
"""
Set the client default plugins. Every new client in that namespace will have that plugin.
:parameters:
ns_key: EMUNamespaceKey
see :class:`trex.emu.trex_emu_profile.EMUNamespaceKey`
def_plugs: dictionary
Map plugin_name -> plugin_data, each plugin here will be added to every new client.
                If a new client provides a plugin, it will override the default one.
"""
ver_args =[{'name': 'def_plugs', 'arg': def_plugs, 't': dict, 'must': False},
{'name': 'ns_key', 'arg': ns_key, 't': EMUNamespaceKey, 'allow_list': True},
]
EMUValidator.verify(ver_args)
data = {'def_plugs': def_plugs}
data.update(ns_key.conv_to_dict(add_tunnel_key = True))
self._send_chunks('ctx_client_set_def_plugins', data = data)
return RC_OK()
@client_api('command', False)
def set_verbose (self, level):
"""
Sets verbose level
:parameters:
level : str
"none" - be silent no matter | |
"question": "Exercitation nisi reprehenderit ea anim dolor nostrud occaecat ad cupidatat esse mollit id pariatur id."
}
],
"created": 1487858347,
"modified": 1493557147
},
{
"id": "d2793f99-c3c3-11e7-8d29-d15d28ee5381",
"canonicalQuestion": "Proident sint laboris pariatur dolor velit ullamco officia officia aute esse proident fugiat dolor veniam velit fugiat magna nulla id.",
"documentId": "d27b6283-c3c3-11e7-8d29-d15d28ee5381",
"page": 5,
"position": {
"boundingBoxes": [{
"x1": 432,
"y1": 145,
"x2": 587,
"y2": 354
}]
},
"answers": [
{
"id": "d2793f9a-c3c3-11e7-8d29-d15d28ee5381",
"answer": "Veniam laborum deserunt elit proident consectetur elit est magna culpa sit dolor ad occaecat non deserunt ullamco excepteur non qui id elit fugiat commodo officia occaecat non."
}
],
"paraphraseQuestions": [
{
"id": "d2793f9b-c3c3-11e7-8d29-d15d28ee5381",
"question": "Laborum cupidatat deserunt tempor id voluptate aute ullamco dolore commodo dolore quis eu."
}
],
"created": 1487426347,
"modified": 1497704347
},
{
"id": "d2793f9c-c3c3-11e7-8d29-d15d28ee5381",
"canonicalQuestion": "Dolore reprehenderit excepteur deserunt eu sint id laboris consequat veniam consequat nisi labore aliqua exercitation aliqua nisi non aute exercitation quis ipsum duis laboris commodo consectetur incididunt magna proident irure consequat exercitation id incididunt eu enim pariatur consectetur veniam dolor eiusmod deserunt esse magna.",
"documentId": "d27b6283-c3c3-11e7-8d29-d15d28ee5381",
"page": 5,
"position": {
"boundingBoxes": [{
"x1": 432,
"y1": 145,
"x2": 587,
"y2": 354
}]
},
"answers": [
{
"id": "d2793f9d-c3c3-11e7-8d29-d15d28ee5381",
"answer": "Tempor incididunt ex ullamco sunt adipisicing nisi tempor minim est ad Lorem amet sit proident."
}
],
"paraphraseQuestions": [
{
"id": "d2793f9e-c3c3-11e7-8d29-d15d28ee5381",
"question": "Commodo voluptate id ad qui consectetur do fugiat amet cillum irure ut officia eiusmod in ea do commodo eiusmod in ullamco deserunt qui mollit sunt proident exercitation in reprehenderit quis est ad ipsum deserunt ut esse irure."
}
],
"created": 1487167147,
"modified": 1495025947
},
{
"id": "d27bd7be-c3c3-11e7-8d29-d15d28ee5381",
"canonicalQuestion": "Ut incididunt mollit eu occaecat deserunt ea elit reprehenderit laboris ut eiusmod minim dolore nisi velit commodo nisi nulla ad Lorem occaecat consequat adipisicing tempor consequat aliquip consectetur enim dolor voluptate non pariatur.",
"documentId": "d27b6283-c3c3-11e7-8d29-d15d28ee5381",
"page": 5,
"position": {
"boundingBoxes": [{
"x1": 432,
"y1": 145,
"x2": 587,
"y2": 354
}]
},
"answers": [
{
"id": "d27bd7bf-c3c3-11e7-8d29-d15d28ee5381",
"answer": "Nisi tempor in nisi fugiat culpa officia nostrud cupidatat Lorem reprehenderit commodo Lorem sit mollit cillum laboris ullamco elit nulla magna cillum ullamco veniam ullamco aliqua deserunt voluptate proident id cupidatat."
}
],
"paraphraseQuestions": [
{
"id": "d27bd7c0-c3c3-11e7-8d29-d15d28ee5381",
"question": "Ea occaecat aliquip duis excepteur duis qui non nisi labore qui aute ad magna ad occaecat velit esse non cillum laborum nisi ut aute pariatur id aute fugiat culpa sunt ad."
}
],
"created": 1485439147,
"modified": 1490450347
},
{
"id": "d2793f9f-c3c3-11e7-8d29-d15d28ee5381",
"canonicalQuestion": "Irure esse fugiat.",
"documentId": "d27b6283-c3c3-11e7-8d29-d15d28ee5381",
"page": 5,
"position": {
"boundingBoxes": [{
"x1": 432,
"y1": 145,
"x2": 587,
"y2": 354
}]
},
"answers": [
{
"id": "d27966a0-c3c3-11e7-8d29-d15d28ee5381",
"answer": "Sit mollit ad eiusmod cillum reprehenderit et amet cillum laboris sit ad non laborum minim Lorem aute cillum officia et sit exercitation eiusmod quis reprehenderit laborum quis et non est commodo eu amet anim proident laborum occaecat exercitation id anim irure irure aute incididunt."
}
],
"paraphraseQuestions": [
{
"id": "d27966a1-c3c3-11e7-8d29-d15d28ee5381",
"question": "Et labore quis irure cupidatat consectetur eu Lorem velit consectetur cillum veniam ea ad dolore magna in laborum et eu occaecat ipsum velit ipsum ea tempor tempor nisi quis aliqua proident deserunt ad ad et aliqua excepteur exercitation quis ut qui."
}
],
"created": 1485352747,
"modified": 1507985947
},
{
"id": "d27a02e5-c3c3-11e7-8d29-d15d28ee5381",
"canonicalQuestion": "Consectetur deserunt est culpa Lorem non nisi elit anim veniam incididunt enim.",
"documentId": "d27b6283-c3c3-11e7-8d29-d15d28ee5381",
"page": 5,
"position": {
"boundingBoxes": [{
"x1": 432,
"y1": 145,
"x2": 587,
"y2": 354
}]
},
"answers": [
{
"id": "d27a02e6-c3c3-11e7-8d29-d15d28ee5381",
"answer": "In occaecat nisi laborum occaecat enim incididunt non quis excepteur ullamco aliquip irure adipisicing consectetur incididunt labore laborum do laborum aliqua quis esse voluptate laboris in."
}
],
"paraphraseQuestions": [
{
"id": "d27a02e7-c3c3-11e7-8d29-d15d28ee5381",
"question": "Adipisicing enim do minim deserunt ullamco enim quis in ad incididunt anim elit magna incididunt velit excepteur deserunt incididunt pariatur laboris Lorem proident nostrud deserunt reprehenderit aliquip magna."
}
],
"created": 1485352747,
"modified": 1494507547
},
{
"id": "d2798db0-c3c3-11e7-8d29-d15d28ee5381",
"canonicalQuestion": "Culpa occaecat esse ullamco adipisicing anim voluptate ad ipsum ea dolor ullamco excepteur magna eiusmod occaecat irure id aute exercitation ullamco nisi in eiusmod elit deserunt non voluptate irure ex.",
"documentId": "d27b6283-c3c3-11e7-8d29-d15d28ee5381",
"page": 5,
"position": {
"boundingBoxes": [{
"x1": 432,
"y1": 145,
"x2": 587,
"y2": 354
}]
},
"answers": [
{
"id": "d2798db1-c3c3-11e7-8d29-d15d28ee5381",
"answer": "Voluptate ad ea sint ipsum id aliquip tempor mollit ut cillum nostrud dolor ea ullamco ipsum reprehenderit incididunt cillum pariatur officia fugiat ut anim aliqua est."
}
],
"paraphraseQuestions": [
{
"id": "d2798db2-c3c3-11e7-8d29-d15d28ee5381",
"question": "Magna ullamco exercitation anim veniam cupidatat ex occaecat laborum cillum."
}
],
"created": 1485093547,
"modified": 1486130347
},
{
"id": "d27b3b60-c3c3-11e7-8d29-d15d28ee5381",
"canonicalQuestion": "Duis ut reprehenderit commodo dolore dolor sunt deserunt exercitation consectetur incididunt irure id ipsum voluptate do in in voluptate ad esse laboris deserunt.",
"documentId": "d27b6283-c3c3-11e7-8d29-d15d28ee5381",
"page": 5,
"position": {
"boundingBoxes": [{
"x1": 432,
"y1": 145,
"x2": 587,
"y2": 354
}]
},
"answers": [
{
"id": "d27b3b61-c3c3-11e7-8d29-d15d28ee5381",
"answer": "Irure irure cupidatat magna duis incididunt id qui Lorem officia irure esse dolore cupidatat eiusmod proident anim ea qui ad non mollit nulla est laborum id amet ullamco laborum velit veniam culpa culpa sit reprehenderit culpa id ullamco est."
}
],
"paraphraseQuestions": [
{
"id": "d27b3b62-c3c3-11e7-8d29-d15d28ee5381",
"question": "Officia cillum qui culpa cillum sint exercitation sit ullamco sint quis qui amet incididunt Lorem ex dolore ad cupidatat incididunt ullamco amet irure laborum nostrud velit eiusmod."
}
],
"created": 1485007147,
"modified": 1490965147
},
{
"id": "d27bd7c1-c3c3-11e7-8d29-d15d28ee5381",
"canonicalQuestion": "Quis cillum culpa culpa in adipisicing aliquip eu culpa deserunt excepteur duis deserunt ullamco esse est non aliqua sint velit voluptate.",
"documentId": "d27b6283-c3c3-11e7-8d29-d15d28ee5381",
"page": 5,
"position": {
"boundingBoxes": [{
"x1": 432,
"y1": 145,
"x2": 587,
"y2": 354
}]
},
"answers": [
{
"id": "d27bd7c2-c3c3-11e7-8d29-d15d28ee5381",
"answer": "Anim amet velit cupidatat ipsum culpa ex."
}
],
"paraphraseQuestions": [
{
"id": "d27bd7c3-c3c3-11e7-8d29-d15d28ee5381",
"question": "Voluptate non labore non magna consequat occaecat in ad deserunt excepteur laboris velit laborum ipsum aute dolore occaecat cillum laboris eiusmod in cillum dolore qui aliquip irure nulla consequat consectetur et laborum consectetur magna."
}
],
"created": 1484920747,
"modified": 1487426347
},
{
"id": "d279dbd0-c3c3-11e7-8d29-d15d28ee5381",
"canonicalQuestion": "Officia dolor enim exercitation culpa excepteur magna eu est ullamco culpa ut consectetur excepteur do eu elit laboris qui quis consequat ad elit proident ea aliqua id ea reprehenderit irure sit enim officia.",
"documentId": "d27b6283-c3c3-11e7-8d29-d15d28ee5381",
"page": 5,
"position": {
"boundingBoxes": [{
"x1": 432,
"y1": 145,
"x2": 587,
"y2": 354
}]
},
"answers": [
{
"id": "d279dbd1-c3c3-11e7-8d29-d15d28ee5381",
"answer": "Cillum velit pariatur cillum est sit aliquip aliquip nisi eu incididunt minim fugiat in minim nisi laborum anim consequat ullamco Lorem."
}
],
"paraphraseQuestions": [
{
"id": "d279dbd2-c3c3-11e7-8d29-d15d28ee5381",
"question": "Ullamco et proident laboris nulla aliquip amet est elit excepteur aute officia irure ex excepteur ullamco quis ea sint est esse consectetur do sit aliquip qui."
}
],
"created": 1484747947,
"modified": 1485525547
},
{
"id": "d27bfebd-c3c3-11e7-8d29-d15d28ee5381",
"canonicalQuestion": "Laboris ea est consectetur non cupidatat minim minim sint amet veniam voluptate velit occaecat enim aute esse magna adipisicing veniam irure laboris sunt esse reprehenderit fugiat tempor laborum ea aute sit.",
"documentId": "d27b6283-c3c3-11e7-8d29-d15d28ee5381",
"page": 5,
"position": {
"boundingBoxes": [{
"x1": 432,
"y1": 145,
"x2": 587,
"y2": 354
}]
},
"answers": [
{
"id": "d27bfebe-c3c3-11e7-8d29-d15d28ee5381",
"answer": "Aliquip proident."
}
],
"paraphraseQuestions": [
{
"id": "d27bfebf-c3c3-11e7-8d29-d15d28ee5381",
"question": "Labore culpa magna cupidatat voluptate ut mollit proident aute qui est consectetur aliqua incididunt mollit ullamco aliqua ut qui aute esse voluptate do magna id laborum cillum magna culpa officia enim dolor Lorem id ipsum quis veniam elit dolore consectetur id eiusmod."
}
],
"created": 1484575147,
"modified": 1481032747
},
{
"id": "d278ca60-c3c3-11e7-8d29-d15d28ee5381",
"canonicalQuestion": "Exercitation exercitation proident mollit pariatur commodo eu ullamco irure proident magna minim incididunt et qui aliqua cillum id excepteur nisi Lorem et.",
"documentId": "d27b6283-c3c3-11e7-8d29-d15d28ee5381",
"page": 5,
"position": {
"boundingBoxes": [{
"x1": 432,
"y1": 145,
"x2": 587,
"y2": 354
}]
},
"answers": [
{
"id": "d278ca61-c3c3-11e7-8d29-d15d28ee5381",
"answer": | |
from wikidata.client import Client
from re import compile
from nltk.corpus import wordnet as wn
import pycountry
class DavarWord:
"""Informal abstract class for Words, like Rel and Node.
"""
def __init__(self, id: str):
"""Initializes a Word from an identifier. This should only be used on
super().__init__.
Parameters
----------
id : str
Word identifier
"""
self._validate_id(id)
self.id = id
def _validate_id(self, id: str):
"""Informal abstract method for validating id on init. This method should be
implemented by any subclass that will be initialized. When implemented, it
        should do nothing if the id is valid and throw a ValueError if the id is
        invalid.
Parameters
----------
id : str
Word identifier
Raises
------
NotImplementedError
This will always be raised if this method is called, as it is an abstract
method.
"""
raise NotImplementedError
def __repr__(self) -> str:
"""Returns representation of self"""
return f'{type(self).__name__}("{self.id}")'
def __str__(self) -> str:
"""Returns pretty printed representation of self"""
return str(self.id)
def __eq__(self, other) -> bool:
"""Returns True if equal, false if not."""
return self.id == other.id
def describe(self, lang: str, lvl: int = 0) -> str:
"""Informal abstract method. Should be implemented by any subclasses. When
implemented, returns a human-readable description of the Word in a given
        language depending on the level of hierarchy.
Parameters
----------
lang : str
BCP 47 language tag
lvl : int, optional
            Hierarchy level of description in output text, by default 0
Returns
-------
str
Description of self
Raises
------
NotImplementedError
            Will always be raised if this method is called, as it is an abstract method.
"""
raise NotImplementedError
class Node(DavarWord):
"""Informal abstract class for Nodes in davar, which represent nouns, ideas, or
entities. Is empty because all code is shared with Rel, and exists so that
subclasses of Node and of Rel can be distinguished easily.
"""
def __init__(self, id):
super().__init__(id)
class Rel(DavarWord):
"""Informal abstract class for Rel in davar, which represent properties, verbs, or any
other type of relationship between Nodes. Is empty because all code is shared with
Node, and exists so that subclasses of Node and of Rel can be distinguished easily.
"""
def __init__(self, id):
super().__init__(id)
class WikidataItem(Node):
"""Class for Wikidata Items, a type of Node, which is itself a type of DavarWord.
"""
_compiled_id_regex = compile(r"Q\d+")
def __init__(self, id: str):
"""Constructs and initializes a WikidataItem from a valid Wikidata Property
identifier.
Parameters
----------
id : str
A valid Wikidata Item identifier in the form Q# where # is a natural number.
"""
super().__init__(id)
self.data = Client().get(id)
@classmethod
def _validate_id(cls, id: str):
"""Called at `__init__` to validate identifiers, which should be in the form
`Q#` where `#` is a natural number. Does nothing if the identifier is valid, but
throws a ValueError if it is invalid.
Parameters
----------
id : str
Hopefully, a valid Wikidata Item identifier.
Raises
------
ValueError
Thrown if `id` is not, in fact, a valid Wikidata Item identifier.
"""
if cls._compiled_id_regex.fullmatch(id) == None:
raise ValueError(f"{id} is not a valid Wikidata Item ID")
def describe(self, lang: str, lvl: int = 0) -> str:
"""Returns the Wikidata Item label in a given language. Ignores hierarchy, as
Wikidata Items are displayed the same regardless of hierarchy.
Parameters
----------
lang : str
BCP 47 language tag
lvl : int, optional
            Hierarchy level of description in output text, by default 0
Returns
-------
str
Wikidata Item label in given language
"""
return self.data.label[lang]
class WikidataProperty(Rel):
"""Class for Wikidata Properties, a type of Rel, which is itself a type of DavarWord.
"""
_compiled_id_regex = compile(r"P\d+")
def __init__(self, id: str):
"""Constructs and initializes a WikidataProperty from a valid Wikidata
Property identifier.
Parameters
----------
id : str
A valid Wikidata Property identifier in the form `P#` where `#` is a
natural number.
"""
super().__init__(id)
self.data = Client().get(id)
@classmethod
def _validate_id(cls, id):
"""Called at `__init__` to validate identifiers, which should be in the form
`P#` where `#` is a natural number. Does nothing if the identifier is valid, but
throws a ValueError if it is invalid.
Parameters
----------
id : str
Hopefully, a valid Wikidata Property identifier.
Raises
------
ValueError
Thrown if `id` is not, in fact, a valid Wikidata Property identifier.
"""
if cls._compiled_id_regex.fullmatch(id) == None:
raise ValueError(f"{id} is not a valid Wikidata Property ID")
def describe(self, lang: str, lvl: int = 0) -> str:
"""Returns the Wikidata Property label in a given language. Ignores hierarchy,
as Wikidata Properties are displayed the same regardless of hierarchy.
Parameters
----------
lang : str
BCP 47 language tag
lvl : int, optional
            Hierarchy level of description in output text, by default 0
Returns
-------
str
Wikidata Property label in given language
"""
return self.data.label[lang]
class OMWSynset(Node, Rel):
"""A Synonym Set in the Open Multilingual Wordnet, representing a set of synonyms
that represent the same idea across languages.
"""
_compiled_id_regex = compile(r"\d{8}-[v|r|n|a]")
def __init__(self, id: str):
"""Constructs a OMWSynset from an "identifier," more formally Part Of Speech and
offset, in the form `OFFSET-POS` where `OFFSET` is an eight digit code and POS
is a letter representing part of speech, either 'v,' 'r,' 'n,' or 'a.'
Parameters
----------
id : str
Synset offset and POS.
"""
super().__init__(id)
@classmethod
def _validate_id(cls, id):
if cls._compiled_id_regex.fullmatch(id) == None:
raise ValueError(
f"{id} is not a valid Open Multilingual WordNet Synset offset and POS"
)
@staticmethod
def _bcp_47_to_iso_639_2(lang_tag: str) -> str:
"""Utility function for getting a ISO 639-2 three letter language code from a
BCP 47 language tag.
Parameters
----------
lang_tag : str
BCP 47 language tag/code.
Returns
-------
str
Roughly equivalent ISO 639-2 three letter language code.
"""
if "-" in lang_tag:
lang_tag = lang_tag[: lang_tag.find("-")]
if len(lang_tag) == 3:
# three letter lang tags are already in alpha_3 format
return lang_tag
else:
return pycountry.languages.get(alpha_2=lang_tag).alpha_3
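    # Illustrative behaviour of the helper above (values follow pycountry's
    # ISO 639 tables): "en" and "en-US" both map to "eng", while a three-letter
    # tag such as "fra" is returned unchanged.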
def describe(self, lang: str, lvl: int = 0) -> str:
"""Returns the first listed lemma name for Synset in a given language. Ignores
hierarchy as Synsets do not change display depending on hierarchy.
Parameters
----------
lang : str
BCP 47 language tag
lvl : int, optional
            Hierarchy level of description in output text, by default 0
Returns
-------
str
First lemma name for Synset in a given language.
"""
return wn.synset_from_pos_and_offset(
self.id[-1], int(self.id[:-2])
).lemma_names(self._bcp_47_to_iso_639_2(lang))[0]
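def _example_omw_synset_describe():
    # Illustrative sketch (not part of the original module). The offset/POS pair
    # below is a hypothetical placeholder; substitute a real Open Multilingual
    # Wordnet offset. Requires the NLTK wordnet/OMW corpora to be installed.
    word = OMWSynset("01234567-n")
    return word.describe("en")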
class Statement:
"""Most basic form of Statement (often refered to as a Singleton Statement)
involving only a subject.
"""
def __init__(self, sub):
"""Constructs a Singleton Statement from a subject.
Parameters
----------
sub : Node or Statement
The subject of this statement
"""
self.sub = sub
def __eq__(self, other) -> bool:
return self.__dict__ == other.__dict__ # FIXME: messy, bad
def __repr__(self):
return f"Statement({repr(self.sub)})"
def __str__(self):
return f"({self.sub})"
def describe(self, lang: str, lvl: int = 0) -> str:
"""Describes self in human readable format in a given language by calling
`.describe()` on children and structuring results in a human readable format.
Will return slightly different formatting to minimize confusion if lvl is
greater than 0.
Parameters
----------
lang : str
BCP 47 language tag
lvl : int, optional
The level of hierarchy in the text description, by default 0
Returns
-------
str
Description of self in given language.
"""
sub_label = self.sub.describe(lang, lvl + 1)
if lvl == 0: # give fancy formatting if it is top level
return f"{sub_label}."
else: # give utilitarian formatting if it is not
return f"[{sub_label}]"
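def _example_statement_describe():
    # Illustrative sketch (not part of the original module): wrap a Wikidata item
    # in a singleton Statement and describe it in English. Q42 is the Wikidata
    # item for Douglas Adams; constructing a WikidataItem performs a network
    # lookup against Wikidata.
    return Statement(WikidataItem("Q42")).describe("en")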
class Edge(Statement):
"""Statement defining an unlabeled relationship between a subject and an object.
"""
def __init__(self, sub, ob):
"""Constructs an edge from the subject to the object.
Parameters
----------
sub : Node or Statement
Subject / origin of Edge
ob : Node or Statement
Object / endpoint of Edge
"""
self.ob = ob
super().__init__(sub)
def __repr__(self):
return f"Edge({repr(self.sub)}, {repr(self.ob)})"
def __str__(self):
return f"({self.sub} {self.ob})"
def describe(self, lang: str, lvl: int = 0) -> str:
"""Describes self in human readable format in a given language by calling
`.describe()` on children and structuring results in a human readable format.
Will | |
<gh_stars>1-10
# encoding=UTF-8
# Copyright © 2007-2017 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''Nodes of Javalette syntax trees.'''
_type = type
import type
import bp
import x86
__all__ = ['argv', 'base', 'block', 'block_statement', 'declaration', 'error', 'evaluation', 'function', 'if_then_else', 'program', 'return_statement', 'statement', 'variable', 'while_loop']
class base(object):
'''An abstract node.'''
@property
def x(self):
'''Column number of appearance.'''
return self.position[1]
@property
def y(self):
'''Line number of appearance.'''
return self.position[0]
_doc = {
'validate': 'Look for type mismatches.\nCheck for proper variable usage.',
'to_py': '[py] Generate code.',
'to_x86_asm': '[x86] Generate code.',
'bind_to_function': 'Bind the statement to the function in which it appears.',
'get_blocks': 'Return a sequence of sub-blocks.',
'get_var_refs': 'Return a sequence of referenced variables.',
'check_var_usage': "Check for proper variable usage.\n'lsv' - set of declared variables\n'rsv' - set of used variables.\nBoth sets are updated."
}
class block(base):
'''An abstract block.'''
def __init__(self, contents):
'''Initialize the block with 'contents' - a list or a single item.'''
if not isinstance(contents, list):
contents = [contents]
self.contents = contents
@staticmethod
def indent(s):
return ' ' + str(s).replace('\n', '\n ')
def validate(self):
ok = True
for line in self.contents:
ok &= line.validate()
return ok
def __str__(self):
if len(self.contents) == 0:
return block.indent('skip')
else:
return '\n'.join(block.indent(item) for item in self.contents)
class statement(base):
'''A statement.'''
def get_blocks(self):
return ()
def get_var_refs(self):
return ()
def check_var_usage(self, lsv, rsv):
raise NotImplementedError()
def validate(self):
raise NotImplementedError()
def bind_to_function(self, function):
pass
def returns(self):
'''Check if the function returns (in this statement).'''
return False
class block_statement(block, statement):
'''A block statement.'''
def returns(self):
'''Check if the function returns (in this block).'''
for line in self.contents:
if line.returns():
return True
return False
def check_var_usage(self, lsv, rsv):
ok = True
for line in self.contents:
ok &= line.check_var_usage(lsv, rsv)
return ok
def to_py(self):
result = []
for line in self.contents:
result += line.to_py()
return result
def to_x86_asm(self, env):
env2 = env.clone()
result = []
for line in self.contents:
result += line.to_x86_asm(env2)
result += x86.AddESP(env2.vsp - env.vsp),
return result
class program(block):
'''A program.'''
def __str__(self):
return '\n\n'.join(str(item) for item in self.contents)
def to_py(self):
from builtins import py_stub_pre, py_stub_post
listing = []
listing += py_stub_pre
for item in self.contents:
listing += item.to_py(self.filename)
listing += py_stub_post
return listing
def to_pyc(self):
'''[py] Generate bytecode for the program.'''
from builtins import this_module_file_name as builtins_module_file_name
listing = self.to_py()
return bp.Code(
code=listing,
freevars=[],
args=[],
varargs=False,
varkwargs=False,
newlocals=False,
name='__stub__',
filename=builtins_module_file_name,
firstlineno=0,
docstring=None)
def compile_pyc(self, output_file):
'''[py] Compile the program into a Python bytecode file.'''
import imp
import marshal
output_file.write(imp.get_magic())
output_file.write('\x00\x00\x00\x00')
pyc = self.to_pyc()
pyo = pyc.to_code()
marshal.dump(pyo, output_file)
def to_x86_asm(self):
from builtins import x86_stub
listing = list(x86_stub)
for item in self.contents:
listing += item.to_x86_asm()
return listing
def compile_x86(self, output_file):
'''[x86] Compile the program into an ELF executable.'''
x86_asm = self.to_x86_asm()
x86.build(x86_asm, output_file)
# Just to change the docstring
def validate(self):
return block.validate(self)
validate.__doc__ = base._doc['validate'] + '\nCheck if every function returns.'
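def _example_compile_pyc(prog, path='out.pyc'):
    '''Illustrative sketch (not part of the original module): validate a parsed
    program node and write Python bytecode to 'path'. The parser producing
    'prog' lives outside this module, and 'prog.filename' is assumed to be set
    before code generation.'''
    if prog.validate():
        with open(path, 'wb') as output_file:
            prog.compile_pyc(output_file)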
class error(base):
'''An error indicator.'''
def __init__(self):
pass
def __str__(self):
return '!!!'
class variable(base):
'''A variable declaration (possibly, with initialization).'''
def __init__(self, type, name, value, position):
object.__init__(self)
self.name = name
self.type = type
self.value = value
self.position = position
self.uid = '&%x' % id(self)
def get_var_refs(self):
from expression import expression
if isinstance(self.value, expression):
return self.value.get_var_refs()
else:
return ()
def check_var_usage(self, lsv, rsv):
ok = True
if self.value is not None:
ok &= self.value.check_var_usage(lsv, rsv)
lsv.add(self)
return ok
def validate(self):
if self.value is None:
return True
ok = self.value.validate()
if self.value.type is None or self.value.type == self.type:
return ok
else:
TypeMismatch(self.position,
'Incompatible types in initialization: <%s> provided but <%s> expected' %
(self.value.type, self.type)).warn()
return False
def py_write(self, value=None, pop=True):
'''[py] Generate code for storing the value to the variable.'''
if value is None:
value = self.value
if value is None:
return []
else:
result = []
result += value.to_py()
if not pop:
result += (bp.DUP_TOP, None),
result += (bp.STORE_FAST, self.uid),
return result
def py_read(self):
'''[py] Generate code for reading the variables.'''
return [(bp.LOAD_FAST, self.uid)]
def x86_asm_write(self, expression, env):
'''[x86] Generate code for storing value of the expression to the variable.'''
if expression is None:
return []
else:
return self.type.x86_asm_write(self, expression, env)
def x86_asm_read(self, env):
'''[x86] Generate code for loading value of the variable.'''
return self.type.x86_asm_read(self, env)
def __str__(self):
return 'var $%s : %s = %s' % (self.name, self.type, self.value)
class function(variable):
'''A function declaration.'''
def __init__(self, name, return_type, arguments, code, position):
self.type = type.function_type(return_type, [arg.type for arg in arguments])
code = block_statement([argv(arguments), code])
variable.__init__(self, self.type, name, code, position)
def validate(self):
ok = self.value.validate()
if not self.value.returns():
MissingReturn(self.position, "Missing return statement for function '%s'" % self.name).warn()
return False
lsv = set()
rsv = set()
self.value.check_var_usage(lsv, rsv)
return ok
validate.__doc__ = base._doc['validate'] + '\nCheck if the function returns.'
def to_py(self, filename):
body_code = self.body_to_pyc(filename)
return [
(bp.LOAD_CONST, body_code),
(bp.MAKE_FUNCTION, 0),
(bp.STORE_GLOBAL, self.name)
]
def py_read(self):
'''[py] Generate code for reading the function address.'''
return [(bp.LOAD_GLOBAL, self.name)]
def body_to_pyc(self, filename):
'''[py] Generate bytecode for function body.'''
code = bp.Code(
code=self.value.to_py(),
freevars=[],
args=['_%d' % n for n in xrange(len(self.type.arg_type_list))],
varargs=False,
varkwargs=False,
newlocals=True,
name=self.name,
filename=filename,
firstlineno=self.y,
docstring=None)
return code
@property
def x86_name(self):
'''[x86] Mangled function name.'''
return '_f_%s' % self.name
def to_x86_asm(self):
result = [
x86.SyncESP(),
'%s:' % self.x86_name,
]
result += self.value.to_x86_asm(x86.Env())
result += x86.SyncESP(),
return result
def __str__(self):
return 'function %s : %s =\n%s' % (self.name, self.type, self.value)
class evaluation(statement):
'''An evaluation statement.'''
def __init__(self, expression):
statement.__init__(self)
self.expression = expression
self.position = expression.position
def validate(self):
ok = self.expression.validate()
xtype = self.expression.type
if xtype is None or self.expression.is_evaluatable():
return ok
TypeMismatch(self.position,
'Incompatible types in evaluation: <%s> provided but <void> expected' % xtype).warn()
return False
def get_var_refs(self):
return self.expression.get_var_refs()
def check_var_usage(self, lsv, rsv):
return self.expression.check_var_usage(lsv, rsv)
def to_py(self):
result = self.expression.to_py() + [(bp.POP_TOP, None)]
return result
def to_x86_asm(self, env):
return \
self.expression.to_x86_asm(env) + \
self.expression.x86_asm_discard(env)
def __str__(self):
return str(self.expression)
class declaration(statement):
'''A group of variable declarations.'''
def __init__(self, variables, position):
statement.__init__(self)
self.variables = variables
self.position = position
def validate(self):
ok = True
for variable in self.variables:
ok &= variable.validate()
return ok
def get_var_refs(self):
result = []
for variable in self.variables:
result += variable.get_var_refs()
return result
def check_var_usage(self, lsv, rsv):
ok = True
for variable in self.variables:
ok &= variable.check_var_usage(lsv, rsv)
return ok
def to_py(self):
result = [(bp.SetLineno, self.y)]
for var in self.variables:
result += var.py_write()
return result
def to_x86_asm(self, env):
salloc = 0
for var in self.variables:
size = var.type.x86_size()
env.vsp += size
salloc += size
var.uid = '##(-%d)' % env.vsp
result = [x86.SubESP(salloc)]
for var in self.variables:
result += var.x86_asm_write(var.value, env)
return result
def __str__(self):
return 'declare: ' + ', '.join(str(var) for var in self.variables)
class argv(declaration):
'''An artificial declaration of function arguments.'''
def __init__(self, variables):
declaration.__init__(self, variables, None)
def validate(self):
return True
def get_var_refs(self):
return ()
def check_var_usage(self, lsv, rsv):
lsv |= set(self.variables)
return True
def to_py(self):
for no, var in enumerate(self.variables):
var.uid = '_%d' % no
return []
def to_x86_asm(self, env):
for i, var in enumerate(self.variables):
var.uid = '##(%d)' % (4 * (i + 1))
return []
def __str__(self):
return 'argv: ' + ', '.join(str(var) for var in self.variables)
class if_then_else(statement):
'''A condition | |
changed"""
# This expression matches our 'ix_dataset_type_changed' index, so we can scan it quickly.
dataset_changed = func.greatest(
dataset.c.added,
# The 'updated' column doesn't exist on ODC's definition as it's optional.
column("updated"),
dataset.c.archived,
)
return dataset_changed
def _default_crs(dt: DatasetType) -> Optional[str]:
storage = dt.definition.get("storage")
if not storage:
return None
return storage.get("crs")
def _dataset_creation_expression(md: MetadataType) -> ClauseElement:
"""SQLAlchemy expression for the creation (processing) time of a dataset"""
# Either there's a field called "created", or we fall back to the default "creation_dt" offset in the metadata type.
created_field = md.dataset_fields.get("created")
if created_field is not None:
assert isinstance(created_field, PgDocField)
creation_expression = created_field.alchemy_expression
else:
doc = md.dataset_fields["metadata_doc"].alchemy_expression
creation_dt = md.definition["dataset"].get("creation_dt") or ["creation_dt"]
creation_expression = func.agdc.common_timestamp(doc[creation_dt].astext)
# If they're missing a dataset-creation time, fall back to the time it was indexed.
return func.coalesce(creation_expression, DATASET.c.added)
def get_dataset_bounds_query(md_type):
if "lat" not in md_type.dataset_fields:
# Not a spatial product
return None
lat, lon = md_type.dataset_fields["lat"], md_type.dataset_fields["lon"]
assert isinstance(lat, RangeDocField)
assert isinstance(lon, RangeDocField)
return func.ST_MakeBox2D(
func.ST_MakePoint(lat.lower.alchemy_expression, lon.lower.alchemy_expression),
func.ST_MakePoint(
lat.greater.alchemy_expression, lon.greater.alchemy_expression
),
type_=Geometry,
)
def as_sql(expression, **params):
"""Convert sqlalchemy expression to SQL string.
(primarily for debugging: to see what sqlalchemy is doing)
This has its literal values bound, so it's more readable than the engine's
query logging.
"""
if params:
expression = expression.params(**params)
return str(
expression.compile(
dialect=postgres.dialect(), compile_kwargs={"literal_binds": True}
)
)
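# Hypothetical debugging usage (names taken from this module): print the bound SQL for
# the bounds query of a metadata type.
#   print(as_sql(get_dataset_bounds_query(md_type)))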
def _as_json(obj):
def fallback(o, *args, **kwargs):
if isinstance(o, uuid.UUID):
return str(o)
if isinstance(o, WKBElement):
# Following the EWKT format: include srid
prefix = f"SRID={o.srid};" if o.srid else ""
return str(prefix + to_shape(o).wkt)
if isinstance(o, datetime):
return o.isoformat()
if isinstance(o, PgRange):
return ["∞" if o.lower_inf else o.lower, "∞" if o.upper_inf else o.upper]
return repr(o)
return json.dumps(obj, indent=4, default=fallback)
# This is tied to ODC's internal Dataset search implementation as there's no higher-level api to allow this.
# When region_code is integrated into core (as is being discussed) this can be replaced.
# pylint: disable=protected-access
def datasets_by_region(
engine: Engine,
index: Index,
product_name: str,
region_code: str,
time_range: Range,
limit: int,
offset: int = 0,
) -> Generator[Dataset, None, None]:
product = index.products.get_by_name(product_name)
query = (
select(postgres_api._DATASET_SELECT_FIELDS)
.select_from(
DATASET_SPATIAL.join(DATASET, DATASET_SPATIAL.c.id == DATASET.c.id)
)
.where(DATASET_SPATIAL.c.region_code == bindparam("region_code", region_code))
.where(
DATASET_SPATIAL.c.dataset_type_ref
== bindparam("dataset_type_ref", product.id)
)
)
if time_range:
query = query.where(
DATASET_SPATIAL.c.center_time > bindparam("from_time", time_range.begin)
).where(DATASET_SPATIAL.c.center_time < bindparam("to_time", time_range.end))
query = (
query.order_by(DATASET_SPATIAL.c.center_time)
.limit(bindparam("limit", limit))
.offset(bindparam("offset", offset))
)
return (
index.datasets._make(res, full_info=True)
for res in engine.execute(query).fetchall()
)
@dataclass
class RegionSummary:
product_name: str
region_code: str
count: int
generation_time: datetime
footprint_wgs84: Geometry
@property
def footprint_geojson(self):
extent = self.footprint_wgs84
if not extent:
return None
return {
"type": "Feature",
"geometry": extent.__geo_interface__,
"properties": {"region_code": self.region_code, "count": self.count},
}
@dataclass
class ProductArrival:
"""What arrived for a given product on a particular day?"""
product_name: str
day: date
# Count of datasets added on the given day.
dataset_count: int
# A few dataset ids among the arrivals
sample_dataset_ids: List[uuid.UUID]
class RegionInfo:
def __init__(
self, product: DatasetType, known_regions: Optional[Dict[str, RegionSummary]]
) -> None:
self.product = product
self._known_regions = known_regions
# Treated as an "id" in view code. What kind of region?
name: str = "region"
# A human-readable description displayed on a UI.
description: str = "Regions"
# Used when printing counts "1 region", "5 regions".
unit_label: str = "region"
units_label: str = "regions"
@classmethod
def for_product(
cls, dataset_type: DatasetType, known_regions: Optional[Dict[str, RegionSummary]] = None
):
region_code_field: Field = dataset_type.metadata_type.dataset_fields.get(
"region_code"
)
grid_spec = dataset_type.grid_spec
# Ingested grids trump the "region_code" field because they've probably sliced it up smaller.
#
# hltc has a grid spec, but most attributes are missing, so grid_spec functions fail.
# Therefore: only assume there's a grid if tile_size is specified.
if grid_spec is not None and grid_spec.tile_size:
return GridRegionInfo(dataset_type, known_regions)
elif region_code_field is not None:
# Generic region info
return RegionInfo(dataset_type, known_regions)
elif "sat_path" in dataset_type.metadata_type.dataset_fields:
return SceneRegionInfo(dataset_type, known_regions)
return None
def region(self, region_code: str) -> Optional[RegionSummary]:
return self._known_regions.get(region_code)
def dataset_region_code(self, dataset: Dataset) -> Optional[str]:
"""
Get the region code for a dataset.
This should always give the same result as the alchemy_expression() function,
but is computed in pure python.
Classes that override alchemy_expression should override this to match.
"""
return dataset.metadata.region_code
def alchemy_expression(self):
"""
Get an alchemy expression that computes dataset's region code
Classes that override this should also override dataset_region_code to match.
"""
dt = self.product
region_code_field: Field = dt.metadata_type.dataset_fields.get("region_code")
# `alchemy_expression` is part of the postgres driver (PgDocField),
# not the base Field class.
if not hasattr(region_code_field, "alchemy_expression"):
raise NotImplementedError(
"ODC index driver doesn't support alchemy expressions"
)
return region_code_field.alchemy_expression
def region_label(self, region_code: str) -> str:
"""
Convert the region_code into something human-readable.
"""
# Default plain, un-prettified.
return region_code
class GridRegionInfo(RegionInfo):
"""Ingested datacube products have tiles"""
name = "tiled"
description = "Tiled product"
unit_label = "tile"
units_label = "tiles"
def region_label(self, region_code: str) -> str:
return "Tile {:+d}, {:+d}".format(*_from_xy_region_code(region_code))
def alchemy_expression(self):
"""
Get an sqlalchemy expression to calculate the region code (a string)
This is usually the 'region_code' field, if one exists, but there are
fallbacks for other native Satellites/Platforms.
Eg.
On Landsat scenes this is the path/row (separated by underscore)
On tiles this is the tile numbers (separated by underscore: possibly with negative)
On Sentinel this is MGRS number
"""
dt = self.product
grid_spec = dt.grid_spec
doc = _jsonb_doc_expression(dt.metadata_type)
projection_offset = _projection_doc_offset(dt.metadata_type)
# Calculate tile refs
geo_ref_points_offset = projection_offset + ["geo_ref_points"]
center_point = func.ST_Centroid(
func.ST_Collect(
_gis_point(doc, geo_ref_points_offset + ["ll"]),
_gis_point(doc, geo_ref_points_offset + ["ur"]),
)
)
# todo: look at grid_spec crs. Use it for defaults, conversion.
size_x, size_y = grid_spec.tile_size or (1000.0, 1000.0)
origin_x, origin_y = grid_spec.origin
return func.concat(
func.floor((func.ST_X(center_point) - origin_x) / size_x).cast(String),
"_",
func.floor((func.ST_Y(center_point) - origin_y) / size_y).cast(String),
)
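# Hypothetical worked example of the expression above: with grid origin (0, 0) and a
# tile size of (100000, 100000), a dataset whose collected centroid sits at x=250000,
# y=-3100000 gets region code floor(250000/100000) + "_" + floor(-3100000/100000) == "2_-31".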
def dataset_region_code(self, dataset: Dataset) -> Optional[str]:
tiles = [
tile
for tile, _ in dataset.type.grid_spec.tiles(
dataset.extent.centroid.boundingbox
)
]
if len(tiles) != 1:
raise ValueError(
"Tiled dataset should only have one tile? "
f"Got {tiles!r} for {dataset!r}"
)
x, y = tiles[0]
return f"{x}_{y}"
def _from_xy_region_code(region_code: str):
"""
>>> _from_xy_region_code('95_3')
(95, 3)
>>> _from_xy_region_code('95_-3')
(95, -3)
"""
x, y = region_code.split("_")
return int(x), int(y)
class SceneRegionInfo(RegionInfo):
"""Landsat WRS2"""
name = "scenes"
description = "Landsat WRS2 scene-based product"
unit_label = "scene"
units_label = "scenes"
def region_label(self, region_code: str) -> str:
if "_" in region_code:
x, y = _from_xy_region_code(region_code)
return f"Path {x}, Row {y}"
else:
return f"Path {region_code}"
def alchemy_expression(self):
dt = self.product
# Generate region code for older sat_path/sat_row pairs.
md_fields = dt.metadata_type.dataset_fields
path_field: RangeDocField = md_fields["sat_path"]
row_field: RangeDocField = md_fields["sat_row"]
return case(
[
# Is this just one scene? Include it specifically
(
row_field.lower.alchemy_expression
== row_field.greater.alchemy_expression,
func.concat(
path_field.lower.alchemy_expression.cast(String),
"_",
row_field.greater.alchemy_expression.cast(String),
),
),
],
# Otherwise it's a range of rows, so our region-code is the whole path.
else_=path_field.lower.alchemy_expression.cast(String),
)
def dataset_region_code(self, dataset: Dataset) -> Optional[str]:
path_range = dataset.metadata.fields["sat_path"]
row_range = dataset.metadata.fields["sat_row"]
if row_range is None and path_range is None:
return None
# If it's just one scene? Include it specifically
if row_range[0] == row_range[1]:
return f"{path_range[0]}_{row_range[1]}"
# Otherwise it's a range of rows, so we say the whole path.
else:
return f"{path_range[0]}"
def _region_code_field(dt: DatasetType):
"""
Get an sqlalchemy expression to calculate the region code (a string)
"""
region_info = RegionInfo.for_product(
dt,
# Passing None here is bad OO design; the class should probably be split in two for the different use-cases.
None,
)
if region_info is not None:
return region_info.alchemy_expression()
else:
_LOG.debug(
"no_region_code",
product_name=dt.name,
metadata_type_name=dt.metadata_type.name,
)
return null()
def get_sample_dataset(*product_names: str, index: Index = None) -> Iterable[Dict]:
with Datacube(index=index) as dc:
index = dc.index
for product_name in product_names:
product = index.products.get_by_name(product_name)
res = (
alchemy_engine(index)
.execute(
select(_select_dataset_extent_columns(product))
.where(
DATASET.c.dataset_type_ref
== bindparam("product_ref", product.id, type_=SmallInteger)
)
.where(DATASET.c.archived == None)
.limit(1)
)
.fetchone()
)
if res:
yield dict(res)
@functools.lru_cache()
def _get_path_row_shapes():
path_row_shapes = {}
for shape_file in _WRS_PATH_ROW:
with fiona.open(str(shape_file)) as f:
for _k, item in f.items():
prop = item["properties"]
key = prop["PATH"], prop["ROW"]
assert key not in path_row_shapes
path_row_shapes[key] = shape(item["geometry"])
return path_row_shapes
def get_mapped_crses(*product_names: str, index: Index = None) -> Iterable[Dict]:
with Datacube(index=index) as dc:
index = dc.index
for product_name in product_names:
product = index.products.get_by_name(product_name)
# SQLAlchemy queries require "column == None", not "column is None", due to operator overloading.
session, resource)
return self.to_client_json(order)
def _do_update_with_patch(self, session, record, resource):
self._validate_patch_update(record, resource)
if resource["status"].lower() == "cancelled":
record = self._cancel_record(record, resource)
if resource["status"].lower() == "restored":
record = self._restore_record(record, resource)
logging.info(f"{resource['status']} physical measurement {record.physicalMeasurementsId}.")
super(PhysicalMeasurementsDao, self)._do_update(session, record, record)
self._update_participant_summary(session, record)
return record
@staticmethod
def make_measurement_id(physical_measurements_id, measurement_count):
# To generate unique IDs for measurements that are randomly distributed for different
# participants (without having to randomly insert and check for the existence of IDs for each
# measurement row), we multiply the parent physical measurements ID (nine digits) by 1000 and
# add the measurement count within physical_measurements. This must not reach 1000 to avoid
# collisions; log an error if we start getting anywhere close. (We don't expect to.)
assert measurement_count < 1000
if measurement_count >= 900:
logging.error("measurement_count >= 900; nearing limit of 1000.")
return (physical_measurements_id * 1000) + measurement_count
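# Worked example of the scheme above (hypothetical values): physical_measurements_id=123456789
# and measurement_count=7 give 123456789 * 1000 + 7 == 123456789007.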
@staticmethod
def get_preferred_coding(codeable_concept):
"""Extract the code with the PMI system, if there is one."""
pm_coding = None
for coding in codeable_concept.coding:
if pm_coding is None:
pm_coding = coding
elif coding.system.startswith(_PM_SYSTEM_PREFIX):
if pm_coding.system.startswith(_PM_SYSTEM_PREFIX):
raise BadRequest(f"Multiple measurement codings with a system starting with {_PM_SYSTEM_PREFIX}")
pm_coding = coding
return pm_coding
@staticmethod
def from_component(observation, component):
if not component.code or not component.code.coding:
logging.warning(f"Skipping component without coding: {component.as_json()}")
return None
value_string = None
value_decimal = None
value_unit = None
value_code_system = None
value_code_value = None
value_date_time = None
if component.valueQuantity:
value_decimal = component.valueQuantity.value
value_unit = component.valueQuantity.code
if component.valueDateTime:
value_date_time = component.valueDateTime.date
if component.valueString:
value_string = component.valueString
if len(value_string) > _BYTE_LIMIT:
raise BadRequest("Component notes field exceeds limit.")
if component.valueCodeableConcept and component.valueCodeableConcept.coding:
value_coding = PhysicalMeasurementsDao.get_preferred_coding(component.valueCodeableConcept)
value_code_system = value_coding.system
value_code_value = value_coding.code
pm_coding = PhysicalMeasurementsDao.get_preferred_coding(component.code)
return Measurement(
codeSystem=pm_coding.system,
codeValue=pm_coding.code,
measurementTime=observation.effectiveDateTime.date,
valueString=value_string,
valueDecimal=value_decimal,
valueUnit=value_unit,
valueCodeSystem=value_code_system,
valueCodeValue=value_code_value,
valueDateTime=value_date_time,
)
@staticmethod
def from_observation(observation, full_url, qualifier_map, first_pass):
if first_pass:
if observation.related:
# Skip anything with a related observation on the first pass.
return None
else:
if not observation.related:
# Skip anything *without* a related observation on the second pass.
return None
if not observation.effectiveDateTime:
logging.warning(f"Skipping observation without effectiveDateTime: {observation.as_json()}")
return None
if not observation.code or not observation.code.coding:
logging.warning(f"Skipping observation without coding: {observation.as_json()}")
return None
body_site_code_system = None
body_site_code_value = None
value_string = None
value_decimal = None
value_unit = None
value_code_system = None
value_code_value = None
value_code_description = None
value_date_time = None
if observation.bodySite and observation.bodySite.coding:
body_site_coding = PhysicalMeasurementsDao.get_preferred_coding(observation.bodySite)
body_site_code_system = body_site_coding.system
body_site_code_value = body_site_coding.code
if observation.valueQuantity:
value_decimal = observation.valueQuantity.value
value_unit = observation.valueQuantity.code
if observation.valueDateTime:
value_date_time = observation.valueDateTime.date.replace(tzinfo=None)
if observation.valueString:
value_string = observation.valueString
if len(value_string) > _BYTE_LIMIT:
raise BadRequest("Observation notes field exceeds limit.")
if observation.valueCodeableConcept and observation.valueCodeableConcept.coding:
value_coding = PhysicalMeasurementsDao.get_preferred_coding(observation.valueCodeableConcept)
value_code_system = value_coding.system
value_code_value = value_coding.code
value_code_description = observation.valueCodeableConcept.text
desc_char_count = len(value_code_description)
char_limit = Measurement.valueCodeDescription.type.length
if desc_char_count > char_limit:
logging.warning(f'Truncating codeable concept description of length {desc_char_count}')
value_code_description = value_code_description[:char_limit]
measurements = []
if observation.component:
for component in observation.component:
child = PhysicalMeasurementsDao.from_component(observation, component)
if child:
measurements.append(child)
qualifiers = []
if observation.related:
for related in observation.related:
if related.type == _QUALIFIED_BY_RELATED_TYPE and related.target and related.target.reference:
qualifier = qualifier_map.get(related.target.reference)
if qualifier:
qualifiers.append(qualifier)
else:
logging.warning(f"Could not find qualifier {related.target.reference}")
pm_coding = PhysicalMeasurementsDao.get_preferred_coding(observation.code)
result = Measurement(
codeSystem=pm_coding.system,
codeValue=pm_coding.code,
measurementTime=observation.effectiveDateTime.date.replace(tzinfo=None),
bodySiteCodeSystem=body_site_code_system,
bodySiteCodeValue=body_site_code_value,
valueString=value_string,
valueDecimal=value_decimal,
valueUnit=value_unit,
valueCodeSystem=value_code_system,
valueCodeValue=value_code_value,
valueCodeDescription=value_code_description,
valueDateTime=value_date_time,
measurements=measurements,
qualifiers=qualifiers,
)
if first_pass:
qualifier_map[full_url] = result
return result
@staticmethod
def get_location_site_id(location_value):
if not location_value.startswith(_LOCATION_PREFIX):
logging.warning(f"Invalid location: {location_value}")
return None
google_group = location_value[len(_LOCATION_PREFIX) :]
site = SiteDao().get_by_google_group(google_group)
if not site:
logging.warning(f"Unknown site: {google_group}")
return None
return site.siteId
@staticmethod
def get_author_username(author_value):
if not author_value.startswith(_AUTHOR_PREFIX):
logging.warning(f"Invalid author: {author_value}")
return None
return author_value[len(_AUTHOR_PREFIX) :]
@staticmethod
def get_authoring_step(extension):
url = extension.get("url")
if url == _AUTHORING_STEP:
return extension.get("valueCode")
return None
def to_client_json(self, model):
# pylint: disable=unused-argument
"""Converts the given model to a JSON object to be returned to API clients.
Subclasses must implement this unless their model stores a resource attribute.
"""
doc, composition = self.load_record_fhir_doc(model) # pylint: disable=unused-variable
return doc
def from_client_json(self, resource_json, participant_id=None, **unused_kwargs):
# pylint: disable=unused-argument
measurements = []
observations = []
qualifier_map = {}
created_site_id = None
created_username = None
finalized_site_id = None
finalized_username = None
for entry in resource_json["entry"]:
resource = entry.get("resource")
if resource:
resource_type = resource.get("resourceType")
if resource_type == _OBSERVATION_RESOURCE_TYPE:
observations.append((entry["fullUrl"], fhir_observation.Observation(resource)))
elif resource_type == _COMPOSITION_RESOURCE_TYPE:
extensions = resource.get("extension", [])
if not extensions:
logging.warning("No extensions in composition resource (expected site info).")
for extension in extensions:
# DA-1499 convert to 'valueString' key value instead of 'valueReference'.
value_reference = extension.get("valueString")
if not value_reference:
value_reference = extension.get("valueReference")
if value_reference:
url = extension.get("url")
if url == _CREATED_LOC_EXTENSION:
created_site_id = PhysicalMeasurementsDao.get_location_site_id(value_reference)
elif url == _FINALIZED_LOC_EXTENSION:
finalized_site_id = PhysicalMeasurementsDao.get_location_site_id(value_reference)
elif url not in _ALL_EXTENSIONS:
logging.warning(
f"Unrecognized extension URL: {url} (should be one of {_ALL_EXTENSIONS})"
)
else:
logging.warning(f"No valueReference in extension, skipping: {extension}")
authors = resource.get("author")
for author in authors:
author_extension = author.get("extension")
# DA-1435 Support author extension as both an object and an array of objects.
# Convert object to list to meet FHIR spec.
if author_extension and not isinstance(author_extension, list):
new_ae = list()
new_ae.append(author_extension)
author_extension = author['extension'] = new_ae
reference = author.get("reference")
if author_extension and reference:
authoring_step = PhysicalMeasurementsDao.get_authoring_step(author_extension[0])
if authoring_step == _FINALIZED_STATUS:
finalized_username = PhysicalMeasurementsDao.get_author_username(reference)
elif authoring_step == _CREATED_STATUS:
created_username = PhysicalMeasurementsDao.get_author_username(reference)
else:
logging.warning(
f"Unrecognized resource type (expected {_OBSERVATION_RESOURCE_TYPE} \
or {_COMPOSITION_RESOURCE_TYPE}), skipping: {resource_type}"
)
# Take two passes over the observations; once to find all the qualifiers and observations
# without related qualifiers, and a second time to find all observations with related
# qualifiers.
for first_pass in [True, False]:
for fullUrl, observation in observations:
measurement = PhysicalMeasurementsDao.from_observation(observation, fullUrl, qualifier_map, first_pass)
if measurement:
measurements.append(measurement)
record = PhysicalMeasurements(
participantId=participant_id,
measurements=measurements,
createdSiteId=created_site_id,
createdUsername=created_username,
finalizedSiteId=finalized_site_id,
finalizedUsername=finalized_username,
)
record = self.store_record_fhir_doc(record, resource_json)
return record
def _validate_patch_update(self, measurement, resource):
"""Validates the patch (cancel/restore) request payload."""
cancelled_required_fields = ["status", "reason", "cancelledInfo"]
restored_required_fields = ["status", "reason", "restoredInfo"]
if resource.get("status").lower() == "cancelled":
if measurement.status == PhysicalMeasurementsStatus.CANCELLED:
raise BadRequest("This order is already cancelled")
for field in cancelled_required_fields:
if field not in resource:
raise BadRequest(f"{field} is required in cancel request.")
elif resource.get("status").lower() == "restored":
if measurement.status != PhysicalMeasurementsStatus.CANCELLED:
raise BadRequest("Can not restore an order that is not cancelled.")
for field in restored_required_fields:
if field not in resource:
raise BadRequest(f"{field} is required in restore request.")
else:
raise BadRequest("status is required in restore request.")
def _get_patch_args(self, resource):
"""
Returns the author and site based on the resource's cancelledInfo/restoredInfo. Validation that
these exist is handled by _validate_patch_update.
:param resource: Request JSON Payload
:return: Tuple (site_id, author, reason)
"""
site_id = None
author = None
reason = resource.get("reason", None)
if "cancelledInfo" in resource:
site_id = self.get_location_site_id(_LOCATION_PREFIX + resource["cancelledInfo"]["site"]["value"])
author = self.get_author_username(_AUTHOR_PREFIX + resource["cancelledInfo"]["author"]["value"])
elif "restoredInfo" in resource:
site_id = self.get_location_site_id(_LOCATION_PREFIX + resource["restoredInfo"]["site"]["value"])
author = self.get_author_username(_AUTHOR_PREFIX + resource["restoredInfo"]["author"]["value"])
return site_id, author, reason
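# A hypothetical 'cancelledInfo' payload shape assumed by the lookup above (the 'system'
# keys are illustrative; only 'value' is read here):
# {"status": "cancelled", "reason": "...",
#  "cancelledInfo": {"author": {"system": "...", "value": "user@example.org"},
#                    "site": {"system": "...", "value": "hpo-site-example"}}}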
@staticmethod
def load_record_fhir_doc(record):
"""
Retrieve the FHIR document from the DB record.
:param record: Measurement DB record
:return: Tuple (FHIR document dict, Composition entry)
"""
# DA-1435 Support old/new resource field type
if str(PhysicalMeasurements.resource.property.columns[0].type) == 'JSON':
doc = record.resource
else:
doc = json.loads(record.resource)
composition = None
entries = doc.get('entry', list())
for entry in entries:
resource = entry.get('resource', None)
if resource and 'resourceType' in resource and resource['resourceType'].lower() == 'composition':
composition = resource
return doc, composition
@staticmethod
def store_record_fhir_doc(record, doc):
"""
Store the FHIR document into the DB record.
:param record: Measurement DB record
:param doc: FHIR document dict
:return: Measurement DB record
"""
if isinstance(doc, str):
doc = json.loads(doc)
# DA-1435 Support old/new resource field type
if str(PhysicalMeasurements.resource.property.columns[0].type) == 'JSON':
record.resource = doc
else:
record.resource = json.dumps(doc)
# sqlalchemy does not mark the 'resource' field as dirty, we need to force it.
flag_modified(record, 'resource')
return record
def _cancel_record(self, record, resource):
"""
Cancel the Physical Measurements record.
:param record: Measurement DB record
:param resource: Request JSON payload
:return: Measurement record
"""
site_id, author, reason = self._get_patch_args(resource)
record.cancelledUsername = author
record.cancelledSiteId = site_id
record.reason = reason
record.cancelledTime = clock.CLOCK.now()
record.status = PhysicalMeasurementsStatus.CANCELLED
record.createdSiteId = None
record.finalizedSiteId = None
record.finalized = None
doc, composition = self.load_record_fhir_doc(record)
composition['status'] = 'entered-in-error'
# remove all restored entries if found
extensions = list()
for ext in composition['extension']:
if 'restore' not in ext['url']:
extensions.append(ext)
extensions.append({
| |
#!/usr/bin/python3
import sys
from sys import argv, exit, stderr
import os
import nbformat as nbf
import yaml
from collections import OrderedDict
import numpy as np
import re, ast
def represent_dictionary_order(self, dict_data):
return self.represent_mapping('tag:yaml.org,2002:map', dict_data.items())
def setup_yaml():
yaml.add_representer(OrderedDict, represent_dictionary_order)
setup_yaml()
def add_cell(cell_type,cell_string,cell_metadata):
if cell_type=="Code":
nb['cells'].append(nbf.v4.new_code_cell(cell_string, metadata=cell_metadata))
elif cell_type=="Markdown":
nb['cells'].append(nbf.v4.new_markdown_cell(cell_string, metadata=cell_metadata))
elif cell_type=="Raw":
nb['cells'].append(nbf.v4.new_raw_cell(cell_string, metadata=cell_metadata))
# new_heading_cell does not exist in nbformat v4
#elif cell_type=="Heading": nb['cells'].append(nbf.v4.new_heading_cell(cell_string,metadata=cell_metadata));
else:
assert False
def usage():
print(f"""Usage: ./{os.path.basename(argv[0])} instance_file.yaml\n\n where the required parameter <instance_file.yaml> is the name of the file containing the instance-specific data.""", file=stderr)
# THE MAIN PROGRAM:
#Usage: command_name instance file.yaml
if len(argv) != 2:
print(f"Hmm... you called the script {os.path.basename(argv[0])} with {len(argv)-1} parameters; expecting exactly one!")
usage()
exit(1)
# BEGIN instance specific data loading
try:
with open(argv[1], 'r') as stream:
data_instance = yaml.safe_load(stream)
except FileNotFoundError:
print(f"Can\'t open file {argv[1]}. Wrong file name or file path")
exit(1)
except IOError:
print("Error: can\'t read the file")
exit(1)
#except Exception:
# tb = sys.exc_info()[2]
# raise OtherException(...).with_traceback(tb)
# BEGIN building the variables used to generate the yaml instance for free mode
yaml_gen=OrderedDict()
yaml_gen['name']=data_instance['name']
yaml_gen['title']=data_instance['title']
tasks_istanza_libera=[]
edges=data_instance['edges']
edges2=data_instance['edges2']
# END building the variables used to generate the yaml instance for free mode
tasks=data_instance['tasks']
total_point=0
n = 0
for task in tasks:
total_point += task['tot_points']
n += 1
num_of_question=1
# END instance specific data loading
# Handy Ctrl-C Ctrl-V stuff:
#meta_init={"hide_input": True, "init_cell": True, "trusted": True, "deletable": False, "editable": False}
#meta_run={"hide_input": True, "editable": False, "deletable": False, "tags": ["runcell"], "trusted": True}
#meta_stud_input={"trusted": True, "deletable": False}
# NOTEBOOK DEFINITION:
nb = nbf.v4.new_notebook()
nb['cells']=[]
# ( CELL 1:
cell_type='Code'
cell_string = """\
%%javascript
window.findCellIndicesByTag = function findCellIndicesByTag(tagName) {
return (Jupyter.notebook.get_cells()
.filter(
({metadata: {tags}}) => tags && tags.includes(tagName)
)
.map((cell) => Jupyter.notebook.find_cell_index(cell))
);
};
window.runCells = function runCells() {
var c = window.findCellIndicesByTag('runcell');
Jupyter.notebook.execute_cells(c);
};
"""
cell_metadata={"hide_input": True,"tags": ["noexport"], "init_cell": True, "trusted": True, "deletable": False, "editable": False}
add_cell(cell_type,cell_string,cell_metadata)
# CELL 1 -END)
##############
# ( CELL 2:
cell_type='Code'
cell_string =f"""\
from IPython.core.display import display, HTML, Markdown, Javascript
from IPython.display import SVG, display
from IPython.display import Latex
import copy as cp
import numpy as np
def start():
display(Javascript("window.runCells()"))
arr_point={str([-1] * n)}
"""
cell_metadata={"hide_input": True, "init_cell": True,"tags": ["noexport"], "trusted": True, "deletable": False}
add_cell(cell_type,cell_string,cell_metadata)
# CELL 2 -END)
##############
# ( CELL 3:
cell_type='Code'
cell_string="""\
#seleziona la cella e premi ctrl-invio
start()
"""
cell_metadata={"tags": ["noexport"], "trusted": True, "deletable": False}
add_cell(cell_type,cell_string,cell_metadata)
# CELL 3 -END)
##############
# ( CELL 4:
cell_type='Code'
cell_string=f"""\
edges={edges}
edges2={edges2}
"""
cell_metadata={"hide_input": True, "editable": False, "init_cell": True, "deletable": False, "tags": ["noexport"], "trusted": True}
add_cell(cell_type,cell_string,cell_metadata)
# CELL 4 -END)
############
# ( CELL 5:
cell_type='Markdown'
cell_string=f"## Esercizio \[{total_point} pts\]<br/>"\
+f"{data_instance['title']}."
cell_metadata={"hide_input": True, "editable": False, "deletable": False, "tags": ["runcell","noexport"], "trusted": True}
add_cell(cell_type,cell_string,cell_metadata)
# CELL 5 -END)
##############
# ( CELL 6:
cell_type='Markdown'
cell_string=f"""\
Consideriamo i seguenti due grafi chiamati GRAFO 1 (a sinistra) e GRAFO 2 (a destra):
"""
cell_metadata={"hide_input": True, "editable": False, "deletable": False, "tags": ["runcell","noexport"], "trusted": True}
add_cell(cell_type,cell_string,cell_metadata)
# for the free-mode instance
descript='Consideriamo i seguenti due grafi chiamati GRAFO 1 (a sinistra) e GRAFO 2 (a destra):'
# CELL 6 -END)
##############
# ( CELL 7:
cell_type='Code'
cell_string="""\
import matplotlib.pyplot as plt
import networkx as nx
n = 20
# segue una lista di precedenze della forma [u,v], col significato che u deve essere schedulato prima di v.
nodes=[(0+i) for i in range(n)]
prec_original_instance = []
for e in edges:
if e["flip"] == 1:
prec_original_instance.append((e["head"],e["tail"]))
else:
prec_original_instance.append((e["tail"],e["head"]))
prec_original_instance2 = []
for e in edges2:
if e["flip"] == 1:
prec_original_instance2.append((e["head"],e["tail"]))
else:
prec_original_instance2.append((e["tail"],e["head"]))
def evaluation_format(answ, pt_green,pt_red, index_pt):
pt_blue=0
if pt_green!=0:
pt_blue=pt_red-pt_green
pt_red=0
arr_point[index_pt]=pt_green
file = open("points.txt", "w")
file.write(str(arr_point))
file.close()
return f"{answ}. Totalizzeresti <span style='color:green'>[{pt_green} safe pt]</span>, \
<span style='color:blue'>[{pt_blue} possible pt]</span>, \
<span style='color:red'>[{pt_red} out of reach pt]</span>.<br>"
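# Scoring convention used by evaluation_format above (descriptive note, no new logic):
# when the answer earns pt_green > 0 those points are "safe", pt_red - pt_green remain
# "possible" (shown in blue) and nothing is "out of reach"; when pt_green is 0 all
# pt_red points are shown as out of reach. arr_point[index_pt] records the safe points
# per question and is persisted to points.txt.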
def visualizza_e_valuta_le_precedenze_non_rispettate(soluzione_problem_solver,lista_di_precedenze, pt_green, pt_red, index_pt, silent=False):
lista_visualizza=[] # lista di tuple (archi)
#controllo sulla lunghezza della lista fornita
if(len(soluzione_problem_solver)!=n):
#modificare l'output, dire di che lunghezza voglio la lista e di che lunghezza l'ha data lui
if(silent):
return 0
else:
str_to_print=evaluation_format("No", 0, pt_red, index_pt) + "Hai fornito una soluzione di lunghezza "+str(len(soluzione_problem_solver)) + ": \
dovresti fornirla di lunghezza "+str(n)
return display(Markdown(str_to_print))
check=np.zeros(len(soluzione_problem_solver))
#incremento la posizione soluzione_problem_solver[i] di uno , se sono inseriti tutti correttamente avrò
#un array di soli 1
for i in range(len(soluzione_problem_solver)):
try:
check[soluzione_problem_solver[i]]=check[soluzione_problem_solver[i]]+1
except:
if(silent):
return 0
else:
str_to_print=evaluation_format("No", 0, pt_red, index_pt) + "Hai inserito il nodo "+str(soluzione_problem_solver[i])+", ti ricordo che i nodi \
vanno da 0 a " + str(n-1)
return display(Markdown(str_to_print))
contatore_errori=0
#la lista contiene una e una volta sola tutti gli elementi
if(np.all((check == 1))):
if(lista_di_precedenze==1):
for element in prec_original_instance:
indice1=soluzione_problem_solver.index(element[0])
indice2=soluzione_problem_solver.index(element[1])
if(indice1>indice2):
lista_visualizza.append((element[0], element[1]))
contatore_errori=contatore_errori+1
if(lista_di_precedenze==2):
for element in prec_original_instance2:
indice1=soluzione_problem_solver.index(element[0])
indice2=soluzione_problem_solver.index(element[1])
if(indice1>indice2):
lista_visualizza.append((element[0], element[1]))
contatore_errori=contatore_errori+1
if(lista_di_precedenze!=2 and lista_di_precedenze!=1):
return "Vorresti valutare la tua soluzione rispetto alla lista di precedenze numero \
" +str(lista_di_precedenze)+ " Ti ricordo che le liste di precedenze sono 2, \
se vuoi valutare la tua soluzione rispetto alla prima lista digita 1 , altrimenti 2"
if(contatore_errori==0):
if(silent):
return 1
else:
str_to_print=evaluation_format("Si", pt_green, pt_red, index_pt) + "Sei riuscito a rispettare tutte le precedenze : hai dimostrato che il grafo fornito è un DAG!"
return display(Markdown(str_to_print))
else:
if(silent):
return 0
else:
str_to_print=evaluation_format("No", 0, pt_red, index_pt) + "Non hai rispettato " + str(contatore_errori) + " precedenze "
display(Markdown(str_to_print))
return visualizza(lista_visualizza)
#manca un elemento e/o un elemento viene ripetuto più di una volta
else:
if(silent):
return 0
else:
for k in range(len(check)):
if(check[k]==0):
if(silent):
return 0
else:
str_to_print=evaluation_format("No", 0, pt_red, index_pt) +"L'array NON contiene tutti i nodi, il nodo numero " + str(k) + " non è presente "
return display(Markdown(str_to_print))
def visualizza(ordinamento):
G = nx.DiGraph()
# mathplotlib o networkx o ?
# visualizziamo il grafo coi nodi sulla linea nelle posizioni specificate da ordinamento e gli archi che fanno panza per poterli vedere
# il problem-solver deve rendersi conto di quali archi sono rivolti all'indietro.
#for i in range(len(ordinamento)-1):
# G.add_edge(ordinamento[i],ordinamento[i+1])
G.add_edges_from(ordinamento)
nx.draw_planar(G,with_labels=True,arrows=True)
plt.plot()
def ciclo_di_precedenze(soluzione_problem_solver,lista_di_precedenze, pt_green=10, pt_red=10, index_pt=5, silent=False):
lunghezza=len(soluzione_problem_solver)
precedenze_da_valutare=0
if(lista_di_precedenze==1):
precedenze_da_valutare=prec_original_instance
if(lista_di_precedenze==2):
precedenze_da_valutare=prec_original_instance2
if(lista_di_precedenze!=1 and lista_di_precedenze!=2):
if(silent):
return 0
else:
return "Vorresti valutare la tua soluzione rispetto alla lista di precedenze numero \
" +str(lista_di_precedenze)+ " ti ricordo che le liste di precedenze sono 2, \
se vuoi valutare la tua soluzione rispetto alla prima lista digita 1 , altrimenti 2"
#la lista contiene una e una volta sola tutti gli elementi
# creo una stringa che raccoglie i nodi non esistenti (se forniti dallo studente in soluzione_problem_solver)
mystr=''
for node in soluzione_problem_solver:
if node not in nodes:
if mystr=='':
mystr=f'{node}'
else:
mystr=mystr+f', {node}'
if (lunghezza>n) or (mystr!='') or (lunghezza==0):
if lunghezza>n:
str_to_print=f"Attenzione: hai fornito un ciclo più lungo del numero totale di nodi del grafo, ovvero {n}."
elif lunghezza==0:
str_to_print=f"Attenzione: hai fornito un ciclo privo di nodi"
else:
str_to_print=f"Attenzione: i nodi {mystr} non esistono !"
str_to_print=evaluation_format("No", 0, pt_red, index_pt) + str_to_print
return display(Markdown(str_to_print))
else:
if ((soluzione_problem_solver[(len(soluzione_problem_solver)-1)],soluzione_problem_solver[0]) in precedenze_da_valutare):
for i in range(len(soluzione_problem_solver)-1):
if((soluzione_problem_solver[i],soluzione_problem_solver[i+1]) not in precedenze_da_valutare):
if(silent):
return 0
else:
str_to_print=evaluation_format("No", 0, pt_red, index_pt) + "Sembra che la tua lista non contenga un ciclo : controlla le precedenze tra il nodo " + str(soluzione_problem_solver[i]) + " e il nodo " + str(soluzione_problem_solver[i+1])
return display(Markdown(str_to_print))
if(silent):
return 1
else:
str_to_print=evaluation_format("Si", pt_green, pt_red, index_pt) + "La sequenza di nodi " + str(soluzione_problem_solver)+f" che hai fornito descrive un ciclo presente in GRAFO_CON_CICLO={lista_di_precedenze}"
return display(Markdown(str_to_print))
else:
if(silent):
return 0
else:
str_to_print=evaluation_format("No", 0, pt_red, index_pt) + "Sembra che la tua lista non contenga un ciclo : controlla le precedenze tra il nodo " + str(soluzione_problem_solver[lunghezza-1]) + " e il nodo " + str(soluzione_problem_solver[0])
return display(Markdown(str_to_print))
def visualizza_icosaedro(grafo):
front_face = [15, 16, 17, 18, 19]
back_face = [0, 1, 2, 3, 4]
middle = list(set(range(20)).difference(front_face + back_face))
shells = [front_face] + [middle] + [back_face]
pos = nx.shell_layout(grafo, shells)
#nx.draw_networkx(icosaedro, pos)
nx.draw_networkx_nodes(grafo, pos, alpha=0.6) #node_color='cyan',
nx.draw_networkx_labels(grafo, pos)
#disegna archi e etichette sugli archi
#positive=[(u,v) for (u,v,d) in grafo.edges(data=True) if d['w'] >= 0]
#negative=[(u,v) for (u,v,d) in grafo.edges(data=True) if d['w'] < 0]
positive=[(u,v)for (u,v,d) in grafo.edges(data=True)]
nx.draw_networkx_edges(grafo,pos,edgelist=positive,width=2,alpha = 0.6,edge_color = "g",arrows=True)
#nx.draw_networkx_edges(grafo,pos,edgelist=negative,width=2,alpha = 0.6,edge_color = "r",arrows=True)
#labels = nx.get_edge_attributes(grafo,'w')
#nx.draw_networkx_edge_labels(grafo,pos,edge_labels=labels)
ax = plt.gca()
ax.set_aspect('equal')
ax.set_axis_off()
def subplt(grafo_1, grafo_2):
fig = plt.figure()
plt.rcParams["figure.figsize"] = (15,7)
front_face = [15, 16, 17, 18, 19]
back_face = [0, 1, 2, 3, 4]
middle = list(set(range(20)).difference(front_face + back_face))
shells = [front_face] + [middle] + [back_face]
plt.subplot(121).title.set_text('GRAFO 1')
pos = nx.shell_layout(grafo_1, shells)
nx.draw_networkx_nodes(grafo_1, pos, alpha=0.6)
nx.draw_networkx_labels(grafo_1, pos)
positive=[(u,v) for (u,v,d) in grafo_1.edges(data=True)]
nx.draw_networkx_edges(grafo_1,pos,edgelist=positive,width=2,alpha = 0.6,edge_color = "g",arrows=True)
plt.subplot(122).title.set_text('GRAFO 2')
pos = nx.shell_layout(grafo_2, shells)
nx.draw_networkx_nodes(grafo_2, pos, alpha=0.6)
nx.draw_networkx_labels(grafo_2, pos)
positive=[(u,v) for (u,v,d) in grafo_2.edges(data=True)]
nx.draw_networkx_edges(grafo_2,pos,edgelist=positive,width=2,alpha = 0.6,edge_color = "g",arrows=True)
| |
import numpy as np
import pandas as pd
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem
from rdkit.Chem.Fingerprints import FingerprintMols
from DeepPurpose.pybiomed_helper import _GetPseudoAAC, CalculateAADipeptideComposition, \
calcPubChemFingerAll, CalculateConjointTriad, GetQuasiSequenceOrder
import torch
from torch.utils import data
from torch.autograd import Variable
try:
from descriptastorus.descriptors import rdDescriptors, rdNormalizedDescriptors
except:
raise ImportError("Please install descriptastorus: pip install git+https://github.com/bp-kelley/descriptastorus.")
from DeepPurpose.chemutils import get_mol, atom_features, bond_features, MAX_NB, ATOM_FDIM, BOND_FDIM
from subword_nmt.apply_bpe import BPE
import codecs
import pickle
import wget
from zipfile import ZipFile
import os
import sys
# ESPF encoding
vocab_path = './DeepPurpose/ESPF/drug_codes_chembl_freq_1500.txt'
bpe_codes_drug = codecs.open(vocab_path)
dbpe = BPE(bpe_codes_drug, merges=-1, separator='')
sub_csv = pd.read_csv('./DeepPurpose/ESPF/subword_units_map_chembl_freq_1500.csv')
idx2word_d = sub_csv['index'].values
words2idx_d = dict(zip(idx2word_d, range(0, len(idx2word_d))))
vocab_path = './DeepPurpose/ESPF/protein_codes_uniprot_2000.txt'
bpe_codes_protein = codecs.open(vocab_path)
pbpe = BPE(bpe_codes_protein, merges=-1, separator='')
#sub_csv = pd.read_csv(dataFolder + '/subword_units_map_protein.csv')
sub_csv = pd.read_csv('./DeepPurpose/ESPF/subword_units_map_uniprot_2000.csv')
idx2word_p = sub_csv['index'].values
words2idx_p = dict(zip(idx2word_p, range(0, len(idx2word_p))))
from DeepPurpose.chemutils import get_mol, atom_features, bond_features, MAX_NB
def create_var(tensor, requires_grad=None):
if requires_grad is None:
return Variable(tensor)
else:
return Variable(tensor, requires_grad=requires_grad)
def roc_curve(y_pred, y_label, figure_file, method_name):
'''
y_pred is a list of length n. (0,1)
y_label is a list of same length. 0/1
https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html#sphx-glr-auto-examples-model-selection-plot-roc-py
'''
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import roc_auc_score
y_label = np.array(y_label)
y_pred = np.array(y_pred)
fpr = dict()
tpr = dict()
roc_auc = dict()
fpr[0], tpr[0], _ = roc_curve(y_label, y_pred)
roc_auc[0] = auc(fpr[0], tpr[0])
lw = 2
plt.plot(fpr[0], tpr[0],
lw=lw, label= method_name + ' (area = %0.2f)' % roc_auc[0])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
fontsize = 14
plt.xlabel('False Positive Rate', fontsize = fontsize)
plt.ylabel('True Positive Rate', fontsize = fontsize)
plt.title('Receiver Operating Characteristic Curve')
plt.legend(loc="lower right")
plt.savefig(figure_file)
return
def prauc_curve(y_pred, y_label, figure_file, method_name):
'''
y_pred is a list of length n. (0,1)
y_label is a list of same length. 0/1
reference:
https://machinelearningmastery.com/roc-curves-and-precision-recall-curves-for-classification-in-python/
'''
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve, average_precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import auc
lr_precision, lr_recall, _ = precision_recall_curve(y_label, y_pred)
# plt.plot([0,1], [no_skill, no_skill], linestyle='--')
plt.plot(lr_recall, lr_precision, lw = 2, label= method_name + ' (area = %0.2f)' % average_precision_score(y_label, y_pred))
fontsize = 14
plt.xlabel('Recall', fontsize = fontsize)
plt.ylabel('Precision', fontsize = fontsize)
plt.title('Precision Recall Curve')
plt.legend()
plt.savefig(figure_file)
return
def length_func(list_or_tensor):
if isinstance(list_or_tensor, list):
return len(list_or_tensor)
return list_or_tensor.shape[0]
def index_select_ND(source, dim, index):
index_size = index.size()
suffix_dim = source.size()[1:]
final_size = index_size + suffix_dim
target = source.index_select(dim, index.view(-1))
return target.view(final_size)
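# Shape note (descriptive, based on the code above): selecting along dim 0 from a
# `source` of shape (B, H) with an `index` of shape (N, K) returns a tensor of shape
# (N, K, H) -- each index entry pulls out one row of `source`.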
def smiles2morgan(s, radius = 2, nBits = 1024):
try:
mol = Chem.MolFromSmiles(s)
features_vec = AllChem.GetHashedMorganFingerprint(mol, radius, nBits=nBits)
features = np.zeros((1,))
DataStructs.ConvertToNumpyArray(features_vec, features)
except:
print('rdkit could not parse this SMILES for Morgan fingerprint: ' + s + '; converting to all-1 features')
features = np.ones((nBits, ))
return features
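# Hypothetical usage: smiles2morgan('CCO') returns a length-1024 numpy array of hashed
# Morgan (radius 2) counts; an unparseable SMILES falls back to an all-1 vector.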
def smiles2rdkit2d(s):
try:
generator = rdNormalizedDescriptors.RDKit2DNormalized()
features = np.array(generator.process(s)[1:])
NaNs = np.isnan(features)
features[NaNs] = 0
except:
print('descriptastorus could not process this SMILES: ' + s + '; converting to all-1 features')
features = np.ones((200, ))
return np.array(features)
def smiles2daylight(s):
try:
NumFinger = 2048
mol = Chem.MolFromSmiles(s)
bv = FingerprintMols.FingerprintMol(mol)
temp = tuple(bv.GetOnBits())
features = np.zeros((NumFinger, ))
features[np.array(temp)] = 1
except:
print('rdkit could not parse this SMILES: ' + s + '; converting to all-1 features')
features = np.ones((2048, ))
return np.array(features)
def smiles2mpnnfeature(smiles):
## mpn.py::tensorize
'''
data-flow:
data_process(): apply(smiles2mpnnfeature)
DBTA: train(): data.DataLoader(data_process_loader())
mpnn_collate_func()
'''
try:
padding = torch.zeros(ATOM_FDIM + BOND_FDIM)
fatoms, fbonds = [], [padding]
in_bonds,all_bonds = [], [(-1,-1)]
mol = get_mol(smiles)
n_atoms = mol.GetNumAtoms()
for atom in mol.GetAtoms():
fatoms.append( atom_features(atom))
in_bonds.append([])
for bond in mol.GetBonds():
a1 = bond.GetBeginAtom()
a2 = bond.GetEndAtom()
x = a1.GetIdx()
y = a2.GetIdx()
b = len(all_bonds)
all_bonds.append((x,y))
fbonds.append( torch.cat([fatoms[x], bond_features(bond)], 0) )
in_bonds[y].append(b)
b = len(all_bonds)
all_bonds.append((y,x))
fbonds.append( torch.cat([fatoms[y], bond_features(bond)], 0) )
in_bonds[x].append(b)
total_bonds = len(all_bonds)
fatoms = torch.stack(fatoms, 0)
fbonds = torch.stack(fbonds, 0)
agraph = torch.zeros(n_atoms,MAX_NB).long()
bgraph = torch.zeros(total_bonds,MAX_NB).long()
for a in range(n_atoms):
for i,b in enumerate(in_bonds[a]):
agraph[a,i] = b
for b1 in range(1, total_bonds):
x,y = all_bonds[b1]
for i,b2 in enumerate(in_bonds[x]):
if all_bonds[b2][0] != y:
bgraph[b1,i] = b2
except:
print('Molecule could not be parsed; falling back to empty (zero-row) feature tensors.')
fatoms = torch.zeros(0,39)
fbonds = torch.zeros(0,50)
agraph = torch.zeros(0,6)
bgraph = torch.zeros(0,6)
#fatoms, fbonds, agraph, bgraph = [], [], [], []
#print(fatoms.shape, fbonds.shape, agraph.shape, bgraph.shape)
Natom, Nbond = fatoms.shape[0], fbonds.shape[0]
shape_tensor = torch.Tensor([Natom, Nbond]).view(1,-1)
return [fatoms.float(), fbonds.float(), agraph.float(), bgraph.float(), shape_tensor.float()]
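# Descriptive note on the return value above: a list of five float tensors --
# fatoms (Natom x 39 atom features), fbonds (Nbond x 50 concatenated atom+bond features),
# agraph (Natom x MAX_NB incoming-bond indices), bgraph (Nbond x MAX_NB bond-to-bond
# indices) and a 1 x 2 tensor holding [Natom, Nbond].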
# random_fold
def create_fold(df, fold_seed, frac):
train_frac, val_frac, test_frac = frac
test = df.sample(frac = test_frac, replace = False, random_state = fold_seed)
train_val = df[~df.index.isin(test.index)]
val = train_val.sample(frac = val_frac/(1-test_frac), replace = False, random_state = 1)
train = train_val[~train_val.index.isin(val.index)]
return train, val, test
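# Example (hypothetical call): create_fold(df, fold_seed=1, frac=[0.7, 0.1, 0.2]) gives a
# random 70/10/20 train/val/test split; the test rows are sampled first, then validation
# is drawn from the remainder at val_frac/(1-test_frac).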
# cold protein
def create_fold_setting_cold_protein(df, fold_seed, frac):
train_frac, val_frac, test_frac = frac
gene_drop = df['Target Sequence'].drop_duplicates().sample(frac = test_frac, replace = False, random_state = fold_seed).values
test = df[df['Target Sequence'].isin(gene_drop)]
train_val = df[~df['Target Sequence'].isin(gene_drop)]
gene_drop_val = train_val['Target Sequence'].drop_duplicates().sample(frac = val_frac/(1-test_frac),
replace = False,
random_state = fold_seed).values
val = train_val[train_val['Target Sequence'].isin(gene_drop_val)]
train = train_val[~train_val['Target Sequence'].isin(gene_drop_val)]
return train, val, test
# cold drug
def create_fold_setting_cold_drug(df, fold_seed, frac):
train_frac, val_frac, test_frac = frac
drug_drop = df['SMILES'].drop_duplicates().sample(frac = test_frac, replace = False, random_state = fold_seed).values
test = df[df['SMILES'].isin(drug_drop)]
train_val = df[~df['SMILES'].isin(drug_drop)]
drug_drop_val = train_val['SMILES'].drop_duplicates().sample(frac = val_frac/(1-test_frac),
replace = False,
random_state = fold_seed).values
val = train_val[train_val['SMILES'].isin(drug_drop_val)]
train = train_val[~train_val['SMILES'].isin(drug_drop_val)]
return train, val, test
def encode_drug(df_data, drug_encoding, column_name = 'SMILES', save_column_name = 'drug_encoding'):
print('encoding drug...')
print('unique drugs: ' + str(len(df_data[column_name].unique())))
if drug_encoding == 'Morgan':
unique = pd.Series(df_data[column_name].unique()).apply(smiles2morgan)
unique_dict = dict(zip(df_data[column_name].unique(), unique))
df_data[save_column_name] = [unique_dict[i] for i in df_data[column_name]]
elif drug_encoding == 'Pubchem':
unique = pd.Series(df_data[column_name].unique()).apply(calcPubChemFingerAll)
unique_dict = dict(zip(df_data[column_name].unique(), unique))
df_data[save_column_name] = [unique_dict[i] for i in df_data[column_name]]
elif drug_encoding == 'Daylight':
unique = pd.Series(df_data[column_name].unique()).apply(smiles2daylight)
unique_dict = dict(zip(df_data[column_name].unique(), unique))
df_data[save_column_name] = [unique_dict[i] for i in df_data[column_name]]
elif drug_encoding == 'rdkit_2d_normalized':
unique = pd.Series(df_data[column_name].unique()).apply(smiles2rdkit2d)
unique_dict = dict(zip(df_data[column_name].unique(), unique))
df_data[save_column_name] = [unique_dict[i] for i in df_data[column_name]]
elif drug_encoding == 'CNN':
unique = pd.Series(df_data[column_name].unique()).apply(trans_drug)
unique_dict = dict(zip(df_data[column_name].unique(), unique))
df_data[save_column_name] = [unique_dict[i] for i in df_data[column_name]]
# This encoding is memory-heavy and not scalable (though quick), so the actual embedding is done per-batch in the dataloader.
elif drug_encoding == 'CNN_RNN':
unique = pd.Series(df_data[column_name].unique()).apply(trans_drug)
unique_dict = dict(zip(df_data[column_name].unique(), unique))
df_data[save_column_name] = [unique_dict[i] for i in df_data[column_name]]
elif drug_encoding == 'Transformer':
unique = pd.Series(df_data[column_name].unique()).apply(drug2emb_encoder)
unique_dict = dict(zip(df_data[column_name].unique(), unique))
df_data[save_column_name] = [unique_dict[i] for i in df_data[column_name]]
elif drug_encoding == 'MPNN':
unique = pd.Series(df_data[column_name].unique()).apply(smiles2mpnnfeature)
unique_dict = dict(zip(df_data[column_name].unique(), unique))
df_data[save_column_name] = [unique_dict[i] for i in df_data[column_name]]
else:
raise AttributeError("Please use one of the available drug encodings!")
return df_data
def encode_protein(df_data, target_encoding, column_name = 'Target Sequence', save_column_name = 'target_encoding'):
print('encoding protein...')
print('unique target sequence: ' + str(len(df_data[column_name].unique())))
if target_encoding == 'AAC':
print('-- Encoding AAC takes time. Time Reference: 24s for ~100 sequences in a CPU.\
Calculate your time by the unique target sequence #, instead of the entire dataset.')
AA = pd.Series(df_data[column_name].unique()).apply(CalculateAADipeptideComposition)
AA_dict = dict(zip(df_data[column_name].unique(), AA))
df_data[save_column_name] = [AA_dict[i] for i in df_data[column_name]]
elif target_encoding == 'PseudoAAC':
print('-- Encoding PseudoAAC takes time. Time Reference: 462s for ~100 sequences in a CPU.\
Calculate your time by the unique target sequence #, instead of the entire dataset.')
AA = pd.Series(df_data[column_name].unique()).apply(_GetPseudoAAC)
AA_dict = dict(zip(df_data[column_name].unique(), AA))
df_data[save_column_name] = [AA_dict[i] for i in df_data[column_name]]
elif target_encoding == 'Conjoint_triad':
AA = pd.Series(df_data[column_name].unique()).apply(CalculateConjointTriad)
AA_dict = dict(zip(df_data[column_name].unique(), AA))
df_data[save_column_name] = [AA_dict[i] for i in df_data[column_name]]
elif target_encoding == 'Quasi-seq':
AA = pd.Series(df_data[column_name].unique()).apply(GetQuasiSequenceOrder)
AA_dict = dict(zip(df_data[column_name].unique(), AA))
df_data[save_column_name] = [AA_dict[i] for i in df_data[column_name]]
elif target_encoding == 'CNN':
AA = pd.Series(df_data[column_name].unique()).apply(trans_protein)
AA_dict = dict(zip(df_data[column_name].unique(), AA))
df_data[save_column_name] = [AA_dict[i] for i in df_data[column_name]]
# This encoding is memory-heavy and not scalable (though quick), so the actual embedding is done per-batch in the dataloader.
elif target_encoding == 'CNN_RNN':
AA = pd.Series(df_data[column_name].unique()).apply(trans_protein)
AA_dict = dict(zip(df_data[column_name].unique(), AA))
df_data[save_column_name] = [AA_dict[i] for i in df_data[column_name]]
elif target_encoding == 'Transformer':
AA = pd.Series(df_data[column_name].unique()).apply(protein2emb_encoder)
AA_dict = dict(zip(df_data[column_name].unique(), AA))
df_data[save_column_name] = [AA_dict[i] for i in df_data[column_name]]
else:
raise AttributeError("Please use one of the available protein encodings!")
return df_data
def data_process(X_drug = None, X_target = None, y=None, drug_encoding=None, target_encoding=None,
split_method = 'random', frac = [0.7, 0.1, 0.2], random_seed = 1, sample_frac = 1, mode = 'DTI', X_drug_ = None, X_target_ = None):
#property_prediction_flag = X_target is None
property_prediction_flag, function_prediction_flag, DDI_flag, PPI_flag, DTI_flag = False, False, False, False, False
if (X_target is None) and (X_drug is not None) and (X_drug_ is None):
property_prediction_flag = True
elif (X_target is not None) and (X_drug is None) and (X_target_ is None):
function_prediction_flag = True
elif (X_drug is not None) and (X_drug_ is not None):
DDI_flag = True
if (X_drug is None) or (X_drug_ is None):
raise AttributeError("Drug pair sequence should be in X_drug, X_drug_")
elif (X_target is not None) and (X_target_ is not None):
PPI_flag = True
if (X_target is None) or (X_target_ is None):
raise AttributeError("Target pair sequence should be in X_target, X_target_")
elif (X_drug is not None) and (X_target is not None):
DTI_flag = True
if (X_drug is None) or (X_target is None):
raise AttributeError("Target pair sequence should be in X_target, X_drug")
else:
raise AttributeError("Please use the correct mode. Currently, we support DTI, DDI, PPI, Drug Property Prediction and Protein Function Prediction...")
if split_method == 'repurposing_VS':
y = [-1]*len(X_drug) # create temp y for compatibility
if DTI_flag:
print('Drug Target Interaction Prediction Mode...')
if isinstance(X_target, str):
X_target = [X_target]
if len(X_target) == 1:
# one target high throughput screening setting
X_target = np.tile(X_target, (length_func(X_drug), ))
df_data = pd.DataFrame(zip(X_drug, X_target, y))
df_data.rename(columns={0:'SMILES',
1: 'Target Sequence',
2: 'Label'},
inplace=True)
print('in total: ' + str(len(df_data)) + ' drug-target pairs')
elif property_prediction_flag:
print('Drug Property Prediction Mode...')
df_data = pd.DataFrame(zip(X_drug, y))
df_data.rename(columns={0:'SMILES',
1: 'Label'},
inplace=True)
print('in total: ' + str(len(df_data)) + ' drugs')
elif function_prediction_flag:
print('Protein Function Prediction Mode...')
df_data = pd.DataFrame(zip(X_target, y))
df_data.rename(columns={0:'Target
# Global Variables
listaErroresLexicos = []
listaErroresSintacticos = []
# Reserved words declaration
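# A hypothetical PLY-style identifier rule would consume this dict, e.g.
#   t.type = reservadas.get(t.value.lower(), 'ID')
# (assuming an 'ID' token for plain identifiers; the actual lexer rules live elsewhere).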
reservadas = {
'true' : 'TRUE',
'false' : 'FALSE',
'smallint' : 'SMALLINT',
'integer': 'INTEGER',
'bigint' : 'BIGINT',
'decimal' : 'DECIMAL',
'numeric' : 'NUMERIC',
'real' : 'REAL',
'precision':'PRECISION',
'money':'MONEY',
'character' : 'CHARACTER',
'varying' : 'VARYING',
'char': 'CHAR',
'text': 'TEXT',
'varchar' : 'VARCHAR',
'double': 'DOUBLE',
'float': 'FLOAT',
'timestamp' : 'TIMESTAMP',
'date' : 'DATE',
'time' : 'TIME',
'year' : 'YEAR',
'month' : 'MONTH',
'day' : 'DAY',
'hour' : 'HOUR',
'minute' : 'MINUTE',
'second' : 'SECOND',
'to' : 'TO',
'interval' : 'INTERVAL',
'boolean' : 'BOOLEAN',
'if': 'IF',
'else': 'ELSE',
'default': 'DEFAULT',
'case': 'CASE',
'void': 'VOID',
'end' : 'END',
'then' : 'THEN',
'elseif': 'ELSEIF',
'when' : 'WHEN',
'create' :'CREATE',
'function' : 'FUNCTION',
'procedure' : 'PROCEDURE',
'call' : 'CALL',
'returns' : 'RETURNS',
'as' : 'AS',
'declare' : 'DECLARE',
'begin' : 'BEGIN',
'language' : 'LANGUAGE',
'plpgsql' : 'PLPGSQL',
'or' : 'OR',
'and' : 'AND',
'replace' : 'REPLACE',
'raise' : 'RAISE',
'select' : 'SELECT',
'database': 'DATABASE',
'not' : 'NOT',
'exists' : 'EXISTS',
'owner': 'OWNER',
'mode' : 'MODE',
'show': 'SHOW',
'tables':'TABLES',
'use' : 'USE',
'drop': 'DROP',
'databases': 'DATABASES',
'table':'TABLE',
'null' : 'NULL',
'constraint': 'CONSTRAINT',
'unique' : 'UNIQUE',
'inherits': 'INHERITS',
'primary' : 'PRIMARY',
'key' : 'KEY',
'check' : 'CHECK',
'foreign' : 'FOREIGN',
'insert': 'INSERT',
'update': 'UPDATE',
'delete': 'DELETE',
'count': 'COUNT',
'from': 'FROM',
'into': 'INTO',
'values': 'VALUES',
'sum' : 'SUM',
'set': 'SET',
'inner': 'INNER',
'join': 'JOIN',
'on': 'ON',
'case': 'CASE',
'when': 'WHEN',
'then': 'THEN',
'end': 'END',
'and': 'AND',
'or': 'OR',
'else': 'ELSE',
'where': 'WHERE',
'as': 'AS',
'create': 'CREATE',
'table': 'TABLE',
'inherits': 'INHERITS',
'alter': 'ALTER',
'database': 'DATABASE',
'rename': 'RENAME',
'owner': 'OWNER',
'currUser' : 'CURRENT_USER',
'sessUser' : 'SESSION_USER',
'add' : 'ADD',
'column' : 'COLUMN',
'references' : 'REFERENCES',
'type' : 'TYPE',
'not' : 'NOT',
'like' : 'LIKE',
# ---- DATA TYPES AND SPECIFICATIONS--------
'text': 'TEXT',
'float': 'FLOAT',
'integer': 'INTEGER',
'char': 'CHAR',
'varchar' : 'VARCHAR',
'smallint':'SMALLINT',
'bigint' : 'BIGINT',
'decimal' : 'DECIMAL',
'numeric' : 'NUMERIC',
'real' : 'REAL',
'double' : 'DOUBLE',
'precision' : 'PRECISION',
'character' : 'CHARACTER',
'varying' : 'VARYING',
'timestamp' : 'TIMESTAMP',
'date' : 'DATE',
'time' : 'TIME',
'interval' : 'INTERVAL',
'extract' : 'EXTRACT',
'now' : 'NOW',
'date_part' : 'DATE_PART',
'current_date': 'CURRENT_DATE',
'current_time' : 'CURRENT_TIME',
'enum' : 'ENUM',
'money' : 'MONEY',
# ---- DELETE --------
'only' : 'ONLY',
'in' : 'IN',
'returning' : 'RETURNING',
'using' : 'USING',
'exists' : 'EXISTS',
# ---- USE DATABASE --------
#----- SELECT-----------
'distinct' : 'DISTINCT',
'group' : 'GROUP',
'by' : 'BY',
'order' : 'ORDER',
'asc' : 'ASC',
'desc' : 'DESC',
'avg' : 'AVG',
'min' : 'MIN',
'max' : 'MAX',
'between' : 'BETWEEN',
'having' : 'HAVING',
#----- TRIGONOMETRIC FUNCTIONS -----------
'acos' : 'ACOS',
'acosd' : 'ACOSD',
'asin' : 'ASIN',
'asind' : 'ASIND',
'atan' : 'ATAN',
'atand' : 'ATAND',
'atan2' : 'ATAN2',
'atan2d' : 'ATAN2D',
'cos' : 'COS',
'cosd' : 'COSD',
'cot' : 'COT',
'cotd' : 'COTD',
'sin' : 'SIN',
'sind' : 'SIND',
'tan' : 'TAN',
'tand' : 'TAND',
'sinh' : 'SINH',
'cosh' : 'COSH',
'tanh' : 'TANH',
'asinh' : 'ASINH',
'acosh' : 'ACOSH',
'atanh' : 'ATANH',
#----- MATH FUNCTIONS -----------
'abs' : 'ABS',
'cbrt' : 'CBRT',
'ceil' : 'CEIL',
'ceiling' : 'CEILING',
'degrees' : 'DEGREES',
'div' : 'DIV',
'exp' : 'EXP',
'factorial' : 'FACTORIAL',
'floor' : 'FLOOR',
'gcd' : 'GCD',
'lcm' : 'LCM',
'ln' : 'LN',
'log' : 'LOG',
'log10' : 'LOG10',
'min_scale' : 'MIN_SCALE',
'mod' : 'MOD',
'pi' : 'PI',
'power' : 'POWER',
'radians' : 'RADIANS',
'round' : 'ROUND',
'scale' : 'SCALE',
'sign' : 'SIGN',
'sqrt' : 'SQRT',
'trim_scale' : 'TRIM_SCALE',
'truc' : 'TRUC',
'width_bucket' : 'WIDTH_BUCKET',
'random' : 'RANDOM',
'setseed' : 'SETSEED',
#----- BOOLEAN PREDICATES -----------
'symmetric' : 'SYMMETRIC',
'isnull' : 'ISNULL',
'true': 'TRUE',
'notnull' : 'NOTNULL',
'is' : 'IS',
'false' : 'FALSE',
'unknown' : 'UNKNOWN',
#----- BINARY STRING FUNCTIONS -----------
'length' : 'LENGTH',
'substring' : 'SUBSTRING',
'trim' : 'TRIM',
'get_byte' : 'GET_BYTE',
'md5' : 'MD5',
'set_byte' : 'SET_BYTE',
'sha256' : 'SHA256',
'substr' : 'SUBSTR',
'convert' : 'CONVERT',
'encode' : 'ENCODE',
'decode' : 'DECODE',
#----- COMBINING QUERIES -----------
'union' : 'UNION',
'intersect' : 'INTERSECT',
'except' : 'EXCEPT',
'all' : 'ALL',
#----- LIMIT AND OFFSET -----------
'limit' : 'LIMIT',
'offset' : 'OFFSET',
'some' : 'SOME',
'any' : 'ANY',
##----- COMBINING QUERIES -----------
# 'left' : 'LEFT',
# 'right' : 'RIGHT',
# 'full' : 'FULL',
# 'natural' : 'NATURAL',
# 'outer' : 'OUTER',
'bytea' : 'BYTEA',
'trunc' : 'TRUNC',
'greatest' : 'GREATEST',
'least' : 'LEAST',
# ----- INDEX ADDITIONS -----------------
'index' : 'INDEX',
'hash' : 'HASH',
'nulls' : 'NULLS',
'first' : 'FIRST',
'last' : 'LAST',
'lower' : 'LOWER',
'include' : 'INCLUDE',
'collate' : 'COLLATE',
##--------------- SECOND PHASE ADDITIONS --------
'function' : 'FUNCTION',
'returns' : 'RETURNS',
'declare' : 'DECLARE',
'begin' : 'BEGIN',
'raise' : 'RAISE',
'notice' : 'NOTICE',
'return' : 'RETURN',
'record' : 'RECORD',
'constant' : 'CONSTANT',
'alias' : 'ALIAS',
'for' : 'FOR',
'real' : 'REAL',
#------------- Added by Dulce :D ---------------
'if' : 'IF',
'prepare' : 'PREPARE',
'perform' : 'PERFORM',
# ANCHOR ----------- NEW ----------------
'exception' : 'EXCEPTION',
'next' : 'NEXT',
'query' : 'QUERY',
'execute' : 'EXECUTE',
'call' : 'CALL',
'loop' : 'LOOP',
'exit' : 'EXIT',
'text_pattern_ops' : 'TEXT_PATTERN_OPS',
'varchar_pattern_ops' : 'VARCHAR_PATTERN_OPS',
'bpchar_pattern_ops' : 'BPCHAR_PATTERN_OPS'
}
# Token declarations
tokens = [
'FLOTANTE',
'ENTERO',
'CADENA',
'ID',
'DOSPUNTOS',
'PTCOMA',
'PARA',
'PARC',
'LLAVEA',
'LLAVEC',
'CORCHETEA',
'CORCHETEC',
'COMA',
'ANDB',
'MENOS',
'MAS',
'POR',
'DIVISION',
'MODULO',
'NOTB',
'ORB',
'XORB',
'SHIFTI',
'SHIFTD',
'IGUALIGUAL',
'MAYORIGUAL',
'MENORIGUAL',
'NOTIGUAL',
'MAYOR',
'MENOR',
'IGUAL',
'DOLAR',
'D_DOSPTS',
'NOIG',
'AMPERMEN',
'AMPERMAY',
'MENMENOR',
'AMPMENOR',
'ORAMPMAY',
'ORMAYMAY',
'ARROBAMAY',
'MENARROBA',
'CEJILLAIGUAL',
'AMPERSON_D',
'MENPOT',
'MAYPOT',
'PUNTO',
'D_OR',
'HASHTAG',
'ESCAPE',
'HEX',
'BASE64',
] + list(reservadas.values())
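# Reserved words get no dedicated t_* rules; t_ID below looks each identifier
# up in `reservadas` and rewrites the token type when it matches a keyword.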
# Token regular expressions
t_NOIG = r'<>'
t_D_DOSPTS = r'::'
t_DOSPUNTOS = r':'
t_COMA = r','
t_PTCOMA = r';'
t_PARA = r'\('
t_PARC = r'\)'
t_LLAVEA = r'{'
t_LLAVEC = r'}'
t_CORCHETEA = r'\['
t_CORCHETEC = r'\]'
t_ANDB = r'&'
t_MENOS = r'-'
t_MAS = r'\+'
t_POR = r'\*'
t_DIVISION = r'/'
t_MODULO = r'%'
t_NOTB = r'~'
t_ORB = r'\|'
t_D_OR = r'\|\|'
t_XORB = r'\^'
t_SHIFTI = r'<<'
t_SHIFTD = r'>>'
t_IGUALIGUAL = r'=='
t_IGUAL = r'='
t_MAYORIGUAL = r'>='
t_MENORIGUAL = r'<='
t_NOTIGUAL = r'!='
t_MAYOR = r'>'
t_MENOR = r'<'
t_DOLAR = r'\$'
t_PUNTO = r'\.'
t_HASHTAG = r'\#'
# ANCHOR
t_AMPERMEN = r'&<'
t_AMPERMAY = r'&>'
t_MENMENOR = r'<<\|'
t_AMPMENOR = r'&<\|'
t_ORAMPMAY = r'\|&>'
t_ORMAYMAY = r'\|>>'
t_ARROBAMAY = r'@>'
t_MENARROBA = r'<@'
t_CEJILLAIGUAL = r'~='
t_AMPERSON_D = r'&&'
t_MENPOT = r'<\^'
t_MAYPOT = r'>\^'
# Ignored characters (space and tab)
t_ignore = " \t"
# String regex
def t_CADENA(t):
r'\".*?\"|\'.*?\''
t.value = t.value[1:-1] # strip the quotes
return t
# Float (decimal) regex
def t_FLOTANTE(t):
r'\d+\.\d+'
try:
t.value = float(t.value)
except ValueError:
print("Float value too large %d", t.value)
t.value = 0
return t
# Integer regex
def t_ENTERO(t):
r'\d+'
try:
t.value = int(t.value)
except ValueError:
print("Integer value too large %d", t.value)
t.value = 0
return t
# Identifier regex
def t_ID(t):
r'[a-zA-Z_][a-zA-Z_0-9]*'
t.type = reservadas.get(t.value.lower(), 'ID') # Check for reserved words
return t
# NOTE: PLY tries function rules in definition order, so these quoted-keyword
# rules only take effect if they appear before t_CADENA, which otherwise
# matches any single-quoted string first.
def t_ESCAPE(t):
r"(?i)'escape'" #ignore case
t.value = t.value[1:-1] # strip the quotes
return t
def t_BASE64(t):
r"(?i)'base64'"
t.value = t.value[1:-1] # strip the quotes
return t
def t_HEX(t):
r"(?i)'hex'"
t.value = t.value[1:-1] # strip the quotes
return t
# Multi-line comment /* .. */
def t_COMENTARIO_MULTILINEA(t):
r'/\*(.|\n)*?\*/'
t.lexer.lineno += t.value.count('\n')
# Single-line comment -- ...
def t_COMENTARIO_SIMPLE(t):
r'--.*\n'
t.lexer.lineno += 1
# Newline
def t_newline(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
# Lexical error
def t_error(t):
print("Illegal character '%s'" % t.value[0])
listaErroresLexicos.append(ErrorLexico(t.value[0], t.lexer.lineno, t.lexpos))
t.lexer.skip(1)
# Build the lexer
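# Sketch (assumption): the construction call itself is not shown in this
# excerpt; with PLY, the `tokens` list and the t_* rules defined above are
# assembled into a lexer like this.
import ply.lex as lex
lexer = lex.lex()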
# Operator associativity and precedence
precedence = (
('left', 'OR'),
('left', 'AND'),
('left', 'XORB'),
('left', 'ORB'),
('left', 'ANDB'),
('left', 'IGUALIGUAL', 'NOTIGUAL'),
('left', 'MAYOR', 'MENOR', 'MAYORIGUAL', 'MENORIGUAL'),
('left', 'SHIFTD', 'SHIFTI'),
('left', 'MAS', 'MENOS'),
('left', 'POR', 'DIVISION', 'MODULO'),
('right', 'NOT', 'NOTB', 'UMINUS'),
('left', 'PARA', 'PARC')
)
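# Note: 'UMINUS' is a fictitious precedence token; yacc only applies it to
# productions that request it with %prec, e.g. (illustrative, not part of this
# excerpt):
#   'expresion : MENOS expresion %prec UMINUS'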
# Import the classes used to build the AST
from PLSQL.expresionesPLSQL import *
from PLSQL.instruccionesPLSQL import *
# Grammar definition ---------------------------------------------------------------------------------------------------
listaGramatica = []
def p_inicio(t):
'inicio : codigo'
t[0] = t[1]
def p_lenguaje_augus(t):
'''codigo : instrucciones_globales_list'''
t[0] = t[1]
def p_instrucciones_globales_list(t):
'instrucciones_globales_list : instrucciones_globales_list instrucciones_global_sent'
t[1].append(t[2])
t[0] = t[1]
def p_instrucciones_globales_list_sent(t):
'instrucciones_globales_list : instrucciones_global_sent'
t[0] = [t[1]]
def p_instrucciones_global_sent(t):
'''instrucciones_global_sent : funcion
| llamada_funcion
| createDB_insrt
| show_databases_instr
| show_tables_instr
| use_database_instr
| drop_database_instr
| create_Table_isnrt
| drop_insrt
| alterDB_insrt
| alterTable_insrt
| insert_insrt
| createIndex'''
t[0] = t[1]
def p_instrucciones_global_sent1(t):
'''instrucciones_global_sent : select_insrt PTCOMA'''
t[0] = SelectTable(' ' + str(t[1]) + ';')
def p_instrucciones_global_sent2(t):
'''instrucciones_global_sent : select_uniones PTCOMA'''
| |
# s is row vector, q is column vector
cls.lambda0_vec=numpy.matrix(cls.Lambda_q_s(s=qs,q=0)).T
cls.W=numpy.matrix(numpy.diag(ws))
cls.I=numpy.matrix(numpy.eye(len(qs)))
#Compute all matrix elements that are different each time
ss=qs
G=numpy.matrix(cls.GetGMatrixElements(freq=freq,nz=nz,rp=rp,\
s=cls.s_grid,q=cls.q_grid,\
recompute=recompute))
cls.G=G
cls.rp=rp
cls.nz=nz
cls.freq=freq
#Invert total matrix
lambda_s_vec=numpy.array((cls.I-G*cls.W).getI()*cls.lambda0_vec).squeeze()
cls.lambda_s_vec=lambda_s_vec
pre_part=(rp*ss**(1+cls.response_power)\
*numpy.exp(-2*ss*nz)\
*cls.quasistatic_correction_factor(freq,ss))/ss**2
return numpy.sum(ws*pre_part*lambda_s_vec)
@classmethod
def get_charge_distribution(cls,zs):
ss=cls.qs
ws=cls.ws
rp=cls.rp
freq=cls.freq
a=cls.a
z=cls.z
lambda_s_vec=cls.lambda_s_vec
sum=0
for i in range(len(ss)):
s=ss[i]
w=ws[i]
charge_element=lambda_s_vec[i]
charge_strength=w*charge_element*\
s*numpy.exp(-2*s*z/float(a))*\
cls.GeometricResponse(s)
charge_contribution=-(2*numpy.exp(-2*s*zs/float(a))-numpy.exp(-s*zs/float(a)))
sum+=charge_strength*charge_contribution
return AWA(sum,axes=[zs],axis_names=['Z'])
@classmethod
def get_q_distribution(cls,qs):
ss=cls.qs
ws=cls.ws
rp=cls.rp
freq=cls.freq
a=cls.a
z=cls.z
lambda_s_vec=cls.lambda_s_vec
sum=0
q_contribution_correction=cls.quasistatic_correction_factor(freq,qs)
alpha=numpy.tan(numpy.pi*cls.taper/180.)
for i in range(len(ss)):
s=ss[i]
w=ws[i]
charge_element=lambda_s_vec[i]
charge_strength=w*charge_element*\
s*numpy.exp(-2*s*z/float(a))*\
cls.GeometricResponse(s)
q_contribution=-qs*(2/numpy.sqrt((qs+2*s)**2+(qs*alpha)**2)+\
-1/numpy.sqrt((qs+s)**2+(qs*alpha)**2))/2.
sum+=charge_strength*q_contribution
return AWA(sum*numpy.exp(-qs*z),axes=[qs],axis_names=['q-vector'])
@classmethod
def get_signal(cls,freqs,rp,zs=None,\
a=25,zmin=1,amplitude=80,Nzs=15,\
Nqs=72,qmin=None,qmax=None,\
quadrature=numrec.GL,\
demodulate=True,Nts=50,harmonics=[1,2,3],\
zscale='log',\
**rpkwargs):
"""qmin and qmax in units of 1/a"""
if verbose: Logger.write('Using SSEQ Model with parameters:\n'+\
'\tresponse magnitude: %s\n'%cls.response+\
'\tresponse exponent: %s\n'%cls.response_power)
#Make sure frequencies are iterable
if not hasattr(freqs,'__len__'): freqs=[freqs]
freqs=numpy.array(freqs)
#Get quadrature q values and heights z
rp,freqs,qs,ws,nzs=cls.condition_arguments(freqs,rp,zs,a,\
Nzs=Nzs,zmin=zmin,amplitude=amplitude,\
Nqs=Nqs,qmin=qmin,qmax=qmax,\
quadrature=quadrature,\
zscale=zscale,\
**rpkwargs)
#Compute raw signal at each frequency, z-height
raw_signals=[]
recompute=True #At first, want to recompute all charge dists etc.
for i,freq in enumerate(freqs):
raw_signal_at_freq=[]
progress=i*len(nzs)/float(len(nzs)*len(freqs))*100.
if verbose: Logger.write('\tPROGRESS: %i%% - Computing raw signal at frequency w=%scm^-1...'%\
(progress,freq))
if isinstance(rp,AWA): rp_at_freq=rp.cslice[freq]
else: rp_at_freq=rp
for j,nz in enumerate(nzs):
raw_signal_at_freq.append(cls.get_raw_signal_at_freq_and_z(freq=freq,nz=nz,rp=rp_at_freq,\
qs=qs,ws=ws,a=a,\
recompute=recompute))
recompute=True
raw_signals.append(raw_signal_at_freq)
#Flip raw signal to make z-axis first
raw_signals=AWA(numpy.array(raw_signals).transpose(),\
axes=[nzs*a,freqs], axis_names=['Z','Frequency']) #z axis in units of a
cls.raw_signals=raw_signals
if verbose: Logger.write('\tSignal shape: %s'%repr(cls.raw_signals.shape))
#Demodulate if requested#
if demodulate:
#Iterate over all desired harmonics#
signals={}
for harmonic in harmonics:
signal=cls.demodulate(raw_signals,Nts=Nts,harmonic=harmonic)
#Turn to spectrum (but don't tell to FFT along any new axes)
signal=Spectrum(signal,axes=[freqs],\
axis_names=['Frequency'],\
axis=None).squeeze()
signals[harmonic]=signal
return signals
else:
return raw_signals.squeeze()
SSEQModel=_SSEQModel_()
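# Usage sketch (arguments are illustrative; `rp` is the sample's reflection
# coefficient, either a callable rp(freq, q, **kwargs) or an AWA indexed by
# frequency):
#   signals = SSEQModel.get_signal(freqs=[900, 1000], rp=my_rp, a=25, amplitude=80)
#   s2 = signals[2] # demodulated second-harmonic near-field signal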
def get_charge_data_path(geometry,L,skin_depth,taper_angle,quadrature_type,Nzs,Nqs,freq):
if geometry=='sphere': L=2
geometry_title=geometry.capitalize()
if geometry in ('cone','hyperboloid'):
filepath=os.path.join(charge_data_dir,\
'%sCharge_L=%.2E_SkinDepth=%.2E_Taper=%i_Quad=%s_Nzs=%i_Nqs=%i_freq=%.2E.pickle'%\
(geometry_title,L,skin_depth,taper_angle,quadrature_type,Nzs,Nqs,freq))
elif geometry=='PtSi': #Taper angle is disabled
filepath=os.path.join(charge_data_dir,\
'%sCharge_L=%.2E_SkinDepth=%.2E_Quad=%s_Nzs=%i_Nqs=%i_freq=%.2E.pickle'%\
(geometry_title,L,skin_depth,quadrature_type,Nzs,Nqs,freq))
else:
filepath=os.path.join(charge_data_dir,\
'%sCharge_L=%.2E_SkinDepth=%.2E_Quad=%s_Nzs=%i_Nqs=%i_freq=%.2E.pickle'%\
(geometry_title,L,skin_depth,quadrature_type,Nzs,Nqs,freq))
return filepath
class _LightningRodModel_(TipModel):
#Hyperboloid geometry tends to predict stronger material contrast
geometric_params={'a':30,\
'L':19000/30.,\
'skin_depth':0.05,\
'taper_angle':20,\
'geometry':'hyperboloid',\
'beam_shape':'plane_wave',\
'incidence_angle':30}
load_params={'reload_model':True,\
'quadrature':quadrature,\
'Nzs':244,\
'Nqs':244,\
'freq':30e-7*1000,\
'comsol_lambda0':False,\
'comsol_filename':'Comsol_AvgCharge_60deg_WL10um.pickle'}
quadrature_params={'xWarp':True,\
'quadrature':quadrature,\
'x0':.99,\
'b':.75,\
'q_correction':False,\
'q_correction_exponent':1,\
'interpolation':'linear'} #This b-value obtains convergence for both SiC and SiO2 at Nqs>=144
resonant_sample=True #Internal self consistency with calculated charge response requires this to be True
def __call__(self,*args,**kwargs):
##Make sure all "ambient" arguments are identified
if 'ambient_rp' not in kwargs: kwargs['ambient_rp']=None
if 'normalization_ambient_rp' not in kwargs: kwargs['normalization_ambient_rp']=None
ambient_rp=kwargs['ambient_rp']
normalization_ambient_rp=kwargs.pop('normalization_ambient_rp')
##See if we'll want to normalize##
exkwargs=misc.extract_kwargs(kwargs,\
normalize_to=None,normalize_at=None,\
normalization_ambient=None)
normalize_to=exkwargs['normalize_to']
normalize_at=exkwargs['normalize_at']
if 'demodulate' in kwargs: demodulate=kwargs['demodulate']
else: demodulate=True
##Get signal using whatever model##
if verbose: Logger.write('Getting signal...')
signal=self.get_signal(*args,**kwargs)
if verbose: Logger.write('Done getting signal')
if normalize_to and demodulate:
if verbose: Logger.write('Getting normalization signal...')
original_reload_model=self.load_params['reload_model']
self.load_params['reload_model']=False
kwargs['rp']=normalize_to
##Pick local ambient for normalization if we must pick something#
if ambient_rp and not normalization_ambient_rp:
Logger.write('\tUsing same ambient rp as that for the sample spectrum (quasi-local normalization)...')
normalization_ambient_rp=normalize_to
kwargs['ambient_rp']=normalization_ambient_rp
if normalize_at is not None:
if len(args): args=list(args); args[0]=normalize_at
else: kwargs['freqs']=normalize_at
normalization=self.get_signal(*args,**kwargs)
#Take note that signal will be a dictionary of harmonics
for key in list(signal.keys()):
signal['sample_'+key]=signal[key]
signal['norm_'+key]=normalization[key]
#if signal is a harmonic, normalize
if re.compile('signal_[0-9]+').search(key):
if signal[key].ndim != normalization[key].ndim:
normalization[key].resize(normalization[key].shape+(1,))
signal[key]=signal[key]/normalization[key]
if verbose: Logger.write('Done normalizing.')
self.load_params['reload_model']=original_reload_model
return signal
def get_zs(self,Nzs=None,zmin=1,amplitude=80):
if not Nzs: Nzs=20
a=self.geometric_params['a']
zmin/=float(a)
zmax=zmin+(2*amplitude)/float(a)
log=numpy.log
zs=numpy.logspace(log(zmin)/log(10.),\
log(zmax)/log(10.),\
Nzs)
return zs
@staticmethod
def demodulate(signals,harmonics=list(range(5)),Nts=None,\
quadrature=numrec.GL):
"""Takes z-axis as first axis, frequency as final axis."""
global ts,wts,weights,signals_vs_time,zs
#max harmonic resolvable will be frequency = 1/dt = Nts
if not Nts: Nts=4*numpy.max(harmonics)
if isinstance(quadrature,str) or hasattr(quadrature,'calc_nodes'):
ts,wts=numrec.GetQuadrature(N=Nts,xmin=-.5,xmax=0,quadrature=quadrature)
else:
ts,wts=numpy.linspace(-.5,0,Nts),None
if quadrature is None: quadrature=simps
freqs=signals.axes[1]
zmin=signals.axes[0].min()
zmax=signals.axes[0].max()
amplitude=(zmax-zmin)/2.
zs=zmin+amplitude*(1+numpy.cos(2*numpy.pi*ts))
harmonics=numpy.array(harmonics).reshape((len(harmonics),1))
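# Lock-in style demodulation: the signal along the tip trajectory is weighted
# by cos(2*pi*n*t) and integrated over time to pull out the n-th harmonic; the
# overall factor of 2*2 below compensates for the Fourier coefficient and for
# integrating over only half of the oscillation period.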
weights=numpy.cos(2*numpy.pi*harmonics*ts)
if wts is not None: weights=weights*wts #quadrature weights only exist for node-based rules
weights_grid=weights.reshape(weights.shape+(1,)*(signals.ndim-1))
signals_vs_time=signals.interpolate_axis(zs,axis=0) ; signals_vs_time.set_axes([ts],axis_names=['t'])
if wts is not None:
demodulated=2*2*numpy.sum(signals_vs_time*weights_grid,axis=1) #perform quadrature
else: demodulated=2*2*quadrature(signals_vs_time*weights_grid,x=ts,axis=1) #weight by the harmonic kernel before integrating over time
demodulated=Spectrum(demodulated,axes=[harmonics,freqs],axis_names=['harmonic','Frequency'])
return signals_vs_time,demodulated
#Something is wrong with this demodulate function, not sure what, remains unresolved.
#Results disagree with the simpler `demodulate(...)`
def demodulate2(self,signals,Nts=None,harmonics=[1,2,3]):
"""Takes z-axis as first axis, frequency as final axis."""
#Assume z-axis is first axis#
zs_nm,freqs=signals.axes
zmin=zs_nm.min()
zmax=zs_nm.max()
A=(zmax-zmin)/2.
#Decide time values and new z values
if Nts is None: Nts=500
Nts=numpy.max((Nts,len(zs_nm)))
ts=numpy.arange(Nts)/float(Nts)*.5 # ~numpy.linspace(0,.5,Nts,endpoint=False); need only integrate over half period
new_zs=zmin+A*(1-numpy.cos(2*numpy.pi*ts))
ts_grid=ts.reshape((Nts,1))
self.ts=ts
#Interpolate
if verbose: Logger.write('Demodulating at desired harmonics...'); time1=time.time()
#try:
# interp1=RectBivariateSpline(x=zs_nm,y=freqs,z=signals.real,s=0)
# interp2=RectBivariateSpline(x=zs_nm,y=freqs,z=signals.imag,s=0)
# new_signals=interp1(x=new_zs,y=freqs)\
# +1j*interp2(x=new_zs,y=freqs)
# if verbose:
# Logger.write('\tInterpolated with bivariate spline, time: %1.2f'%(time.time()-time1))
#
#In case RectBivariateSpline needs more points to run
#except:
# new_signals=[]
# for i in xrange(len(freqs)):
# interp1=UnivariateSpline(x=zs_nm,y=signals[:,i].real,s=0)
# interp2=UnivariateSpline(x=zs_nm,y=signals[:,i].imag,s=0)
# new_signals.append(interp1(new_zs)+1j*interp2(new_zs))
# new_signals=numpy.array(new_signals).transpose()
# if verbose:
# Logger.write('\tInterpolated with sequence of univariate splines, time: %1.2f'%(time.time()-time1))
new_signals=signals.interpolate_axis(new_zs,axis=0)
signal_v_time=AWA(new_signals,axes=[ts,freqs],axis_names=['T','Frequency'])
#Demodulate
demodulated_signals={}
for harmonic in harmonics:
demodulated_signal=2*simps(numpy.cos(2*numpy.pi*harmonic*ts_grid)*signal_v_time,\
x=ts,axis=0)*2 #Last factor of 2 to make up for integrating only half period
demodulated_signal=Spectrum(demodulated_signal,axes=[freqs],axis_names=['Frequency'],axis=None)
demodulated_signals[harmonic]=demodulated_signal
if verbose: Logger.write('\tDone.')
return signal_v_time.squeeze(),demodulated_signals
def load_comsol_lambda0(self,filename):
qs,zs=self.charges.axes
Rs=self.charge_radii
freq=self.load_params['freq']
Logger.write('Loading electrodynamic charge data from file "%s"...'%filename)
try: file=open(os.path.join(charge_data_dir,filename))
except IOError: Logger.raiseException('No pre-computed Lambda0 data was found for parameters:\n'+\
'Nqs=%i\n'%self.load_params['Nqs'])
from common import unpickle_legacy
self.charges0=unpickle_legacy(filename)
file.close()
zs0=self.charges0.axes[0]
zs0-=zs0.min()
Rs0=AWA(Rs,axes=[zs]).interpolate_axis(zs0,axis=0,extrapolate=True,bounds_error=False)
charge_grid=self.charges0.reshape((len(zs0),1))
zs_grid=zs0.reshape((len(zs0),1))
Rs_grid=Rs0.reshape((len(Rs0),1))
ss_grid=qs.reshape((1,len(qs)))
k=2*numpy.pi*freq
#pref_grid=numpy.exp(-ss_grid*zs_grid)*j0(numpy.sqrt(ss_grid**2+k**2)*Rs_grid)
skin_depth=self.geometric_params['skin_depth']
if skin_depth:
factor=1#-1j
delta=skin_depth/factor
pref_grid=(numpy.exp(-ss_grid*zs_grid)-numpy.exp(-zs_grid/delta))\
/(1-ss_grid*delta)\
*j0(numpy.sqrt(ss_grid**2+k**2)*Rs_grid)
else:
pref_grid=numpy.exp(-ss_grid*zs_grid)\
*j0(numpy.sqrt(ss_grid**2+k**2)*Rs_grid)
integrand=pref_grid*charge_grid
self.Lambda0=AWA(simps(x=zs0,y=integrand,axis=0),\
axes=[qs],axis_names=['s'])
def load_charge_data(self):
#All the stored geometry and quadrature parameters determine
#which charge data to load
geometry=self.geometric_params['geometry']
L=self.geometric_params['L']
skin_depth=self.geometric_params['skin_depth']
taper=self.geometric_params['taper_angle']
Nzs=self.load_params['Nzs']
Nqs=self.load_params['Nqs']
freq=self.load_params['freq']
quadrature_type=self.load_params['quadrature']
filepath=get_charge_data_path(geometry,L,skin_depth,taper,quadrature_type,Nzs,Nqs,freq)
if verbose: Logger.write('Loading charge data from file "%s"...'%filepath)
try: file=open(filepath,'rb')
except IOError:
Logger.raiseException('No pre-computed charge data was found correspondent '+\
'to the desired charge profile:\n'+\
'"%s"'%filepath)
from common.misc import unpickle_legacy
charge_data=unpickle_legacy(filepath)
self.load_params['charge_data_file_path']=filepath #@ASM 2020.09.03: added for diagnostics
self.qs,self.wqs=charge_data['quadrature'] #qs, ws
self.charges=charge_data['charges'] #axes s, z x q
self.Lambda=charge_data['integral_xforms'].transpose() #axes q, s --> s, q
self.charge_data=charge_data
self.dipole_moments=charge_data['dipole_moments'] #array with axis s
self.charge_radii=charge_data['charge_radii']
if self.charge_radii.ndim == 2: self.charge_radii=self.charge_radii[0]
try: self.charge_quadrature=charge_data['charge_quadrature']
except KeyError: pass
if self.load_params['comsol_lambda0']:
filename=self.load_params['comsol_filename']
self.load_comsol_lambda0(filename)
else:
beam_shape=self.geometric_params['beam_shape']
incidence_angle=self.geometric_params['incidence_angle']
if verbose: Logger.write('\tUsing incident beam profile: "%s"'%beam_shape+\
'\n\tIncidence angle: %s degrees'%incidence_angle)
##Try to load with specified incidence angle
try:
self.Lambda0=charge_data['integral_xforms_%s%s'%(beam_shape,incidence_angle)]
self.charges0=charge_data['charges_%s%s'%(beam_shape,incidence_angle)]
self.Lambda0Refl=charge_data['integral_xforms_%s%s'%(beam_shape,180-incidence_angle)]
self.charges0Refl=charge_data['charges_%s%s'%(beam_shape,180-incidence_angle)]
except KeyError:
self.Lambda0=charge_data['integral_xforms_%s'%beam_shape]
self.charges0=charge_data['charges_%s'%beam_shape]
self.Lambda0Refl=charge_data['integral_xforms_%s'%beam_shape]
self.charges0Refl=charge_data['charges_%s'%beam_shape]
def prepare_model(self,zs=None,zmin=1e-1,Nzs=None,Nqs=122,amplitude=80,a=None,interpolation=None,**kwargs):
if verbose: Logger.write('Preparing model...')
##Get zs first##
if a: self.geometric_params['a']=a
if zs is None: zs=self.get_zs(Nzs,zmin,amplitude)
else:
if verbose: Logger.write('Using provided z-value(s).')
if not hasattr(zs,'__len__'): zs=[zs]
zs=numpy.array(zs).astype(float)
zs/=self.geometric_params['a'] #take incoming nm values and normalize to tip radius
self.zs=zs
#Load/modify all the next stuff only if reloading model
if self.load_params['reload_model']:
for key in list(kwargs.keys()):
if key.startswith('load_'):
new_key=key[len('load_'):]
self.load_params[new_key]=kwargs.pop(key)
#Store all the provided geometry and quadrature parameters
if interpolation: self.quadrature_params['interpolation']=interpolation
if 'geometry' in kwargs: self.geometric_params['geometry']=kwargs.pop('geometry')
if 'taper_angle' in kwargs: self.geometric_params['taper_angle']=kwargs.pop('taper_angle')
##Load charge data
self.load_charge_data()
xWarp=self.quadrature_params['xWarp']
if xWarp:
if verbose: Logger.write('\tComputing xWarp quadrature for q-values...')
self.get_xWarp_coords(qmin=self.qs.min(),qmax=20,Nqs=Nqs)
else:
if verbose: Logger.write('\tUsing the already loaded quadrature rather than xWarp quadrature...')
self.qxs,self.wqxs=self.qs,self.wqs
def get_xWarp_coords(self,qmin,qmax,Nqs):
quadrature=self.quadrature_params['quadrature']
if quadrature=='TS': quadrature=numrec.TS
elif quadrature=='GL': quadrature=numrec.GL
elif quadrature=='CC': quadrature=numrec.CC
xs,wxs=numrec.GetQuadrature(xmin=-1,xmax=1,N=Nqs,quadrature=quadrature)
#x0=self.quadrature_params['x0']
b=self.quadrature_params['b']
x0=xs[-2]
a=(2*b+x0)*((1+x0)/(1-x0))**-b\
/float(2*b/1e2) #focus on q0=1e2/a
q1=qmin; deltaq=qmax-q1; delta=2*(deltaq/a)**(-1/float(b))
qxs=a*((1+xs)/(1-xs+delta))**b+q1
#New weights results from change of variables: dqx = dqx/dx * dx
wqxs=a*b*((1+xs)/(2+delta))**b\
*((2+delta)/(1-xs+delta))**(1+b)\
/(1+xs)*wxs
self.qxs=qxs
self.wqxs=wqxs
return qxs,wqxs
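# The change of variables above maps the [-1, 1] quadrature nodes onto a warped
# q grid clustered around q ~ 1e2/a (see the 'focus' comment), while wqxs
# absorbs the Jacobian dq/dx so that sums over (qxs, wqxs) still approximate
# integrals over q.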
def Lambda0Vector(self,qxs):
Lambda0=self.Lambda0.interpolate_axis(qxs,axis=0,extrapolate=True,bounds_error=False)
return numpy.matrix(Lambda0).T
def Lambda0VectorRefl(self,qxs):
Lambda0Refl=self.Lambda0Refl.interpolate_axis(qxs,axis=0,extrapolate=True,bounds_error=False)
return numpy.matrix(Lambda0Refl).T
def LambdaMatrix(self,qxs):
#Should be no bounds error, we only interpolate into sampled region
Lambda=self.Lambda.interpolate_axis(qxs,axis=0,kind='linear',bounds_error=False)\
.interpolate_axis(qxs,axis=1,kind='linear',bounds_error=False)
return numpy.matrix(Lambda)
def evaluate_rp(self,freq,rp,qxs,**rpkwargs):
a=self.geometric_params['a'] #used to convert qxs from units of 1/a
k=2*numpy.pi*freq
#exclude near the light line as comparatively unimportant
if self.resonant_sample: Qs=numpy.sqrt(k**2+(qxs/(a*1e-7))**2) #This is technically the right way to evaluate either way, since qxs correspond to out-of-plane propagation
else: Qs=qxs/(a*1e-7)
#Evaluate surface response#
if hasattr(rp,'__call__'):
#evaluate with q in units of cm-1 rather than 1/a
rp=rp(freq, Qs, **rpkwargs)
elif isinstance(rp,AWA) and 'Frequency' in rp.axis_names[0]:
rp=rp.cslice[freq]
self.rp=rp
if isinstance(rp,numpy.ndarray):
Logger.raiseException('If "rp" | |
# -*- coding: utf-8 -*-
import numpy as np
import scipy
import scipy.ndimage
import skimage
from skimage import feature, measure
from skimage.filters import threshold_local
from skimage.morphology import remove_small_objects
from qtpy import QtCore, QtGui, QtWidgets
from .. import global_vars as g
from ..utils.BaseProcess import BaseProcess, SliderLabel, WindowSelector, MissingWindowError, CheckBox, ComboBox
from ..roi import makeROI, ROI_Drawing
__all__ = ['threshold','remove_small_blobs','adaptive_threshold','logically_combine','binary_dilation','binary_erosion', 'generate_rois', 'canny_edge_detector']
def convert2uint8(tif):
oldmin = np.min(tif)
oldmax = np.max(tif)
newmax = 2**8-1
tif = ((tif-oldmin)*newmax)/(oldmax-oldmin)
tif = tif.astype(np.uint8)
return tif
class Threshold(BaseProcess):
"""threshold(value, darkBackground=False, keepSourceWindow=False)
Creates a boolean matrix by applying a threshold
Parameters:
value (float): The threshold to be applied
darkBackground (bool): If this is True, pixels below the threshold will be True
Returns:
newWindow
"""
def __init__(self):
super().__init__()
def gui(self):
self.gui_reset()
valueSlider = SliderLabel(2)
if g.win is not None:
image=g.win.image
valueSlider.setRange(np.min(image),np.max(image))
valueSlider.setValue(np.mean(image))
preview=CheckBox()
preview.setChecked(True)
self.items.append({'name': 'value','string': 'Value','object': valueSlider})
self.items.append({'name': 'darkBackground', 'string':'Dark Background','object': CheckBox()})
self.items.append({'name': 'preview','string': 'Preview','object': preview})
super().gui()
def __call__(self, value, darkBackground=False, keepSourceWindow=False):
self.start(keepSourceWindow)
if self.oldwindow.nDims > 3:
g.alert("You cannot run this function on an image of dimension greater than 3. If your window has color, convert to a grayscale image before running this function")
return None
if darkBackground:
newtif = self.tif < value
else:
newtif = self.tif > value
self.newtif = newtif.astype(np.uint8)
self.newname = self.oldname+' - Thresholded '+str(value)
return self.end()
def preview(self):
if g.win is None or g.win.closed:
return
win = g.win
value = self.getValue('value')
preview = self.getValue('preview')
darkBackground = self.getValue('darkBackground')
if win.nDims > 3:
g.alert("You cannot run this function on an image of dimension greater than 3. If your window has color, convert to a grayscale image before running this function")
return None
if preview:
if win.nDims == 3: # if the image is 3d
testimage = np.copy(win.image[win.currentIndex])
elif win.nDims == 2:
testimage = np.copy(win.image)
if darkBackground:
testimage = testimage<value
else:
testimage = testimage>value
win.imageview.setImage(testimage, autoLevels=False)
win.imageview.setLevels(-.1,1.1)
else:
win.reset()
if win.nDims == 3:
image = win.image[win.currentIndex]
else:
image = win.image
win.imageview.setLevels(np.min(image), np.max(image))
threshold = Threshold()
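# Usage sketch (the value is illustrative; operates on the current flika window):
#   threshold(50) # pixels above 50 become True
#   threshold(50, darkBackground=True) # pixels below 50 become True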
class BlocksizeSlider(SliderLabel):
def __init__(self,decimals=0):
SliderLabel.__init__(self,decimals)
def updateSlider(self,value):
if value%2==0:
if value<self.slider.value():
value-=1
else:
value+=1
self.label.setValue(value)
self.slider.setValue(int(value*10**self.decimals))
def updateLabel(self,value):
if value%2==0:
value-=1
self.label.setValue(value)
class Adaptive_threshold(BaseProcess):
"""adaptive_threshold(value, block_size, darkBackground=False, keepSourceWindow=False)
Creates a boolean matrix by applying an adaptive threshold using the scikit-image threshold_local function
Parameters:
value (int): The threshold to be applied
block_size (int): size of a pixel neighborhood that is used to calculate a threshold value for the pixel. Must be an odd number greater than 3.
darkBackground (bool): If this is True, pixels below the threshold will be True
Returns:
newWindow
"""
def __init__(self):
super().__init__()
def gui(self):
self.gui_reset()
valueSlider=SliderLabel(2)
valueSlider.setRange(-20,20)
valueSlider.setValue(0)
block_size=BlocksizeSlider(0)
if g.win is not None:
max_block = int(max([g.win.image.shape[-1],g.win.image.shape[-2]])/2)
else:
max_block = 100
block_size.setRange(3, max_block)
preview = CheckBox(); preview.setChecked(True)
self.items.append({'name': 'value', 'string': 'Value', 'object': valueSlider})
self.items.append({'name': 'block_size', 'string':'Block Size', 'object':block_size})
self.items.append({'name': 'darkBackground', 'string': 'Dark Background', 'object': CheckBox()})
self.items.append({'name': 'preview', 'string': 'Preview', 'object': preview})
super().gui()
self.preview()
def __call__(self, value, block_size, darkBackground=False, keepSourceWindow=False):
self.start(keepSourceWindow)
if self.tif.dtype == np.float16:
g.alert("Local Threshold does not support float16 type arrays")
return
newtif = np.copy(self.tif)
if self.oldwindow.nDims == 2:
newtif = newtif > threshold_local(newtif, block_size, offset=value) # threshold_local returns per-pixel thresholds; compare to get a boolean mask
elif self.oldwindow.nDims == 3:
for i in np.arange(len(newtif)):
newtif[i] = self.tif[i] > threshold_local(self.tif[i], block_size, offset=value)
else:
g.alert("You cannot run this function on an image of dimension greater than 3. If your window has color, convert to a grayscale image before running this function")
return None
if darkBackground:
newtif = np.logical_not(newtif)
self.newtif = newtif.astype(np.uint8)
self.newname = self.oldname + ' - Thresholded ' + str(value)
return self.end()
def preview(self):
if g.win is None or g.win.closed:
return
win = g.win
value = self.getValue('value')
block_size = self.getValue('block_size')
preview = self.getValue('preview')
darkBackground = self.getValue('darkBackground')
nDim = len(win.image.shape)
if nDim > 3:
g.alert("You cannot run this function on an image of dimension greater than 3. If your window has color, convert to a grayscale image before running this function")
return None
if preview:
if nDim == 3: # if the image is 3d
testimage=np.copy(win.image[win.currentIndex])
elif nDim == 2:
testimage=np.copy(win.image)
testimage = testimage > threshold_local(testimage, block_size, offset=value)
if darkBackground:
testimage = np.logical_not(testimage)
testimage = testimage.astype(np.uint8)
win.imageview.setImage(testimage, autoLevels=False)
win.imageview.setLevels(-.1, 1.1)
else:
win.reset()
if nDim == 3:
image = win.image[win.currentIndex]
else:
image = win.image
win.imageview.setLevels(np.min(image), np.max(image))
adaptive_threshold=Adaptive_threshold()
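# Usage sketch (values are illustrative; block_size must be an odd number):
#   adaptive_threshold(0, 11) # local threshold over an 11-pixel neighbourhood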
class Canny_edge_detector(BaseProcess):
"""canny_edge_detector(sigma, keepSourceWindow=False)
Parameters:
sigma (float): Standard deviation of the Gaussian filter used by the Canny edge detector
Returns:
newWindow
"""
def __init__(self):
super().__init__()
def gui(self):
self.gui_reset()
sigma=SliderLabel(2)
if g.win is not None:
sigma.setRange(0,1000)
sigma.setValue(1)
preview=CheckBox(); preview.setChecked(True)
self.items.append({'name':'sigma','string':'Sigma','object':sigma})
self.items.append({'name':'preview','string':'Preview','object':preview})
super().gui()
self.preview()
def __call__(self,sigma, keepSourceWindow=False):
self.start(keepSourceWindow)
nDim=len(self.tif.shape)
newtif=np.copy(self.tif)
if self.tif.dtype == np.float16:
g.alert("Canny Edge Detection does not work on float32 images. Change the data type to use this function.")
return None
if nDim==2:
newtif=feature.canny(self.tif,sigma)
else:
for i in np.arange(len(newtif)):
newtif[i] = feature.canny(self.tif[i],sigma)
self.newtif=newtif.astype(np.uint8)
self.newname=self.oldname+' - Canny '
return self.end()
def preview(self):
if g.win is None or g.win.closed:
return
win = g.win
sigma = self.getValue('sigma')
preview = self.getValue('preview')
nDim = len(win.image.shape)
if preview:
if nDim==3: # if the image is 3d
testimage=np.copy(win.image[win.currentIndex])
elif nDim==2:
testimage=np.copy(win.image)
testimage=feature.canny(testimage,sigma)
win.imageview.setImage(testimage,autoLevels=False)
win.imageview.setLevels(-.1,1.1)
else:
win.reset()
if nDim==3:
image=win.image[win.currentIndex]
else:
image=win.image
win.imageview.setLevels(np.min(image),np.max(image))
canny_edge_detector=Canny_edge_detector()
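# Usage sketch (the sigma value is illustrative):
#   canny_edge_detector(1.5) # edge map of the current window via skimage.feature.canny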
class Logically_combine(BaseProcess):
"""logically_combine(window1, window2,operator, keepSourceWindow=False)
Combines two windows according to the operator
Parameters:
window1 (Window)
window2 (Window)
operator (str): One of 'AND', 'OR' or 'XOR'
Returns:
newWindow
"""
def __init__(self):
super().__init__()
def gui(self):
self.gui_reset()
window1=WindowSelector()
window2=WindowSelector()
operator=ComboBox()
operator.addItem('AND')
operator.addItem('OR')
operator.addItem('XOR')
self.items.append({'name':'window1','string':'Window 1','object':window1})
self.items.append({'name':'window2','string':'Window 2','object':window2})
self.items.append({'name':'operator','string':'Operator','object':operator})
super().gui()
def __call__(self,window1, window2,operator,keepSourceWindow=False):
self.keepSourceWindow=keepSourceWindow
g.m.statusBar().showMessage('Performing {}...'.format(self.__name__))
if window1 is None or window2 is None:
raise(MissingWindowError("You cannot execute '{}' without selecting a window first.".format(self.__name__)))
if window1.image.shape!=window2.image.shape:
g.m.statusBar().showMessage('The two windows have images of different shapes. They could not be combined')
return None
if operator=='AND':
self.newtif=np.logical_and(window1.image,window2.image)
elif operator=='OR':
self.newtif=np.logical_or(window1.image,window2.image)
elif operator=='XOR':
self.newtif=np.logical_xor(window1.image,window2.image)
self.oldwindow=window1
self.oldname=window1.name
self.newname=self.oldname+' - Logical {}'.format(operator)
if keepSourceWindow is False:
window2.close()
g.m.statusBar().showMessage('Finished with {}.'.format(self.__name__))
return self.end()
logically_combine=Logically_combine()
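# Usage sketch (win1 and win2 are illustrative open Windows of equal shape):
#   logically_combine(win1, win2, 'AND')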
class Remove_small_blobs(BaseProcess):
"""remove_small_blobs(rank, value, keepSourceWindow=False)
Finds all contiguous 'True' pixels in rank dimensions. Removes regions which have fewer than the specified pixels.
Parameters:
rank (int): The number of dimensions. If rank==2, each frame is treated independently
value (int): The size (in pixels) below which each contiguous region must be in order to be discarded.
Returns:
newWindow
"""
def __init__(self):
super().__init__()
def gui(self):
self.gui_reset()
rank = QtWidgets.QSpinBox()
rank.setRange(2,3)
value = QtWidgets.QSpinBox()
value.setRange(1, 100000)
self.items.append({'name': 'rank', 'string': 'Number of Dimensions', 'object': rank})
self.items.append({'name': 'value', 'string': 'Value', 'object': value})
super().gui()
def __call__(self, rank, value, keepSourceWindow=False):
self.start(keepSourceWindow)
if self.tif.dtype == np.float16:
g.alert("remove_small_blobs() does not support float16 type arrays")
return
oldshape = self.tif.shape
newtif = np.zeros_like(self.tif, dtype='bool')
if self.oldwindow.nDims == 2:
newtif = remove_small_objects(self.tif.astype('bool'), value, connectivity=2)
elif self.oldwindow.nDims == 3:
if rank == 2:
for i in np.arange(len(self.tif)):
newtif[i] = remove_small_objects(self.tif[i].astype('bool'), value, connectivity=2)
elif rank == 3:
newtif = remove_small_objects(self.tif.astype('bool'), value, connectivity=2)
self.newtif = newtif
self.newname = self.oldname + ' - Removed Blobs ' + str(value)
return self.end()
def get_init_settings_dict(self):
s = dict()
s['rank'] = 2
s['value'] = 1
return s
remove_small_blobs = Remove_small_blobs()
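# Usage sketch: remove_small_blobs(2, 50) drops connected regions smaller than
# 50 pixels, treating each frame of a stack independently.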
class Binary_Dilation(BaseProcess):
"""binary_dilation(rank,connectivity,iterations, keepSourceWindow=False)
Performs a binary dilation on a binary image. The 'False' pixels neighboring 'True' pixels become converted to 'True' pixels.
Parameters:
rank (int): The number of dimensions to dilate. Can be either 2 or 3.
connectivity (int): `connectivity` determines the distance to dilate.
`connectivity` may range from 1 (no diagonal elements are neighbors)
to `rank` (all elements are neighbors).
iterations (int): How many times to repeat the dilation
keepSourceWindow (bool): If this is False, a new Window is created with the result. Otherwise, the currentWindow is used
Returns:
newWindow
"""
def __init__(self):
super().__init__()
def gui(self):
self.gui_reset()
rank=QtWidgets.QSpinBox()
rank.setRange(2,3)
connectivity=QtWidgets.QSpinBox()
connectivity.setRange(1,3)
iterations=QtWidgets.QSpinBox()
iterations.setRange(1,100)
self.items.append({'name':'rank','string':'Number of Dimensions','object':rank})
self.items.append({'name':'connectivity','string':'Connectivity','object':connectivity})
self.items.append({'name':'iterations','string':'Iterations','object':iterations})
super().gui()
def __call__(self,rank,connectivity,iterations, keepSourceWindow=False):
self.start(keepSourceWindow)
if self.tif.dtype == np.float16:
g.alert("binary_dilation does not support float16 type arrays")
return
if len(self.tif.shape)==3 and rank==2:
s=scipy.ndimage.generate_binary_structure(3,connectivity)
s[0]=False
s[2]=False
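# Zeroing the outer planes of the 3D structuring element keeps the dilation
# from crossing frames, so each 2D frame of the stack is dilated independently.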
else:
s=scipy.ndimage.generate_binary_structure(rank,connectivity)
self.newtif=scipy.ndimage.binary_dilation(self.tif,s,iterations)
self.newtif=self.newtif.astype(np.uint8)
self.newname=self.oldname+' - Dilated '
return self.end()
binary_dilation=Binary_Dilation()
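# Usage sketch: binary_dilation(2, 1, 3) dilates each frame three times with a
# connectivity-1 (cross-shaped) structuring element.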
class Binary_Erosion(BaseProcess):
"""binary_erosion(rank,connectivity,iterations, keepSourceWindow=False)
Performs a binary erosion on a binary image. The 'True' pixels neighboring 'False' pixels become converted to 'False' pixels.
Parameters:
rank (int): The number of dimensions to erode. Can be either 2 or 3.
connectivity (int): `connectivity` determines the distance to erode. `connectivity` may range from 1 (no diagonal elements are neighbors) to `rank` (all elements are neighbors).
iterations (int): How many times to repeat the | |
<= 1)
m.c6044 = Constraint(expr= m.b533 - m.b535 + m.b544 <= 1)
m.c6045 = Constraint(expr= m.b533 - m.b536 + m.b545 <= 1)
m.c6046 = Constraint(expr= m.b533 - m.b537 + m.b546 <= 1)
m.c6047 = Constraint(expr= m.b534 - m.b535 + m.b547 <= 1)
m.c6048 = Constraint(expr= m.b534 - m.b536 + m.b548 <= 1)
m.c6049 = Constraint(expr= m.b534 - m.b537 + m.b549 <= 1)
m.c6050 = Constraint(expr= m.b535 - m.b536 + m.b550 <= 1)
m.c6051 = Constraint(expr= m.b535 - m.b537 + m.b551 <= 1)
m.c6052 = Constraint(expr= m.b536 - m.b537 + m.b552 <= 1)
m.c6053 = Constraint(expr= m.b538 - m.b539 + m.b543 <= 1)
m.c6054 = Constraint(expr= m.b538 - m.b540 + m.b544 <= 1)
m.c6055 = Constraint(expr= m.b538 - m.b541 + m.b545 <= 1)
m.c6056 = Constraint(expr= m.b538 - m.b542 + m.b546 <= 1)
m.c6057 = Constraint(expr= m.b539 - m.b540 + m.b547 <= 1)
m.c6058 = Constraint(expr= m.b539 - m.b541 + m.b548 <= 1)
m.c6059 = Constraint(expr= m.b539 - m.b542 + m.b549 <= 1)
m.c6060 = Constraint(expr= m.b540 - m.b541 + m.b550 <= 1)
m.c6061 = Constraint(expr= m.b540 - m.b542 + m.b551 <= 1)
m.c6062 = Constraint(expr= m.b541 - m.b542 + m.b552 <= 1)
m.c6063 = Constraint(expr= m.b543 - m.b544 + m.b547 <= 1)
m.c6064 = Constraint(expr= m.b543 - m.b545 + m.b548 <= 1)
m.c6065 = Constraint(expr= m.b543 - m.b546 + m.b549 <= 1)
m.c6066 = Constraint(expr= m.b544 - m.b545 + m.b550 <= 1)
m.c6067 = Constraint(expr= m.b544 - m.b546 + m.b551 <= 1)
m.c6068 = Constraint(expr= m.b545 - m.b546 + m.b552 <= 1)
m.c6069 = Constraint(expr= m.b547 - m.b548 + m.b550 <= 1)
m.c6070 = Constraint(expr= m.b547 - m.b549 + m.b551 <= 1)
m.c6071 = Constraint(expr= m.b548 - m.b549 + m.b552 <= 1)
m.c6072 = Constraint(expr= m.b550 - m.b551 + m.b552 <= 1)
m.c6073 = Constraint(expr= m.b277 - m.b278 - m.b279 <= 0)
m.c6074 = Constraint(expr= - m.b279 + m.b280 - m.b281 <= 0)
m.c6075 = Constraint(expr= - m.b279 + m.b282 - m.b283 <= 0)
m.c6076 = Constraint(expr= - m.b279 + m.b284 - m.b285 <= 0)
m.c6077 = Constraint(expr= - m.b279 + m.b286 - m.b287 <= 0)
m.c6078 = Constraint(expr= - m.b279 + m.b288 - m.b289 <= 0)
m.c6079 = Constraint(expr= - m.b279 + m.b290 - m.b291 <= 0)
m.c6080 = Constraint(expr= - m.b279 + m.b292 - m.b293 <= 0)
m.c6081 = Constraint(expr= - m.b279 + m.b294 - m.b295 <= 0)
m.c6082 = Constraint(expr= - m.b279 + m.b296 - m.b297 <= 0)
m.c6083 = Constraint(expr= - m.b279 + m.b298 - m.b299 <= 0)
m.c6084 = Constraint(expr= - m.b279 + m.b300 - m.b301 <= 0)
m.c6085 = Constraint(expr= - m.b279 + m.b302 - m.b303 <= 0)
m.c6086 = Constraint(expr= - m.b279 + m.b304 - m.b305 <= 0)
m.c6087 = Constraint(expr= - m.b279 + m.b306 - m.b307 <= 0)
m.c6088 = Constraint(expr= - m.b279 + m.b308 - m.b309 <= 0)
m.c6089 = Constraint(expr= - m.b279 + m.b310 - m.b311 <= 0)
m.c6090 = Constraint(expr= - m.b279 + m.b312 - m.b313 <= 0)
m.c6091 = Constraint(expr= - m.b279 + m.b314 - m.b315 <= 0)
m.c6092 = Constraint(expr= - m.b279 + m.b316 - m.b317 <= 0)
m.c6093 = Constraint(expr= - m.b279 + m.b318 - m.b319 <= 0)
m.c6094 = Constraint(expr= - m.b279 + m.b320 - m.b321 <= 0)
m.c6095 = Constraint(expr= - m.b277 + m.b280 - m.b322 <= 0)
m.c6096 = Constraint(expr= - m.b277 + m.b282 - m.b323 <= 0)
m.c6097 = Constraint(expr= - m.b277 + m.b284 - m.b324 <= 0)
m.c6098 = Constraint(expr= - m.b277 + m.b286 - m.b325 <= 0)
m.c6099 = Constraint(expr= - m.b277 + m.b288 - m.b326 <= 0)
m.c6100 = Constraint(expr= - m.b277 + m.b290 - m.b327 <= 0)
m.c6101 = Constraint(expr= - m.b277 + m.b292 - m.b328 <= 0)
m.c6102 = Constraint(expr= - m.b277 + m.b294 - m.b329 <= 0)
m.c6103 = Constraint(expr= - m.b277 + m.b296 - m.b330 <= 0)
m.c6104 = Constraint(expr= - m.b277 + m.b298 - m.b331 <= 0)
m.c6105 = Constraint(expr= - m.b277 + m.b300 - m.b332 <= 0)
m.c6106 = Constraint(expr= - m.b277 + m.b302 - m.b333 <= 0)
m.c6107 = Constraint(expr= - m.b277 + m.b304 - m.b334 <= 0)
m.c6108 = Constraint(expr= - m.b277 + m.b306 - m.b335 <= 0)
m.c6109 = Constraint(expr= - m.b277 + m.b308 - m.b336 <= 0)
m.c6110 = Constraint(expr= - m.b277 + m.b310 - m.b337 <= 0)
m.c6111 = Constraint(expr= - m.b277 + m.b312 - m.b338 <= 0)
m.c6112 = Constraint(expr= - m.b277 + m.b314 - m.b339 <= 0)
m.c6113 = Constraint(expr= - m.b277 + m.b316 - m.b340 <= 0)
m.c6114 = Constraint(expr= - m.b277 + m.b318 - m.b341 <= 0)
m.c6115 = Constraint(expr= - m.b277 + m.b320 - m.b342 <= 0)
m.c6116 = Constraint(expr= - m.b280 + m.b282 - m.b343 <= 0)
m.c6117 = Constraint(expr= - m.b280 + m.b284 - m.b344 <= 0)
m.c6118 = Constraint(expr= - m.b280 + m.b286 - m.b345 <= 0)
m.c6119 = Constraint(expr= - m.b280 + m.b288 - m.b346 <= 0)
m.c6120 = Constraint(expr= - m.b280 + m.b290 - m.b347 <= 0)
m.c6121 = Constraint(expr= - m.b280 + m.b292 - m.b348 <= 0)
m.c6122 = Constraint(expr= - m.b280 + m.b294 - m.b349 <= 0)
m.c6123 = Constraint(expr= - m.b280 + m.b296 - m.b350 <= 0)
m.c6124 = Constraint(expr= - m.b280 + m.b298 - m.b351 <= 0)
m.c6125 = Constraint(expr= - m.b280 + m.b300 - m.b352 <= 0)
m.c6126 = Constraint(expr= - m.b280 + m.b302 - m.b353 <= 0)
m.c6127 = Constraint(expr= - m.b280 + m.b304 - m.b354 <= 0)
m.c6128 = Constraint(expr= - m.b280 + m.b306 - m.b355 <= 0)
m.c6129 = Constraint(expr= - m.b280 + m.b308 - m.b356 <= 0)
m.c6130 = Constraint(expr= - m.b280 + m.b310 - m.b357 <= 0)
m.c6131 = Constraint(expr= - m.b280 + m.b312 - m.b358 <= 0)
m.c6132 = Constraint(expr= - m.b280 + m.b314 - m.b359 <= 0)
m.c6133 = Constraint(expr= - m.b280 + m.b316 - m.b360 <= 0)
m.c6134 = Constraint(expr= - m.b280 + m.b318 - m.b361 <= 0)
m.c6135 = Constraint(expr= - m.b280 + m.b320 - m.b362 <= 0)
m.c6136 = Constraint(expr= - m.b282 + m.b284 - m.b363 <= 0)
m.c6137 = Constraint(expr= - m.b282 + m.b286 - m.b364 <= 0)
m.c6138 = Constraint(expr= - m.b282 + m.b288 - m.b365 <= 0)
m.c6139 = Constraint(expr= - m.b282 + m.b290 - m.b366 <= 0)
m.c6140 = Constraint(expr= - m.b282 + m.b292 - m.b367 <= 0)
m.c6141 = Constraint(expr= - m.b282 + m.b294 - m.b368 <= 0)
m.c6142 = Constraint(expr= - m.b282 + m.b296 - m.b369 <= 0)
m.c6143 = Constraint(expr= - m.b282 + m.b298 - m.b370 <= 0)
m.c6144 = Constraint(expr= - m.b282 + m.b300 - m.b371 <= 0)
m.c6145 = Constraint(expr= - m.b282 + m.b302 - m.b372 <= 0)
m.c6146 = Constraint(expr= - m.b282 + m.b304 - m.b373 <= 0)
m.c6147 = Constraint(expr= - m.b282 + m.b306 - m.b374 <= 0)
m.c6148 = Constraint(expr= - m.b282 + m.b308 - m.b375 <= 0)
m.c6149 = Constraint(expr= - m.b282 + m.b310 - m.b376 <= 0)
m.c6150 = Constraint(expr= - m.b282 + m.b312 - m.b377 <= 0)
m.c6151 = Constraint(expr= - m.b282 + m.b314 - m.b378 <= 0)
m.c6152 = Constraint(expr= - m.b282 + m.b316 - m.b379 <= 0)
m.c6153 = Constraint(expr= - m.b282 + m.b318 - m.b380 <= 0)
m.c6154 = Constraint(expr= - m.b282 + m.b320 - m.b381 <= 0)
m.c6155 = Constraint(expr= - m.b284 + m.b286 - m.b382 <= 0)
m.c6156 = Constraint(expr= - m.b284 + m.b288 - m.b383 <= 0)
m.c6157 = Constraint(expr= - m.b284 + m.b290 - m.b384 <= 0)
m.c6158 = Constraint(expr= - m.b284 + m.b292 - m.b385 <= 0)
m.c6159 = Constraint(expr= - m.b284 + m.b294 - m.b386 <= 0)
m.c6160 = Constraint(expr= - m.b284 + m.b296 - m.b387 <= 0)
m.c6161 = Constraint(expr= - m.b284 + m.b298 - m.b388 <= 0)
m.c6162 = Constraint(expr= - m.b284 + m.b300 - m.b389 <= 0)
m.c6163 = Constraint(expr= - m.b284 + m.b302 - m.b390 <= 0)
m.c6164 = Constraint(expr= - m.b284 + m.b304 - m.b391 <= 0)
m.c6165 = Constraint(expr= - m.b284 + m.b306 - m.b392 <= 0)
m.c6166 = Constraint(expr= - m.b284 + m.b308 - m.b393 <= 0)
m.c6167 = Constraint(expr= - m.b284 + m.b310 - m.b394 <= 0)
m.c6168 = Constraint(expr= - m.b284 + m.b312 - m.b395 <= 0)
m.c6169 = Constraint(expr= - m.b284 + m.b314 - m.b396 <= 0)
m.c6170 = Constraint(expr= - m.b284 + m.b316 - m.b397 <= 0)
m.c6171 = Constraint(expr= - m.b284 + m.b318 - m.b398 <= 0)
m.c6172 = Constraint(expr= - m.b284 + m.b320 - m.b399 <= 0)
m.c6173 = Constraint(expr= - m.b286 + m.b288 - m.b400 <= 0)
m.c6174 = Constraint(expr= - m.b286 + m.b290 - m.b401 | |
import builtins
import contextlib
import inspect
import re
import subprocess
import sys
import types
import typing
import unittest
from contextlib import ExitStack
from enum import Enum
from io import StringIO
from pathlib import Path
from tempfile import TemporaryDirectory
import defopt
from defopt import __version__
from examples import (
annotations, booleans, choices, exceptions, lists, parsers, short,
starargs, styles)
Choice = Enum('Choice', [('one', 1), ('two', 2), ('%', 0.01)])
Pair = typing.NamedTuple('Pair', [('first', int), ('second', str)])
# Also check that the Attributes section doesn't trip docutils.
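# These two helpers exercise defopt's implicit parsers below: a class whose
# __init__ takes a single documented str argument can be constructed directly
# from a command-line string, while one without type information cannot.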
class ConstructibleFromStr:
"""
Attributes
----------
s : str
The s.
"""
def __init__(self, s):
""":type s: str"""
self.s = s
class NotConstructibleFromStr:
def __init__(self, s):
pass
class TestDefopt(unittest.TestCase):
def test_main(self):
def main(foo):
""":type foo: str"""
return foo
self.assertEqual(defopt.run(main, argv=['foo']), 'foo')
def test_subcommands(self):
def sub(*bar):
""":type bar: float"""
return bar
def sub_with_dash(*, baz=None):
""":type baz: int"""
return baz
self.assertEqual(
defopt.run([sub, sub_with_dash], argv=['sub', '1.1']), (1.1,))
self.assertEqual(
defopt.run([sub, sub_with_dash],
argv=['sub-with-dash', '--baz', '1']), 1)
self.assertEqual(
defopt.run({"sub1": sub, "sub_2": sub_with_dash},
argv=['sub1', '1.2']), (1.2,))
self.assertEqual(
defopt.run({"sub1": sub, "sub_2": sub_with_dash},
argv=['sub_2', '--baz', '1']), 1)
def test_nested_lists_invalid(self):
def sub1(*bar):
""":type bar: float"""
return bar
def subsub1(*, baz=None):
""":type baz: int"""
return baz
def subsub2(*foo):
""":type foo: float"""
return foo
with self.assertRaises(ValueError):
defopt.run([sub1, [subsub1, subsub2]], argv=['sub1', '1.2'])
def test_nested_subcommands1(self):
def sub1(*bar):
""":type bar: float"""
return bar
def subsub1(*, baz=None):
""":type baz: int"""
return baz
def subsub2(*foo):
""":type foo: float"""
return foo
self.assertEqual(
defopt.run({"sub-1": [sub1], "sub-2": [subsub1, subsub2]},
argv=['sub-1', 'sub1', '1.2']), (1.2,))
self.assertEqual(
defopt.run({"sub-1": [sub1], "sub-2": [subsub1, subsub2]},
argv=['sub-2', 'subsub1', '--baz', '1']), 1)
self.assertEqual(
defopt.run({"sub-1": [sub1], "sub-2": [subsub1, subsub2]},
argv=['sub-2', 'subsub2', '1.5']), (1.5,))
def test_nested_subcommands2(self):
def sub1(*bar):
""":type bar: float"""
return bar
def subsub1(*, baz=None):
""":type baz: int"""
return baz
def subsub2(*foo):
""":type foo: float"""
return foo
self.assertEqual(
defopt.run({"sub-1": sub1, "sub-2": [subsub1, subsub2]},
argv=['sub-1', '1.2']), (1.2,))
self.assertEqual(
defopt.run({"sub1": sub1, "sub-2": [subsub1, subsub2]},
argv=['sub-2', 'subsub1', '--baz', '1']), 1)
self.assertEqual(
defopt.run({"sub1": sub1, "sub-2": [subsub1, subsub2]},
argv=['sub-2', 'subsub2', '1.5']), (1.5,))
def test_nested_subcommands3(self):
def sub1(*bar):
""":type bar: float"""
return bar
def subsub1(*, baz=None):
""":type baz: int"""
return baz
def subsub2(*foo):
""":type foo: float"""
return foo
self.assertEqual(
defopt.run({"sub-1": sub1,
"sub-2": {'subsub1': subsub1, 'subsub2': subsub2}},
argv=['sub-1', '1.2']), (1.2,))
self.assertEqual(
defopt.run({"sub-1": sub1,
"sub-2": {'subsub1': subsub1, 'subsub2': subsub2}},
argv=['sub-2', 'subsub1', '--baz', '1']), 1)
self.assertEqual(
defopt.run({"sub-1": sub1,
"sub-2": {'subsub1': subsub1, 'subsub2': subsub2}},
argv=['sub-2', 'subsub2', '1.5']), (1.5,))
def test_nested_subcommands_deep(self):
def sub(*bar):
""":type bar: float"""
return bar
self.assertEqual(
defopt.run({'a': {'b': {'c': {'d': {'e': sub}}}}},
argv=['a', 'b', 'c', 'd', 'e', '1.2']), (1.2,))
self.assertEqual(
defopt.run({'a': {'b': {'c': {'d': {'e': [sub]}}}}},
argv=['a', 'b', 'c', 'd', 'e', 'sub', '1.2']), (1.2,))
def test_nested_subcommands_mixed_invalid1(self):
def sub1(*bar):
""":type bar: float"""
return bar
def subsub1(*, baz=None):
""":type baz: int"""
return baz
def subsub2(*foo):
""":type foo: float"""
return foo
with self.assertRaises(ValueError):
defopt.run([sub1, {'sub2': [subsub1, subsub2]}],
argv=['sub1', '1.2'])
with self.assertRaises(ValueError):
defopt.run([sub1, {'sub2': [subsub1, subsub2]}],
argv=['sub2', 'subsub1', '--baz', '1'])
with self.assertRaises(ValueError):
defopt.run([sub1, {'sub2': [subsub1, subsub2]}],
argv=['sub2', 'subsub2', '1.1'])
def test_nested_subcommands_mixed_invalid2(self):
def sub(*bar):
""":type bar: float"""
return bar
def subsub_with_dash(*, baz=None):
""":type baz: int"""
return baz
def subsub(*foo):
""":type foo: float"""
return foo
with self.assertRaises(ValueError):
defopt.run([sub, {'subsub1': subsub_with_dash, 'subsub2': subsub}],
argv=['sub', '1.2'])
with self.assertRaises(ValueError):
defopt.run([sub, {'subsub1': subsub_with_dash, 'subsub2': subsub}],
argv=['subsub1', '--baz', '1'])
with self.assertRaises(ValueError):
defopt.run([sub, {'subsub1': subsub_with_dash, 'subsub2': subsub}],
argv=['subsub2', '1.5'])
def test_var_positional(self):
for doc in [
":type foo: int", r":type \*foo: int", ":param int foo: doc"]:
def main(*foo): return foo
main.__doc__ = doc
self.assertEqual(defopt.run(main, argv=['1', '2']), (1, 2))
self.assertEqual(defopt.run(main, argv=[]), ())
def test_no_default(self):
def main(a):
""":type a: str"""
with self.assertRaises(SystemExit):
defopt.run(main, argv=[])
def test_keyword_only(self):
def main(foo='bar', *, baz='quux'):
"""
:type foo: str
:type baz: str
"""
return foo, baz
self.assertEqual(defopt.run(main, argv=['FOO', '--baz', 'BAZ']),
('FOO', 'BAZ'))
self.assertEqual(defopt.run(main, argv=[]), ('bar', 'quux'))
def test_keyword_only_no_default(self):
def main(*, foo):
""":type foo: str"""
return foo
self.assertEqual(defopt.run(main, argv=['--foo', 'baz']), 'baz')
with self.assertRaises(SystemExit):
defopt.run(main, argv=[])
def test_var_keywords(self):
def bad(**kwargs):
""":type kwargs: str"""
with self.assertRaises(ValueError):
defopt.run(bad)
def test_bad_arg(self):
with self.assertRaises(TypeError):
defopt.run(foo=None)
def test_no_subparser_specified(self):
def sub1(): assert False
def sub2(): assert False
with self.assertRaises(SystemExit):
defopt.run([sub1, sub2], argv=[])
def test_no_param_doc(self):
def bad(foo):
"""Test function"""
with self.assertRaisesRegex(ValueError, 'type.*foo'):
defopt.run(bad, argv=['foo'])
def test_no_type_doc(self):
def bad(foo):
""":param foo: no type info"""
with self.assertRaisesRegex(ValueError, 'type.*foo'):
defopt.run(bad, argv=['foo'])
def test_return(self):
def one(): return 1
def none(): pass
self.assertEqual(defopt.run([one, none], argv=['one']), 1)
self.assertEqual(defopt.run([one, none], argv=['none']), None)
def test_underscores(self):
def main(a_b_c, *, d_e_f=None):
"""Test function
:type a_b_c: int
:type d_e_f: int
"""
return a_b_c, d_e_f
self.assertEqual(defopt.run(main, argv=['1', '--d-e-f', '2']), (1, 2))
def test_private_with_default(self):
def main(_a=None): pass
defopt.run(main, argv=[])
def test_private_without_default(self):
def main(_a: int): assert False
with self.assertRaisesRegex(ValueError,
# Older Pythons have no space post-colon.
r'Parameter _a of main\(_a: ?int\) is '
r'private but has no default'):
defopt.run(main, argv=[])
def test_argparse_kwargs(self):
def main(*, a=None):
""":type a: str"""
return a
self.assertEqual(
defopt.run(main, argparse_kwargs={'prefix_chars': '+'},
argv=['+a', 'foo']),
'foo')
class TestParsers(unittest.TestCase):
def test_parser(self):
def main(value):
""":type value: int"""
return value
self.assertEqual(defopt.run(main, argv=['1']), 1)
def test_overridden_parser(self):
def parser(string):
return int(string) * 2
def main(value):
""":type value: int"""
return value
self.assertEqual(
defopt.run(main, parsers={int: parser}, argv=['1']), 2)
def test_overridden_none_parser(self):
def parser(string):
if string == 'nil':
return None
else:
raise ValueError("Not nil")
def main(ints, strs):
"""
:type ints: typing.List[typing.Optional[int]]
:type strs: typing.List[typing.Optional[str]]
"""
return ints, strs
self.assertEqual(
defopt.run(main, parsers={type(None): parser},
argv=['-i', 'nil', '0', '-s', 'nil', 's']),
([None, 0], [None, 's']))
def test_parse_bool(self):
parser = defopt._get_parser(bool, {})
self.assertEqual(parser('t'), True)
self.assertEqual(parser('FALSE'), False)
self.assertEqual(parser('1'), True)
with self.assertRaises(ValueError):
parser('foo')
def test_parse_path(self):
def main(value):
""":type value: Path"""
return value
self.assertEqual(defopt.run(main, argv=['foo']), Path('foo'))
def test_parse_slice(self):
parser = defopt._get_parser(slice, {})
self.assertEqual(parser(':'), slice(None))
self.assertEqual(parser(':1'), slice(None, 1))
self.assertEqual(parser('"a":"b":"c"'), slice("a", "b", "c"))
with self.assertRaises(ValueError):
parser('1')
def test_no_parser(self):
with self.assertRaisesRegex(Exception, 'no parser'):
defopt._get_parser(object, parsers={type: type})
def test_containers(self):
def main(foo, bar):
"""
:type foo: tuple[float]
:type bar: list[float]
"""
return foo, bar
self.assertEqual(defopt.run(main, argv=['1.1', '--bar', '2.2', '3.3']),
((1.1,), [2.2, 3.3]))
def test_list_kwarg(self):
def main(foo=None):
""":type foo: list[float]"""
return foo
self.assertEqual(
defopt.run(main, argv=['--foo', '1.1', '2.2']), [1.1, 2.2])
def test_list_bare(self):
with self.assertRaises(ValueError):
defopt._get_parser(list, {})
def test_list_keyword_only(self):
def main(*, foo):
""":type foo: list[int]"""
return foo
self.assertEqual(defopt.run(main, argv=['--foo', '1', '2']), [1, 2])
with self.assertRaises(SystemExit):
defopt.run(main, argv=[])
def test_list_var_positional(self):
def modern(*foo):
""":type foo: typing.Iterable[int]"""
return foo
def legacy(*foo):
""":type foo: list[int]"""
return foo
for func in modern, legacy:
out = defopt.run(func, argv=['--foo', '1', '--foo', '2', '3'])
self.assertEqual(out, ([1], [2, 3]))
self.assertEqual(defopt.run(func, argv=[]), ())
def test_bool(self):
def main(foo):
""":type foo: bool"""
return foo
self.assertIs(defopt.run(main, argv=['1']), True)
self.assertIs(defopt.run(main, argv=['0']), False)
with self.assertRaises(SystemExit):
defopt.run(main, argv=[])
def test_bool_list(self):
def main(foo):
""":type foo: list[bool]"""
return foo
self.assertEqual(
defopt.run(main, argv=['--foo', '1', '0']), [True, False])
def test_bool_var_positional(self):
def main(*foo):
""":type foo: bool"""
return foo
self.assertEqual(
defopt.run(main, argv=['1', '1', '0']), (True, True, False))
self.assertEqual(
defopt.run(main, argv=[]), ())
def test_bool_list_var_positional(self):
def main(*foo):
""":type foo: list[bool]"""
return foo
argv = ['--foo', '1', '--foo', '0', '0']
self.assertEqual(
defopt.run(main, argv=argv), ([True], [False, False]))
self.assertEqual(
defopt.run(main, argv=[]), ())
def test_bool_kwarg(self):
def main(foo='default'):
""":type foo: bool"""
return foo
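# With cli_options='has_default', a bool parameter that has a default is
# exposed as a pair of flags: --foo sets it to True and --no-foo to False;
# the default is used when neither flag is given (see assertions below).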
self.assertIs(defopt.run(main, cli_options='has_default',
argv=[]), 'default')
self.assertIs(defopt.run(main, cli_options='has_default',
argv=['--foo']), True)
self.assertIs(defopt.run(main, cli_options='has_default',
argv=['--no-foo']), False)
self.assertIs(defopt.run(main, cli_options='has_default',
argv=['--foo', '--no-foo']), False)
with self.assertWarns(DeprecationWarning):
self.assertIs(defopt.run(main, strict_kwonly=False,
argv=[]), 'default')
with self.assertWarns(DeprecationWarning):
self.assertIs(defopt.run(main, strict_kwonly=False,
argv=['--foo']), True)
with self.assertWarns(DeprecationWarning):
self.assertIs(defopt.run(main, strict_kwonly=False,
argv=['--no-foo']), False)
with self.assertWarns(DeprecationWarning):
self.assertIs(defopt.run(main, strict_kwonly=False,
argv=['--foo', '--no-foo']), False)
def test_bool_keyword_only(self):
def main(*, foo):
""":type foo: bool"""
return foo
self.assertIs(defopt.run(main, argv=['--foo']), True)
self.assertIs(defopt.run(main, argv=['--no-foo']), False)
with self.assertRaises(SystemExit):
defopt.run(main, argv=[])
def test_cli_options(self):
def main(foo):
""":type foo: bool"""
return foo
self.assertIs(
defopt.run(main, cli_options='all', argv=['--foo']), True)
self.assertIs(
defopt.run(main, cli_options='all', argv=['--no-foo']), False)
with self.assertRaises(SystemExit):
defopt.run(main, cli_options='all', argv=['1'])
with self.assertRaises(SystemExit):
defopt.run(main, argv=['--foo'])
with self.assertRaises(SystemExit):
defopt.run(main, argv=['--no-foo'])
with self.assertRaises(SystemExit):
defopt.run(main, argv=[])
def test_implicit_parser(self):
def ok(foo):
""":type foo: ConstructibleFromStr"""
return foo
self.assertEqual(defopt.run(ok, argv=["foo"]).s, "foo")
def test_implicit_noparser(self):
def notok(foo):
""":type foo: NotConstructibleFromStr"""
with self.assertRaises(Exception):
defopt.run(notok, argv=["foo"])
class TestFlags(unittest.TestCase):
def test_short_flags(self):
def func(foo=1):
""":type foo: int"""
return foo
self.assertEqual(
defopt.run(func, short={'foo': 'f'}, cli_options='has_default',
argv=['-f', '2']),
2)
def test_short_negation(self):
def func(*, foo=False):
""":type foo: bool"""
return foo
self.assertIs(
defopt.run(func, short={'foo': 'f', 'no-foo': 'F'}, argv=['-f']),
True)
self.assertIs(
defopt.run(func, short={'foo': 'f', 'no-foo': 'F'}, argv=['-F']),
False)
def test_auto_short(self):
def func(*, foo=1, bar=2, baz=3):
"""
:type foo: int
:type bar: int
:type baz: int
"""
return foo
self.assertEqual(defopt.run(func, argv=['-f', '2']), 2)
with self.assertRaises(SystemExit):
defopt.run(func, argv=['-b', '2'])
def test_auto_short_help(self):
def func(*, hello="world"):
""":type hello: str"""
defopt.run(func, argv=[])
with self.assertRaises(SystemExit):
defopt.run(func, argv=["-h", ""])
self.assertEqual(
defopt.run(
func, argparse_kwargs={"add_help": False}, argv=["-h", ""]),
None)
class TestEnums(unittest.TestCase):
def test_enum(self):
def main(foo):
""":type foo: Choice"""
return foo
self.assertEqual(defopt.run(main, argv=['one']), Choice.one)
self.assertEqual(defopt.run(main,
'@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: normal;',",
" ' font-weight: 700;',",
' " src: local(\'Roboto Bold\'), local(\'Roboto-Bold\'), url(/font-roboto/d-6IYplOFocCacKzxwXSOJBw1xU1rKptJj_0jans920.woff2) format(\'woff2\');",',
" ' unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2212, U+2215;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: normal;',",
" ' font-weight: 700;',",
' " src: local(\'Roboto Bold\'), local(\'Roboto-Bold\'), url(/font-roboto/97uahxiqZRoncBaCEI3aW4X0hVgzZQUfRDuZrPvH3D8.woff2) format(\'woff2\');",',
" ' unicode-range: U+0100-024F, U+1E00-1EFF, U+20A0-20AB, U+20AD-20CF, U+2C60-2C7F, U+A720-A7FF;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: normal;',",
" ' font-weight: 700;',",
' " src: local(\'Roboto Bold\'), local(\'Roboto-Bold\'), url(/font-roboto/PwZc-YbIL414wB9rB1IAPYX0hVgzZQUfRDuZrPvH3D8.woff2) format(\'woff2\');",',
" ' unicode-range: U+0102-0103, U+1EA0-1EF9, U+20AB;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 700;',",
' " src: local(\'Roboto Bold Italic\'), local(\'Roboto-BoldItalic\'), url(/font-roboto/t6Nd4cfPRhZP44Q5QAjcC14sYYdJg5dU2qzJEVSuta0.woff2) format(\'woff2\');",',
" ' unicode-range: U+0400-045F, U+0490-0491, U+04B0-04B1, U+2116;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 700;',",
' " src: local(\'Roboto Bold Italic\'), local(\'Roboto-BoldItalic\'), url(/font-roboto/t6Nd4cfPRhZP44Q5QAjcC_ZraR2Tg8w2lzm7kLNL0-w.woff2) format(\'woff2\');",',
" ' unicode-range: U+0460-052F, U+20B4, U+2DE0-2DFF, U+A640-A69F;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 700;',",
' " src: local(\'Roboto Bold Italic\'), local(\'Roboto-BoldItalic\'), url(/font-roboto/t6Nd4cfPRhZP44Q5QAjcCwt_Rm691LTebKfY2ZkKSmI.woff2) format(\'woff2\');",',
" ' unicode-range: U+0370-03FF;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 700;',",
' " src: local(\'Roboto Bold Italic\'), local(\'Roboto-BoldItalic\'), url(/font-roboto/t6Nd4cfPRhZP44Q5QAjcC1BW26QxpSj-_ZKm_xT4hWw.woff2) format(\'woff2\');",',
" ' unicode-range: U+1F00-1FFF;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 700;',",
' " src: local(\'Roboto Bold Italic\'), local(\'Roboto-BoldItalic\'), url(/font-roboto/t6Nd4cfPRhZP44Q5QAjcC4gp9Q8gbYrhqGlRav_IXfk.woff2) format(\'woff2\');",',
" ' unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2212, U+2215;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 700;',",
' " src: local(\'Roboto Bold Italic\'), local(\'Roboto-BoldItalic\'), url(/font-roboto/t6Nd4cfPRhZP44Q5QAjcC6E8kM4xWR1_1bYURRojRGc.woff2) format(\'woff2\');",',
" ' unicode-range: U+0100-024F, U+1E00-1EFF, U+20A0-20AB, U+20AD-20CF, U+2C60-2C7F, U+A720-A7FF;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 700;',",
' " src: local(\'Roboto Bold Italic\'), local(\'Roboto-BoldItalic\'), url(/font-roboto/t6Nd4cfPRhZP44Q5QAjcC9DiNsR5a-9Oe_Ivpu8XWlY.woff2) format(\'woff2\');",',
" ' unicode-range: U+0102-0103, U+1EA0-1EF9, U+20AB;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 400;',",
' " src: local(\'Roboto Italic\'), local(\'Roboto-Italic\'), url(/font-roboto/OpXUqTo0UgQQhGj_SFdLWBkAz4rYn47Zy2rvigWQf6w.woff2) format(\'woff2\');",',
" ' unicode-range: U+0400-045F, U+0490-0491, U+04B0-04B1, U+2116;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 400;',",
' " src: local(\'Roboto Italic\'), local(\'Roboto-Italic\'), url(/font-roboto/WxrXJa0C3KdtC7lMafG4dRkAz4rYn47Zy2rvigWQf6w.woff2) format(\'woff2\');",',
" ' unicode-range: U+0460-052F, U+20B4, U+2DE0-2DFF, U+A640-A69F;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 400;',",
' " src: local(\'Roboto Italic\'), local(\'Roboto-Italic\'), url(/font-roboto/cDKhRaXnQTOVbaoxwdOr9xkAz4rYn47Zy2rvigWQf6w.woff2) format(\'woff2\');",',
" ' unicode-range: U+0370-03FF;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 400;',",
' " src: local(\'Roboto Italic\'), local(\'Roboto-Italic\'), url(/font-roboto/1hZf02POANh32k2VkgEoUBkAz4rYn47Zy2rvigWQf6w.woff2) format(\'woff2\');",',
" ' unicode-range: U+1F00-1FFF;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 400;',",
' " src: local(\'Roboto Italic\'), local(\'Roboto-Italic\'), url(/font-roboto/vPcynSL0qHq_6dX7lKVByXYhjbSpvc47ee6xR_80Hnw.woff2) format(\'woff2\');",',
" ' unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2212, U+2215;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 400;',",
' " src: local(\'Roboto Italic\'), local(\'Roboto-Italic\'), url(/font-roboto/vSzulfKSK0LLjjfeaxcREhkAz4rYn47Zy2rvigWQf6w.woff2) format(\'woff2\');",',
" ' unicode-range: U+0100-024F, U+1E00-1EFF, U+20A0-20AB, U+20AD-20CF, U+2C60-2C7F, U+A720-A7FF;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 400;',",
' " src: local(\'Roboto Italic\'), local(\'Roboto-Italic\'), url(/font-roboto/K23cxWVTrIFD6DJsEVi07RkAz4rYn47Zy2rvigWQf6w.woff2) format(\'woff2\');",',
" ' unicode-range: U+0102-0103, U+1EA0-1EF9, U+20AB;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: normal;',",
" ' font-weight: 300;',",
' " src: local(\'Roboto Light\'), local(\'Roboto-Light\'), url(/font-roboto/Fl4y0QdOxyyTHEGMXX8kcYX0hVgzZQUfRDuZrPvH3D8.woff2) format(\'woff2\');",',
" ' unicode-range: U+0400-045F, U+0490-0491, U+04B0-04B1, U+2116;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: normal;',",
" ' font-weight: 300;',",
' " src: local(\'Roboto Light\'), local(\'Roboto-Light\'), url(/font-roboto/0eC6fl06luXEYWpBSJvXCIX0hVgzZQUfRDuZrPvH3D8.woff2) format(\'woff2\');",',
" ' unicode-range: U+0460-052F, U+20B4, U+2DE0-2DFF, U+A640-A69F;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: normal;',",
" ' font-weight: 300;',",
' " src: local(\'Roboto Light\'), local(\'Roboto-Light\'), url(/font-roboto/I3S1wsgSg9YCurV6PUkTOYX0hVgzZQUfRDuZrPvH3D8.woff2) format(\'woff2\');",',
" ' unicode-range: U+0370-03FF;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: normal;',",
" ' font-weight: 300;',",
' " src: local(\'Roboto Light\'), local(\'Roboto-Light\'), url(/font-roboto/-L14Jk06m6pUHB-5mXQQnYX0hVgzZQUfRDuZrPvH3D8.woff2) format(\'woff2\');",',
" ' unicode-range: U+1F00-1FFF;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: normal;',",
" ' font-weight: 300;',",
' " src: local(\'Roboto Light\'), local(\'Roboto-Light\'), url(/font-roboto/Hgo13k-tfSpn0qi1SFdUfZBw1xU1rKptJj_0jans920.woff2) format(\'woff2\');",',
" ' unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2212, U+2215;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: normal;',",
" ' font-weight: 300;',",
' " src: local(\'Roboto Light\'), local(\'Roboto-Light\'), url(/font-roboto/Pru33qjShpZSmG3z6VYwnYX0hVgzZQUfRDuZrPvH3D8.woff2) format(\'woff2\');",',
" ' unicode-range: U+0100-024F, U+1E00-1EFF, U+20A0-20AB, U+20AD-20CF, U+2C60-2C7F, U+A720-A7FF;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: normal;',",
" ' font-weight: 300;',",
' " src: local(\'Roboto Light\'), local(\'Roboto-Light\'), url(/font-roboto/NYDWBdD4gIq26G5XYbHsFIX0hVgzZQUfRDuZrPvH3D8.woff2) format(\'woff2\');",',
" ' unicode-range: U+0102-0103, U+1EA0-1EF9, U+20AB;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 300;',",
' " src: local(\'Roboto Light Italic\'), local(\'Roboto-LightItalic\'), url(/font-roboto/7m8l7TlFO-S3VkhHuR0at14sYYdJg5dU2qzJEVSuta0.woff2) format(\'woff2\');",',
" ' unicode-range: U+0400-045F, U+0490-0491, U+04B0-04B1, U+2116;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 300;',",
' " src: local(\'Roboto Light Italic\'), local(\'Roboto-LightItalic\'), url(/font-roboto/7m8l7TlFO-S3VkhHuR0at_ZraR2Tg8w2lzm7kLNL0-w.woff2) format(\'woff2\');",',
" ' unicode-range: U+0460-052F, U+20B4, U+2DE0-2DFF, U+A640-A69F;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 300;',",
' " src: local(\'Roboto Light Italic\'), local(\'Roboto-LightItalic\'), url(/font-roboto/7m8l7TlFO-S3VkhHuR0atwt_Rm691LTebKfY2ZkKSmI.woff2) format(\'woff2\');",',
" ' unicode-range: U+0370-03FF;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 300;',",
' " src: local(\'Roboto Light Italic\'), local(\'Roboto-LightItalic\'), url(/font-roboto/7m8l7TlFO-S3VkhHuR0at1BW26QxpSj-_ZKm_xT4hWw.woff2) format(\'woff2\');",',
" ' unicode-range: U+1F00-1FFF;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 300;',",
' " src: local(\'Roboto Light Italic\'), local(\'Roboto-LightItalic\'), url(/font-roboto/7m8l7TlFO-S3VkhHuR0at4gp9Q8gbYrhqGlRav_IXfk.woff2) format(\'woff2\');",',
" ' unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2212, U+2215;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 300;',",
' " src: local(\'Roboto Light Italic\'), local(\'Roboto-LightItalic\'), url(/font-roboto/7m8l7TlFO-S3VkhHuR0at6E8kM4xWR1_1bYURRojRGc.woff2) format(\'woff2\');",',
" ' unicode-range: U+0100-024F, U+1E00-1EFF, U+20A0-20AB, U+20AD-20CF, U+2C60-2C7F, U+A720-A7FF;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 300;',",
' " src: local(\'Roboto Light Italic\'), local(\'Roboto-LightItalic\'), url(/font-roboto/7m8l7TlFO-S3VkhHuR0at9DiNsR5a-9Oe_Ivpu8XWlY.woff2) format(\'woff2\');",',
" ' unicode-range: U+0102-0103, U+1EA0-1EF9, U+20AB;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: normal;',",
" ' font-weight: 500;',",
' " src: local(\'Roboto Medium\'), local(\'Roboto-Medium\'), url(/font-roboto/oHi30kwQWvpCWqAhzHcCSIX0hVgzZQUfRDuZrPvH3D8.woff2) format(\'woff2\');",',
" ' unicode-range: U+0400-045F, U+0490-0491, U+04B0-04B1, U+2116;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: normal;',",
" ' font-weight: 500;',",
' " src: local(\'Roboto Medium\'), local(\'Roboto-Medium\'), url(/font-roboto/ZLqKeelYbATG60EpZBSDy4X0hVgzZQUfRDuZrPvH3D8.woff2) format(\'woff2\');",',
" ' unicode-range: U+0460-052F, U+20B4, U+2DE0-2DFF, U+A640-A69F;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: normal;',",
" ' font-weight: 500;',",
' " src: local(\'Roboto Medium\'), local(\'Roboto-Medium\'), url(/font-roboto/mx9Uck6uB63VIKFYnEMXrYX0hVgzZQUfRDuZrPvH3D8.woff2) format(\'woff2\');",',
" ' unicode-range: U+0370-03FF;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: normal;',",
" ' font-weight: 500;',",
' " src: local(\'Roboto Medium\'), local(\'Roboto-Medium\'), url(/font-roboto/rGvHdJnr2l75qb0YND9NyIX0hVgzZQUfRDuZrPvH3D8.woff2) format(\'woff2\');",',
" ' unicode-range: U+1F00-1FFF;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: normal;',",
" ' font-weight: 500;',",
' " src: local(\'Roboto Medium\'), local(\'Roboto-Medium\'), url(/font-roboto/RxZJdnzeo3R5zSexge8UUZBw1xU1rKptJj_0jans920.woff2) format(\'woff2\');",',
" ' unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2212, U+2215;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: normal;',",
" ' font-weight: 500;',",
' " src: local(\'Roboto Medium\'), local(\'Roboto-Medium\'), url(/font-roboto/oOeFwZNlrTefzLYmlVV1UIX0hVgzZQUfRDuZrPvH3D8.woff2) format(\'woff2\');",',
" ' unicode-range: U+0100-024F, U+1E00-1EFF, U+20A0-20AB, U+20AD-20CF, U+2C60-2C7F, U+A720-A7FF;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: normal;',",
" ' font-weight: 500;',",
' " src: local(\'Roboto Medium\'), local(\'Roboto-Medium\'), url(/font-roboto/mbmhprMH69Zi6eEPBYVFhYX0hVgzZQUfRDuZrPvH3D8.woff2) format(\'woff2\');",',
" ' unicode-range: U+0102-0103, U+1EA0-1EF9, U+20AB;',",
" '}',",
" '@font-face {',",
' " font-family: \'Roboto\';",',
" ' font-style: italic;',",
" ' font-weight: 500;',",
' " src: local(\'Roboto Medium Italic\'), local(\'Roboto-MediumItalic\'), url(/font-roboto/OLffGBTaF0XFOW1gnuHF0V4sYYdJg5dU2qzJEVSuta0.woff2) format(\'woff2\');",',
" ' unicode-range: U+0400-045F, U+0490-0491, U+04B0-04B1, U+2116;',",
" '}',",
+ [1] * (len(xshape) - 1)
drop_mask = keep_rate + tf.random.uniform(drop_mask_shape, dtype=x.dtype)
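# Flooring maps keep_rate + U[0, 1) to {0., 1.} with P(1) = keep_rate;
# dividing by keep_rate rescales kept activations so their expected value
# is unchanged (inverted-dropout style stochastic depth).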
drop_mask = tf.math.divide(tf.floor(drop_mask), keep_rate)
return x * drop_mask
class FeedForwardLayer(tf.keras.layers.Layer): # pylint: disable=missing-docstring
def __init__(self, dim_att, dim_mlp, drop_units=0.1, **kwargs):
super(FeedForwardLayer, self).__init__(**kwargs)
self.dense1 = tf.keras.layers.Dense(
dim_mlp, activation=tf.nn.gelu, name='dense1')
self.dropout = tf.keras.layers.Dropout(drop_units)
self.dense2 = tf.keras.layers.Dense(dim_att, name='dense2')
def call(self, x, training):
return self.dense2(self.dropout(self.dense1(x), training=training))
class MLP(tf.keras.layers.Layer): # pylint: disable=missing-docstring
def __init__(self,
num_layers,
dim,
mlp_ratio,
drop_path=0.1,
drop_units=0.,
**kwargs):
super(MLP, self).__init__(**kwargs)
self.num_layers = num_layers
self.mlp_layers = [
FeedForwardLayer(dim, dim * mlp_ratio, drop_units,
name='ffn' + suffix_id(i))
for i in range(num_layers)
]
self.layernorms = [
tf.keras.layers.LayerNormalization(
epsilon=1e-6, name='ffn/ln' + suffix_id(i))
for i in range(num_layers)
]
self.dropp = DropPath(drop_path)
def call(self, x, training, ret_list=False):
x_list = [x]
for i in range(self.num_layers):
x_residual = self.mlp_layers[i](self.layernorms[i](x), training)
x = x + self.dropp(x_residual, training)
x_list.append(x)
return (x, x_list) if ret_list else x
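# MLP stacks pre-LayerNorm feed-forward blocks with residual connections;
# DropPath randomly skips whole residual branches during training
# (stochastic depth).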
class TransformerEncoderLayer(tf.keras.layers.Layer): # pylint: disable=missing-docstring
def __init__(self,
dim,
mlp_ratio,
num_heads,
drop_path=0.1,
drop_units=0.1,
drop_att=0.,
**kwargs):
super(TransformerEncoderLayer, self).__init__(**kwargs)
self.mha_ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6, name='mha/ln')
self.mha = tf.keras.layers.MultiHeadAttention(
num_heads, dim // num_heads, dropout=drop_att, name='mha')
self.mlp = MLP(1, dim, mlp_ratio, drop_path, drop_units, name='mlp')
self.dropp = DropPath(drop_path)
def call(self, x, mask, training):
# x shape (bsz, seq_len, dim_att), mask shape (bsz, seq_len, seq_len).
x_ln = self.mha_ln(x)
x_residual = self.mha(x_ln, x_ln, x_ln, mask, training=training)
x = x + self.dropp(x_residual, training)
x = self.mlp(x, training)
return x
class TransformerEncoder(tf.keras.layers.Layer): # pylint: disable=missing-docstring
def __init__(self,
num_layers,
dim,
mlp_ratio,
num_heads,
drop_path=0.1,
drop_units=0.1,
drop_att=0.,
**kwargs):
super(TransformerEncoder, self).__init__(**kwargs)
self.num_layers = num_layers
self.enc_layers = [
TransformerEncoderLayer( # pylint: disable=g-complex-comprehension
dim, mlp_ratio, num_heads, drop_path, drop_units, drop_att,
name='transformer_encoder' + suffix_id(i))
for i in range(num_layers)
]
def call(self, x, mask, training, ret_list=False):
x_list = [x]
for i in range(self.num_layers):
x = self.enc_layers[i](x, mask, training)
x_list.append(x)
return (x, x_list) if ret_list else x
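# Illustrative usage sketch (hyper-parameters and shapes below are made up,
# not taken from any real config):
#   encoder = TransformerEncoder(num_layers=2, dim=64, mlp_ratio=4, num_heads=8)
#   tokens = tf.zeros([2, 10, 64])               # (bsz, seq_len, dim)
#   out = encoder(tokens, None, training=False)  # -> (2, 10, 64)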
class TransformerDecoderLayer(tf.keras.layers.Layer): # pylint: disable=missing-docstring
def __init__(self,
dim,
mlp_ratio,
num_heads,
drop_path=0.1,
drop_units=0.1,
drop_att=0.,
self_attention=True,
cross_attention=True,
**kwargs):
super(TransformerDecoderLayer, self).__init__(**kwargs)
self.self_attention = self_attention
self.cross_attention = cross_attention
if self_attention:
self.self_ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6, name='self_mha/ln')
self.self_mha = tf.keras.layers.MultiHeadAttention(
num_heads, dim // num_heads, dropout=drop_att, name='self_mha')
if cross_attention:
self.cross_ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6, name='cross_mha/ln')
self.cross_mha = tf.keras.layers.MultiHeadAttention(
num_heads, dim // num_heads, dropout=drop_att, name='cross_mha')
self.mlp = MLP(1, dim, mlp_ratio, drop_path, drop_units, name='mlp')
self.dropp = DropPath(drop_path)
def call(self, x, enc, cache, mask_self, mask_cross, training):
"""x in (bsz, seq, d), enc in (bsz, seq', d)."""
x_for_cache = []
if self.self_attention:
x_for_cache = x_ln = kv_ln = self.self_ln(x)
if cache is not None: # Augment kv_ln with cache in (bsz, c_size, d).
q_size, k_size = tf.shape(x)[1], tf.shape(cache)[1]
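# New queries may attend to every cached position, so an all-ones block of
# shape [1, 1, q_size, k_size] is prepended to the self-attention mask along
# the key axis.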
mask_self = tf.concat([tf.ones([1, 1, q_size, k_size]), mask_self], -1)
kv_ln = tf.concat([cache, x_ln], axis=1)
x_res = self.self_mha(x_ln, kv_ln, kv_ln, mask_self, training=training)
x = x + self.dropp(x_res, training)
if self.cross_attention:
x_ln = self.cross_ln(x)
x_res = self.cross_mha(x_ln, enc, enc, mask_cross, training=training)
x = x + self.dropp(x_res, training)
x = self.mlp(x, training)
return x, x_for_cache
class TransformerDecoder(tf.keras.layers.Layer): # pylint: disable=missing-docstring
def __init__(self,
num_layers,
dim,
mlp_ratio,
num_heads,
drop_path=0.1,
drop_units=0.1,
drop_att=0.,
self_attention=True,
cross_attention=True,
**kwargs):
super(TransformerDecoder, self).__init__(**kwargs)
self.num_layers = num_layers
self.dec_layers = [
TransformerDecoderLayer( # pylint: disable=g-complex-comprehension
dim, mlp_ratio, num_heads, drop_path, drop_units, drop_att,
self_attention, cross_attention,
name='transformer_decoder_layer' + suffix_id(i))
for i in range(num_layers)
]
def call(self, x, enc, caches, mask_self, mask_cross, training):
"""x in (bsz, seq, d), enc in (bsz, seq', d)."""
presents = []
for i in range(self.num_layers):
cache = None if caches is None else caches[i]
x, x_for_cache = self.dec_layers[i](
x, enc, cache, mask_self, mask_cross, training)
presents.append(x_for_cache)
return x, tf.stack(presents)
class VisionTransformer(tf.keras.layers.Layer): # pylint: disable=missing-docstring
def __init__(self,
image_height,
image_width,
patch_size,
num_layers,
dim,
mlp_ratio,
num_heads,
drop_path=0.1,
drop_units=0.1,
drop_att=0.,
pos_encoding='learned',
use_cls_token=True,
**kwargs):
super(VisionTransformer, self).__init__(**kwargs)
self.use_cls_token = use_cls_token
self.patch_size = patch_size
self.stem_conv = tf.keras.layers.Conv2D(
filters=dim, kernel_size=patch_size, strides=patch_size,
padding='VALID', use_bias=True, name='stem_conv')
self.stem_ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6, name='stem_ln')
if self.use_cls_token:
add_cls_token_emb(self, dim)
self.n_rows, self.n_cols = image_height//patch_size, image_width//patch_size
add_vis_pos_emb(self, pos_encoding, self.n_rows, self.n_cols, dim)
self.transformer_encoder = TransformerEncoder(
num_layers, dim, mlp_ratio, num_heads, drop_path, drop_units, drop_att,
name='transformer_encoder')
self.output_ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6, name='output_ln')
def call(self, images, training, ret_list=False):
"""Input images of (bsz, h, w, c)."""
tokens = self.stem_conv(images)
bsz, h, w, dim = get_shape(tokens)
tokens = self.stem_ln(tf.reshape(tokens, [bsz, h * w, dim]))
tokens = tokens + tf.expand_dims(self.vis_pos_emb, 0)
if self.use_cls_token:
cls_token = tf.tile(tf.expand_dims(self.cls_token_emb, 0), [bsz, 1, 1])
tokens = tf.concat([cls_token, tokens], 1)
tokens, x_list = self.transformer_encoder(
tokens, None, training=training, ret_list=True)
x = self.output_ln(tokens)
return (x, x_list) if ret_list else x
class ResNetTransformer(tf.keras.layers.Layer): # pylint: disable=missing-docstring
def __init__(self,
image_height,
image_width,
resnet_variant,
resnet_depth,
resnet_width_multiplier,
resnet_sk_ratio,
num_layers,
dim,
mlp_ratio,
num_heads,
drop_path=0.1,
drop_units=0.1,
drop_att=0.,
pos_encoding='learned',
use_cls_token=True,
**kwargs):
super(ResNetTransformer, self).__init__(**kwargs)
self.use_cls_token = use_cls_token
self.resnet = resnet.resnet(
resnet_depth=resnet_depth,
width_multiplier=resnet_width_multiplier,
sk_ratio=resnet_sk_ratio,
variant=resnet_variant)
self.dropout = tf.keras.layers.Dropout(drop_units)
self.stem_projection = tf.keras.layers.Dense(dim, name='stem_projection')
self.stem_ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6, name='stem_ln')
if self.use_cls_token:
add_cls_token_emb(self, dim)
if resnet_variant in ['c3']:
factor = 8.
elif resnet_variant in ['c4', 'dc5']:
factor = 16.
else:
factor = 32.
self.n_rows = math.ceil(image_height / factor)
self.n_cols = math.ceil(image_width / factor)
add_vis_pos_emb(self, pos_encoding, self.n_rows, self.n_cols, dim)
self.transformer_encoder = TransformerEncoder(
num_layers, dim, mlp_ratio, num_heads, drop_path, drop_units, drop_att,
name='transformer_encoder')
self.output_ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6, name='output_ln')
def call(self, images, training, ret_list=False):
"""Input images of (bsz, h, w, c)."""
hidden_stack, _ = self.resnet(images, training)
tokens = hidden_stack[-1]
bsz, h, w, num_channels = get_shape(tokens)
tokens = tf.reshape(tokens, [bsz, h * w, num_channels])
tokens = self.stem_ln(self.stem_projection(self.dropout(tokens, training)))
tokens = tokens + tf.expand_dims(self.vis_pos_emb, 0)
if self.use_cls_token:
cls_token = tf.tile(tf.expand_dims(self.cls_token_emb, 0), [bsz, 1, 1])
tokens = tf.concat([cls_token, tokens], 1)
tokens, x_list = self.transformer_encoder(
tokens, None, training=training, ret_list=True)
x = self.output_ln(tokens)
return (x, x_list) if ret_list else x
class AutoregressiveDecoder(tf.keras.layers.Layer): # pylint: disable=missing-docstring
def __init__(self,
vocab_size,
max_seq_len,
num_layers,
dim,
mlp_ratio,
num_heads,
drop_path=0.1,
drop_units=0.1,
drop_att=0.,
pos_encoding='learned',
shared_embedding=True,
output_bias=True,
**kwargs):
super(AutoregressiveDecoder, self).__init__(**kwargs)
self.vocab_size = vocab_size
self.max_seq_len = max_seq_len
self.num_layers = num_layers
self.dim = dim
self.shared_embedding = shared_embedding
self.output_bias = output_bias
add_seq_pos_emb(self, pos_encoding, max_seq_len, dim)
add_vocab_token_emb(self, vocab_size, dim, shared_embedding, output_bias)
self.decoder = TransformerDecoder(
num_layers, dim, mlp_ratio, num_heads,
drop_path, drop_units, drop_att, name='transformer_decoder')
self.output_ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6, name='output_ln')
def call(self, tokens, encoded, training):
"""Teacher-forced prediction.
Args:
tokens: `int` tokens with shape of (bsz, seqlen, dim).
encoded: `float` encoded representations for conditioning with shape of
(bsz, size, dim). This can be optional in case of pure decoder.
training: `boolean` indicator for training vs test mode.
Returns:
logits of `float` with shape of (bsz, seqlen, vocab_size)
"""
_, seqlen = get_shape(tokens)
seq_pos_emb = tf.expand_dims(self.seq_pos_emb[:seqlen], 0)
if self.shared_embedding:
inp_embedding = outp_embedding = self.token_embedding
else:
inp_embedding = self.inp_token_embedding
outp_embedding = self.outp_token_embedding
token_emb = tf.gather(inp_embedding, tokens) + seq_pos_emb
mask_self = 1. - get_ar_mask(seqlen, token_emb.dtype)
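# mask_self follows the Keras MultiHeadAttention convention: 1 where a query
# may attend to a key, 0 otherwise; here it restricts each position to itself
# and earlier positions (causal masking for teacher forcing).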
outputs, _ = self.decoder(
token_emb, encoded, None, mask_self, None, training)
outputs = self.output_ln(outputs)
logits = tf.matmul(outputs, outp_embedding, transpose_b=True)
if self.output_bias:
logits = tf.nn.bias_add(logits, self.outp_bias)
return logits
def infer(self, prompt, encoded, max_seq_len=None,
temperature=1.0, top_k=1, top_p=1.0, sampling_callback=None):
"""Autoregressive (without teacher-forcing) prediction.
Note: the autoregressive sampling/inference time can be further optimized by
caching *transformed* key / value inside multi-head attention for the
`encoded` and previously generated tokens, but this may make the code less
readable.
Args:
prompt: `int` tokens with shape of (bsz, prompt_len).
encoded: `float` encoded representations for conditioning with shape of
(bsz, size, dim). This can be optional in case of pure decoder.
max_seq_len: `int` of max generated sequence length (including prompt).
temperature: `float` scalar for scaling the logits before sampling.
top_k: `int` scalar for truncating top-k tokens according to logits before
token sampling.
top_p: `float` scalar specifying the threshold of cumulative probability
for truncating tokens before token sampling.
sampling_callback: a callback `function` that takes `next_logits` and
returns `next_token`. This is used when users need a specific logic
for sampling. Defaults to `None` with standard free-form sampling.
Returns:
sampled tokens with shape of (bsz, max_seq_len-prompt_len).
logits (temperature-scaled) associated with sampled token, in shape of
(bsz, max_seq_len-prompt_len, vocab_size).
"""
bsz, prompt_len = get_shape(prompt)
seq_len = self.max_seq_len if max_seq_len is None else max_seq_len
seq_pos_emb = tf.expand_dims(self.seq_pos_emb, 0)
if self.shared_embedding:
inp_embedding = outp_embedding = self.token_embedding
else:
inp_embedding = self.inp_token_embedding
outp_embedding = self.outp_token_embedding
# Each step reads caches[:step] and tokens[step:next_step] and updates
# tokens[next_step], logits[next_step] and caches[step:next_step].
# On the first step, step=0, next_step=prompt_len. On subsequent steps
# next_step = step + 1.
def loop_body(step, caches, tokens, logits, is_prompt=False):
if is_prompt:
assert step == 0
x = tf.gather(inp_embedding, tf.transpose(tokens[:prompt_len]))
x = x +
sections.
# calculate talpha_fw and bw for attenuation
if transient_asym_att_x:
if np.any(matching_indices):
ta = p_sol[1 + 2 * nt + ix_from_cal_match_to_glob.size:].reshape(
(nt, 2, nta), order='F')
ta_var = p_var[1 + 2 * nt
+ ix_from_cal_match_to_glob.size:].reshape(
(nt, 2, nta), order='F')
else:
ta = p_sol[2 * nt + nx_sec:].reshape((nt, 2, nta), order='F')
ta_var = p_var[2 * nt + nx_sec:].reshape((nt, 2, nta), order='F')
talpha_fw = ta[:, 0, :]
talpha_bw = ta[:, 1, :]
talpha_fw_var = ta_var[:, 0, :]
talpha_bw_var = ta_var[:, 1, :]
else:
talpha_fw = None
talpha_bw = None
talpha_fw_var = None
talpha_bw_var = None
# put E outside of reference section in solution
# concatenating makes a copy of the data instead of using a pointer
ds_sub = ds[['st', 'ast', 'rst', 'rast']]
time_dim = ds_sub.get_time_dim()
ds_sub['df'] = ((time_dim,), p_sol[1:1 + nt])
ds_sub['df_var'] = ((time_dim,), p_var[1:1 + nt])
ds_sub['db'] = ((time_dim,), p_sol[1 + nt:1 + 2 * nt])
ds_sub['db_var'] = ((time_dim,), p_var[1 + nt:1 + 2 * nt])
E_all_exact, E_all_var_exact = calc_alpha_double(
'exact',
ds_sub,
st_var,
ast_var,
rst_var,
rast_var,
'df',
'db',
'df_var',
'db_var',
ix_alpha_is_zero=ix_alpha_is_zero,
transient_asym_att_x=transient_asym_att_x,
talpha_fw=talpha_fw,
talpha_bw=talpha_bw,
talpha_fw_var=talpha_fw_var,
talpha_bw_var=talpha_bw_var)
if np.any(matching_indices):
p_sol_size = 1 + 2 * nt + ix_from_cal_match_to_glob.size + 2 * nt * nta
else:
p_sol_size = 1 + 2 * nt + (nx_sec - 1) + 2 * nt * nta
assert p_sol.size == p_sol_size
assert p_var.size == p_sol_size
if np.any(matching_indices):
po_sol = np.concatenate(
(
p_sol[:1 + 2 * nt], E_all_exact,
p_sol[1 + 2 * nt + ix_from_cal_match_to_glob.size:]))
po_sol[1 + 2 * nt + ix_from_cal_match_to_glob] = \
p_sol[1 + 2 * nt:1 + 2 * nt + ix_from_cal_match_to_glob.size]
else:
po_sol = np.concatenate(
(p_sol[:1 + 2 * nt], E_all_exact, p_sol[2 * nt + nx_sec:]))
po_sol[1 + 2 * nt + ix_sec[1:]] = p_sol[1 + 2 * nt:2 * nt + nx_sec]
po_sol[1 + 2 * nt + ix_sec[0]] = 0.  # zero by definition
if np.any(matching_indices):
po_var = np.concatenate(
(
p_var[:1 + 2 * nt], E_all_var_exact,
p_var[1 + 2 * nt + ix_from_cal_match_to_glob.size:]))
po_var[1 + 2 * nt + ix_from_cal_match_to_glob] = \
p_var[1 + 2 * nt:1 + 2 * nt + ix_from_cal_match_to_glob.size]
else:
po_var = np.concatenate(
(p_var[:1 + 2 * nt], E_all_var_exact, p_var[2 * nt + nx_sec:]))
po_var[1 + 2 * nt + ix_sec[1:]] = p_var[1 + 2 * nt:2 * nt + nx_sec]
po_var[1 + 2 * nt + ix_sec[0]] = 0.  # zero by definition
if calc_cov:
# the COV can be expensive to compute (in the least squares routine)
po_cov = np.diag(po_var).copy()
if np.any(matching_indices):
from_i = np.concatenate(
(
np.arange(1 + 2 * nt),
1 + 2 * nt + ix_from_cal_match_to_glob,
np.arange(
1 + 2 * nt + ix_from_cal_match_to_glob.size, 1 + 2 * nt
+ ix_from_cal_match_to_glob.size + nta * nt * 2)))
else:
from_i = np.concatenate(
(
np.arange(1 + 2 * nt), 1 + 2 * nt + ix_sec[1:],
np.arange(
1 + 2 * nt + nx_sec,
1 + 2 * nt + nx_sec + nta * nt * 2)))
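# Scatter the covariance of the estimated parameters (p_cov) into the
# full-size covariance matrix po_cov at the global indices collected in from_i.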
iox_sec1, iox_sec2 = np.meshgrid(from_i, from_i, indexing='ij')
po_cov[iox_sec1, iox_sec2] = p_cov
return po_sol, po_var, po_cov
else:
return po_sol, po_var
def matching_section_location_indices(ix_sec, hix, tix):
# contains all indices of the entire fiber that are either used for
# calibrating to reference temperature or for matching sections. Is sorted.
ix_cal_match = np.unique(np.concatenate((ix_sec, hix, tix)))
# number of locations of interest, width of the section of interest.
nx_cal_match = ix_cal_match.size
# indices in the section of interest. Including E0.
ix_sec2 = np.searchsorted(ix_cal_match, ix_sec)
# indices in the section of interest. Excluding E0
# ix_E0 mask - to exclude E[ix_sec[0]] from the E matrices
ix_E0_mask = np.array(
[ix for ix in range(nx_cal_match) if ix != ix_sec2[0]])
# contains the global coordinate indices of the E
ix_from_cal_match_to_glob = ix_cal_match[ix_E0_mask]
return ix_from_cal_match_to_glob
def construct_submatrices_matching_sections(
x, ix_sec, hix, tix, nt, transient_asym_att_x):
"""
For all matching indices, where subscript 1 refers to the indices in
`hix` and subscript 2 refers to the indices in `tix`.
F1 - F2 = E2 - E1 + TAF2 - TAF1 # EQ1
B1 - B2 = E1 - E2 + TAB2 - TAB1 # EQ2
For matching indices (`hix` and `tix`) that are outside of the reference
sections an additional equation is needed for `E` per time step.
(B3 - F3) / 2 = E3 + (df-db) / 2 + (TAF3 - TAB3) / 2 # EQ3
where subscript 3 refers to a hix or a tix that is not in a reference
section.
Note that E[ix_sec[0]] = 0 and is not included in the parameters. This is
dealt with by first assuming it is a parameter, then removing it from the
coefficient matrices. Note that indices _sec2 contain E[ix_sec[0]].
Ordering when unpacking the square matrix: nt observations for location 1, then
nt observations for location 2.
# ix of Observations and weights
# ix_y_eq1_f1 = hix
# ix_y_eq1_f2 = tix
# ix_y_eq2_b1 = hix
# ix_y_eq2_b2 = tix
# ix_y_eq3 = ix_match_not_cal
Parameters
----------
x : array-like of float
coordinates along the fiber, needed to create the matrices for
transient attenuation.
ix_sec : array-like of int
hix : array-like of int
tix : array-like of int
nt : int
Returns
-------
"""
# contains all indices of the entire fiber that are either used for
# calibrating to reference temperature or for matching sections. Is sorted.
ix_cal_match = np.unique(np.concatenate((ix_sec, hix, tix)))
# subscript 3 in doc-eqns
ix_match_not_cal = np.array(
[ix for ix in ix_cal_match if ix not in ix_sec])
# number of locations of interest, width of the section of interest.
nx_cal_match = ix_cal_match.size
npair = len(hix)
# indices in the section of interest.
ix_match_not_cal_sec2 = np.searchsorted(ix_cal_match, ix_match_not_cal)
# indices in the section of interest. Including E0.
ix_sec2 = np.searchsorted(ix_cal_match, ix_sec)
hix_sec2 = np.searchsorted(ix_cal_match, hix) # subscript 1 in doc-eqns
tix_sec2 = np.searchsorted(ix_cal_match, tix) # subscript 2 in doc-eqns
# indices in the section of interest. Excluding E0
# ix_E0 mask - to exclude E[ix_sec[0]] from the E matrices
ix_E0_mask = np.array(
[ix for ix in range(nx_cal_match) if ix != ix_sec2[0]])
# contains the global coordinate indices of the E
ix_from_cal_match_to_glob = ix_cal_match[ix_E0_mask]
# E in EQ1
data = np.ones(nt * npair, dtype=float)
row = np.arange(nt * npair, dtype=int)
col1 = np.repeat(hix_sec2, nt)
col2 = np.repeat(tix_sec2, nt)
E_match_F = sp.coo_matrix(
(
np.concatenate((-data, data)),
(np.concatenate((row, row)), np.concatenate((col1, col2)))),
shape=(nt * npair, nx_cal_match),
copy=False).tocsr(copy=False)[:, ix_E0_mask].tocoo()
Zero_eq12_gamma = sp.coo_matrix(([], ([], [])), shape=(nt * npair, 1))
Zero_d_eq12 = sp.coo_matrix(([], ([], [])), shape=(nt * npair, 2 * nt))
# E in EQ2
data = np.ones(nt * npair, dtype=float)
row = np.arange(nt * npair, dtype=int)
col1 = np.repeat(hix_sec2, nt)
col2 = np.repeat(tix_sec2, nt)
E_match_B = sp.coo_matrix(
(
np.concatenate((data, -data)),
(np.concatenate((row, row)), np.concatenate((col1, col2)))),
shape=(nt * npair, nx_cal_match),
copy=False).tocsr(copy=False)[:, ix_E0_mask].tocoo()
# E in EQ3
nx_nm = ix_match_not_cal_sec2.size
data = np.ones(nt * nx_nm, dtype=float)
row = np.arange(nt * nx_nm, dtype=int)
col = np.repeat(ix_match_not_cal_sec2, nt)
E_match_no_cal = sp.coo_matrix(
(data, (row, col)), shape=(nt * nx_nm, nx_cal_match),
copy=False).tocsr(copy=False)[:, ix_E0_mask].tocoo()
# DF and DB in EQ3
data = np.ones(nt * nx_nm, dtype=float) / 2
row = np.arange(nt * nx_nm, dtype=int)
colf = np.tile(np.arange(nt, dtype=int), nx_nm)
colb = np.tile(np.arange(nt, 2 * nt, dtype=int), nx_nm)
d_no_cal = sp.coo_matrix(
(
np.concatenate((data, -data)),
(np.concatenate((row, row)), np.concatenate((colf, colb)))),
shape=(nt * nx_nm, 2 * nt),
copy=False)
Zero_eq3_gamma = sp.coo_matrix(([], ([], [])), shape=(nt * nx_nm, 1))
# TA
if transient_asym_att_x:
# unpublished BdT
TA_eq1_list = list()
TA_eq2_list = list()
TA_eq3_list = list()
for transient_asym_att_xi in transient_asym_att_x:
"""For forward direction."""
# first index on the right-hand side of the difficult splice
# Deal with connector outside of fiber
if transient_asym_att_xi >= x[-1]:
ix_ta_ix0 = x.size
elif transient_asym_att_xi <= x[0]:
ix_ta_ix0 = 0
else:
ix_ta_ix0 = np.flatnonzero(x >= transient_asym_att_xi)[0]
# TAF1 and TAF2 in EQ1
data_taf = np.repeat(
-np.array(hix >= ix_ta_ix0, dtype=float)
+ np.array(tix >= ix_ta_ix0, dtype=float), nt)
row_taf = np.arange(nt * npair)
col_taf = np.tile(np.arange(nt, dtype=int), npair)
mask_taf = data_taf.astype(
bool)  # only store non-zeros in the sparse matrix
TA_eq1_list.append(
"""Corresponds to IDD field `Moisture Content 19`"""
self["Moisture Content 19"] = value
@property
def liquid_transport_coefficient_19(self):
"""field `Liquid Transport Coefficient 19`
| Units: m2/s
Args:
value (float): value for IDD Field `Liquid Transport Coefficient 19`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `liquid_transport_coefficient_19` or None if not set
"""
return self["Liquid Transport Coefficient 19"]
@liquid_transport_coefficient_19.setter
def liquid_transport_coefficient_19(self, value=None):
"""Corresponds to IDD field `Liquid Transport Coefficient 19`"""
self["Liquid Transport Coefficient 19"] = value
@property
def moisture_content_20(self):
"""field `Moisture Content 20`
| Units: kg/m3
Args:
value (float): value for IDD Field `Moisture Content 20`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `moisture_content_20` or None if not set
"""
return self["Moisture Content 20"]
@moisture_content_20.setter
def moisture_content_20(self, value=None):
"""Corresponds to IDD field `Moisture Content 20`"""
self["Moisture Content 20"] = value
@property
def liquid_transport_coefficient_20(self):
"""field `Liquid Transport Coefficient 20`
| Units: m2/s
Args:
value (float): value for IDD Field `Liquid Transport Coefficient 20`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `liquid_transport_coefficient_20` or None if not set
"""
return self["Liquid Transport Coefficient 20"]
@liquid_transport_coefficient_20.setter
def liquid_transport_coefficient_20(self, value=None):
"""Corresponds to IDD field `Liquid Transport Coefficient 20`"""
self["Liquid Transport Coefficient 20"] = value
@property
def moisture_content_21(self):
"""field `Moisture Content 21`
| Units: kg/m3
Args:
value (float): value for IDD Field `Moisture Content 21`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `moisture_content_21` or None if not set
"""
return self["Moisture Content 21"]
@moisture_content_21.setter
def moisture_content_21(self, value=None):
"""Corresponds to IDD field `Moisture Content 21`"""
self["Moisture Content 21"] = value
@property
def liquid_transport_coefficient_21(self):
"""field `Liquid Transport Coefficient 21`
| Units: m2/s
Args:
value (float): value for IDD Field `Liquid Transport Coefficient 21`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `liquid_transport_coefficient_21` or None if not set
"""
return self["Liquid Transport Coefficient 21"]
@liquid_transport_coefficient_21.setter
def liquid_transport_coefficient_21(self, value=None):
"""Corresponds to IDD field `Liquid Transport Coefficient 21`"""
self["Liquid Transport Coefficient 21"] = value
@property
def moisture_content_22(self):
"""field `Moisture Content 22`
| Units: kg/m3
Args:
value (float): value for IDD Field `Moisture Content 22`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `moisture_content_22` or None if not set
"""
return self["Moisture Content 22"]
@moisture_content_22.setter
def moisture_content_22(self, value=None):
"""Corresponds to IDD field `Moisture Content 22`"""
self["Moisture Content 22"] = value
@property
def liquid_transport_coefficient_22(self):
"""field `Liquid Transport Coefficient 22`
| Units: m2/s
Args:
value (float): value for IDD Field `Liquid Transport Coefficient 22`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `liquid_transport_coefficient_22` or None if not set
"""
return self["Liquid Transport Coefficient 22"]
@liquid_transport_coefficient_22.setter
def liquid_transport_coefficient_22(self, value=None):
"""Corresponds to IDD field `Liquid Transport Coefficient 22`"""
self["Liquid Transport Coefficient 22"] = value
@property
def moisture_content_23(self):
"""field `Moisture Content 23`
| Units: kg/m3
Args:
value (float): value for IDD Field `Moisture Content 23`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `moisture_content_23` or None if not set
"""
return self["Moisture Content 23"]
@moisture_content_23.setter
def moisture_content_23(self, value=None):
"""Corresponds to IDD field `Moisture Content 23`"""
self["Moisture Content 23"] = value
@property
def liquid_transport_coefficient_23(self):
"""field `Liquid Transport Coefficient 23`
| Units: m2/s
Args:
value (float): value for IDD Field `Liquid Transport Coefficient 23`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `liquid_transport_coefficient_23` or None if not set
"""
return self["Liquid Transport Coefficient 23"]
@liquid_transport_coefficient_23.setter
def liquid_transport_coefficient_23(self, value=None):
"""Corresponds to IDD field `Liquid Transport Coefficient 23`"""
self["Liquid Transport Coefficient 23"] = value
@property
def moisture_content_24(self):
"""field `Moisture Content 24`
| Units: kg/m3
Args:
value (float): value for IDD Field `Moisture Content 24`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `moisture_content_24` or None if not set
"""
return self["Moisture Content 24"]
@moisture_content_24.setter
def moisture_content_24(self, value=None):
"""Corresponds to IDD field `Moisture Content 24`"""
self["Moisture Content 24"] = value
@property
def liquid_transport_coefficient_24(self):
"""field `Liquid Transport Coefficient 24`
| Units: m2/s
Args:
value (float): value for IDD Field `Liquid Transport Coefficient 24`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `liquid_transport_coefficient_24` or None if not set
"""
return self["Liquid Transport Coefficient 24"]
@liquid_transport_coefficient_24.setter
def liquid_transport_coefficient_24(self, value=None):
"""Corresponds to IDD field `Liquid Transport Coefficient 24`"""
self["Liquid Transport Coefficient 24"] = value
@property
def moisture_content_25(self):
"""field `Moisture Content 25`
| Units: kg/m3
Args:
value (float): value for IDD Field `Moisture Content 25`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `moisture_content_25` or None if not set
"""
return self["Moisture Content 25"]
@moisture_content_25.setter
def moisture_content_25(self, value=None):
"""Corresponds to IDD field `Moisture Content 25`"""
self["Moisture Content 25"] = value
@property
def liquid_transport_coefficient_25(self):
"""field `Liquid Transport Coefficient 25`
| Units: m2/s
Args:
value (float): value for IDD Field `Liquid Transport Coefficient 25`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `liquid_transport_coefficient_25` or None if not set
"""
return self["Liquid Transport Coefficient 25"]
@liquid_transport_coefficient_25.setter
def liquid_transport_coefficient_25(self, value=None):
"""Corresponds to IDD field `Liquid Transport Coefficient 25`"""
self["Liquid Transport Coefficient 25"] = value
class MaterialPropertyHeatAndMoistureTransferRedistribution(DataObject):
""" Corresponds to IDD object `MaterialProperty:HeatAndMoistureTransfer:Redistribution`
HeatBalanceAlgorithm = CombinedHeatAndMoistureFiniteElement solution algorithm only.
Relationship between liquid transport coefficient and moisture content
Has no effect with other HeatBalanceAlgorithm solution algorithms
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'material name',
{'name': u'Material Name',
'pyname': u'material_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'number of redistribution points',
{'name': u'Number of Redistribution points',
'pyname': u'number_of_redistribution_points',
'maximum': 25,
'required-field': True,
'autosizable': False,
'minimum': 1,
'autocalculatable': False,
'type': u'integer'}),
(u'moisture content 1',
{'name': u'Moisture Content 1',
'pyname': u'moisture_content_1',
'required-field': True,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'liquid transport coefficient 1',
{'name': u'Liquid Transport Coefficient 1',
'pyname': u'liquid_transport_coefficient_1',
'required-field': True,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'm2/s'}),
(u'moisture content 2',
{'name': u'Moisture Content 2',
'pyname': u'moisture_content_2',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'liquid transport coefficient 2',
{'name': u'Liquid Transport Coefficient 2',
'pyname': u'liquid_transport_coefficient_2',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'm2/s'}),
(u'moisture content 3',
{'name': u'Moisture Content 3',
'pyname': u'moisture_content_3',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'liquid transport coefficient 3',
{'name': u'Liquid Transport Coefficient 3',
'pyname': u'liquid_transport_coefficient_3',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'm2/s'}),
(u'moisture content 4',
{'name': u'Moisture Content 4',
'pyname': u'moisture_content_4',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'liquid transport coefficient 4',
{'name': u'Liquid Transport Coefficient 4',
'pyname': u'liquid_transport_coefficient_4',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'm2/s'}),
(u'moisture content 5',
{'name': u'Moisture Content 5',
'pyname': u'moisture_content_5',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'liquid transport coefficient 5',
{'name': u'Liquid Transport Coefficient 5',
'pyname': u'liquid_transport_coefficient_5',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'm2/s'}),
(u'moisture content 6',
{'name': u'Moisture Content 6',
'pyname': u'moisture_content_6',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'liquid transport coefficient 6',
{'name': u'Liquid Transport Coefficient 6',
'pyname': u'liquid_transport_coefficient_6',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'm2/s'}),
(u'moisture content 7',
{'name': u'Moisture Content 7',
'pyname': u'moisture_content_7',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'liquid transport coefficient 7',
{'name': u'Liquid Transport Coefficient 7',
'pyname': u'liquid_transport_coefficient_7',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'm2/s'}),
(u'moisture content 8',
{'name': u'Moisture Content 8',
'pyname': u'moisture_content_8',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'liquid transport coefficient 8',
{'name': u'Liquid Transport Coefficient 8',
'pyname': u'liquid_transport_coefficient_8',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'm2/s'}),
(u'moisture content 9',
{'name': u'Moisture Content 9',
'pyname': u'moisture_content_9',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
<reponame>xxdreck/google-research
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for smu_utils_lib."""
import copy
import os
import tempfile
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import pandas as pd
from rdkit import Chem
from google.protobuf import text_format
from smu import dataset_pb2
from smu.parser import smu_parser_lib
from smu.parser import smu_utils_lib
MAIN_DAT_FILE = 'x07_sample.dat'
STAGE1_DAT_FILE = 'x07_stage1.dat'
TESTDATA_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'testdata')
def str_to_bond_topology(s):
bt = dataset_pb2.BondTopology()
text_format.Parse(s, bt)
return bt
def get_stage1_conformer():
parser = smu_parser_lib.SmuParser(
os.path.join(TESTDATA_PATH, STAGE1_DAT_FILE))
conformer, _ = next(parser.process_stage1())
return conformer
def get_stage2_conformer():
parser = smu_parser_lib.SmuParser(os.path.join(TESTDATA_PATH, MAIN_DAT_FILE))
conformer, _ = next(parser.process_stage2())
return conformer
class SpecialIDTest(absltest.TestCase):
def test_from_dat_id(self):
self.assertIsNone(
smu_utils_lib.special_case_bt_id_from_dat_id(123456, 'CC'))
self.assertEqual(
smu_utils_lib.special_case_bt_id_from_dat_id(999998, 'O'), 899650)
self.assertEqual(
smu_utils_lib.special_case_bt_id_from_dat_id(0, 'O'), 899650)
with self.assertRaises(ValueError):
smu_utils_lib.special_case_bt_id_from_dat_id(0, 'NotASpecialCaseSmiles')
def test_from_bt_id(self):
self.assertIsNone(smu_utils_lib.special_case_dat_id_from_bt_id(123456))
self.assertEqual(
smu_utils_lib.special_case_dat_id_from_bt_id(899651), 999997)
class GetCompositionTest(absltest.TestCase):
def test_simple(self):
bt = dataset_pb2.BondTopology()
bt.atoms.extend([
dataset_pb2.BondTopology.ATOM_C, dataset_pb2.BondTopology.ATOM_C,
dataset_pb2.BondTopology.ATOM_N, dataset_pb2.BondTopology.ATOM_H,
dataset_pb2.BondTopology.ATOM_H, dataset_pb2.BondTopology.ATOM_H
])
self.assertEqual('x03_c2nh3', smu_utils_lib.get_composition(bt))
class GetCanonicalStoichiometryWithHydrogensTest(absltest.TestCase):
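# The canonical stoichiometry groups each heavy atom with its attached
# hydrogens and collapses repeated groups into a count suffix, e.g. '(ch2)4'
# for cyclobutane and '(o)(f)2' for OF2 (our reading of the cases below).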
def test_cyclobutane(self):
bt = smu_utils_lib.create_bond_topology('CCCC', '110011', '2222')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt), '(ch2)4')
def test_ethylene(self):
bt = smu_utils_lib.create_bond_topology('CC', '2', '22')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt), '(ch2)2')
def test_acrylic_acid(self):
bt = smu_utils_lib.create_bond_topology('CCCOO', '2000100210', '21001')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt),
'(c)(ch)(ch2)(o)(oh)')
def test_fluorine(self):
bt = smu_utils_lib.create_bond_topology('OFF', '110', '000')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt), '(o)(f)2')
def test_fully_saturated(self):
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('C', '', '4')), '(ch4)')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('N', '', '3')), '(nh3)')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('O', '', '2')), '(oh2)')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('F', '', '1')), '(fh)')
def test_nplus_oneg(self):
bt = smu_utils_lib.create_bond_topology('NO', '1', '30')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt),
'(nh3)(o)')
class ParseBondTopologyTest(absltest.TestCase):
def test_4_heavy(self):
num_atoms, atoms_str, matrix, hydrogens = smu_utils_lib.parse_bond_topology_line(
' 4 N+O O O- 010110 3000')
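# Our reading of the .dat line format: heavy-atom count, atom symbols (with
# charges), the flattened upper-triangular bond-order matrix, and the hydrogen
# count per heavy atom.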
self.assertEqual(num_atoms, 4)
self.assertEqual(atoms_str, 'N+O O O-')
self.assertEqual(matrix, '010110')
self.assertEqual(hydrogens, '3000')
def test_7_heavy(self):
num_atoms, atoms_str, matrix, hydrogens = smu_utils_lib.parse_bond_topology_line(
' 7 N+O O O O-F F 001011101001000000000 1000000')
self.assertEqual(num_atoms, 7)
self.assertEqual(atoms_str, 'N+O O O O-F F ') # Note the trailing space
self.assertEqual(matrix, '001011101001000000000')
self.assertEqual(hydrogens, '1000000')
class CreateBondTopologyTest(absltest.TestCase):
def test_no_charged(self):
got = smu_utils_lib.create_bond_topology('CNFF', '111000', '1200')
expected_str = """
atoms: ATOM_C
atoms: ATOM_N
atoms: ATOM_F
atoms: ATOM_F
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
bonds {
atom_a: 1
atom_b: 5
bond_type: BOND_SINGLE
}
bonds {
atom_a: 1
atom_b: 6
bond_type: BOND_SINGLE
}
"""
expected = str_to_bond_topology(expected_str)
self.assertEqual(str(expected), str(got))
def test_charged(self):
# This is actually C N N+O-
got = smu_utils_lib.create_bond_topology('CNNO', '200101', '2020')
expected_str = """
atoms: ATOM_C
atoms: ATOM_N
atoms: ATOM_NPOS
atoms: ATOM_ONEG
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
bonds {
atom_a: 1
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_a: 2
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
bonds {
atom_b: 5
bond_type: BOND_SINGLE
}
bonds {
atom_a: 2
atom_b: 6
bond_type: BOND_SINGLE
}
bonds {
atom_a: 2
atom_b: 7
bond_type: BOND_SINGLE
}
"""
expected = str_to_bond_topology(expected_str)
self.assertEqual(str(expected), str(got))
def test_one_heavy(self):
got = smu_utils_lib.create_bond_topology('C', '', '4')
expected_str = """
atoms: ATOM_C
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
"""
expected = str_to_bond_topology(expected_str)
self.assertEqual(str(expected), str(got))
class FromCSVTest(absltest.TestCase):
def test_basic(self):
infile = tempfile.NamedTemporaryFile(mode='w', delete=False)
infile.write(
'id,num_atoms,atoms_str,connectivity_matrix,hydrogens,smiles\n')
infile.write('68,3,C N+O-,310,010,[NH+]#C[O-]\n')
infile.write('134,4,N+O-F F ,111000,1000,[O-][NH+](F)F\n')
infile.close()
out = smu_utils_lib.generate_bond_topologies_from_csv(infile.name)
bt = next(out)
self.assertEqual(68, bt.bond_topology_id)
self.assertLen(bt.atoms, 4)
self.assertEqual(bt.smiles, '[NH+]#C[O-]')
bt = next(out)
self.assertEqual(134, bt.bond_topology_id)
self.assertLen(bt.atoms, 5)
self.assertEqual(bt.smiles, '[O-][NH+](F)F')
class ParseDuplicatesFileTest(absltest.TestCase):
def test_basic(self):
df = smu_utils_lib.parse_duplicates_file(
os.path.join(TESTDATA_PATH, 'small.equivalent_isomers.dat'))
pd.testing.assert_frame_equal(
pd.DataFrame(
columns=[
'name1', 'stoich1', 'btid1', 'shortconfid1', 'confid1', 'name2',
'stoich2', 'btid2', 'shortconfid2', 'confid2'
],
data=[
[
'x07_c2n2o2fh3.224227.004', 'c2n2o2fh3', 224227, 4,
224227004, 'x07_c2n2o2fh3.224176.005', 'c2n2o2fh3', 224176,
5, 224176005
],
[
'x07_c2n2o2fh3.260543.005', 'c2n2o2fh3', 260543, 5,
260543005, 'x07_c2n2o2fh3.224050.001', 'c2n2o2fh3', 224050,
1, 224050001
],
]),
df,
check_like=True)
class BondTopologyToMoleculeTest(absltest.TestCase):
def test_o2(self):
bond_topology = str_to_bond_topology("""
atoms: ATOM_O
atoms: ATOM_O
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
""")
got = smu_utils_lib.bond_topology_to_molecule(bond_topology)
self.assertEqual('O=O', Chem.MolToSmiles(got))
def test_methane(self):
bond_topology = str_to_bond_topology("""
atoms: ATOM_C
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
""")
got = smu_utils_lib.bond_topology_to_molecule(bond_topology)
self.assertEqual('[H]C([H])([H])[H]', Chem.MolToSmiles(got))
# This molecule is an N+ central atom, bonded to C (triply), O-, and F
def test_charged_molecule(self):
bond_topology = str_to_bond_topology("""
atoms: ATOM_C
atoms: ATOM_NPOS
atoms: ATOM_ONEG
atoms: ATOM_F
bonds {
atom_b: 1
bond_type: BOND_TRIPLE
}
bonds {
atom_a: 1
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_a: 1
atom_b: 3
bond_type: BOND_SINGLE
}
""")
got = smu_utils_lib.bond_topology_to_molecule(bond_topology)
self.assertEqual('C#[N+]([O-])F', Chem.MolToSmiles(got))
class ConformerToMoleculeTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.conformer = get_stage2_conformer()
# We'll make a new initial_geometry which is just the current one with all
# coordinates multiplied by 1000
self.conformer.initial_geometries.append(
self.conformer.initial_geometries[0])
new_geom = self.conformer.initial_geometries[1]
for atom_pos in new_geom.atom_positions:
atom_pos.x = atom_pos.x * 1000
atom_pos.y = atom_pos.y * 1000
atom_pos.z = atom_pos.z * 1000
# For the extra bond_topology, we'll just copy the existing one and change
# the id. Through the dumb luck of the molecule we picked there's not a
# simple way to make this a new bond topology and still have it look valid
# to RDKit
self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])
self.conformer.bond_topologies[1].bond_topology_id = 99999
def test_all_outputs(self):
mols = list(smu_utils_lib.conformer_to_molecules(self.conformer))
self.assertLen(mols, 6) # 2 bond topologies * (1 opt geom + 2 init_geom)
self.assertEqual([m.GetProp('_Name') for m in mols], [
'SMU 618451001 bt=618451(0/2) geom=init(0/2)',
'SMU 618451001 bt=618451(0/2) geom=init(1/2)',
'SMU 618451001 bt=618451(0/2) geom=opt',
'SMU 618451001 bt=99999(1/2) geom=init(0/2)',
'SMU 618451001 bt=99999(1/2) geom=init(1/2)',
'SMU 618451001 bt=99999(1/2) geom=opt'
])
self.assertEqual(
'[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]',
Chem.MolToSmiles(mols[0], kekuleSmiles=True, isomericSmiles=False))
self.assertEqual(
'[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]',
Chem.MolToSmiles(mols[4], kekuleSmiles=True, isomericSmiles=False))
def test_initial_only(self):
mols = list(
smu_utils_lib.conformer_to_molecules(
self.conformer,
include_initial_geometries=True,
include_optimized_geometry=False,
include_all_bond_topologies=False))
self.assertLen(mols, 2)
self.assertEqual([m.GetProp('_Name') for m in mols], [
'SMU 618451001 bt=618451(0/2) geom=init(0/2)',
'SMU 618451001 bt=618451(0/2) geom=init(1/2)',
])
# This is just one random atom I picked from the .dat file and converted to
# angstroms instead of bohr.
self.assertEqual('C', mols[0].GetAtomWithIdx(1).GetSymbol())
np.testing.assert_allclose([0.6643, -3.470301, 3.4766],
list(mols[0].GetConformer().GetAtomPosition(1)),
atol=1e-6)
self.assertEqual('C', mols[1].GetAtomWithIdx(1).GetSymbol())
np.testing.assert_allclose([664.299998, -3470.300473, 3476.600215],
list(mols[1].GetConformer().GetAtomPosition(1)),
atol=1e-6)
def test_optimized_only(self):
mols = list(
smu_utils_lib.conformer_to_molecules(
self.conformer,
include_initial_geometries=False,
include_optimized_geometry=True,
include_all_bond_topologies=False))
self.assertLen(mols, 1)
self.assertEqual(
mols[0].GetProp('_Name'),
'SMU 618451001 bt=618451(0/2) geom=opt',
)
self.assertEqual(
'[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]',
Chem.MolToSmiles(mols[0], kekuleSmiles=True, isomericSmiles=False))
# This is just two random atoms I picked from the .dat file and converted to
# angstroms instead of bohr.
self.assertEqual('C', mols[0].GetAtomWithIdx(1).GetSymbol())
np.testing.assert_allclose([0.540254, -3.465543, 3.456982],
list(mols[0].GetConformer().GetAtomPosition(1)),
atol=1e-6)
self.assertEqual('H', mols[0].GetAtomWithIdx(13).GetSymbol())
np.testing.assert_allclose([2.135153, -1.817366, 0.226376],
list(mols[0].GetConformer().GetAtomPosition(13)),
atol=1e-6)
class SmilesCompareTest(absltest.TestCase):
def test_string_format(self):
# for some simplicity later on, we use shorter names
self.assertEqual('MISSING', str(smu_utils_lib.SmilesCompareResult.MISSING))
self.assertEqual('MISMATCH',
str(smu_utils_lib.SmilesCompareResult.MISMATCH))
self.assertEqual('MATCH', str(smu_utils_lib.SmilesCompareResult.MATCH))
def test_missing(self):
bond_topology = str_to_bond_topology("""
atoms: ATOM_O
atoms: ATOM_O
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
""")
result, with_h, without_h = smu_utils_lib.bond_topology_smiles_comparison(
bond_topology)
self.assertEqual(smu_utils_lib.SmilesCompareResult.MISSING, result)
self.assertEqual('O=O', with_h)
self.assertEqual('O=O', without_h)
# Also directly test compute_smiles_for_bond_topology
self.assertEqual(
'O=O',
smu_utils_lib.compute_smiles_for_bond_topology(
bond_topology, include_hs=True))
def test_mismatch(self):
bond_topology = str_to_bond_topology("""
atoms: ATOM_O
atoms: ATOM_O
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
smiles: "BlahBlahBlah"
""")
result, with_h, without_h = smu_utils_lib.bond_topology_smiles_comparison(
bond_topology)
self.assertEqual(smu_utils_lib.SmilesCompareResult.MISMATCH, result)
self.assertEqual('O=O', with_h)
self.assertEqual('O=O', without_h)
def test_matched_and_h_stripping(self):
bond_topology = str_to_bond_topology("""
atoms: ATOM_O
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
smiles: "O"
""")
result, with_h, without_h = smu_utils_lib.bond_topology_smiles_comparison(
bond_topology)
self.assertEqual(smu_utils_lib.SmilesCompareResult.MATCH, result)
self.assertEqual('[H]O[H]', with_h)
self.assertEqual('O', without_h)
# Also directly test compute_smiles_for_bond_topology
self.assertEqual(
'[H]O[H]',
smu_utils_lib.compute_smiles_for_bond_topology(
bond_topology, include_hs=True))
self.assertEqual(
'O',
smu_utils_lib.compute_smiles_for_bond_topology(
bond_topology, include_hs=False))
def test_compute_smiles_from_molecule_no_hs(self):
mol = Chem.MolFromSmiles('FOC', sanitize=False)
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=False), 'COF')
# This is expected. Even with include_hs=True, if there were no Hs in the
# molecule, they will not be in the smiles.
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=True), 'COF')
def test_compute_smiles_from_molecule_with_hs(self):
mol = Chem.MolFromSmiles('FOC', sanitize=False)
Chem.SanitizeMol(mol, Chem.rdmolops.SanitizeFlags.SANITIZE_ADJUSTHS)
mol = Chem.AddHs(mol)
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=False), 'COF')
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=True),
'[H]C([H])([H])OF')
def test_compute_smiles_from_molecule_special_case(self):
mol = Chem.MolFromSmiles('C12=C3C4=C1C4=C23', sanitize=False)
# Double check that this really is the special case -- we get back the
# SMILES we put in even though it's not the one we want.
self.assertEqual('C12=C3C4=C1C4=C23',
Chem.MolToSmiles(mol, kekuleSmiles=True))
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=False),
'C12=C3C1=C1C2=C31')
def test_compute_smiles_from_molecule_labeled_with_h(self):
mol = Chem.MolFromSmiles(
'[O-][N+]([H])([H])N([H])OC([H])([H])F', sanitize=False)
self.assertIsNotNone(mol)
self.assertEqual(
'[O-][N+:1]([H:2])([H:3])[N:4]([H:5])[O:6][C:7]([H:8])([H:9])[F:10]',
smu_utils_lib.compute_smiles_for_molecule(
mol, include_hs=True, labeled_atoms=True))
def test_compute_smiles_from_molecule_labeled_no_h(self):
mol = Chem.MolFromSmiles(
'[O-][N+]([H])([H])N([H])OC([H])([H])F', sanitize=False)
self.assertIsNotNone(mol)
self.assertEqual(
'[O-][NH2+:1][NH:2][O:3][CH2:4][F:5]',
smu_utils_lib.compute_smiles_for_molecule(
mol, include_hs=False, labeled_atoms=True))
class MergeConformersTest(absltest.TestCase):
def setUp(self):
super().setUp()
# We are relying on the fact that the first conformer in both x07_sample.dat
# and x07_stage1.dat are the same.
self.stage1_conformer = get_stage1_conformer()
self.stage2_conformer = get_stage2_conformer()
self.duplicate_conformer = dataset_pb2.Conformer()
self.duplicate_conformer.conformer_id = self.stage1_conformer.conformer_id
# A real duplicate conformer wouldn't have both of these fields filled in,
    # but it's fine for the test.
<reponame>richardk53/gluon-ts
from typing import Sequence, Optional, Union, Tuple
from dataclasses import dataclass
from box import Box
import numpy as np
import torch
from torch import nn
from torch.distributions import MultivariateNormal
from utils.utils import (
make_inv_tril_parametrization,
make_inv_tril_parametrization_from_cholesky,
TensorDims,
)
from torch_extensions.ops import matvec
from inference.analytical_gausian_linear.inference_sequence_inhomogenous import (
filter_forward,
smooth_forward_backward,
loss_em,
)
from inference.analytical_gausian_linear.inference_step import (
filter_forward_predictive_distribution,
filter_forward_prediction_step,
filter_forward_measurement_step,
smooth_backward_step,
)
from models.base_gls import (
ControlInputs,
Latents,
Prediction,
GLSVariables,
)
from models.base_amortized_gls import BaseAmortizedGaussianLinearSystem
from models.gls_parameters.gls_parameters import GLSParameters
from torch_extensions.distributions.parametrised_distribution import (
ParametrisedMultivariateNormal,
)
@dataclass
class ControlInputsKVAE(ControlInputs):
encoder: torch.Tensor
@dataclass
class GLSVariablesKVAE(GLSVariables):
auxiliary: torch.Tensor
    rnn_state: Union[torch.Tensor, Sequence[Tuple[torch.Tensor, torch.Tensor]]]
    m_auxiliary_variational: Optional[torch.Tensor]
    V_auxiliary_variational: Optional[torch.Tensor]
@dataclass
class LatentsKVAE(Latents):
variables: GLSVariablesKVAE
def __post_init__(self):
if hasattr(super(), "__post_init__"):
super().__post_init__()
assert isinstance(self.variables, GLSVariablesKVAE)
class KalmanVariationalAutoEncoder(BaseAmortizedGaussianLinearSystem):
def __init__(
self,
*args,
n_auxiliary: int,
measurement_model: nn.Module,
rnn_switch_model: nn.RNNCellBase,
reconstruction_weight: float = 1.0,
rao_blackwellized: bool = False,
**kwargs,
):
kwargs.update({"n_ctrl_target": None})
super().__init__(*args, **kwargs)
self.n_auxiliary = n_auxiliary
self.measurement_model = measurement_model
self.rnn_switch_model = rnn_switch_model
self.reconstruction_weight = reconstruction_weight
self.rao_blackwellized = rao_blackwellized
self.z_initial = torch.nn.Parameter(torch.zeros(self.n_auxiliary))
def filter_step(
self,
        lats_tm1: Optional[LatentsKVAE],
tar_t: torch.Tensor,
ctrl_t: ControlInputs,
tar_is_obs_t: Optional[torch.Tensor] = None,
) -> LatentsKVAE:
is_initial_step = lats_tm1 is None
if tar_is_obs_t is None:
tar_is_obs_t = torch.ones(
tar_t.shape[:-1], dtype=tar_t.dtype, device=tar_t.device,
)
# 1) Initial step must prepare previous latents with prior and learnt z.
if is_initial_step:
n_particle, n_batch = self.n_particle, len(tar_t)
state_prior = self.state_prior_model(
None, batch_shape_to_prepend=(n_particle, n_batch),
)
z_init = self.z_initial[None, None].repeat(n_particle, n_batch, 1)
lats_tm1 = LatentsKVAE(
variables=GLSVariablesKVAE(
m=state_prior.loc,
V=state_prior.covariance_matrix,
Cov=None,
x=None,
auxiliary=z_init,
rnn_state=None,
m_auxiliary_variational=None,
V_auxiliary_variational=None,
),
gls_params=None,
)
# 2) Compute GLS params
rnn_state_t, rnn_output_t = self.compute_deterministic_switch_step(
rnn_input=lats_tm1.variables.auxiliary,
rnn_prev_state=lats_tm1.variables.rnn_state,
)
gls_params_t = self.gls_base_parameters(
switch=rnn_output_t, controls=ctrl_t,
)
# Perform filter step:
# 3) Prediction Step: Only for t > 0 and using previous GLS params.
# (In KVAE, they do first update then prediction step.)
if is_initial_step:
            mp, Vp = lats_tm1.variables.m, lats_tm1.variables.V
else:
mp, Vp = filter_forward_prediction_step(
m=lats_tm1.variables.m,
V=lats_tm1.variables.V,
R=lats_tm1.gls_params.R,
A=lats_tm1.gls_params.A,
b=lats_tm1.gls_params.b,
)
# 4) Update step
# 4a) Observed data: Infer pseudo-obs by encoding obs && Bayes update
auxiliary_variational_dist_t = self.encoder(tar_t)
z_infer_t = auxiliary_variational_dist_t.rsample([self.n_particle])
m_infer_t, V_infer_t = filter_forward_measurement_step(
y=z_infer_t,
m=mp,
V=Vp,
Q=gls_params_t.Q,
C=gls_params_t.C,
d=gls_params_t.d,
)
# 4b) Choice: inferred / predicted m, V for observed / missing data.
is_filtered = tar_is_obs_t[None, :].repeat(self.n_particle, 1).byte()
replace_m_fw = is_filtered[:, :, None].repeat(1, 1, mp.shape[2])
replace_V_fw = is_filtered[:, :, None, None].repeat(
1, 1, Vp.shape[2], Vp.shape[3],
)
assert replace_m_fw.shape == m_infer_t.shape == mp.shape
assert replace_V_fw.shape == V_infer_t.shape == Vp.shape
m_t = torch.where(replace_m_fw, m_infer_t, mp)
V_t = torch.where(replace_V_fw, V_infer_t, Vp)
# 4c) Missing Data: Predict pseudo-observations && No Bayes update
mpz_t, Vpz_t = filter_forward_predictive_distribution(
m=m_t, # posterior predictive or one-step-predictive (if missing)
V=V_t,
Q=gls_params_t.Q,
C=gls_params_t.C,
d=gls_params_t.d,
)
auxiliary_predictive_dist_t = MultivariateNormal(
loc=mpz_t, covariance_matrix=Vpz_t,
)
z_gen_t = auxiliary_predictive_dist_t.rsample()
# 4d) Choice: inferred / predicted z for observed / missing data.
# One-step predictive if missing and inferred from encoder otherwise.
replace_z = is_filtered[:, :, None].repeat(1, 1, z_gen_t.shape[2])
z_t = torch.where(replace_z, z_infer_t, z_gen_t)
# 5) Put result in Latents object, used in next iteration
lats_t = LatentsKVAE(
variables=GLSVariablesKVAE(
m=m_t,
V=V_t,
Cov=None,
x=None,
auxiliary=z_t,
rnn_state=rnn_state_t,
m_auxiliary_variational=auxiliary_variational_dist_t.loc,
V_auxiliary_variational=auxiliary_variational_dist_t.covariance_matrix,
),
gls_params=gls_params_t,
)
return lats_t
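    # filter_forward_prediction_step and filter_forward_measurement_step
    # (used above) are assumed to implement the standard Gaussian
    # linear-system recursions; a sketch in the notation used here:
    #   prediction:  m_p = A m + b,   V_p = A V A^T + R
    #   update:      K   = V_p C^T (C V_p C^T + Q)^{-1}
    #                m_f = m_p + K (z - (C m_p + d)),   V_f = (I - K C) V_p
    # where z is the pseudo-observation produced by the encoder.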
def smooth_step(
self,
        lats_smooth_tp1: Optional[LatentsKVAE],
        lats_filter_t: Optional[LatentsKVAE],
) -> LatentsKVAE:
# use the functional implementation given fixed params and filter dist.
m_sm_t, V_sm_t, Cov_sm_t = smooth_backward_step(
# <-- future smoothing part
m_sm=lats_smooth_tp1.variables.m,
V_sm=lats_smooth_tp1.variables.V,
# --> past / current filter parts
m_fw=lats_filter_t.variables.m,
V_fw=lats_filter_t.variables.V,
# --> forward-generated GLS params (there is no bw for them).
A=lats_filter_t.gls_params.A,
R=lats_filter_t.gls_params.R,
b=lats_filter_t.gls_params.b,
)
        # pack into a Latents object.
lats_t = LatentsKVAE(
variables=GLSVariablesKVAE(
m=m_sm_t,
V=V_sm_t,
Cov=Cov_sm_t,
x=None,
auxiliary=lats_filter_t.variables.auxiliary, # from forward
rnn_state=lats_filter_t.variables.rnn_state, # from forward
m_auxiliary_variational=lats_filter_t.variables.m_auxiliary_variational,
V_auxiliary_variational=lats_filter_t.variables.V_auxiliary_variational,
),
gls_params=lats_filter_t.gls_params, # from forward
)
return lats_t
def sample_step(
self,
lats_tm1: LatentsKVAE,
ctrl_t: ControlInputsKVAE,
deterministic: bool = False,
) -> Prediction:
first_step = lats_tm1.gls_params is None
if first_step: # from t == 0, i.e. lats_tm1 is t == -1.
            n_batch = lats_tm1.variables.auxiliary.shape[1]
assert lats_tm1.variables.x is None
assert lats_tm1.variables.m is None
assert lats_tm1.variables.V is None
x_t_dist = self.state_prior_model(
None, batch_shape_to_prepend=(self.n_particle, n_batch)
)
else:
x_t_dist = torch.distributions.MultivariateNormal(
loc=(
matvec(lats_tm1.gls_params.A, lats_tm1.variables.x)
if lats_tm1.gls_params.A is not None
else lats_tm1.variables.x
)
+ (
lats_tm1.gls_params.b
if lats_tm1.gls_params.b is not None
else 0.0
),
scale_tril=lats_tm1.gls_params.LR,
)
rnn_state_t, rnn_output_t = self.compute_deterministic_switch_step(
rnn_input=lats_tm1.variables.auxiliary,
rnn_prev_state=lats_tm1.variables.rnn_state,
)
gls_params_t = self.gls_base_parameters(
switch=rnn_output_t, controls=ctrl_t,
)
x_t = x_t_dist.mean if deterministic else x_t_dist.rsample()
z_t_dist = torch.distributions.MultivariateNormal(
loc=matvec(gls_params_t.C, x_t)
+ (gls_params_t.d if gls_params_t.d is not None else 0.0),
covariance_matrix=gls_params_t.Q,
)
z_t = z_t_dist.mean if deterministic else z_t_dist.rsample()
lats_t = LatentsKVAE(
variables=GLSVariablesKVAE(
m=None,
V=None,
Cov=None,
x=x_t,
auxiliary=z_t,
rnn_state=rnn_state_t,
m_auxiliary_variational=None,
V_auxiliary_variational=None,
),
gls_params=gls_params_t,
)
emission_dist_t = self.emit(lats_t=lats_t, ctrl_t=ctrl_t)
emissions_t = (
emission_dist_t.mean if deterministic else emission_dist_t.sample()
)
return Prediction(latents=lats_t, emissions=emissions_t,)
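    # sample_step above is one step of ancestral sampling through the
    # generative model: x_t ~ N(A x_{t-1} + b, R) (or the state prior on the
    # first step), z_t ~ N(C x_t + d, Q), and y_t from measurement_model(z_t);
    # with deterministic=True the distribution means are used instead of
    # samples.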
def loss(
self,
        past_targets: Union[Sequence[torch.Tensor], torch.Tensor],
past_controls: Optional[
Union[Sequence[ControlInputs], ControlInputs]
] = None,
past_targets_is_observed: Optional[
Union[Sequence[torch.Tensor], torch.Tensor]
] = None,
) -> torch.Tensor:
if self.rao_blackwellized:
if past_targets_is_observed is None:
return self._loss_em_rb_efficient(
past_targets=past_targets, past_controls=past_controls,
)
else:
raise NotImplementedError(
"did not yet implement Rao-BW loss with missing data"
)
else:
if past_targets_is_observed is None:
return self._loss_em_mc_efficient(
past_targets=past_targets, past_controls=past_controls,
)
else:
return self._loss_em_mc(
past_targets=past_targets,
past_controls=past_controls,
past_targets_is_observed=past_targets_is_observed,
)
def _loss_em_mc(
self,
        past_targets: Union[Sequence[torch.Tensor], torch.Tensor],
past_controls: Optional[
Union[Sequence[ControlInputs], ControlInputs]
] = None,
past_targets_is_observed: Optional[
Union[Sequence[torch.Tensor], torch.Tensor]
] = None,
    ) -> torch.Tensor:
        """Monte Carlo loss as computed in the KVAE paper."""
n_batch = len(past_targets[0])
past_controls = self._expand_particle_dim(past_controls)
# A) SSM related distributions:
# A1) smoothing.
latents_smoothed = self.smooth(
past_targets=past_targets,
past_controls=past_controls,
past_targets_is_observed=past_targets_is_observed,
)
m = torch.stack([l.variables.m for l in latents_smoothed])
V = torch.stack([l.variables.V for l in latents_smoothed])
z = torch.stack([l.variables.auxiliary for l in latents_smoothed])
state_smoothed_dist = MultivariateNormal(loc=m, covariance_matrix=V)
x = state_smoothed_dist.rsample()
A = torch.stack([l.gls_params.A for l in latents_smoothed])
C = torch.stack([l.gls_params.C for l in latents_smoothed])
LR = torch.stack([l.gls_params.LR for l in latents_smoothed])
LQ = torch.stack([l.gls_params.LQ for l in latents_smoothed])
if latents_smoothed[0].gls_params.B is not None:
B = torch.stack([l.gls_params.B for l in latents_smoothed])
else:
B = None
if latents_smoothed[0].gls_params.D is not None:
D = torch.stack([l.gls_params.D for l in latents_smoothed])
else:
D = None
# A2) prior && posterior transition distribution.
prior_dist = self.state_prior_model(
None, batch_shape_to_prepend=(self.n_particle, n_batch)
)
        # A, B, R are already 0:T-1.
transition_dist = MultivariateNormal(
loc=matvec(A[:-1], x[:-1])
+ (
matvec(B[:-1], past_controls.state[:-1])
if B is not None
else 0.0
),
scale_tril=LR[:-1],
)
# A3) posterior predictive (auxiliary) distribution.
auxiliary_predictive_dist = MultivariateNormal(
loc=matvec(C, x)
+ (matvec(D, past_controls.target) if D is not None else 0.0),
scale_tril=LQ,
)
# A4) SSM related losses
# mean over particle dim, sum over time (after masking), leave batch dim
l_prior = -prior_dist.log_prob(x[0:1]).mean(dim=1).sum(dim=0)
l_transition = -transition_dist.log_prob(x[1:]).mean(dim=1).sum(dim=0)
l_entropy = state_smoothed_dist.log_prob(x).mean(dim=1).sum(dim=0)
_l_aux_timewise = -auxiliary_predictive_dist.log_prob(z).mean(dim=1)
if past_targets_is_observed is not None:
_l_aux_timewise = _l_aux_timewise * past_targets_is_observed
l_auxiliary = _l_aux_timewise.sum(dim=0)
# B) VAE related distributions
        # B1) inv_measurement_dist already obtained from smoothing (as we don't want to re-compute)
# B2) measurement (decoder) distribution
# transpose TPBF -> PTBF to broadcast log_prob of y (TBF) correctly
z_particle_first = z.transpose(0, 1)
measurement_dist = self.measurement_model(z_particle_first)
# B3) VAE related losses
# We use z_particle_first for correct broadcasting -> dim=0 is particle.
_l_meas_timewise = -measurement_dist.log_prob(past_targets).mean(dim=0)
if past_targets_is_observed is not None:
_l_meas_timewise = _l_meas_timewise * past_targets_is_observed
l_measurement = _l_meas_timewise.sum(dim=0)
auxiliary_variational_dist = MultivariateNormal(
loc=torch.stack(
[l.variables.m_auxiliary_variational for l in latents_smoothed]
),
covariance_matrix=torch.stack(
[l.variables.V_auxiliary_variational for l in latents_smoothed]
),
)
_l_variational_timewise = auxiliary_variational_dist.log_prob(
z_particle_first
).mean(
dim=0
) # again dim=0 is particle dim here.
if past_targets_is_observed is not None:
_l_variational_timewise = (
_l_variational_timewise * past_targets_is_observed
)
l_inv_measurement = _l_variational_timewise.sum(dim=0)
assert all(
t.shape == l_prior.shape
for t in (
l_prior,
l_transition,
l_auxiliary,
l_measurement,
l_inv_measurement,
)
)
l_total = (
self.reconstruction_weight * l_measurement
+ l_inv_measurement
+ l_auxiliary
+ l_prior
+ l_transition
+ l_entropy
)
return l_total
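    # Reading of the objective (sketch): with x ~ q(x | z) (smoothing
    # posterior) and z ~ q(z | y) (encoder), l_total is a single-sample
    # Monte-Carlo estimate of the negative ELBO,
    #   w_rec * E[-log p(y | z)]        (l_measurement)
    #   + E[log q(z | y)]               (l_inv_measurement)
    #   + E[-log p(z | x)]              (l_auxiliary)
    #   + E[-log p(x_1)]                (l_prior)
    #   + E[-log p(x_t | x_{t-1})]      (l_transition, summed over t > 1)
    #   + E[log q(x | z)]               (l_entropy)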
def emit(
self, lats_t: LatentsKVAE, ctrl_t: ControlInputs,
) -> torch.distributions.Distribution:
return self.measurement_model(lats_t.variables.auxiliary)
def compute_deterministic_switch_sequence(
self, rnn_inputs: torch.Tensor,
) -> Tuple[Sequence[Union[Tuple, torch.Tensor]], torch.Tensor]:
(T, P, B, F,) = rnn_inputs.shape
rnn_inputs_flat = rnn_inputs.reshape([T, P * B, F])
rnn_states = [None] * len(rnn_inputs)
for t in range(len(rnn_inputs)):
rnn_state_flat_t = self.rnn_switch_model(
input=rnn_inputs_flat[t],
hx=rnn_state_flat_t if t > 0 else None,
)
            if isinstance(rnn_state_flat_t, tuple):
rnn_states[t] = tuple(
_h.reshape([P, B, _h.shape[-1]]) for _h in rnn_state_flat_t
)
else:
rnn_states[t] = rnn_state_flat_t.reshape(
[P, B, rnn_state_flat_t.shape[-1]],
)
        if isinstance(rnn_states[0], tuple):
rnn_outputs = torch.stack(
[rnn_states[t][0] for t in range(T)], dim=0
)
else:
rnn_outputs = torch.stack(rnn_states, dim=0)
        return rnn_states, rnn_outputs
# Copyright 2017 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import math
import cmath
import numpy as np
import os
class ParameterExtractor(object):
def __init__(self, parameters):
if type(parameters) is dict:
self.parameters = parameters
self.wanted_words = self.get_param('wanted_words')
self.sample_rate = int(self.get_param('sample_rate'))
self.clip_duration_ms = int(self.get_param('clip_duration_ms'))
self.window_size_ms = int(self.get_param('window_size_ms'))
self.window_stride_ms = int(self.get_param('window_stride_coeff') * self.window_size_ms)
self.time_shift_ms = int(self.get_param('time_shift_ms'))
self.dct_coefficient_count = int(self.get_param('dct_coefficient_count'))
self.data_url = self.get_param('data_url')
self.data_dir = self.get_param('data_dir')
self.valid_dir = self.get_param('valid_dir')
self.silence_percentage = self.get_param('silence_percentage')
self.unknown_percentage = self.get_param('unknown_percentage')
self.validation_percentage = self.get_param('validation_percentage')
self.testing_percentage = self.get_param('testing_percentage')
self.how_many_training_steps = self.get_param('how_many_training_steps')
self.learning_rate = self.get_param('learning_rate')
self.model_architecture = self.get_param('model_architecture')
self.model_size_info = self.get_model_size_info()
self.check_nans = self.get_param('check_nans')
self.start_checkpoint = self.get_param('start_checkpoint')
self.batch_size = int(self.get_param('batch_size'))
self.background_frequency = self.get_param('background_frequency')
self.background_volume = self.get_param('background_volume')
self.eval_step_interval = int(self.get_param('eval_step_interval'))
self.lower_frequency = int(self.get_param('lower_frequency'))
self.upper_frequency = int(self.get_param('upper_frequency'))
self.num_fbank_filters = int(self.get_param('num_fbank_filters'))
self.is_bg_volume_constant = self.get_param('is_bg_volume_constant')
self.feature_extraction = self.get_param('feature_extraction')
self.summaries_dir = os.path.join(self.get_param('work_dir'), 'retrain_logs')
self.train_dir = os.path.join(self.get_param('work_dir'), 'training')
self.include_silence = self.silence_percentage != 0
elif type(parameters) is str:
print("Load json file")
else:
if len(parameters) != 30:
raise Exception("Wrong number of arguments for training function")
self.wanted_words = parameters[0]
self.sample_rate = parameters[1]
self.clip_duration_ms = parameters[2]
self.window_size_ms = parameters[3]
self.window_stride_ms = parameters[4]
self.time_shift_ms = parameters[5]
self.dct_coefficient_count = parameters[6]
self.data_url = parameters[7]
self.data_dir = parameters[8]
self.valid_dir = parameters[9]
self.silence_percentage = parameters[10]
self.unknown_percentage = parameters[11]
self.validation_percentage = parameters[12]
self.testing_percentage = parameters[13]
self.how_many_training_steps = parameters[14]
self.learning_rate = parameters[15]
self.model_architecture = parameters[16]
self.model_size_info = parameters[17]
self.check_nans = parameters[18]
self.summaries_dir = parameters[19]
self.work_dir = parameters[20]
self.start_checkpoint = parameters[21]
self.batch_size = parameters[22]
self.background_frequency = parameters[23]
self.background_volume = parameters[24]
self.eval_step_interval = parameters[25]
self.lower_frequency = parameters[26]
self.num_fbank_filters = parameters[27]
self.is_bg_volume_constant = parameters[28]
self.feature_extraction = parameters[29]
self.summaries_dir = os.path.join(self.work_dir, 'retrain_logs')
self.train_dir = os.path.join(self.work_dir, 'training')
self.include_silence = self.silence_percentage != 0
def get_param(self, param):
if param in self.parameters:
return self.parameters[param]
if param in self.parameters['search_space']:
return self.parameters['search_space'][param]
return None
def get_model_size_info(self):
# set parameters
msi = self.get_param('model_size_info')
num_layers = msi['num_layers']
model_size_info = [num_layers]
for i in range(num_layers):
layer = msi['layers'][i]
model_size_info.append(int(layer['num_channels']))
if 'sx' in layer.keys():
model_size_info.append(int(layer['sx']))
else:
model_size_info.append(3)
if 'sy' in layer.keys():
                model_size_info.append(int(layer['sy']))
else:
model_size_info.append(3)
if i == 0:
model_size_info.append(2)
model_size_info.append(2)
else:
model_size_info.append(1)
model_size_info.append(1)
return model_size_info
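    # The returned list appears to be laid out as
    #   [num_layers, ch_1, k1_1, k2_1, s1_1, s2_1, ch_2, ...]
    # i.e. five entries per layer (output channels, two kernel dims, two
    # strides), matching how write_ds_cnn_h_beginning below unpacks it.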
def find_parameter(parameters, parameter):
if parameter in parameters:
return parameters[parameter]
if parameter in parameters['search_space']:
return parameters['search_space'][parameter]
return None
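# Example of the fallback behaviour (hypothetical parameter dict):
#   params = {'sample_rate': 16000, 'search_space': {'window_size_ms': 40}}
#   find_parameter(params, 'sample_rate')     # -> 16000 (top level)
#   find_parameter(params, 'window_size_ms')  # -> 40 (from 'search_space')
#   find_parameter(params, 'anything_else')   # -> None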
def write_filter_coeffs(fname, inputSize, sampleRate, numberBands, maxFrequency, minFrequency, width=1):
EarQ = 9.26449
minBW = 24.7
filterSize = numberBands
filterSizeInv = 1.0 / filterSize
bw = EarQ * minBW
filterFrequencies = [0] * filterSize
for i in range(1, filterSize + 1):
filterFrequencies[filterSize - i] = -bw + math.exp(
i * (-1 * math.log(maxFrequency + bw) + math.log(minFrequency + bw)) * filterSizeInv) * (maxFrequency + bw)
filterCoefficients = []
order = 1
    fftSize = (inputSize - 1) * 2
oneJ = complex(0, 1)
ucirc = []
for i in range(inputSize):
ucirc.append(cmath.exp((oneJ * 2.0 * math.pi * i) / fftSize))
sqrP = math.sqrt(3 + pow(2, 1.5))
sqrM = math.sqrt(3 - pow(2, 1.5))
with open(fname, 'a') as f:
f.write("#define FILTER_COEFFS {")
for i in range(filterSize):
cf = filterFrequencies[i]
ERB = width * pow((pow((cf / EarQ), order) + pow(minBW, order)), 1.0 / order)
B = 1.019 * 2 * math.pi * ERB
r = math.exp(-B / sampleRate)
theta = 2 * math.pi * cf / sampleRate
pole = r * cmath.exp(oneJ * theta)
T = 1.0 / sampleRate
GTord = 4
sinCf = math.sin(2 * cf * math.pi * T)
            cosCf = math.cos(2 * cf * math.pi * T)
            gtCos = 2 * T * cosCf / math.exp(B * T)
            gtSin = T * sinCf / math.exp(B * T)
A11 = -(gtCos + 2 * sqrP * gtSin) / 2
A12 = -(gtCos - 2 * sqrP * gtSin) / 2
A13 = -(gtCos + 2 * sqrM * gtSin) / 2
A14 = -(gtCos - 2 * sqrM * gtSin) / 2
zeros = [-A11 / T, -A12 / T, -A13 / T, -A14 / T]
g1 = -2 * cmath.exp(4 * oneJ * cf * math.pi * T) * T
g2 = 2 * cmath.exp(-(B * T) + 2 * oneJ * cf * math.pi * T) * T
cxExp = cmath.exp(4 * oneJ * cf * math.pi * T)
filterGain = abs(
(g1 + g2 * (cosCf - sqrM * sinCf)) *
(g1 + g2 * (cosCf + sqrM * sinCf)) *
(g1 + g2 * (cosCf - sqrP * sinCf)) *
(g1 + g2 * (cosCf + sqrP * sinCf)) /
pow((-2 / cmath.exp(2 * B * T) - 2 * cxExp + 2 * (1 + cxExp) / math.exp(B * T)), 4))
filterCoeffs = []
f.write("{")
for j in range(inputSize):
filterCoeffsElement = (pow(T, 4) / filterGain) * abs(ucirc[j] - zeros[0]) * abs(
ucirc[j] - zeros[1]) * abs(ucirc[j] - zeros[2]) * abs(ucirc[j] - zeros[3]) * pow(
abs((pole - ucirc[j]) * (pole - ucirc[j])), (-GTord))
filterCoeffs.append(filterCoeffsElement)
if j == (inputSize - 1):
f.write('{}'.format(filterCoeffsElement))
else:
f.write('{}, '.format(filterCoeffsElement))
filterCoefficients.append(filterCoeffs)
if i == (filterSize - 1):
f.write("}")
else:
f.write("}, ")
f.write("}\n")
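# Sketch of the per-band quantities computed above (ERB-spaced gammatone
# filterbank):
#   ERB(cf) = width * ((cf / EarQ)^order + minBW^order)^(1 / order)
#   B       = 1.019 * 2 * pi * ERB(cf)
#   pole    = r * exp(i * theta),  r = exp(-B / sampleRate),
#             theta = 2 * pi * cf / sampleRate
# Each written coefficient is the magnitude response of the 4th-order
# (GTord) gammatone filter at the FFT bin ucirc[j], normalised by filterGain.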
def write_dct_table(fname, inputSize, outputSize):
# void DCT::createDctTableII(int inputSize, int outputSize)
scale0 = 1.0 / math.sqrt(inputSize)
scale1 = math.sqrt(2.0 / inputSize)
dctTable = []
with open(fname, 'a') as f:
f.write("#define DCT_TABLE {")
for i in range(outputSize):
scale = scale1
if i == 0:
scale = scale0
freqMultiplier = math.pi / inputSize * i
dctRow = []
f.write("{")
for j in range(inputSize):
dctElement = scale * math.cos(freqMultiplier * (j + 0.5))
dctRow.append(dctElement)
if j == (inputSize - 1):
f.write("{}".format(dctElement))
else:
f.write("{}, ".format(dctElement))
dctTable.append(dctRow)
if i == (outputSize - 1):
f.write("}")
else:
f.write("}, ")
f.write("} \n")
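# The table written above is the orthonormal DCT-II matrix:
#   dct[i][j] = s_i * cos(pi / N * i * (j + 0.5)),  N = inputSize,
# with s_0 = 1 / sqrt(N) and s_i = sqrt(2 / N) for i > 0.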
def write_gfcc_tables(fname, parameters, gfcc_minimums, gfcc_maximums):
number_fbank_filters = find_parameter(parameters, 'num_fbank_filters')
sample_rate = find_parameter(parameters, 'sample_rate')
samples_in_frame = int(find_parameter(parameters, 'window_size_ms') * sample_rate / 1000)
spectrum_length = int(samples_in_frame / 2) + 1
dct_coeff_count = int(find_parameter(parameters, 'dct_coefficient_count'))
low_frequency_bound = int(find_parameter(parameters, 'lower_frequency'))
high_frequency_bound = int(find_parameter(parameters, 'upper_frequency'))
with open(fname, 'w') as f:
f.write('#define NUM_BANDS {}\n'.format(number_fbank_filters))
f.write('#define SPECTRUM_SIZE {}\n'.format(spectrum_length))
f.write('#define NUM_DCT_COEFFS {}\n'.format(dct_coeff_count))
f.write('#define GFCC_MINIMUMS {')
        np.array(gfcc_minimums[:dct_coeff_count]).tofile(f, sep=", ", format="%f")
f.write('} \n')
f.write('#define GFCC_MAXIMUMS {')
        np.array(gfcc_maximums[:dct_coeff_count]).tofile(f, sep=", ", format="%f")
f.write('} \n')
write_filter_coeffs(fname, samples_in_frame, sample_rate, number_fbank_filters, high_frequency_bound,
low_frequency_bound)
write_dct_table(fname, number_fbank_filters, dct_coeff_count)
def write_ds_cnn_h_beginning(f, wanted_words, sample_rate, clip_duration_ms,
window_size_ms, window_stride_ms, dct_coefficient_count,
model_size_info, act_max):
f.write("#ifndef __DS_CNN_H__\n")
f.write("#define __DS_CNN_H__\n\n")
f.write('#include "nn.h"\n')
f.write('#include "ds_cnn_weights.h"\n')
f.write('#include "local_NN.h"\n')
f.write('#include "arm_math.h"\n\n')
desired_samples = int(sample_rate * clip_duration_ms / 1000)
window_size_samples = int(sample_rate * window_size_ms / 1000)
window_stride_samples = int(sample_rate * window_stride_ms / 1000)
length_minus_window = (desired_samples - window_size_samples)
if length_minus_window < 0:
spectrogram_length = 0
else:
spectrogram_length = 1 + int(length_minus_window / window_stride_samples)
input_x = dct_coefficient_count
input_y = spectrogram_length
f.write("#define SAMP_FREQ {}\n".format(sample_rate))
f.write("#define FEATURES_DEC_BITS {}\n".format(int(7 - np.log2(act_max[0]))))
f.write("#define FRAME_SHIFT_MS {}\n".format(int(window_stride_ms)))
f.write("#define FRAME_SHIFT ((int16_t)(SAMP_FREQ * 0.001 * FRAME_SHIFT_MS))\n")
f.write("#define NUM_FRAMES {}\n".format(spectrogram_length))
f.write("#define NUM_FEATURES_COEFFS {}\n".format(dct_coefficient_count))
f.write("#define FRAME_LEN_MS {}\n".format(int(window_size_ms)))
f.write("#define FRAME_LEN ((int16_t)(SAMP_FREQ * 0.001 * FRAME_LEN_MS))\n\n")
f.write("#define IN_DIM (NUM_FRAMES*NUM_FEATURES_COEFFS)\n")
f.write("#define OUT_DIM {}\n\n".format(int(len(wanted_words.split(',')) + 1)))
num_layers = model_size_info[0]
i = 1
for layer_no in range(1, num_layers + 1):
f.write("#define CONV{}_OUT_CH {}\n".format(layer_no, model_size_info[i]))
i += 1
ky = model_size_info[i]
i += 1
kx = model_size_info[i]
i += 1
sy = model_size_info[i]
i += 1
sx = model_size_info[i]
out_x = math.ceil(float(input_x) / float(sx))
out_y = math.ceil(float(input_y) / float(sy))
pad_x = max((out_x - 1) * sx + kx - input_x, 0) // 2
pad_y = max((out_y - 1) * sy + ky - input_y, 0) // 2
if layer_no == 1:
f.write("#define CONV1_IN_X NUM_FEATURES_COEFFS\n")
f.write("#define CONV1_IN_Y NUM_FRAMES\n")
f.write("#define CONV{}_KX {}\n".format(layer_no, kx))
f.write("#define CONV{}_KY {}\n".format(layer_no, ky))
f.write("#define CONV{}_SX {}\n".format(layer_no, sx))
f.write("#define CONV{}_SY {}\n".format(layer_no, sy))
f.write("#define CONV{}_PX {}\n".format(layer_no, pad_x))
f.write("#define CONV{}_PY {}\n".format(layer_no, pad_y))
f.write("#define CONV{}_OUT_X {}\n".format(layer_no, out_x))
f.write("#define CONV{}_OUT_Y {}\n".format(layer_no, out_y))
else:
f.write("#define CONV{1}_IN_X CONV{0}_OUT_X\n".format(layer_no - 1, layer_no))
f.write("#define CONV{1}_IN_Y CONV{0}_OUT_Y\n".format(layer_no - 1, layer_no))
f.write("#define CONV{}_DS_KX {}\n".format(layer_no, kx))
f.write("#define CONV{}_DS_KY {}\n".format(layer_no, ky))
f.write("#define CONV{}_DS_SX {}\n".format(layer_no, sx))
| |
from flask import Flask, render_template, request, session, redirect, url_for
import pymysql.cursors
import datetime
from pyecharts import options as opts
from pyecharts.charts import Pie,Bar
from appdef import *
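# NOTE: `app`, the pymysql connection `conn`, and helpers such as
# `validateDates` used further down are assumed to be provided by `appdef`
# via the star import above.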
#Get the airline the staff member works for
def getStaffAirline():
username = session['username']
cursor = conn.cursor()
#username is a primary key
query = 'select airline_name from airline_staff where username = %s'
cursor.execute(query, (username))
#fetchall returns an array, each element is a dictionary
airline = cursor.fetchall()[0]['airline_name']
cursor.close()
return airline
#Make sure that the user is actually staff before performing any operations
def authenticateStaff():
username = ""
try:
#could be that there is no user, make sure
username = session['username']
except:
return False
cursor = conn.cursor()
query = 'select * from airline_staff where username=%s'
cursor.execute(query, (username))
data = cursor.fetchall()
cursor.close()
if data:
return True
else:
#Logout before returning error message
session.pop('username')
return False
@app.route('/staffHome')
def staffHome():
if authenticateStaff():
username = session['username']
message = request.args.get('message')
cursor = conn.cursor()
queryGetairline = "SELECT airline_name FROM airline_staff WHERE username= %s"
cursor.execute(queryGetairline, username)
airline_name = cursor.fetchone()['airline_name']
# query top destination for the past 3 months
query1 = "select count(ticket.ticket_id) as cnt, airport.airport_city as city\
from airport,flight,ticket,purchases\
where airport.airport_name = flight.arrival_airport\
and flight.flight_num = ticket.flight_num\
and flight.airline_name = %s\
and purchases.ticket_id = ticket.ticket_id\
and purchases.purchase_date between DATE_SUB(curdate(), INTERVAL 3 MONTH) and curdate()\
group by city \
order by cnt DESC limit 3"
cursor.execute(query1,airline_name)
data1 = cursor.fetchall()
if len(data1)<3:
num = len(data1)
range1 = range(num)
data1 = [data1[i]['city'] for i in range(num)]
else:
range1 = range(3)
data1 = [data1[i]['city'] for i in range(3)]
# query top destination for the past 1 year
query2 = "select count(ticket.ticket_id) as cnt, airport.airport_city as city\
from airport,flight,ticket,purchases\
where airport.airport_name = flight.arrival_airport\
and flight.flight_num = ticket.flight_num\
and flight.airline_name = %s\
and purchases.ticket_id = ticket.ticket_id\
and purchases.purchase_date between DATE_SUB(curdate(), INTERVAL 1 YEAR) and curdate()\
group by city \
order by cnt DESC limit 3"
cursor.execute(query2,airline_name)
data2 = cursor.fetchall()
if len(data2)<3:
num = len(data2)
range2 = range(num)
data2 = [data2[i]['city'] for i in range(num)]
else:
range2 = range(3)
data2 = [data2[i]['city'] for i in range(3)]
cursor.close()
return render_template('staff.html', username=username,
message=message,
destination1 = data1,
destination2 = data2,
range1 = range1,
range2 = range2)
else:
error = 'Invalid Credentials'
return redirect(url_for('errorpage', error=error))
@app.route('/staffHome/searchFlights')
def searchFlightsPage():
if authenticateStaff():
cursor = conn.cursor()
airline = getStaffAirline()
query = "select * from flight where airline_name = %s \
and ((departure_time between curdate() and date_add(curdate(), interval 30 day)) \
or (arrival_time between curdate() and date_add(curdate(), interval 30 day)))"
cursor.execute(query, (airline))
data = cursor.fetchall()
cursor.close()
error = request.args.get('error')
return render_template('searchStaff.html', error=error, results=data)
else:
error = 'Invalid Credentials'
return redirect(url_for('errorpage', error=error))
@app.route('/staffHome/searchFlights/city', methods=['POST'])
def searchFlightsCity():
if authenticateStaff():
cursor = conn.cursor()
city = request.form['citysearchbox']
airline = getStaffAirline()
query = "select * from flight,airport \
where (airport.airport_name=flight.departure_airport or airport.airport_name=flight.arrival_airport) \
and airport.airport_city=%s and airline_name=%s"
cursor.execute(query, (city, airline))
data = cursor.fetchall()
cursor.close()
error = None
if data:
return render_template('searchStaffResults.html', results=data)
else:
#returns an error message to the html page
error = 'No results found'
return redirect(url_for('searchFlightsPage', error=error))
else:
error = 'Invalid Credentials'
return redirect(url_for('errorpage', error=error))
@app.route('/staffHome/searchFlights/airport', methods=['POST'])
def searchFlightsAirport():
if authenticateStaff():
cursor = conn.cursor()
airport = request.form['airportsearchbox']
airline = getStaffAirline()
query = 'select * from flight where (departure_airport = %s or arrival_airport = %s) and airline_name=%s'
cursor.execute(query, (airport, airport, airline))
data = cursor.fetchall()
cursor.close()
error = None
if data:
return render_template('searchStaffResults.html', results=data)
else:
#returns an error message to the html page
error = 'No results found'
return redirect(url_for('searchFlightsPage', error=error))
else:
error = 'Invalid Credentials'
return redirect(url_for('errorpage', error=error))
@app.route('/staffHome/searchFlights/date', methods=['POST'])
def searchFlightsDate():
if authenticateStaff():
begintime = request.form['begintime']
endtime = request.form['endtime']
if not validateDates(begintime, endtime):
error = 'Invalid date range'
return redirect(url_for('searchFlightsPage', error=error))
airline = getStaffAirline()
cursor = conn.cursor()
query = "select * from flight \
where ((departure_time between %s and %s) \
or (arrival_time between %s and %s)) and airline_name=%s"
cursor.execute(query, (begintime, endtime, begintime, endtime, airline))
data = cursor.fetchall()
cursor.close()
error = None
if data:
return render_template('searchStaffResults.html', results=data)
else:
#returns an error message to the html page
error = 'No results found'
return redirect(url_for('searchFlightsPage', error=error))
else:
error = 'Invalid Credentials'
return redirect(url_for('errorpage', error=error))
@app.route('/staffHome/searchFlights/customers', methods=['POST'])
def searchFlightsCustomer():
if authenticateStaff():
flightnum = request.form['flightsearchbox']
airline = getStaffAirline()
cursor = conn.cursor()
query = "select customer_email from purchases natural join ticket\
where flight_num = %s and airline_name=%s"
cursor.execute(query, (flightnum, airline))
data = cursor.fetchall()
cursor.close()
if data:
return render_template('searchStaffResults.html', customerresults=data, flightnum=flightnum)
else:
#returns an error message to the html page
error = 'No results found'
return redirect(url_for('searchFlightsPage', error=error))
else:
error = 'Invalid Credentials'
return redirect(url_for('errorpage', error=error))
@app.route('/staffHome/createFlight')
def createFlightPage():
if authenticateStaff():
airline = getStaffAirline()
cursor = conn.cursor()
airline = getStaffAirline()
query = "select * from flight where airline_name = %s \
and ((departure_time between curdate() and date_add(curdate(), interval 30 day)) \
or (arrival_time between curdate() and date_add(curdate(), interval 30 day)))"
cursor.execute(query, (airline))
data = cursor.fetchall()
cursor = conn.cursor()
query = 'select distinct airport_name from airport'
cursor.execute(query)
airportdata = cursor.fetchall()
query = 'select distinct airplane_id from airplane where airline_name=%s'
cursor.execute(query, (airline))
airplanedata = cursor.fetchall()
cursor.close()
error = request.args.get('error')
return render_template('createFlight.html', error = error,
airportdata = airportdata,
airplanedata = airplanedata,
results = data)
else:
error = 'Invalid Credentials'
return redirect(url_for('errorpage', error=error))
@app.route('/staffHome/createFlight/Auth', methods=['POST'])
def createFlight():
# prevent unauthorized users from doing this action
if not authenticateStaff():
error = 'Invalid Credentials'
return redirect(url_for('errorpage', error=error))
username = session['username']
flightnum = request.form['flightnum']
departport = request.form['departport']
departtime = request.form['departtime']
arriveport = request.form['arriveport']
arrivetime = request.form['arrivetime']
price = request.form['price']
status = "Upcoming"
airplaneid = request.form['airplanenum']
##########################################################################
airline = getStaffAirline()
cursor = conn.cursor()
query1 = 'select * from flight where airline_name = %s and flight_num = %s'
cursor.execute(query1,(airline,flightnum))
data1 = cursor.fetchall()
if data1:
error = "The flight number already exists, please enter another one."
return redirect(url_for('createFlightPage', error=error))
cursor.close()
#############################################################################
#############################################################################
cursor = conn.cursor()
query2 = 'select * from airport where airport_name = %s '
cursor.execute(query2,(departport))
data2 = cursor.fetchall()
query3 = 'select * from airport where airport_name = %s '
cursor.execute(query3,(arriveport))
data3 = cursor.fetchall()
if (not data2):
error = "The Departure Airport does not exist, please add the airport first."
return redirect(url_for('createFlightPage', error=error))
if (not data3):
error = "The Arrival Airport does not exist, please add the airport first."
return redirect(url_for('createFlightPage', error=error))
cursor.close()
#############################################################################
if not validateDates(departtime, arrivetime):
error = 'Invalid date range'
return redirect(url_for('createFlightPage', error=error))
airline = getStaffAirline()
#Check that airplane is valid
cursor = conn.cursor()
query = 'select * from airplane where airplane_id = %s'
cursor.execute(query, (airplaneid))
data = cursor.fetchall()
if not data:
error = 'Invalid Airplane ID'
return redirect(url_for('createFlightPage', error=error))
query = 'insert into flight values (%s, %s, %s, %s, %s, %s, %s, %s, %s)'
cursor.execute(query, (airline, flightnum, departport, departtime, arriveport, arrivetime, price, status, airplaneid))
conn.commit()
cursor.close()
return redirect(url_for('staffHome', message="Operation Successful"))
@app.route('/staffHome/changeFlight')
def changeFlightStatusPage():
if authenticateStaff():
error = request.args.get('error')
return render_template('changeFlight.html', error=error)
else:
error = 'Invalid Credentials'
return redirect(url_for('errorpage', error=error))
@app.route('/staffHome/changeFlight/Auth', methods=['POST'])
def changeFlightStatus():
# prevent unauthorized users from doing this action
if not authenticateStaff():
error = 'Invalid Credentials'
return redirect(url_for('errorpage', error=error))
username = session['username']
cursor = conn.cursor()
flightnum = request.form['flightnum']
status = request.form['status']
if not status:
error = 'Did not select new status'
return redirect(url_for('changeFlightStatusPage', error=error))
airline = getStaffAirline()
#Check that the flight is from the same airline as the staff
query = 'select * from flight where flight_num = %s and airline_name = %s'
cursor.execute(query, (flightnum, airline))
data = cursor.fetchall()
##################################################################################
if not data:
        error = 'Invalid entry - flight number is not in your airline'
return redirect(url_for('changeFlightStatusPage', error=error))
##################################################################################
#Update the specified flight
query = 'update flight set status=%s where flight_num=%s and airline_name = %s'
cursor.execute(query, (status, flightnum, airline))
conn.commit()
cursor.close()
return redirect(url_for('staffHome', message="Operation Successful"))
@app.route('/staffHome/addAirplane')
def addAirplanePage():
if authenticateStaff():
error = request.args.get('error')
return render_template('addAirplane.html', error=error)
else:
error = 'Invalid Credentials'
return redirect(url_for('errorpage', error=error))
@app.route('/staffHome/addAirplane/confirm', methods=['POST'])
def addAirplane():
# prevent unauthorized users from doing this action
if not authenticateStaff():
error = 'Invalid Credentials'
return redirect(url_for('errorpage', error=error))
username = session['username']
planeid = request.form['id']
seats = request.form['seats']
airline = getStaffAirline()
| |
<reponame>esayui/mworks
from contextlib import contextmanager
import inspect
import multiprocessing
import os
import shutil
import subprocess
import sys
import urllib
################################################################################
#
# Shared configuration
#
################################################################################
def join_flags(*flags):
return ' '.join(flags).strip()
building_for_ios = False
if os.environ['PLATFORM_NAME'] in ('iphoneos', 'iphonesimulator'):
building_for_ios = True
else:
assert os.environ['PLATFORM_NAME'] == 'macosx'
assert os.environ['GCC_VERSION'] == 'com.apple.compilers.llvm.clang.1_0'
ar = os.environ['DT_TOOLCHAIN_DIR'] + '/usr/bin/ar'
cc = os.environ['DT_TOOLCHAIN_DIR'] + '/usr/bin/clang'
cxx = os.environ['DT_TOOLCHAIN_DIR'] + '/usr/bin/clang++'
make = os.environ['DEVELOPER_DIR'] + '/usr/bin/make'
rsync = '/usr/bin/rsync'
xcodebuild = os.environ['DEVELOPER_DIR'] + '/usr/bin/xcodebuild'
num_cores = str(multiprocessing.cpu_count())
common_flags = ' '.join(('-arch ' + arch) for arch in
os.environ['ARCHS'].split())
common_flags += ' -isysroot %(SDKROOT)s'
if building_for_ios:
common_flags += ' -miphoneos-version-min=%(IPHONEOS_DEPLOYMENT_TARGET)s'
else:
common_flags += ' -mmacosx-version-min=%(MACOSX_DEPLOYMENT_TARGET)s'
common_flags %= os.environ
compile_flags = ('-g -Os -fexceptions -fvisibility=hidden ' +
'-Werror=unguarded-availability ' +
common_flags)
if os.environ['ENABLE_BITCODE'] == 'YES':
compile_flags += {
'bitcode': ' -fembed-bitcode',
'marker': ' -fembed-bitcode-marker',
}.get(os.environ['BITCODE_GENERATION_MODE'], '')
cflags = '-std=%(GCC_C_LANGUAGE_STANDARD)s' % os.environ
cxxflags = ('-std=%(CLANG_CXX_LANGUAGE_STANDARD)s '
'-stdlib=%(CLANG_CXX_LIBRARY)s'
% os.environ)
link_flags = common_flags
downloaddir = os.path.abspath('download')
patchdir = os.path.abspath('patches')
xcconfigdir = os.path.abspath('../build/xcode')
builddir = os.environ['TARGET_TEMP_DIR']
prefix = os.environ['BUILT_PRODUCTS_DIR']
frameworksdir = prefix + '/Frameworks'
matlabdir = prefix + '/MATLAB'
includedir = prefix + '/include'
libdir = prefix + '/lib'
################################################################################
#
# Build helpers
#
################################################################################
all_builders = []
def builder(func):
argspec = inspect.getargspec(func)
defaults = dict(zip(argspec[0], argspec[3] or []))
if building_for_ios:
if defaults.get('ios', False):
all_builders.append(func)
else:
if defaults.get('macos', True):
all_builders.append(func)
return func
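# Sketch of how registration is driven by a builder's default kwargs
# (hypothetical builder names):
#   @builder
#   def both_platforms(ios=True): ...   # built for macOS and iOS targets
#   @builder
#   def macos_only(): ...               # 'macos' defaults to True -> macOS only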
def announce(msg, *args):
sys.stderr.write((msg + '\n') % args)
def check_call(args, **kwargs):
announce('Running command: %s', ' '.join(repr(a) for a in args))
cmd = subprocess.Popen(args,
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT,
**kwargs)
output = cmd.communicate()[0]
if 0 != cmd.returncode:
announce('Command exited with status %d and output:\n%s',
cmd.returncode,
output)
sys.exit(1)
@contextmanager
def workdir(path):
old_path = os.getcwd()
announce('Entering directory %r', path)
os.chdir(path)
yield
announce('Leaving directory %r', path)
os.chdir(old_path)
class DoneFileExists(Exception):
pass
@contextmanager
def done_file(tag):
filename = tag + '.done'
if os.path.isfile(filename):
raise DoneFileExists
yield
with open(filename, 'w') as fp:
fp.write('Done!\n')
def always_download_file(url, filepath):
check_call(['/usr/bin/curl', '-#', '-L', '-f', '-o', filepath, url])
def download_file(url, filename):
filepath = downloaddir + '/' + filename
if os.path.isfile(filepath):
announce('Already downloaded file %r', filename)
else:
always_download_file(url, filepath)
def download_archive(url_path, filename):
download_file(url_path + filename, filename)
def download_archive_from_sf(path, version, filename):
url = (('http://downloads.sourceforge.net/project/%s/%s/%s'
'?use_mirror=autoselect') % (path, version, filename))
return download_file(url, filename)
def make_directory(path):
if not os.path.isdir(path):
check_call(['/bin/mkdir', '-p', path])
def make_directories(*args):
for path in args:
make_directory(path)
def remove_directory(path):
if os.path.isdir(path):
check_call(['/bin/rm', '-Rf', path])
def remove_directories(*args):
for path in args:
remove_directory(path)
def unpack_tarfile(filename, outputdir):
check_call(['/usr/bin/tar', 'xf', downloaddir + '/' + filename])
def unpack_zipfile(filename, outputdir):
check_call([
'/usr/bin/unzip',
'-q',
downloaddir + '/' + filename,
'-d', outputdir,
])
def apply_patch(patchfile, strip=1):
with open(patchdir + '/' + patchfile) as fp:
check_call(
args = ['/usr/bin/patch', '-p%d' % strip],
stdin = fp,
)
def get_clean_env():
env = os.environ.copy()
# The presence of these can break some build tools
env.pop('IPHONEOS_DEPLOYMENT_TARGET', None)
env.pop('MACOSX_DEPLOYMENT_TARGET', None)
if building_for_ios:
env.pop('SDKROOT', None)
return env
def run_b2(libraries, clean=False):
b2_args = [
'./b2',
#'-d', '2', # Show actual commands run,
'-j', num_cores,
'--prefix=' + prefix,
'--includedir=' + includedir,
'--libdir=' + libdir,
'variant=release',
'optimization=space',
'debug-symbols=on',
'link=static',
'threading=multi',
'define=boost=mworks_boost',
# Unfortunately, Boost.Python won't compile against Python 3.2+ with
# Py_LIMITED_API defined
#'define=Py_LIMITED_API',
'cflags=' + compile_flags,
'cxxflags=' + cxxflags,
'linkflags=' + link_flags,
]
b2_args += ['--with-' + l for l in libraries]
if clean:
b2_args.append('--clean')
else:
b2_args.append('install')
check_call(b2_args)
def get_updated_env(
extra_compile_flags = '',
extra_cflags = '',
extra_cxxflags = '',
extra_ldflags = '',
extra_cppflags = '',
):
env = get_clean_env()
env.update({
'CC': cc,
'CXX': cxx,
'CFLAGS': join_flags(compile_flags, extra_compile_flags,
cflags, extra_cflags),
'CXXFLAGS': join_flags(compile_flags, extra_compile_flags,
cxxflags, extra_cxxflags),
'LDFLAGS': join_flags(link_flags, extra_ldflags),
'CPPFLAGS': join_flags(common_flags, extra_cppflags),
})
return env
def run_make(targets=[]):
check_call([make, '-j', num_cores] + targets)
def run_configure_and_make(
extra_args = [],
command = ['./configure'],
extra_compile_flags = '',
extra_cflags = '',
extra_cxxflags = '',
extra_ldflags = '',
extra_cppflags = '',
ios_host_platform = 'arm-apple-darwin',
):
args = [
'--prefix=' + prefix,
'--includedir=' + includedir,
'--libdir=' + libdir,
'--disable-dependency-tracking',
'--disable-shared',
'--enable-static',
]
# Force configure into cross-compilation mode when building for an
# iOS device or simulator
if building_for_ios:
args.append('--host=' + ios_host_platform)
check_call(
args = command + args + extra_args,
env = get_updated_env(extra_compile_flags,
extra_cflags,
extra_cxxflags,
extra_ldflags,
extra_cppflags),
)
run_make(['install'])
def add_object_files_to_libpythonall(exclude=()):
object_files = []
for dirpath, dirnames, filenames in os.walk('.'):
for name in filenames:
if name.endswith('.o') and name not in exclude:
object_files.append(os.path.join(dirpath, name))
check_call([
ar,
'rcs',
libdir + ('/libpython%s_all.a' % os.environ['MW_PYTHON_3_VERSION']),
] + object_files)
################################################################################
#
# Library builders
#
################################################################################
@builder
def libffi(ios=True):
version = '3.3-rc0'
srcdir = 'libffi-' + version
tarfile = srcdir + '.tar.gz'
with done_file(srcdir):
if not os.path.isdir(srcdir):
download_archive('https://github.com/libffi/libffi/releases/download/v%s/' % version, tarfile)
unpack_tarfile(tarfile, srcdir)
with workdir(srcdir):
other_kwargs = {}
if building_for_ios:
assert os.environ['ARCHS'] == 'arm64'
other_kwargs['ios_host_platform'] = 'aarch64-apple-darwin'
run_configure_and_make(
extra_args = ['--enable-portable-binary'],
extra_cflags = '-std=gnu11',
**other_kwargs
)
@builder
def openssl(ios=True):
version = '1.1.1c'
srcdir = 'openssl-' + version
tarfile = srcdir + '.tar.gz'
with done_file(srcdir):
if not os.path.isdir(srcdir):
download_archive('https://www.openssl.org/source/', tarfile)
unpack_tarfile(tarfile, srcdir)
with workdir(srcdir):
if building_for_ios:
assert os.environ['ARCHS'] == 'arm64'
config_name = 'ios64-cross'
else:
assert os.environ['ARCHS'] == 'x86_64'
config_name = 'darwin64-x86_64-cc'
env = get_clean_env()
env['AR'] = ar
env['CC'] = cc
check_call([
'./Configure',
config_name,
'--prefix=' + prefix,
'no-shared',
join_flags(compile_flags, cflags, '-std=gnu11'),
],
env = env)
run_make()
run_make(['install_sw'])
@builder
def python(ios=True):
version = '3.7.4'
srcdir = 'Python-' + version
tarfile = srcdir + '.tgz'
assert version[:version.rfind('.')] == os.environ['MW_PYTHON_3_VERSION']
with done_file(srcdir):
if not os.path.isdir(srcdir):
download_archive('https://www.python.org/ftp/python/%s/' % version, tarfile)
unpack_tarfile(tarfile, srcdir)
with workdir(srcdir):
apply_patch('python_ctypes.patch')
apply_patch('python_static_zlib.patch')
if building_for_ios:
apply_patch('python_ios_build.patch')
apply_patch('python_ios_disabled_modules.patch')
apply_patch('python_ios_fixes.patch')
apply_patch('python_ios_test_fixes.patch')
else:
apply_patch('python_macos_10_13_required.patch')
apply_patch('python_macos_disabled_modules.patch')
apply_patch('python_macos_test_fixes.patch')
with workdir(srcdir):
extra_args = [
'--without-ensurepip',
'--with-openssl=' + prefix,
]
if building_for_ios:
extra_args += [
'--build=x86_64-apple-darwin',
'--enable-ipv6',
'PYTHON_FOR_BUILD=' + os.environ['MW_PYTHON_3'],
'ac_cv_file__dev_ptmx=no',
'ac_cv_file__dev_ptc=no',
]
else:
# Set MACOSX_DEPLOYMENT_TARGET, so that the correct value is
# recorded in the installed sysconfig data
extra_args.append('MACOSX_DEPLOYMENT_TARGET=' +
os.environ['MACOSX_DEPLOYMENT_TARGET'])
run_configure_and_make(
extra_args = extra_args,
extra_compile_flags = '-fvisibility=default',
)
add_object_files_to_libpythonall(
exclude = ['_testembed.o', 'python.o']
)
# Generate list of trusted root certificates (for ssl module)
always_download_file(
url = 'https://mkcert.org/generate/',
filepath = os.path.join(os.environ['MW_PYTHON_3_STDLIB_DIR'],
'cacert.pem'),
)
@builder
def numpy(ios=True):
version = '1.17.1'
srcdir = 'numpy-' + version
tarfile = srcdir + '.tar.gz'
with done_file(srcdir):
if not os.path.isdir(srcdir):
download_archive('https://github.com/numpy/numpy/releases/download/v%s/' % version, tarfile)
unpack_tarfile(tarfile, srcdir)
with workdir(srcdir):
if building_for_ios:
apply_patch('numpy_ios_fixes.patch')
apply_patch('numpy_ios_test_fixes.patch')
with workdir(srcdir):
env = get_clean_env()
env['PYTHONPATH'] = os.environ['MW_PYTHON_3_STDLIB_DIR']
# Don't use Accelerate, as it seems to make things worse rather
# than better
env['NPY_BLAS_ORDER'] = ''
env['NPY_LAPACK_ORDER'] = ''
if building_for_ios:
env.update({
'_PYTHON_HOST_PLATFORM': 'darwin-arm',
'_PYTHON_SYSCONFIGDATA_NAME': '_sysconfigdata_m_darwin_darwin',
# numpy's configuration tests link test executables using
# bare cc (without cflags). Add common_flags to ensure that
# linking uses the correct architecture and SDK.
'CC': join_flags(cc, common_flags)
})
check_call([
os.environ['MW_PYTHON_3'],
'setup.py',
'build',
'-j', num_cores,
'install',
'--prefix=' + prefix,
# Force egg info into a separate directory. (Not sure why
# including --root has this effect, but whatever.)
'--root=/',
],
env = env)
add_object_files_to_libpythonall()
# The numpy test suite requires pytest, so install it and its
# dependencies (but outside of any standard location, because we
# don't want to distribute it)
check_call([
os.environ['MW_PYTHON_3'],
'-m', 'pip',
'install',
'--target', os.path.join(prefix, 'pytest'),
'pytest',
])
@builder
def boost(ios=True):
version = '1.71.0'
srcdir = 'boost_' + version.replace('.', '_')
tarfile = srcdir + '.tar.bz2'
with done_file(srcdir):
project_config_jam = 'project-config.jam'
project_config_jam_orig = project_config_jam + '.orig'
if not os.path.isdir(srcdir):
download_archive('https://dl.bintray.com/boostorg/release/%s/source/' % version, tarfile)
unpack_tarfile(tarfile, srcdir)
with workdir(srcdir):
os.symlink('boost', 'mworks_boost')
env = get_clean_env()
if building_for_ios:
# Need to use the macOS SDK when compiling the build system
env['SDKROOT'] = subprocess.check_output([
'/usr/bin/xcrun',
'--sdk', 'macosx',
'--show-sdk-path',
]).strip()
check_call([
'./bootstrap.sh',
'--with-toolset=clang',
'--without-icu',
'--without-libraries=python', # Configure python manually
],
env = env,
)
shutil.move(project_config_jam, project_config_jam_orig)
with workdir(srcdir):
shutil.copy(project_config_jam_orig, project_config_jam)
libraries = ['filesystem', 'random', 'regex', 'thread']
if not building_for_ios:
libraries += ['serialization']
run_b2(libraries)
for tag in (() if building_for_ios else ('',)) + ('_3',):
shutil.copy(project_config_jam_orig, project_config_jam)
with open(project_config_jam, 'a') as fp:
fp.write('\nusing python : %s : %s : %s : %s ;\n' %
(os.environ['MW_PYTHON%s_VERSION' % tag],
# Prevent Boost's build system from running the
# Python executable
'/usr/bin/false',
os.environ['MW_PYTHON%s_INCLUDEDIR' % tag],
os.environ['MW_PYTHON%s_LIBDIR' % tag]))
libraries = ['python']
# Remove previous build products before building again
run_b2(libraries, clean=True)
run_b2(libraries)
with workdir(includedir):
if not os.path.islink('mworks_boost'):
os.symlink('boost', 'mworks_boost')
@builder
def zeromq(ios=True):
version = '4.3.2'
srcdir = 'zeromq-' + version
tarfile = srcdir + '.tar.gz'
with done_file(srcdir):
if not os.path.isdir(srcdir):
download_archive('https://github.com/zeromq/libzmq/releases/download/v%s/' % version, tarfile)
unpack_tarfile(tarfile, srcdir)
with workdir(srcdir):
run_configure_and_make(
extra_args = [
'--disable-silent-rules',
'--disable-perf',
'--disable-curve-keygen',
'--disable-curve',
],
extra_ldflags = '-lc++',
)
@builder
def msgpack(ios=True):
version = '3.2.0'
srcdir = 'msgpack-' + version
tarfile = srcdir + '.tar.gz'
with done_file(srcdir):
if not os.path.isdir(srcdir):
download_archive('https://github.com/msgpack/msgpack-c/releases/download/cpp-%s/' % version, tarfile)
unpack_tarfile(tarfile, srcdir)
with workdir(srcdir):
check_call([rsync, '-a', 'include/', includedir])
@builder
def libxslt(macos=False, ios=True):
version = '1.1.29'
srcdir = 'libxslt-' + version
tarfile = srcdir + '.tar.gz'
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##############################################
# Institut Villebon, UE 3.1
# Project: mon_via_navigo
# Author: C.Lavrat, <NAME>, <NAME>
# Creation date: 27/12/15
# Last modified: 27/12/15
##############################################
#------------------------------------------------------------------------------#
#Modules used in the class : #
#------------------------------------------------------------------------------#
#home made library
from Station import Station
from Line import Line
from Graph import Graph
#used library
from math import sqrt, pi, cos, sin
import os
import sys
import platform
#This is just for the plot of the graph
if platform.python_version_tuple()[0] != '3':
import numpy as np
import matplotlib.pyplot as plt
class PublicTransportationNetwork:
"""
The PublicTransportationNetwork module
=====================================
This module represents a graph of a public transportation network.
Note: all the stations with the same name are linked.
Parameters
----------
:param name: name of the network
:param scale: scale of the map
:type name: str
:type scale: int
:Example:
>>>network = PublicTransportationNetwork("Network_1", 1/25000)
>>>
Line creation
----------------
add_a_line(self, line):
| adds a new line in the network
distances(self, from_station, to_station):
| calculates distances between two stations
set_all_distances(self, line):
| sets all the distances between all the station of the line
duration(self,station_1, station_2, mode):
| gives the time between two stations
| depending on the mode of transportation of the line
dec_to_hour(self, time):
| takes the hour in base 10 and gives the hour in base 60
reset_network(self):
| resets all the variables of the network
connection(self, couple):
| connects two points of a network
| and adds a new line "tunnel"
connection_all(self):
| connects all point with the same name in the network
create_straight_line(self,line_name, stations_names, from_x, from_y, to_x, to_y, mode):
| this function creates a straight line between two stations
create_circular_line(self,line_name, stations_names, center_x, center_y, radius, mode):
| this function creates a circular line with a radius of radius and
| line_name as a departure station
Graph print
-----------
__str__(self):
| gives a visualisation of the network
__plot__(self, legend = True):
| Gives a representation of the network with matplotlib.
| /!\ : ONLY ON PYTHON 2.7!
| pass legend=False to hide the legend
Graph data
----------
save(self, file_name): saves all the data of the graph in a file
load(self, file_name): loads a graph from a file
.. Date:: 28/12/2015
.. author:: Cyril
"""
#------------------------------------------------------------------------------#
#initialisation of the class : #
#------------------------------------------------------------------------------#
def __init__(self,name, scale):
"""
The PublicTransportationNetwork module
=====================================
This module represents a graph of a public transportation network
Parameters
----------
:param name: name of the network
:param scale: scale of the map
:type name: str
:type scale: int
:Example:
>>>network = PublicTransportationNetwork("Network_1", 1./25000)
>>>
Module attribute
--------------
:param name: name of the network
:param scale: scale of the map
:param lines: lines in the network
:type name: str
:type scale: int
:type lines: list
.. Date:: 20/01/2015
.. author:: Cyril
"""
#INPUT TESTS
#----------------------------------------------------------------------#
#name of the station
#test of the type of name
if type(name) == type(str()) :
self.name = name
else :
raise TypeError(str(name)+" is not a str")
#----------------------------------------------------------------------#
#scale of the graph
#test of the type of scale
if type(scale) == type(float()) :
self.scale = scale
else :
raise TypeError(str(scale)+" is not a float")
#----------------------------------------------------------------------#
#LINES
self.lines = []
#GRAPH
self.network = Graph(True)
self.dist = {}
#======================================================================#
################################################################################
# LINE CREATION #
################################################################################
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
#Graph creation : add_a_line #
def add_a_line(self, line):
"""
The add_a_line function
=======================
adds a new line in the network
Parameters
----------
:param line: dictionary with the format
dict = {"mode": "","name": "", "stations":[]}
the value in "station" is a graph module
:type arg3: dict
:Example:
>>>A = Station("A",[1.,1.])
>>>B = Station("B",[2.,2.])
>>>C = Station("C",[3.,3.])
>>>D = Station("D",[4.,4.])
>>>E = Station("E",[5.,5.])
>>>network = PublicTransportationNetwork("Network_1", 1/25000)
>>>Line_A={"mode": "métro","name": "L_A", "stations":[A,B,C,D,E]}
>>>network.add_a_line(Line_A)
>>>
.. Date:: 23/01/2015
.. author:: Cyril
"""
#We add the line in the visual variables
#----------------------------------------------------------------------#
#we create a line with all the informations
new_line = Line(line["stations"],line["mode"],line["name"])
#we add the line in the line's list
self.lines.append(new_line)
#We set all the distances
#we add the stations in nodes
#----------------------------------------------------------------------#
#we add the stations in nodes
#we set the departure node
self.network.add_a_node(line["stations"][0])
for st in line["stations"]:
#if the stations is not the departure station
if st != line["stations"][0]:
#we add the station in the nodes
self.network.add_a_node(st)
#we add the edges the graph
#----------------------------------------------------------------------#
#we add all the edges
#we set the departure node
departure = line["stations"][0]
#for all the stations
for st in line["stations"]:
#if the station is not the departure station
if st != departure:
#we calculate the weight in time
weight = self.duration(departure,st,line["mode"])
#and add an edge between departure and st
self.network.add_an_edge(departure,st,weight)
#the new node is the departure
departure = st
#======================================================================#
#------------------------------------------------------------------------------#
#Graph creation : calculate distances #
#------------------------------------------------------------------------------#
def distances(self, from_station, to_station):
"""
The distances function
======================
Calculates the distance between two stations of a map.
Parameters
----------
:param from_station: station of departure
:param to_station: station of arrival
:type from_station: class Station
:type to_station: class Station
:return: distance between the two stations
:rtype: float
:Example:
>>>A = Station("A",[1.,1.])
>>>B = Station("B",[2.,2.])
>>>network = PublicTransportationNetwork("Network_1", 1/25000)
>>>Line_A={"mode": "métro","name": "L_A", "stations":[A,B]}
>>>network.add_a_line(Line_A)
>>>network.distances(A,B)
1.4142135623730951
.. Date:: 28/12/2015
.. author:: Cyril
"""
#this uses the Pythagorean theorem
#we take the from_station positions
[x1,y1] = from_station.position
#we take the to_station positions
[x2,y2] = to_station.position
#we return the pythagore value
return sqrt((abs(x1-x2))**2 + (abs(y1-y2))**2) * self.scale
#======================================================================#
#------------------------------------------------------------------------------#
#Graph creation : duration #
#------------------------------------------------------------------------------#
def duration(self,station_1, station_2, mode):
"""
The duration function
=====================
gives the time between two stations as a function of the
transportation mode of the given line.
Modes and speeds
--------------
RER: 40 km/h
Underground:25 km/h
Tramway: 22 km/h
Bus: 14 km/h
Piéton: 4.5 km/h
Parameters
----------
:param station_1: station of departure
:param station_2: station of arrival
:type station_1: class Station
:type station_2: class Station
:return: time in minutes between the two stations
:rtype: float
:Example:
>>>A = Station("A",[1.,1.])
>>>B = Station("B",[2.,2.])
>>>network = PublicTransportationNetwork("Network_1", 1/25000)
>>>Line_A={"mode": "Underground","name": "L_A", "stations":[A,B]}
>>>network.add_a_line(Line_A)
>>>network.duration(A,B)
1.4142135623730951
.. Date:: 22/01/2015
.. author:: Cyril
"""
#all the accepted names for each mode of transportation
#----------------------------------------------------------------------#
rer = ["RER","rer"]
under = ["Underground","underground","subway",
"métro","Métro","metro","Metro"]
tram = ["Tramway","tramway"]
bus = ["Bus","bus"]
foot = ["Piéton","piéton","Pieton","pieton"]
#Speed Selection
#----------------------------------------------------------------------#
#RER speed selection
if mode in rer:
v = 40. #km/h
#subway speed selection
if mode in under:
v = 25. #km/h
#Tramway speed selection
if mode in tram:
v = 22. #km/h
#Bus speed selection
if mode in bus:
v = 14. #km/h
#pedestrian speed selection
if mode in foot:
v = 4.5 #km/h
#----------------------------------------------------------------------#
#Time between station_1 and station_2 in minutes
#----------------------------------------------------------------------#
return self.distances(station_1, station_2)*60./v
#======================================================================#
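# Worked example (illustrative numbers): for two stations whose scaled
# distance is 3 km on an Underground line (25 km/h), duration() returns
# 3 * 60 / 25 = 7.2 minutes.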
#------------------------------------------------------------------------------#
#Graph creation : decimal to hours #
#------------------------------------------------------------------------------#
def dec_to_hour(self, time):
"""
The dec_to_hour function
========================
takes the hour in base 10 and gives the hour in base 60
:Example:
>>>network = PublicTransportationNetwork("Network_1", 1/25000)
>>>network.dec_to_hour(0.5)
30 Min
>>>
.. Date:: 30/12/2015
.. author:: Cyril
"""
#MINS
mins = time%1*60
mins = mins - mins%1
#HOURS
hours = time - time%1
if hours == 0:
return str(mins)+" Min"
if hours != 0:
return str(hours)+" heure "+str(mins)+" Min"
#------------------------------------------------------------------------------#
#Graph creation : reset_network #
#------------------------------------------------------------------------------#
def reset_network(self):
"""
The reset_network function
==========================
resets all the network
:Example:
>>>network = PublicTransportationNetwork("Network_1", 1/25000)
>>>network.reset_network()
>>>
.. Date:: 23/12/2015
.. author:: Cyril
"""
#we put nothing in all the global variable
self.lines = []
self.name = ""
self.scale = 1
#this function has been added in the Graph module
self.network.reset()
self.dist = {}
#======================================================================#
#------------------------------------------------------------------------------#
#Graph creation : connection #
#------------------------------------------------------------------------------#
def connection(self, couple):
"""
The connection function
=======================
connects two points of a network
and adds a new line "tunnel"
Parameters
----------
:param couple: the two stations to link
:type : instance of station
.. Date:: 23/01/2015
.. author:: Cyril
"""
#We add the line in the visual variables
#----------------------------------------------------------------------#
#we create a line with all the information
new_line = Line(couple,"piéton","tunnel")
#we add the line in the line's list
#!/usr/bin/python
import argparse
import multiprocessing
import queue
import time
import threading
from maho.camera import IPCamera
from maho.adsb import Dump1090
from maho.util import AzimuthAltitudeDistance
from time import sleep
from metar import Metar
#from hanging_threads import start_monitoring
import numpy as np
import cv2
#import subprocess
import pylab
import os
import sys
import math
#import faulthandler
#faulthandler.enable()
#start_monitoring(seconds_frozen=10, test_interval=100)
timeNow = time.strftime("%Y%m%d-%H%M%S")
def restart_program():
print("Restarting")
time.sleep(0)
python = sys.executable
os.execl(sys.executable, 'python', __file__, *sys.argv[1:])
min_height = 1000
def camera_control(camera_host, camera_port, camera_user, camera_pass, q):
"""Control a maho.Camera based on inputs from a multiprocessing queue"
On startup this function will place the stream_url in the queue if camera
communication works.
If it fails, the exception explaining why will be placed in the queue before exiting
"""
try:
camera = IPCamera(camera_host, camera_port, camera_user, camera_pass)
q.put(camera.get_rtsp_url())
except RuntimeError as exc:
q.put(exc)
try:
while True:
camera.move_to(*q.get())
except KeyboardInterrupt:
pass
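# Minimal producer sketch (host and credentials are illustrative): the first
# item camera_control puts in the queue is either the RTSP stream URL or an
# exception; afterwards it blocks on (azimuth, altitude) tuples and moves the
# camera accordingly.
#
#     q = multiprocessing.Queue()
#     p = multiprocessing.Process(target=camera_control,
#                                 args=('192.168.1.64', 80, 'admin', 'secret', q))
#     p.start()
#     stream_url = q.get()       # RTSP URL on success, Exception on failure
#     q.put((180.0, 45.0))       # point the camera south, 45 degrees up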
def track_closest_aircraft(latitude, longitude, elevation, host, port, q):
"""Forward adsb messages to a Queue
Args:
host (str): The dump1090 host
port (int): The dump1090 port
q (queue): Messages will be placed in this queue
On startup this function will place True in the queue if dump1090 starts properly
If it fails the exception will be placed in the queue before exiting
"""
try:
d = Dump1090(host, port)
q.put(True)
except IOError as exc:
q.put(exc)
return
target = None
target_distance = None
frame_count = 0
aad = AzimuthAltitudeDistance(latitude, longitude, elevation)
try:
for aircraft in d.updates():
lat, lng = aircraft.position
azimuth, altitude, distance = aad.calculate(
lat,
lng,
aircraft.altitude
)
if aircraft.position[0] and lat > latitude:
northSouth = ('N')
else:
northSouth = ('S')
# if we don't have a target we do now
# or target is old, then use this new aircraft
# or new aircraft isn't the target, but it is closer, so we switch!
# make sure aircraft swaps between cameras as it passes over
# so the correct camera is pointing at the aircraft, if this is not
# here the camera continues to follow until an aircraft is closer.
# if you are using only one camera you don't require this section
my_lat = 53.43450
timeNow = time.strftime("%Y%m%d-%H%M%S")
if latitude >= my_lat and target is None:
pass
elif latitude >= my_lat > lat and target.icao == aircraft.icao:
target = None
else:
pass
if latitude <= my_lat and target is None:
pass
elif latitude <= my_lat < lat and target.icao == aircraft.icao:
target = None
else:
pass
# Modified code to split between the North and South facing cameras.
#North facing camera:
above = (min_height * 0.3048)
if (latitude >= my_lat) and (aircraft.altitude >= (30000 * 0.3048)) or (aircraft.position[0] < latitude):
if (target is None or target.age > 20 or target.icao != aircraft.icao and distance < target_distance) \
and northSouth is ('N') and (altitude > 15):
target = aircraft
elif aircraft and target is None:
pass
else:
#print('.', end='')
#print ((timeNow), end='\r', flush=True))
#print(min_height * 0.3048)
print(time.ctime(), (above), end="\r", flush=True)
pass
#South facing camera:
frame_count = 0
if (latitude <= my_lat) and (aircraft.altitude >= (30000 * 0.3048)) or (aircraft.position[0] > latitude):
if (target is None or target.age > 20 or target.icao != aircraft.icao and distance < target_distance) \
and northSouth is ('S') and (altitude > 15):
target = aircraft
elif aircraft and target is None:
pass
else:
#print (timeNow(), end='\r', flush=True)
print(time.ctime(), (above), end="\r", flush=True)
#pass#'''
'''tracker.terminate()
tracker.join()
camera.terminate()
camera.join()
cap.release()
cv2.destroyAllWindows()'''
pass
#if frame_count > 50:
# restart_program()
else:
pass
# if we aren't the target at this point then bail
old_distance = 0
if target != aircraft:
continue
target = aircraft
target_distance = distance
if old_distance == distance or altitude < 10:
pass
else:
old_distance = distance
q.put((target, azimuth, altitude, distance))
except KeyboardInterrupt:
pass
def go_maho(
latitude,
longitude,
elevation,
camera_host,
camera_port,
camera_user,
camera_pass,
adsb_host,
adsb_port,
min_height
):
# fork a process to communicate with dump1090
targets = multiprocessing.Queue()
tracker = multiprocessing.Process(
target=track_closest_aircraft,
args=(latitude, longitude, elevation, adsb_host, adsb_port, targets,)
)
tracker.start()
# first thing in the queue will be startup status
# True if good
# an Exception if bad
status = targets.get()
if isinstance(status, Exception):
raise RuntimeError("Unable to connect to dump1090 on {}:{}: {}".format(
adsb_host,
adsb_port,
status
))
# run camera control in own process as moving the camera can block for seconds
camera_queue = multiprocessing.Queue()
camera = multiprocessing.Process(
target=camera_control,
args=(camera_host, camera_port, camera_user, camera_pass, camera_queue,)
)
camera.start()
# first thing in the queue will be startup status
# Stream URL if good
# an Exception if bad
stream_url = camera_queue.get()
if isinstance(stream_url, Exception):
raise RuntimeError("Unable to connect to camera on {}:{}: {}".format(
camera_host,
camera_port,
stream_url
))
cap = cv2.VideoCapture(stream_url)
ret, frame = cap.read()
#cv2.namedWindow("maho")
#orb = cv2.ORB_create()
# build a mask that's the center of the frame
# we'll focus searching for aircraft in this region
search_mask = np.zeros((frame.shape[0], frame.shape[1], 1), dtype=np.uint8)
cx = frame.shape[1] / 2
cy = frame.shape[0] / 2
size = 0.3
search_rect = (
(int(cy - (cy * size)), int(cx - (cx * size))),
(int(cy + (cy * size)), int(cx + (cx * size)))
)
# openCV UI main loops
start = None
end = None
fps = 0
elapsed = 0
target = None
last_target = None
try:
while True:
start = time.time()
#'''
# fill our mask back to full, we may have chopped it smaller on the last frame
search_mask[
search_rect[0][0]:search_rect[0][1],
search_rect[1][0]:search_rect[1][1]
] = 255#'''
# grab a frame from the camera
ret, frame = cap.read()
#gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# check for new / updated target info
try:
target, azimuth, altitude, distance = targets.get(False)
if last_target is None or target.icao != last_target.icao:
last_target = target
print("Now tracking {} / {} - Distance: {}m Time: {}".format(
target.icao,
target.callsign,
int(distance),
time.strftime("%Y%m%d-%H%M%S")
))
print("{} | azi: {:.3f}, alt: {:.3f}".format(
target,
azimuth,
altitude,
#int(distance)
))
'''print("{} | azi: {:.3f}, alt: {:.3f}, dist: {}m.".format('''
camera_queue.put((azimuth, altitude))
except queue.Empty:
if target is None:
camera_queue.put((0,0))
else:
pass
# annotate the frame
if target:
cv2.putText(
frame,
target.callsign or target.icao,
(0, 50),
cv2.FONT_HERSHEY_DUPLEX,
2,
(255, 255, 255),
4,
cv2.LINE_AA
)
txt = "{0:.3f}, {1:.3f} @ {2:.0f}m (dist: {3:.0f}m)".format(
target.position[0],
target.position[1],
target.altitude,
distance
)
cv2.putText(
frame,
txt,
(10, 75),
cv2.FONT_HERSHEY_SIMPLEX,
.5,
(255, 255, 255),
1,
cv2.LINE_AA
)
cv2.rectangle(frame, search_rect[0][::-1], search_rect[1][::-1], (0, 0, 255), 2)
'''
kp = orb.detect(gray, search_mask)
kp, des = orb.compute(gray, kp)
cv2.drawKeypoints(frame, kp, frame, color=(0, 255, 0), flags=0)#'''
cv2.putText(
frame,
"Camera Position: Az: {:.0f}, Alt: {:.0f}".format(azimuth, altitude),
(10, 100),
cv2.FONT_HERSHEY_SIMPLEX,
.5,
(255, 255, 255),
1,
cv2.LINE_AA
)
# different ways of displaying the final output.
# uncomment to activate; unsure whether more than three can be active at once.
my_lat = 53.43450
if latitude >= my_lat:
window_name = ('<NAME>')
else:
window_name = ('<NAME>')
# display it
# small = cv2.resize(frame, (0,0), fx=1, fy=1)
# small = frame
# small = cv2.resize(frame, (0,0), width, height)
# cv2.imshow('maho', frame)
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
# cv2.setWindowProperty('Maho', cv2.WINDOW_FULLSCREEN)
# cv2.setWindowProperty('Maho', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
# cv2.setWindowProperty('Maho', cv2.WINDOW_AUTOSIZE, cv2.WINDOW_FULLSCREEN)
cv2.setWindowProperty(window_name, cv2.WINDOW_AUTOSIZE, cv2.WND_PROP_FULLSCREEN)
# cv2.namedWindow('Maho', cv2.WND_PROP_FULLSCREEN)
# cv2.resizeWindow('Maho', 1000, 700)
cv2.imshow(window_name, frame)
# handle input
keypress = cv2.waitKey(1) & 0xFF
end = time.time()
elapsed = int((end - start) * 1000)
if elapsed == 0:
pass
else:
fps = int(1000 / elapsed)
if keypress == ord('q'):
raise KeyboardInterrupt
def saveScreen():
timestr = time.strftime("%Y%m%d-%H%M%S")
cv2.imwrite('MC' + timestr + '-' + target.icao + '.png', frame)
# cv2.imwrite('MC' + timestr + '.png', frame)
print("Image saved MC" + timestr)
if keypress == ord('s'):
saveScreen()
if keypress == ord('r'):
restart_program()
if keypress == ord('+'):
min_height += 1000
elif keypress == ord('-'):
min_height -= 1000
except KeyboardInterrupt:
tracker.terminate()
tracker.join()
camera.terminate()
camera.join()
cap.release()
cv2.destroyAllWindows()
def main():
parser = argparse.ArgumentParser(
prog='maho',
description='ADS-B assisted aircraft spotting',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--latitude', type=float, required=True, help='Latitude of the camera')
parser.add_argument('--longitude', type=float, required=True, help='Longitude of the camera')
parser.add_argument('--elevation', type=float, required=True, help='Elevation of the camera')
parser.add_argument('--camera-host', type=str, required=True, help='Camera hostname/ip')
parser.add_argument('--camera-port', type=int, default=80, help='Camera port')
parser.add_argument('--camera-user', type=str, required=True, help='Camera username')
parser.add_argument('--camera-pass', type=str, required=True, help='Camera password')
parser.add_argument('--adsb-host', type=str, default='localhost', help='dump1090 hostname/ip')
parser.add_argument('--adsb-port', type=int, default=30002, help='dump1090 TCP raw output port')
parser.add_argument('--min-height', type=float, default=30000, help='Minimum | |
"""A synchronous, typed interface for the Cryptol bindings"""
from __future__ import annotations
import sys
from typing import Any, Optional, Union, List, Dict, TextIO, overload
from typing_extensions import Literal
from dataclasses import dataclass
from .solver import OfflineSmtQuery, Solver, OnlineSolver, OfflineSolver, Z3
from . import connection
from . import cryptoltypes
from .commands import *
from . import CryptolConnection, SmtQueryType
@dataclass
class Qed:
"""The positive result of a 'prove' SMT query. All instances of this class
are truthy, i.e. evaluate to `True` in an 'if' or 'while' statement.
"""
def __bool__(self) -> Literal[True]:
return True
def __nonzero__(self) -> Literal[True]:
return True
@dataclass
class Safe:
"""The positive result of a 'safe' SMT query. All instances of this class
are truthy, i.e. evaluate to `True` in an 'if' or 'while' statement.
"""
def __bool__(self) -> Literal[True]:
return True
def __nonzero__(self) -> Literal[True]:
return True
@dataclass
class Counterexample:
"""The negative result of a 'prove' or 'safe' SMT query, consisting of a
type (either "predicate falsified" or "safety violation") and the list of
values which constitute the counterexample. All instances of this class are
falsy, i.e. evaluate to `False` in an 'if' or 'while' statement. (Note that
this is different from the behavior of a plain list, which is truthy iff
it has nonzero length.)
"""
type : Union[Literal["predicate falsified"], Literal["safety violation"]]
assignments : List[CryptolValue]
def __bool__(self) -> Literal[False]:
return False
def __nonzero__(self) -> Literal[False]:
return False
@dataclass
class Satisfiable:
"""The positive result of a 'sat' SMT query, consisting of a list of
models, where each model is a list of values satisfying the predicate.
All instances of this class are truthy, i.e. evaluate to `True` in an 'if'
or 'while' statement. (Note that this is different from the behavior of a
plain list, which is truthy iff it has nonzero length.)
"""
models : List[List[CryptolValue]]
def __bool__(self) -> Literal[True]:
return True
def __nonzero__(self) -> Literal[True]:
return True
@dataclass
class Unsatisfiable:
"""The negative result of a 'sat' SMT query. All instances of this class
are falsy, i.e. evaluate to `False` in an 'if 'or 'while' statement.
"""
def __bool__(self) -> Literal[False]:
return False
def __nonzero__(self) -> Literal[False]:
return False
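# The result classes above are meant to be used directly in conditionals.
# A small sketch (module and property names are illustrative), using the
# sat() method defined further below:
#
#     cry = connect()
#     cry.load_module("Example")
#     result = cry.sat("isValid")
#     if result:                      # Satisfiable is truthy
#         print(result.models[0])
#     else:                           # Unsatisfiable is falsy
#         print("no satisfying assignment")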
def connect(command : Optional[str]=None,
*,
cryptol_path : Optional[str] = None,
url : Optional[str] = None,
reset_server : bool = False,
verify : Union[bool, str] = True,
log_dest : Optional[TextIO] = None,
timeout : Optional[float] = None) -> CryptolSyncConnection:
"""
Connect to a (possibly new) synchronous Cryptol server process.
:param command: A command to launch a new Cryptol server in socket mode (if provided).
:param cryptol_path: A replacement for the contents of
the ``CRYPTOLPATH`` environment variable (if provided).
:param url: A URL at which to connect to an already running Cryptol
HTTP server.
:param reset_server: If ``True``, the server that is connected to will be
reset. (This ensures any states from previous server usages have been
cleared.)
:param verify: Determines whether a secure HTTP connection should verify the SSL certificates.
Corresponds to the ``verify`` keyword parameter on ``requests.post``. N.B.,
only has an effect when ``connect`` is called with a ``url`` parameter
or when the ``CRYPTOL_SERVER_URL`` environment variable is set.
:param log_dest: A destination to log JSON requests/responses to, e.g. ``log_dest=sys.stderr``
will print traffic to ``stderr``, ``log_dest=open('foo.log', 'w')`` will log to ``foo.log``,
etc.
:param timeout: Optional default timeout (in seconds) for methods. Can be modified/read via the
`timeout` property on a `CryptolSyncConnection` or the `get_default_timeout` and
`set_default_timeout` methods. Method invocations which specify the optional `timeout` keyword
parameter will cause the default to be ignored for that method.
If no ``command`` or ``url`` parameters are provided, the following are attempted in order:
1. If the environment variable ``CRYPTOL_SERVER`` is set and refers to an executable,
it is assumed to be a Cryptol server and will be used for a new ``socket`` connection.
2. If the environment variable ``CRYPTOL_SERVER_URL`` is set, it is assumed to be
the URL for a running Cryptol server in ``http`` mode and will be connected to.
3. If an executable ``cryptol-remote-api`` is available on the ``PATH``
it is assumed to be a Cryptol server and will be used for a new ``socket`` connection.
"""
return CryptolSyncConnection(connection.connect(
command=command,
cryptol_path=cryptol_path,
url=url,
reset_server=reset_server,
verify=verify,
log_dest=log_dest,
timeout=timeout))
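# Connection sketch (the URL below is illustrative): with no arguments the
# CRYPTOL_SERVER / CRYPTOL_SERVER_URL environment variables and the PATH are
# consulted as described above; alternatively an explicit HTTP URL can be given.
#
#     cry = connect()                                  # auto-detect a server
#     cry = connect(url="http://localhost:8080/")      # attach to a running server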
def connect_stdio(command : str,
cryptol_path : Optional[str] = None,
log_dest : Optional[TextIO] = None,
timeout : Optional[float] = None) -> CryptolSyncConnection:
"""Start a new synchronous connection to a new Cryptol server process.
:param command: The command to launch the Cryptol server.
:param cryptol_path: An optional replacement for the contents of
the ``CRYPTOLPATH`` environment variable.
:param log_dest: A destination to log JSON requests/responses to, e.g. ``log_dest=sys.stderr``
will print traffic to ``stderr``, ``log_dest=open('foo.log', 'w')`` will log to ``foo.log``,
etc.
:param timeout: Optional default timeout (in seconds) for methods. Can be modified/read via the
`timeout` property on a `CryptolSyncConnection` or the `get_default_timeout` and
`set_default_timeout` methods. Method invocations which specify the optional `timeout` keyword
parameter will cause the default to be ignored for that method.
"""
return CryptolSyncConnection(connection.connect_stdio(
command=command,
cryptol_path=cryptol_path,
log_dest=log_dest,
timeout=timeout))
class CryptolSyncConnection:
"""A wrapper of ``CryptolConnection`` with a synchronous, typed interface."""
connection : CryptolConnection
def __init__(self, connection : CryptolConnection):
self.connection = connection
@property
def timeout(self) -> Optional[float]:
return self.connection.timeout
@timeout.setter
def timeout(self, timeout : Optional[float]) -> None:
self.connection.timeout = timeout
def get_default_timeout(self) -> Optional[float]:
"""Get the value of the optional default timeout for methods (in seconds)."""
return self.connection.get_default_timeout()
def set_default_timeout(self, timeout : Optional[float]) -> None:
"""Set the value of the optional default timeout for methods (in seconds)."""
self.connection.set_default_timeout(timeout)
def load_file(self, filename : str, *, timeout:Optional[float] = None) -> None:
"""Load a filename as a Cryptol module, like ``:load`` at the Cryptol
REPL.
"""
self.connection.load_file(filename, timeout=timeout).result()
def load_module(self, module_name : str, *, timeout:Optional[float] = None) -> None:
"""Load a Cryptol module, like ``:module`` at the Cryptol REPL."""
self.connection.load_module(module_name, timeout=timeout).result()
def extend_search_path(self, *dir : str, timeout:Optional[float] = None) -> None:
"""Extend the search path for loading Cryptol modules."""
self.connection.extend_search_path(*dir, timeout=timeout).result()
def eval(self, expression : Any, *, timeout:Optional[float] = None) -> CryptolValue:
"""Evaluate a Cryptol expression, with the result represented
according to :ref:`cryptol-json-expression`, with Python datatypes
standing for their JSON equivalents.
"""
return from_cryptol_arg(self.connection.eval_raw(expression, timeout=timeout).result())
def call(self, fun : str, *args : List[Any], timeout:Optional[float] = None) -> CryptolValue:
"""Evaluate a Cryptol functiom by name, with the arguments and the
result represented according to :ref:`cryptol-json-expression`, with
Python datatypes standing for their JSON equivalents.
"""
return from_cryptol_arg(self.connection.call_raw(fun, *args, timeout=timeout).result())
def check(self, expr : Any, *, num_tests : Union[Literal['all'], int, None] = None, timeout:Optional[float] = None) -> CheckReport:
"""Tests the validity of a Cryptol expression with random inputs. The expression must be a function with
return type ``Bit``.
If ``num_tests`` is ``"all"`` then the expression is tested exhaustively (i.e., against all possible inputs).
If ``num_tests`` is omitted, Cryptol defaults to running 100 tests.
"""
return to_check_report(self.connection.check_raw(expr, num_tests=num_tests, timeout=timeout).result())
def check_type(self, code : Any, *, timeout:Optional[float] = None) -> cryptoltypes.CryptolType:
"""Check the type of a Cryptol expression, represented according to
:ref:`cryptol-json-expression`, with Python datatypes standing for
their JSON equivalents.
"""
return cryptoltypes.to_type(self.connection.check_type(code, timeout=timeout).result()['type'])
@overload
def sat(self, expr : Any, solver : OfflineSolver, count : int = 1, *, timeout:Optional[float] = None) -> OfflineSmtQuery: ...
@overload
def sat(self, expr : Any, solver : OnlineSolver = Z3, count : int = 1, *, timeout:Optional[float] = None) -> Union[Satisfiable, Unsatisfiable]: ...
@overload
def sat(self, expr : Any, solver : Solver = Z3, count : int = 1, *, timeout:Optional[float] = None) -> Union[Satisfiable, Unsatisfiable, OfflineSmtQuery]: ...
def sat(self, expr : Any, solver : Solver = Z3, count : int = 1, *, timeout:Optional[float] = None) -> Union[Satisfiable, Unsatisfiable, OfflineSmtQuery]:
"""Check the satisfiability of a Cryptol expression, represented according to
:ref:`cryptol-json-expression`, with Python datatypes standing for
their JSON equivalents. Use the solver named `solver`, and return up to
`count` solutions.
If the given solver is an `OnlineSolver`, the result is either an
instance of `Satisfiable`, which is always truthy, or `Unsatisfiable`,
which is always falsy - meaning the result will evaluate to `True` in
an 'if' or | |
# -------------------------------------------------------------------------
# Public methods
# -------------------------------------------------------------------------
def reset(self) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Starts a new acquisition episode with a batch of images.
This method performs the following steps:
1. Reads a batch of images from the environment's dataset.
2. Creates an initial acquisition mask for each image.
3. Passes the loaded data and the initial masks to the transform function,
producing a batch of inputs for the environment's reconstructor model.
4. Calls the reconstructor model on this input and returns its output
as an observation.
The observation returned is a dictionary with the following keys:
- *"reconstruction"(torch.Tensor):* The reconstruction produced by the
environment's reconstructor model, using the current
acquisition mask.
- *"extra_outputs"(dict(str,Any)):* A dictionary with any additional
outputs produced by the reconstructor (e.g., uncertainty maps).
- *"mask"(torch.Tensor):* The current acquisition mask.
Returns:
tuple: tuple containing:
- obs(dict(str,any)): Observation dictionary.
- metadata(dict(str,any)): Metadata information containing the following keys:
- *"fname"(list(str)):* the filenames of the images read from the dataset.
- *"slice_id"(list(int)):* slice indices for each image within the volume.
- *"current_score"(dict(str,float)):* A dictionary with the error measures
for the reconstruction (e.g., "mse", "nmse", "ssim", "psnr"). The measures
considered can be obtained with :meth:`score_keys()`.
"""
self._did_reset = True
try:
kspace, _, ground_truth, attrs, fname, slice_id = next(
self._current_data_handler
)
except StopIteration:
return {}, {}
self._current_ground_truth = torch.from_numpy(np.stack(ground_truth))
# Converting k-space to torch is better handled by transform,
# since we have both complex and non-complex versions
self._current_k_space = kspace
self._transform_wrapper = functools.partial(
self._transform, attrs=attrs, fname=fname, slice_id=slice_id
)
kspace_shapes = [tuple(k.shape) for k in kspace]
self._current_mask = self._mask_func(kspace_shapes, self._rng, attrs=attrs)
obs, self._current_score = self._compute_obs_and_score()
self._current_reconstruction_numpy = obs["reconstruction"].cpu().numpy()
self._steps_since_reset = 0
meta = {
"fname": fname,
"slice_id": slice_id,
"current_score": self._current_score,
}
return obs, meta
def step(
self, action: Union[int, Sequence[int]]
) -> Tuple[Dict[str, Any], np.ndarray, List[bool], Dict, Dict]:
"""Performs a step of active MRI acquisition.
Given a set of indices for k-space columns to acquire, updates the current batch
of masks with their corresponding indices, creates a new batch of reconstructions,
and returns the corresponding observations and rewards (for the observation format
see :meth:`reset()`). The reward is the improvement in score with
respect to the reconstruction before adding the indices. The specific score metric
used is determined by ``env.reward_metric``.
The method also returns a list of booleans, indicating whether any episodes in the
batch have already concluded.
The fourth return value is a metadata dictionary. It contains a single key
"current_score", which contains a dictionary with the error measures for the
reconstruction (e.g., ``"mse", "nmse", "ssim", "psnr"``). The measures
considered can be obtained with :meth:`score_keys()`.
Args:
action(union(int, sequence(int))): Indices for k-space columns to acquire. The
length of the sequence must be equal to the
current number of parallel episodes
(i.e., ``obs["reconstruction"].shape[0]``).
If only an ``int`` is passed, the index will
be replicated for the whole batch of episodes.
Returns:
tuple: The transition information in the order
``(next_observation, reward, done, meta, gt)``. The types and shapes are:
- ``next_observation(dict):`` Dictionary format (see :meth:`reset()`).
- ``reward(np.ndarray)``: length equal to current number of parallel
episodes.
- ``done(list(bool))``: same length as ``reward``.
- ``meta(dict)``: see description above.
- ``gt(dict)``: ground-truth data for the current batch, with keys ``"gt"`` (image) and ``"gt_kspace"``.
"""
if not self._did_reset:
raise RuntimeError(
"Attempting to call env.step() before calling env.reset()."
)
if isinstance(action, int):
action = [action for _ in range(self.num_parallel_episodes)]
self._current_mask = activemri.envs.masks.update_masks_from_indices(
self._current_mask, action
)
obs, new_score = self._compute_obs_and_score()
self._current_reconstruction_numpy = obs["reconstruction"].cpu().numpy()
reward = new_score[self.reward_metric] - self._current_score[self.reward_metric]
if self.reward_metric in ["mse", "nmse"]:
reward *= -1
else:
assert self.reward_metric in ["ssim", "psnr"]
self._current_score = new_score
self._steps_since_reset += 1
done = activemri.envs.masks.check_masks_complete(self._current_mask)
if self.budget and self._steps_since_reset >= self.budget:
done = [True] * len(done)
return obs, reward, done, {"current_score": self._current_score}, {"gt": self._current_ground_truth, "gt_kspace": self._current_k_space}
def try_action(
self, action: Union[int, Sequence[int]]
) -> Tuple[Dict[str, Any], Dict[str, np.ndarray]]:
"""Simulates the effects of actions without changing the environment's state.
This method operates almost exactly as :meth:`step()`, with the exception that
the environment's state is not altered. The method returns the next observation
and the resulting reconstruction score after applying the given k-space columns to
each image in the current batch of episodes.
Args:
action(union(int, sequence(int))): Indices for k-space columns to acquire. The
length of the sequence must be equal to the
current number of parallel episodes
(i.e., ``obs["reconstruction"].shape[0]``).
If only an ``int`` is passed, the index will
be replicated for the whole batch of episodes.
Returns:
tuple: The reconstruction information in the order
``(next_observation, current_score)``. The types and shapes are:
- ``next_observation(dict):`` Dictionary format (see :meth:`reset()`).
- ``current_score(dict(str, float))``: A dictionary with the error measures
for the reconstruction (e.g., "mse", "nmse", "ssim", "psnr"). The measures
considered can be obtained with `ActiveMRIEnv.score_keys()`.
"""
if not self._did_reset:
raise RuntimeError(
"Attempting to call env.try_action() before calling env.reset()."
)
if isinstance(action, int):
action = [action for _ in range(self.num_parallel_episodes)]
new_mask = activemri.envs.masks.update_masks_from_indices(
self._current_mask, action
)
obs, new_score = self._compute_obs_and_score(override_current_mask=new_mask)
return obs, new_score
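# One-step lookahead sketch built on try_action() (helper name and candidate
# set are illustrative; assumes a reward metric where higher is better, e.g.
# "ssim" or "psnr"):
#
#     def greedy_action(env, candidates):
#         best_col, best_score = None, -float("inf")
#         for col in candidates:
#             _, score = env.try_action(col)
#             value = float(np.asarray(score[env.reward_metric]).mean())
#             if value > best_score:
#                 best_col, best_score = col, value
#         return best_col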
def render(self, mode="human"):
"""Renders information about the environment's current state.
Returns:
``np.ndarray``: An image frame containing, from left to right: current
acquisition mask, current ground image, current reconstruction,
and current relative reconstruction error.
"""
pass
def seed(self, seed: Optional[int] = None):
"""Sets the seed for the internal number generator.
This seed affects the order of the data loader for all loop modalities (i.e.,
training, validation, test).
Args:
seed(optional(int)): The seed for the environment's random number generator.
"""
self._seed = seed
self._rng = np.random.RandomState(seed)
self._train_data_handler.seed(seed)
self._val_data_handler.seed(seed)
self._test_data_handler.seed(seed)
def set_training(self, reset: bool = False):
"""Sets the environment to use the training data loader.
Args:
reset(bool): If ``True``, also resets the data loader so that it starts again
from the first image in the loop order.
Warning:
After this method is called the ``env.reset()`` needs to be called again, otherwise
an exception will be thrown.
"""
if reset:
self._train_data_handler.reset()
self._current_data_handler = self._train_data_handler
self._clear_cache_and_unset_did_reset()
def set_val(self, reset: bool = True):
"""Sets the environment to use the validation data loader.
Args:
reset(bool): If ``True``, also resets the data loader so that it starts again
from the first image in the loop order.
Warning:
After this method is called the ``env.reset()`` needs to be called again, otherwise
an exception will be thrown.
"""
if reset:
self._val_data_handler.reset()
self._current_data_handler = self._val_data_handler
self._clear_cache_and_unset_did_reset()
def set_test(self, reset: bool = True):
"""Sets the environment to use the test data loader.
Args:
reset(bool): If ``True``, also resets the data loader so that it starts again
from the first image in the loop order.
Warning:
After this method is called the ``env.reset()`` needs to be called again, otherwise
an exception will be thrown.
"""
if reset:
self._test_data_handler.reset()
self._current_data_handler = self._test_data_handler
self._clear_cache_and_unset_did_reset()
@staticmethod
def score_keys() -> List[str]:
""" Returns the list of score metric names used by this environment. """
return ["mse", "nmse", "ssim", "psnr"]
# -----------------------------------------------------------------------------
# CUSTOM ENVIRONMENTS
# -----------------------------------------------------------------------------
class MICCAI2020Env(ActiveMRIEnv):
"""Implementation of environment used for *Pineda et al., MICCAI 2020*.
This environment is provided to facilitate replication of the experiments performed
in *<NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
"Active MR k-space Sampling with Reinforcement Learning". MICCAI 2020.*
The dataset is the same as that of :class:`SingleCoilKneeEnv`, except that we provide
a custom validation/test split of the original validation data. The environment's
configuration file is set to use the reconstruction model used in the paper
(see :class:`activemri.models.cvpr19_reconstructor.CVPR19Reconstructor`), as well
as the proper transform to generate inputs for this model.
The k-space shape of this environment is set to ``(640, 368)``.
Args:
num_parallel_episodes(int): Determines the number images that will be processed
simultaneously by :meth:`reset()` and :meth:`step()`.
Defaults to 1.
budget(optional(int)): The length of an acquisition episode. Defaults to ``None``,
which indicates that episode will continue until all k-space
columns have been acquired.
seed(optional(int)): The seed for the environment's random number generator, which is
an instance of ``numpy.random.RandomState``. Defaults to ``None``.
extreme(bool): ``True`` or ``False`` for running extreme acceleration or normal
acceleration scenarios described in the paper, respectively.
"""
KSPACE_WIDTH = scknee_data.MICCAI2020Data.KSPACE_WIDTH
START_PADDING = scknee_data.MICCAI2020Data.START_PADDING
END_PADDING = scknee_data.MICCAI2020Data.END_PADDING
CENTER_CROP_SIZE = scknee_data.MICCAI2020Data.CENTER_CROP_SIZE
| |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'UpdateCamera.ui'
#
# Created by: PyQt5 UI code generator 5.12.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(1119, 834)
Dialog.setMinimumSize(QtCore.QSize(1119, 834))
Dialog.setMaximumSize(QtCore.QSize(1119, 834))
self.buttonSaveConfiguration = QtWidgets.QPushButton(Dialog)
self.buttonSaveConfiguration.setGeometry(QtCore.QRect(430, 770, 300, 51))
self.buttonSaveConfiguration.setMinimumSize(QtCore.QSize(300, 51))
self.buttonSaveConfiguration.setMaximumSize(QtCore.QSize(300, 51))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(15)
font.setBold(True)
font.setWeight(75)
self.buttonSaveConfiguration.setFont(font)
self.buttonSaveConfiguration.setObjectName("buttonSaveConfiguration")
self.line_3 = QtWidgets.QFrame(Dialog)
self.line_3.setGeometry(QtCore.QRect(670, 40, 31, 721))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setBold(False)
font.setWeight(50)
self.line_3.setFont(font)
self.line_3.setFrameShape(QtWidgets.QFrame.VLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.layoutWidget = QtWidgets.QWidget(Dialog)
self.layoutWidget.setGeometry(QtCore.QRect(720, 40, 111, 241))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setBold(False)
font.setWeight(50)
self.layoutWidget.setFont(font)
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.labelCameraName = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelCameraName.setFont(font)
self.labelCameraName.setObjectName("labelCameraName")
self.verticalLayout_2.addWidget(self.labelCameraName)
self.labelCameraIP = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelCameraIP.setFont(font)
self.labelCameraIP.setObjectName("labelCameraIP")
self.verticalLayout_2.addWidget(self.labelCameraIP)
self.labelCameraIPAddition = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelCameraIPAddition.setFont(font)
self.labelCameraIPAddition.setObjectName("labelCameraIPAddition")
self.verticalLayout_2.addWidget(self.labelCameraIPAddition)
self.labelUserName = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelUserName.setFont(font)
self.labelUserName.setObjectName("labelUserName")
self.verticalLayout_2.addWidget(self.labelUserName)
self.labelPassword = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelPassword.setFont(font)
self.labelPassword.setObjectName("labelPassword")
self.verticalLayout_2.addWidget(self.labelPassword)
self.labelProtocolType = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelProtocolType.setFont(font)
self.labelProtocolType.setObjectName("labelProtocolType")
self.verticalLayout_2.addWidget(self.labelProtocolType)
self.labelLocation = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelLocation.setFont(font)
self.labelLocation.setObjectName("labelLocation")
self.verticalLayout_2.addWidget(self.labelLocation)
self.labelCalibrationPhoto = QtWidgets.QLabel(Dialog)
self.labelCalibrationPhoto.setGeometry(QtCore.QRect(10, 40, 640, 480))
self.labelCalibrationPhoto.setMinimumSize(QtCore.QSize(640, 480))
self.labelCalibrationPhoto.setMaximumSize(QtCore.QSize(640, 480))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setBold(False)
font.setWeight(50)
self.labelCalibrationPhoto.setFont(font)
self.labelCalibrationPhoto.setStyleSheet("background-color: rgb(50,50,50)\n"
"\n"
"")
self.labelCalibrationPhoto.setFrameShape(QtWidgets.QFrame.Box)
self.labelCalibrationPhoto.setFrameShadow(QtWidgets.QFrame.Raised)
self.labelCalibrationPhoto.setLineWidth(2)
self.labelCalibrationPhoto.setText("")
self.labelCalibrationPhoto.setObjectName("labelCalibrationPhoto")
self.buttonShowContours = QtWidgets.QPushButton(Dialog)
self.buttonShowContours.setGeometry(QtCore.QRect(190, 660, 281, 61))
self.buttonShowContours.setMinimumSize(QtCore.QSize(281, 61))
self.buttonShowContours.setMaximumSize(QtCore.QSize(281, 61))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(15)
font.setBold(True)
font.setWeight(75)
self.buttonShowContours.setFont(font)
self.buttonShowContours.setToolTipDuration(9999)
self.buttonShowContours.setObjectName("buttonShowContours")
self.labelCharReadingCalibration = QtWidgets.QLabel(Dialog)
self.labelCharReadingCalibration.setGeometry(QtCore.QRect(780, 290, 311, 31))
self.labelCharReadingCalibration.setMinimumSize(QtCore.QSize(141, 31))
self.labelCharReadingCalibration.setMaximumSize(QtCore.QSize(400, 31))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(13)
font.setBold(True)
font.setUnderline(False)
font.setWeight(75)
font.setStrikeOut(False)
font.setKerning(True)
self.labelCharReadingCalibration.setFont(font)
self.labelCharReadingCalibration.setObjectName("labelCharReadingCalibration")
self.layoutWidget_3 = QtWidgets.QWidget(Dialog)
self.layoutWidget_3.setGeometry(QtCore.QRect(960, 330, 100, 411))
self.layoutWidget_3.setMinimumSize(QtCore.QSize(100, 0))
self.layoutWidget_3.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setBold(False)
font.setWeight(50)
self.layoutWidget_3.setFont(font)
self.layoutWidget_3.setObjectName("layoutWidget_3")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.layoutWidget_3)
self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.editMinPixelWidth = QtWidgets.QLineEdit(self.layoutWidget_3)
self.editMinPixelWidth.setMinimumSize(QtCore.QSize(90, 28))
self.editMinPixelWidth.setMaximumSize(QtCore.QSize(90, 28))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editMinPixelWidth.setFont(font)
self.editMinPixelWidth.setToolTipDuration(9999)
self.editMinPixelWidth.setText("")
self.editMinPixelWidth.setClearButtonEnabled(True)
self.editMinPixelWidth.setObjectName("editMinPixelWidth")
self.verticalLayout_5.addWidget(self.editMinPixelWidth)
self.editMinPixelHeight = QtWidgets.QLineEdit(self.layoutWidget_3)
self.editMinPixelHeight.setMinimumSize(QtCore.QSize(90, 28))
self.editMinPixelHeight.setMaximumSize(QtCore.QSize(90, 28))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editMinPixelHeight.setFont(font)
self.editMinPixelHeight.setToolTipDuration(9999)
self.editMinPixelHeight.setText("")
self.editMinPixelHeight.setClearButtonEnabled(True)
self.editMinPixelHeight.setObjectName("editMinPixelHeight")
self.verticalLayout_5.addWidget(self.editMinPixelHeight)
self.editMinPixelArea = QtWidgets.QLineEdit(self.layoutWidget_3)
self.editMinPixelArea.setMinimumSize(QtCore.QSize(90, 28))
self.editMinPixelArea.setMaximumSize(QtCore.QSize(90, 28))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editMinPixelArea.setFont(font)
self.editMinPixelArea.setToolTipDuration(9999)
self.editMinPixelArea.setClearButtonEnabled(True)
self.editMinPixelArea.setObjectName("editMinPixelArea")
self.verticalLayout_5.addWidget(self.editMinPixelArea)
self.editMinPixelRatio = QtWidgets.QLineEdit(self.layoutWidget_3)
self.editMinPixelRatio.setMinimumSize(QtCore.QSize(90, 28))
self.editMinPixelRatio.setMaximumSize(QtCore.QSize(90, 28))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editMinPixelRatio.setFont(font)
self.editMinPixelRatio.setToolTipDuration(9999)
self.editMinPixelRatio.setText("")
self.editMinPixelRatio.setClearButtonEnabled(True)
self.editMinPixelRatio.setObjectName("editMinPixelRatio")
self.verticalLayout_5.addWidget(self.editMinPixelRatio)
self.editMaxPixelRatio = QtWidgets.QLineEdit(self.layoutWidget_3)
self.editMaxPixelRatio.setMinimumSize(QtCore.QSize(90, 28))
self.editMaxPixelRatio.setMaximumSize(QtCore.QSize(90, 28))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editMaxPixelRatio.setFont(font)
self.editMaxPixelRatio.setToolTipDuration(9999)
self.editMaxPixelRatio.setText("")
self.editMaxPixelRatio.setClearButtonEnabled(True)
self.editMaxPixelRatio.setObjectName("editMaxPixelRatio")
self.verticalLayout_5.addWidget(self.editMaxPixelRatio)
self.editMinDiagSize = QtWidgets.QLineEdit(self.layoutWidget_3)
self.editMinDiagSize.setMinimumSize(QtCore.QSize(90, 28))
self.editMinDiagSize.setMaximumSize(QtCore.QSize(90, 28))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editMinDiagSize.setFont(font)
self.editMinDiagSize.setToolTipDuration(9999)
self.editMinDiagSize.setText("")
self.editMinDiagSize.setClearButtonEnabled(True)
self.editMinDiagSize.setObjectName("editMinDiagSize")
self.verticalLayout_5.addWidget(self.editMinDiagSize)
self.editMaxDiagSize = QtWidgets.QLineEdit(self.layoutWidget_3)
self.editMaxDiagSize.setMinimumSize(QtCore.QSize(90, 28))
self.editMaxDiagSize.setMaximumSize(QtCore.QSize(90, 28))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editMaxDiagSize.setFont(font)
self.editMaxDiagSize.setToolTipDuration(9999)
self.editMaxDiagSize.setText("")
self.editMaxDiagSize.setClearButtonEnabled(True)
self.editMaxDiagSize.setObjectName("editMaxDiagSize")
self.verticalLayout_5.addWidget(self.editMaxDiagSize)
self.editMaxChangeInArea = QtWidgets.QLineEdit(self.layoutWidget_3)
self.editMaxChangeInArea.setMinimumSize(QtCore.QSize(90, 28))
self.editMaxChangeInArea.setMaximumSize(QtCore.QSize(90, 28))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editMaxChangeInArea.setFont(font)
self.editMaxChangeInArea.setToolTipDuration(9999)
self.editMaxChangeInArea.setText("")
self.editMaxChangeInArea.setClearButtonEnabled(True)
self.editMaxChangeInArea.setObjectName("editMaxChangeInArea")
self.verticalLayout_5.addWidget(self.editMaxChangeInArea)
self.editMaxChangeInWidth = QtWidgets.QLineEdit(self.layoutWidget_3)
self.editMaxChangeInWidth.setMinimumSize(QtCore.QSize(90, 28))
self.editMaxChangeInWidth.setMaximumSize(QtCore.QSize(90, 28))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editMaxChangeInWidth.setFont(font)
self.editMaxChangeInWidth.setToolTipDuration(9999)
self.editMaxChangeInWidth.setText("")
self.editMaxChangeInWidth.setClearButtonEnabled(True)
self.editMaxChangeInWidth.setObjectName("editMaxChangeInWidth")
self.verticalLayout_5.addWidget(self.editMaxChangeInWidth)
self.editMaxChangeInHeight = QtWidgets.QLineEdit(self.layoutWidget_3)
self.editMaxChangeInHeight.setMinimumSize(QtCore.QSize(90, 28))
self.editMaxChangeInHeight.setMaximumSize(QtCore.QSize(90, 28))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editMaxChangeInHeight.setFont(font)
self.editMaxChangeInHeight.setToolTipDuration(9999)
self.editMaxChangeInHeight.setText("")
self.editMaxChangeInHeight.setClearButtonEnabled(True)
self.editMaxChangeInHeight.setObjectName("editMaxChangeInHeight")
self.verticalLayout_5.addWidget(self.editMaxChangeInHeight)
self.editMaxAngleBetweenChar = QtWidgets.QLineEdit(self.layoutWidget_3)
self.editMaxAngleBetweenChar.setMinimumSize(QtCore.QSize(90, 28))
self.editMaxAngleBetweenChar.setMaximumSize(QtCore.QSize(90, 28))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editMaxAngleBetweenChar.setFont(font)
self.editMaxAngleBetweenChar.setToolTipDuration(9999)
self.editMaxAngleBetweenChar.setText("")
self.editMaxAngleBetweenChar.setClearButtonEnabled(True)
self.editMaxAngleBetweenChar.setObjectName("editMaxAngleBetweenChar")
self.verticalLayout_5.addWidget(self.editMaxAngleBetweenChar)
self.editMinNumberOfMatchCharNumber = QtWidgets.QLineEdit(self.layoutWidget_3)
self.editMinNumberOfMatchCharNumber.setMinimumSize(QtCore.QSize(90, 28))
self.editMinNumberOfMatchCharNumber.setMaximumSize(QtCore.QSize(90, 28))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editMinNumberOfMatchCharNumber.setFont(font)
self.editMinNumberOfMatchCharNumber.setToolTipDuration(9999)
self.editMinNumberOfMatchCharNumber.setText("")
self.editMinNumberOfMatchCharNumber.setClearButtonEnabled(True)
self.editMinNumberOfMatchCharNumber.setObjectName("editMinNumberOfMatchCharNumber")
self.verticalLayout_5.addWidget(self.editMinNumberOfMatchCharNumber)
self.layoutWidget_4 = QtWidgets.QWidget(Dialog)
self.layoutWidget_4.setGeometry(QtCore.QRect(750, 330, 201, 401))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setBold(False)
font.setWeight(50)
self.layoutWidget_4.setFont(font)
self.layoutWidget_4.setObjectName("layoutWidget_4")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.layoutWidget_4)
self.verticalLayout_6.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.labelMinPixelWidth = QtWidgets.QLabel(self.layoutWidget_4)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelMinPixelWidth.setFont(font)
self.labelMinPixelWidth.setObjectName("labelMinPixelWidth")
self.verticalLayout_6.addWidget(self.labelMinPixelWidth)
self.labelMinPixelHeight = QtWidgets.QLabel(self.layoutWidget_4)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelMinPixelHeight.setFont(font)
self.labelMinPixelHeight.setObjectName("labelMinPixelHeight")
self.verticalLayout_6.addWidget(self.labelMinPixelHeight)
self.labelMinPixelArea = QtWidgets.QLabel(self.layoutWidget_4)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelMinPixelArea.setFont(font)
self.labelMinPixelArea.setObjectName("labelMinPixelArea")
self.verticalLayout_6.addWidget(self.labelMinPixelArea)
self.labelMinPixelRatio = QtWidgets.QLabel(self.layoutWidget_4)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelMinPixelRatio.setFont(font)
self.labelMinPixelRatio.setObjectName("labelMinPixelRatio")
self.verticalLayout_6.addWidget(self.labelMinPixelRatio)
self.labelMaxPixelRatio = QtWidgets.QLabel(self.layoutWidget_4)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelMaxPixelRatio.setFont(font)
self.labelMaxPixelRatio.setObjectName("labelMaxPixelRatio")
self.verticalLayout_6.addWidget(self.labelMaxPixelRatio)
self.labelMinDiagSize = QtWidgets.QLabel(self.layoutWidget_4)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelMinDiagSize.setFont(font)
self.labelMinDiagSize.setObjectName("labelMinDiagSize")
self.verticalLayout_6.addWidget(self.labelMinDiagSize)
self.labelMaxDiagSize = QtWidgets.QLabel(self.layoutWidget_4)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelMaxDiagSize.setFont(font)
self.labelMaxDiagSize.setObjectName("labelMaxDiagSize")
self.verticalLayout_6.addWidget(self.labelMaxDiagSize)
self.labelMaxChangeInArea = QtWidgets.QLabel(self.layoutWidget_4)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelMaxChangeInArea.setFont(font)
self.labelMaxChangeInArea.setObjectName("labelMaxChangeInArea")
self.verticalLayout_6.addWidget(self.labelMaxChangeInArea)
self.labelMaxChangeInWidth = QtWidgets.QLabel(self.layoutWidget_4)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelMaxChangeInWidth.setFont(font)
self.labelMaxChangeInWidth.setObjectName("labelMaxChangeInWidth")
self.verticalLayout_6.addWidget(self.labelMaxChangeInWidth)
self.labelMaChangeInHeight = QtWidgets.QLabel(self.layoutWidget_4)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelMaChangeInHeight.setFont(font)
self.labelMaChangeInHeight.setObjectName("labelMaChangeInHeight")
self.verticalLayout_6.addWidget(self.labelMaChangeInHeight)
self.labelMaxAngleBetweenChar = QtWidgets.QLabel(self.layoutWidget_4)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelMaxAngleBetweenChar.setFont(font)
self.labelMaxAngleBetweenChar.setObjectName("labelMaxAngleBetweenChar")
self.verticalLayout_6.addWidget(self.labelMaxAngleBetweenChar)
self.labelMinNumberOfMatchCharNumber = QtWidgets.QLabel(self.layoutWidget_4)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelMinNumberOfMatchCharNumber.setFont(font)
self.labelMinNumberOfMatchCharNumber.setObjectName("labelMinNumberOfMatchCharNumber")
self.verticalLayout_6.addWidget(self.labelMinNumberOfMatchCharNumber)
self.layoutWidget1 = QtWidgets.QWidget(Dialog)
self.layoutWidget1.setGeometry(QtCore.QRect(840, 40, 263, 241))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setBold(False)
font.setWeight(50)
self.layoutWidget1.setFont(font)
self.layoutWidget1.setObjectName("layoutWidget1")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.layoutWidget1)
self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.editCameraName = QtWidgets.QLineEdit(self.layoutWidget1)
self.editCameraName.setMinimumSize(QtCore.QSize(261, 30))
self.editCameraName.setMaximumSize(QtCore.QSize(261, 30))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editCameraName.setFont(font)
self.editCameraName.setToolTipDuration(9999)
self.editCameraName.setText("")
self.editCameraName.setClearButtonEnabled(True)
self.editCameraName.setObjectName("editCameraName")
self.verticalLayout_4.addWidget(self.editCameraName)
self.editCameraIP = QtWidgets.QLineEdit(self.layoutWidget1)
self.editCameraIP.setMinimumSize(QtCore.QSize(261, 30))
self.editCameraIP.setMaximumSize(QtCore.QSize(261, 30))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editCameraIP.setFont(font)
self.editCameraIP.setToolTipDuration(9999)
self.editCameraIP.setText("")
self.editCameraIP.setClearButtonEnabled(True)
self.editCameraIP.setObjectName("editCameraIP")
self.verticalLayout_4.addWidget(self.editCameraIP)
self.editCameraIPAddition = QtWidgets.QLineEdit(self.layoutWidget1)
self.editCameraIPAddition.setMinimumSize(QtCore.QSize(261, 29))
self.editCameraIPAddition.setMaximumSize(QtCore.QSize(261, 29))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editCameraIPAddition.setFont(font)
self.editCameraIPAddition.setToolTipDuration(9999)
self.editCameraIPAddition.setClearButtonEnabled(True)
self.editCameraIPAddition.setObjectName("editCameraIPAddition")
self.verticalLayout_4.addWidget(self.editCameraIPAddition)
self.editUsername = QtWidgets.QLineEdit(self.layoutWidget1)
self.editUsername.setMinimumSize(QtCore.QSize(261, 30))
self.editUsername.setMaximumSize(QtCore.QSize(261, 30))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editUsername.setFont(font)
self.editUsername.setToolTipDuration(9999)
self.editUsername.setText("")
self.editUsername.setClearButtonEnabled(True)
self.editUsername.setObjectName("editUsername")
self.verticalLayout_4.addWidget(self.editUsername)
self.editPassword = QtWidgets.QLineEdit(self.layoutWidget1)
self.editPassword.setMinimumSize(QtCore.QSize(261, 30))
self.editPassword.setMaximumSize(QtCore.QSize(261, 30))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editPassword.setFont(font)
self.editPassword.setToolTipDuration(9999)
self.editPassword.setText("")
self.editPassword.setClearButtonEnabled(True)
self.editPassword.setObjectName("editPassword")
self.verticalLayout_4.addWidget(self.editPassword)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setSpacing(0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.radioButtonRtsp = QtWidgets.QRadioButton(self.layoutWidget1)
self.radioButtonRtsp.setMinimumSize(QtCore.QSize(115, 22))
self.radioButtonRtsp.setMaximumSize(QtCore.QSize(115, 22))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.radioButtonRtsp.setFont(font)
self.radioButtonRtsp.setObjectName("radioButtonRtsp")
self.horizontalLayout_2.addWidget(self.radioButtonRtsp)
self.radioButtonHttp = QtWidgets.QRadioButton(self.layoutWidget1)
self.radioButtonHttp.setMinimumSize(QtCore.QSize(114, 22))
self.radioButtonHttp.setMaximumSize(QtCore.QSize(114, 22))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.radioButtonHttp.setFont(font)
self.radioButtonHttp.setObjectName("radioButtonHttp")
self.horizontalLayout_2.addWidget(self.radioButtonHttp)
self.verticalLayout_4.addLayout(self.horizontalLayout_2)
self.editLocation = QtWidgets.QLineEdit(self.layoutWidget1)
self.editLocation.setMinimumSize(QtCore.QSize(261, 30))
self.editLocation.setMaximumSize(QtCore.QSize(261, 30))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editLocation.setFont(font)
self.editLocation.setToolTipDuration(9999)
self.editLocation.setText("")
self.editLocation.setClearButtonEnabled(True)
self.editLocation.setObjectName("editLocation")
self.verticalLayout_4.addWidget(self.editLocation)
self.layoutWidget2 = QtWidgets.QWidget(Dialog)
self.layoutWidget2.setGeometry(QtCore.QRect(750, 740, 351, 26))
self.layoutWidget2.setObjectName("layoutWidget2")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.layoutWidget2)
self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.labelCameraStatus = QtWidgets.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelCameraStatus.setFont(font)
self.labelCameraStatus.setToolTipDuration(9999)
self.labelCameraStatus.setObjectName("labelCameraStatus")
self.horizontalLayout_4.addWidget(self.labelCameraStatus)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setSpacing(0)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.radioButtonWorking = QtWidgets.QRadioButton(self.layoutWidget2)
self.radioButtonWorking.setMinimumSize(QtCore.QSize(115, 22))
self.radioButtonWorking.setMaximumSize(QtCore.QSize(115, 22))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.radioButtonWorking.setFont(font)
self.radioButtonWorking.setObjectName("radioButtonWorking")
self.horizontalLayout_3.addWidget(self.radioButtonWorking)
self.radioButtonNotWorking = QtWidgets.QRadioButton(self.layoutWidget2)
self.radioButtonNotWorking.setMinimumSize(QtCore.QSize(114, 22))
self.radioButtonNotWorking.setMaximumSize(QtCore.QSize(114, 22))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.radioButtonNotWorking.setFont(font)
self.radioButtonNotWorking.setObjectName("radioButtonNotWorking")
self.horizontalLayout_3.addWidget(self.radioButtonNotWorking)
self.horizontalLayout_4.addLayout(self.horizontalLayout_3)
self.widget = QtWidgets.QWidget(Dialog)
self.widget.setGeometry(QtCore.QRect(360, 560, 272, 82))
self.widget.setObjectName("widget")
self.horizontalLayout_8 = QtWidgets.QHBoxLayout(self.widget)
self.horizontalLayout_8.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.verticalLayout_11 = QtWidgets.QVBoxLayout()
self.verticalLayout_11.setObjectName("verticalLayout_11")
self.labelBottomY1 = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelBottomY1.setFont(font)
self.labelBottomY1.setObjectName("labelBottomY1")
self.verticalLayout_11.addWidget(self.labelBottomY1)
self.labelBottomY2 = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelBottomY2.setFont(font)
self.labelBottomY2.setObjectName("labelBottomY2")
self.verticalLayout_11.addWidget(self.labelBottomY2)
self.horizontalLayout_8.addLayout(self.verticalLayout_11)
self.verticalLayout_12 = QtWidgets.QVBoxLayout()
self.verticalLayout_12.setObjectName("verticalLayout_12")
self.editBottomY1 = QtWidgets.QLineEdit(self.widget)
self.editBottomY1.setMinimumSize(QtCore.QSize(100, 36))
self.editBottomY1.setMaximumSize(QtCore.QSize(100, 36))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editBottomY1.setFont(font)
self.editBottomY1.setText("")
self.editBottomY1.setClearButtonEnabled(True)
self.editBottomY1.setObjectName("editBottomY1")
self.verticalLayout_12.addWidget(self.editBottomY1)
self.editBottomY2 = QtWidgets.QLineEdit(self.widget)
self.editBottomY2.setMinimumSize(QtCore.QSize(100, 36))
self.editBottomY2.setMaximumSize(QtCore.QSize(100, 36))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editBottomY2.setFont(font)
self.editBottomY2.setText("")
self.editBottomY2.setClearButtonEnabled(True)
self.editBottomY2.setObjectName("editBottomY2")
self.verticalLayout_12.addWidget(self.editBottomY2)
self.horizontalLayout_8.addLayout(self.verticalLayout_12)
self.widget1 = QtWidgets.QWidget(Dialog)
self.widget1.setGeometry(QtCore.QRect(30, 560, 275, 82))
self.widget1.setObjectName("widget1")
self.horizontalLayout_9 = QtWidgets.QHBoxLayout(self.widget1)
self.horizontalLayout_9.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.labelTopY1 = QtWidgets.QLabel(self.widget1)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelTopY1.setFont(font)
self.labelTopY1.setObjectName("labelTopY1")
self.verticalLayout_3.addWidget(self.labelTopY1)
self.labelTopY2 = QtWidgets.QLabel(self.widget1)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelTopY2.setFont(font)
self.labelTopY2.setObjectName("labelTopY2")
self.verticalLayout_3.addWidget(self.labelTopY2)
self.horizontalLayout_9.addLayout(self.verticalLayout_3)
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.editTopY1 = QtWidgets.QLineEdit(self.widget1)
self.editTopY1.setMinimumSize(QtCore.QSize(100, 36))
self.editTopY1.setMaximumSize(QtCore.QSize(100, 36))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editTopY1.setFont(font)
self.editTopY1.setText("")
self.editTopY1.setClearButtonEnabled(True)
self.editTopY1.setObjectName("editTopY1")
self.verticalLayout.addWidget(self.editTopY1)
self.editTopY2 = QtWidgets.QLineEdit(self.widget1)
self.editTopY2.setMinimumSize(QtCore.QSize(100, 36))
self.editTopY2.setMaximumSize(QtCore.QSize(100, 36))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.editTopY2.setFont(font)
self.editTopY2.setText("")
self.editTopY2.setClearButtonEnabled(True)
self.editTopY2.setObjectName("editTopY2")
self.verticalLayout.addWidget(self.editTopY2)
self.horizontalLayout_9.addLayout(self.verticalLayout)
self.widget2 = QtWidgets.QWidget(Dialog)
self.widget2.setGeometry(QtCore.QRect(10, 0, 1101, 33))
self.widget2.setObjectName("widget2")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget2)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.line = QtWidgets.QFrame(self.widget2)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.horizontalLayout.addWidget(self.line)
self.labelCameraInfo = QtWidgets.QLabel(self.widget2)
self.labelCameraInfo.setMinimumSize(QtCore.QSize(150, 31))
self.labelCameraInfo.setMaximumSize(QtCore.QSize(150, 31))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(13)
font.setBold(True)
font.setUnderline(False)
font.setWeight(75)
font.setStrikeOut(False)
font.setKerning(True)
self.labelCameraInfo.setFont(font)
self.labelCameraInfo.setObjectName("labelCameraInfo")
self.horizontalLayout.addWidget(self.labelCameraInfo)
self.line_2 = QtWidgets.QFrame(self.widget2)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.horizontalLayout.addWidget(self.line_2)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
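    # retranslateUi below assigns the Turkish display strings for the labels,
    # buttons and radio buttons created in setupUi; it is called once at the
    # end of setupUi above.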
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.buttonSaveConfiguration.setText(_translate("Dialog", "KAMERA AYARINI KAYDET"))
self.labelCameraName.setText(_translate("Dialog", "Kamera Adı"))
self.labelCameraIP.setText(_translate("Dialog", "Kamera IP"))
self.labelCameraIPAddition.setText(_translate("Dialog", "IP Ek Uzantı"))
self.labelUserName.setText(_translate("Dialog", "Kullanıcı Adı"))
self.labelPassword.setText(_translate("Dialog", "<PASSWORD>"))
self.labelProtocolType.setText(_translate("Dialog", "Protokol Tipi"))
self.labelLocation.setText(_translate("Dialog", "Konum"))
self.buttonShowContours.setText(_translate("Dialog", "SINIRLARI GÖSTER"))
self.labelCharReadingCalibration.setText(_translate("Dialog", "KARAKTER OKUMA KALİBRASYONU"))
self.labelMinPixelWidth.setText(_translate("Dialog", "Min. Piksel Genişliği"))
self.labelMinPixelHeight.setText(_translate("Dialog", "Min. Piksel Yüksekliği"))
self.labelMinPixelArea.setText(_translate("Dialog", "Min. Piksel Alanı"))
self.labelMinPixelRatio.setText(_translate("Dialog", "Min. Piksel Oranı"))
self.labelMaxPixelRatio.setText(_translate("Dialog", "Max. Piksel Oranı"))
self.labelMinDiagSize.setText(_translate("Dialog", "Min. Köşegen Boyutu Çarpanı"))
self.labelMaxDiagSize.setText(_translate("Dialog", "Max. Köşegen Boyutu Çarpanı"))
self.labelMaxChangeInArea.setText(_translate("Dialog", "Alandaki Max. Değişim"))
self.labelMaxChangeInWidth.setText(_translate("Dialog", "Genişlikteki Max. Değişim"))
self.labelMaChangeInHeight.setText(_translate("Dialog", "Yükseklikteki Max. Değişim"))
self.labelMaxAngleBetweenChar.setText(_translate("Dialog", "Karakterler Arası Max. | |
count: number of code lines (Int)
Returns:
- asm code (String)
"""
code = self.execute_redirect("x/%di 0x%x" % (count, address))
if code:
return code.rstrip()
else:
return ""
def dumpmem(self, start, end):
"""
Dump process memory from start to end
Args:
- start: start address (Int)
- end: end address (Int)
Returns:
- memory content (raw bytes)
"""
mem = None
logfd = tmpfile(is_binary_file=True)
logname = logfd.name
out = self.execute_redirect("dump memory %s 0x%x 0x%x" % (logname, start, end))
if out is None:
return None
else:
logfd.flush()
mem = logfd.read()
logfd.close()
return mem
def readmem(self, address, size):
"""
Read content of memory at an address
Args:
- address: start address to read (Int)
- size: bytes to read (Int)
Returns:
- memory content (raw bytes)
"""
# try fast dumpmem if it works
mem = self.dumpmem(address, address+size)
if mem is not None:
return mem
        # failed to dump, fall back to the slow byte-by-byte x/bx way
        mem = b""
        out = self.execute_redirect("x/%dbx 0x%x" % (size, address))
        if out:
            for line in out.splitlines():
                hexbytes = line.split(":\t")[-1].split()
                # build raw bytes so both the fast and slow paths return the same type
                mem += bytes(bytearray(int(c, 0) for c in hexbytes))
        return mem
def read_int(self, address, intsize=None):
"""
        Read an integer value from memory
Args:
- address: address to read (Int)
- intsize: force read size (Int)
Returns:
- mem value (Int)
"""
if not intsize:
intsize = self.intsize()
value = self.readmem(address, intsize)
if value:
value = to_int("0x" + codecs.encode(value[::-1], 'hex'))
return value
else:
return None
def read_long(self, address):
"""
Read a long long value from memory
Args:
- address: address to read (Int)
Returns:
- mem value (Long Long)
"""
return self.read_int(address, 8)
def writemem(self, address, buf):
"""
Write buf to memory start at an address
Args:
- address: start address to write (Int)
- buf: data to write (raw bytes)
Returns:
- number of written bytes (Int)
"""
out = None
if not buf:
return 0
if self.getpid():
# try fast restore mem
tmp = tmpfile(is_binary_file=True)
tmp.write(buf)
tmp.flush()
out = self.execute_redirect("restore %s binary 0x%x" % (tmp.name, address))
tmp.close()
if not out: # try the slow way
for i in range(len(buf)):
if not self.execute("set {char}0x%x = 0x%x" % (address+i, ord(buf[i]))):
return i
return i+1
elif "error" in out: # failed to write the whole buf, find written byte
for i in range(0, len(buf), 1):
if not self.is_address(address+i):
return i
else:
return len(buf)
def write_int(self, address, value, intsize=None):
"""
        Write an integer value to memory
Args:
            - address: address to write to (Int)
- value: int to write to (Int)
- intsize: force write size (Int)
Returns:
- Bool
"""
if not intsize:
intsize = self.intsize()
buf = hex2str(value, intsize).ljust(intsize, "\x00")[:intsize]
saved = self.readmem(address, intsize)
if not saved:
return False
ret = self.writemem(address, buf)
if ret != intsize:
self.writemem(address, saved)
return False
return True
def write_long(self, address, value):
"""
Write a long long value to memory
Args:
            - address: address to write to (Int)
- value: value to write to
Returns:
- Bool
"""
return self.write_int(address, value, 8)
def cmpmem(self, start, end, buf):
"""
Compare contents of a memory region with a buffer
Args:
- start: start address (Int)
- end: end address (Int)
- buf: raw bytes
Returns:
- dictionary of array of diffed bytes in hex (Dictionary)
            {123: [("A", "B"), ("C", "C")]}
"""
line_len = 32
if end < start:
(start, end) = (end, start)
mem = self.dumpmem(start, end)
if mem is None:
return None
length = min(len(mem), len(buf))
result = {}
lineno = 0
for i in range(length//line_len):
diff = 0
bytes_ = []
for j in range(line_len):
offset = i*line_len+j
bytes_ += [(mem[offset:offset + 1], buf[offset:offset + 1])]
if mem[offset] != buf[offset]:
diff = 1
if diff == 1:
result[start+lineno] = bytes_
lineno += line_len
bytes_ = []
diff = 0
for i in range(length % line_len):
offset = lineno+i
bytes_ += [(mem[offset:offset + 1], buf[offset:offset + 1])]
if mem[offset] != buf[offset]:
diff = 1
if diff == 1:
result[start+lineno] = bytes_
return result
def xormem(self, start, end, key):
"""
XOR a memory region with a key
Args:
- start: start address (Int)
- end: end address (Int)
- key: XOR key (String)
Returns:
- xored memory content (raw bytes)
"""
mem = self.dumpmem(start, end)
if mem is None:
return None
        if to_int(key) is not None:
key = hex2str(to_int(key), self.intsize())
mem = list(bytes_iterator(mem))
for index, char in enumerate(mem):
key_idx = index % len(key)
mem[index] = chr(ord(char) ^ ord(key[key_idx]))
buf = b"".join([to_binary_string(x) for x in mem])
        self.writemem(start, buf)
return buf
def searchmem(self, start, end, search, mem=None):
"""
Search for all instances of a pattern in memory from start to end
Args:
- start: start address (Int)
- end: end address (Int)
- search: string or python regex pattern (String)
- mem: cached mem to not re-read for repeated searches (raw bytes)
Returns:
- list of found result: (address(Int), hex encoded value(String))
"""
result = []
if end < start:
(start, end) = (end, start)
if mem is None:
mem = self.dumpmem(start, end)
if not mem:
return result
if isinstance(search, six.string_types) and search.startswith("0x"):
# hex number
search = search[2:]
if len(search) %2 != 0:
search = "0" + search
search = codecs.decode(search, 'hex')[::-1]
search = re.escape(search)
# Convert search to bytes if is not already
if not isinstance(search, bytes):
search = search.encode('utf-8')
try:
p = re.compile(search)
except:
search = re.escape(search)
p = re.compile(search)
found = list(p.finditer(mem))
for m in found:
index = 1
if m.start() == m.end() and m.lastindex:
index = m.lastindex+1
for i in range(0,index):
if m.start(i) != m.end(i):
result += [(start + m.start(i), codecs.encode(mem[m.start(i):m.end(i)], 'hex'))]
return result
def searchmem_by_range(self, mapname, search):
"""
Search for all instances of a pattern in virtual memory ranges
Args:
- search: string or python regex pattern (String)
- mapname: name of virtual memory range (String)
Returns:
- list of found result: (address(Int), hex encoded value(String))
"""
result = []
ranges = self.get_vmmap(mapname)
if ranges:
for (start, end, perm, name) in ranges:
if "r" in perm:
result += self.searchmem(start, end, search)
return result
@memoized
def search_reference(self, search, mapname=None):
"""
Search for all references to a value in memory ranges
Args:
- search: string or python regex pattern (String)
- mapname: name of target virtual memory range (String)
Returns:
- list of found result: (address(int), hex encoded value(String))
"""
maps = self.get_vmmap()
ranges = self.get_vmmap(mapname)
result = []
search_result = []
for (start, end, perm, name) in maps:
if "r" in perm:
search_result += self.searchmem(start, end, search)
for (start, end, perm, name) in ranges:
for (a, v) in search_result:
result += self.searchmem(start, end, to_address(a))
return result
@memoized
def search_address(self, searchfor="stack", belongto="binary"):
"""
Search for all valid addresses in memory ranges
Args:
- searchfor: memory region to search for addresses (String)
- belongto: memory region that target addresses belong to (String)
Returns:
- list of found result: (address(Int), value(Int))
"""
result = []
maps = self.get_vmmap()
if maps is None:
return result
searchfor_ranges = self.get_vmmap(searchfor)
belongto_ranges = self.get_vmmap(belongto)
step = self.intsize()
for (start, end, _, _) in searchfor_ranges[::-1]: # dirty trick, to search in rw-p mem first
mem = self.dumpmem(start, end)
if not mem:
continue
for i in range(0, len(mem), step):
search = "0x" + codecs.encode(mem[i:i+step][::-1], 'hex').decode('utf-8')
addr = to_int(search)
if self.is_address(addr, belongto_ranges):
result += [(start+i, addr)]
return result
@memoized
def search_pointer(self, searchfor="stack", belongto="binary"):
"""
Search for all valid pointers in memory ranges
Args:
- searchfor: memory region to search for pointers (String)
- belongto: memory region that pointed addresses belong to (String)
Returns:
- list of found result: (address(Int), value(Int))
"""
search_result = []
result = []
maps = self.get_vmmap()
searchfor_ranges = self.get_vmmap(searchfor)
belongto_ranges = self.get_vmmap(belongto)
step = self.intsize()
for (start, end, _, _) in searchfor_ranges[::-1]:
mem = self.dumpmem(start, end)
if not mem:
continue
for i in range(0, len(mem), step):
search = "0x" + codecs.encode(mem[i:i+step][::-1], 'hex').decode('utf-8')
addr = to_int(search)
if self.is_address(addr):
(v, t, vn) = self.examine_mem_value(addr)
if t != | |
#! /usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
import os, sys
import gzip
import json
import argparse
from uuid import UUID
import time, calendar
from datetime import datetime
import socket, struct
import pdb
import re
import logging
VERSION = '1.0'
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def dec_to_ip(number, ver=4):
if ver == 4:
return socket.inet_ntoa(struct.pack('!L', number))
else:
return socket.inet_ntop(socket.AF_INET6,
struct.pack('!QQ', number>>64, number & ((1 << 64) - 1)))
def is_valid_uuid(id):
try:
obj = UUID(str(id))
except:
return False
return str(id)
def ts_to_string(ts):
    # timestamp to ISO string "%Y-%m-%dT%H:%M:%S.%f"
if len(str(ts)) == 16:
return datetime.utcfromtimestamp(int(ts)/1000000.0).isoformat()
elif len(str(ts)) == 13:
return datetime.utcfromtimestamp(int(ts)/1000.0).isoformat()
else:
return datetime.utcfromtimestamp(int(ts)/1.0).isoformat()
def time_type(s, fmt = "%Y-%m-%dT%H:%M:%S"):
times = 1000000
try:
datetime_obj = datetime.strptime(s, fmt)
except:
raise argparse.ArgumentTypeError(
"Wrong time format e.g. 2018-03-19T15:27:01")
timeInSeconds = calendar.timegm(datetime_obj.utctimetuple())
return timeInSeconds * times
def dict_compare(d1, d2):
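    # Returns (only_in_d1, only_in_d2, modified); modified maps key -> (d1_value, d2_value).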
(d1_only, d2_only, modified) = ({}, {}, {})
for k, v in d1.items():
if k not in d2:
d1_only[k] = v
elif v != d2[k]:
modified[k] = (v, d2[k])
for k, v in d2.items():
if k not in d1:
d2_only[k] = v
return (d1_only, d2_only, modified)
def dict_print(d, offset='', step=0):
indent = ' ' * 3
for key, value in d.items():
if isinstance(value, dict):
print(offset + indent * step + str(key) + ':')
dict_print(value, offset, step+1)
else:
print(offset + indent * step + str(key) + ': ' + str(value))
def pretty_print(d, offset='', step=0):
indent = ' ' * 3
if isinstance(d, dict):
for key, value in d.items():
if (isinstance(value, dict) or isinstance(value, list)
or isjson(value)):
print(offset + indent * step + str(key) + ':')
pretty_print(value, offset, step+1)
else:
print(offset + indent * step + str(key) + ': ' + str(value))
elif isinstance(d, list):
if len(d):
if (isinstance(d[0], dict) or isinstance(d[0], list)
or isjson(d[0])):
for e in d:
pretty_print(e, offset, step+1)
else:
print(offset + indent * step + ':'.join(str(e) for e in d))
elif isjson(d):
pretty_print(json.loads(d), offset, step+1)
else:
print(offset + indent * step + str(d))
def is_in(key, string):
    assert isinstance(key, (list, set)), "expect a list or set"
for i in key:
if i in string:
return True
return False
def isjson(j):
if not(isinstance(j, str) or isinstance(j, unicode)):
return False
if len(j) > 1 and (j[0] + j[-1] == '{}' or j[0] + j[-1] == '[]'):
try:
json.loads(j)
except:
return False
return True
return False
class DB():
""" Class to load config db json file into a dictionary """
def __init__(self, from_json = True, **kwargs):
self._logger = logging.getLogger(__name__)
if from_json:
self.zk = {}
self.cassandra = {}
json_file = kwargs.get('json_file', "db-dump.json")
self._load_db(json_file)
# else connect to cassandra and zookeep - TBD
def _load_db (self, json_file):
self.zk = {}
self.cassandra = {}
db_contents = {}
self._logger.info('Loading %s ...', json_file)
        if json_file.endswith('.gz'):
            # use a context manager so the handle is closed even if open/parse fails
            with gzip.open(json_file, 'rb') as f:
                db_contents = json.loads(f.read())
        else:
            with open(json_file, 'r') as f:
                db_contents = json.loads(f.read())
self._logger.info('Loading %s completed', json_file)
self.cassandra = db_contents['cassandra']
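        # The zookeeper dump is a JSON-encoded list of [path, data] pairs;
        # flatten it into a path -> data dictionary for easy comparison.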
for e in json.loads(db_contents['zookeeper']):
self.zk[e[0]] = e[1]
class DB_comp():
""" Class to compare two DB """
KEYSPACES = ['config_db_uuid',
'useragent',
'to_bgp_keyspace',
'svc_monitor_keyspace',
'dm_keyspace']
ZK_PATHS = ['/fq-name-to-uuid/',
'/api-server/subnets/',
'/id/virtual-networks/',
'/id/bgp/route-targets/',
'/id/security-groups/id/',
'/id/bgpaas/port/',
'/vnc_api_server_obj_create/',
'other_paths']
def __init__(self, parser, old_json, new_json):
self._logger = logging.getLogger(__name__)
self._build_parser(parser)
self.json_files = [ old_json, new_json ]
self.diff = {}
def _load_db(self):
self.old_db = DB(json_file = self.json_files[0])
self.new_db = DB(json_file = self.json_files[1])
def _build_parser(self, parser):
parser.add_argument(
'new_json_file',
help="json file to be compared")
parser.add_argument(
'scope',
choices = self.KEYSPACES + ['zookeeper', 'object', 'all'],
default = 'all',
help="Scope to be compared")
parser.add_argument(
"objects", type=is_valid_uuid, nargs= '*',
help="object uuid to be compared. ")
parser.add_argument(
"-d", "--detail", help="Display details",
action='store_true', default=False)
parser.set_defaults(func=self.db_compare)
def db_compare(self, args):
self._load_db()
if args.scope == 'all':
for s in self.KEYSPACES:
self._compare_cassandra(s)
self._compare_zk()
elif args.scope == 'zookeeper':
self._compare_zk()
elif args.scope in self.KEYSPACES:
self._compare_cassandra(args.scope)
elif args.scope == 'object':
if args.objects == []:
# comparing config_db_uuid since no object id is provided
self._compare_cassandra('config_db_uuid')
else:
self._compare_object(args.objects)
self.print_diff(args.detail)
def _compare_cassandra (self, ks):
self.diff[ks] = {}
old_ks = self.old_db.cassandra[ks]
new_ks = self.new_db.cassandra[ks]
old_cfs = set(old_ks.keys())
        new_cfs = set(new_ks.keys())
if new_cfs != old_cfs:
self._logger.warning('CFs were added/removed in %s', ks)
self._logger.warning('\tAdded CF: %s',
str([ s for s in new_cfs - old_cfs]))
self._logger.warning('\tRemoved CF: %s',
str([ s for s in old_cfs - new_cfs]))
for cf in old_cfs.intersection(new_cfs):
diff_cf = self.diff[ks][cf] = {'removed': {},
'added': {},
'modified': {}}
if cf == 'obj_fq_name_table' or cf == 'obj_shared_table':
for type, objs in old_ks[cf].items():
if type not in new_ks[cf]:
for name, value in objs.items():
diff_cf['removed'][type+':'+ name] = value
else:
(removed, added, modified) = \
dict_compare(objs, new_ks[cf][type])
for key, value in removed.items():
diff_cf['removed'][type+':'+key] = value
for key, value in added.items():
diff_cf['added'][type+':'+key] = value
for key, value in modified.items():
diff_cf['modified'][type+':'+key] = value
for type, objs in new_ks[cf].items():
if type not in old_ks[cf]:
for name, value in objs.items():
diff_cf['added'][type+':'+ name] = value
else:
(diff_cf['removed'], diff_cf['added'], diff_cf['modified']) = \
dict_compare(old_ks[cf], new_ks[cf])
def _compare_zk(self):
diff_zk = self.diff['zookeeper'] = {}
for type in self.ZK_PATHS:
diff_zk[type] = {'removed': {},
'added': {},
'modified': {}}
diff = {}
(diff['removed'], diff['added'], diff['modified']) = \
dict_compare(self.old_db.zk, self.new_db.zk)
def zk_type(path):
for type in self.ZK_PATHS:
if path.startswith(type):
return type
return 'other_paths'
for path, value in diff['removed'].items():
diff_zk[zk_type(path)]['removed'][path] = value
for path, value in diff['added'].items():
diff_zk[zk_type(path)]['added'][path] = value
for path, value in diff['modified'].items():
diff_zk[zk_type(path)]['modified'][path] = value
def _compare_object(self, obj_ids):
old_db = self.old_db.cassandra
new_db = self.new_db.cassandra
for id in obj_ids:
try:
old_obj = old_db['config_db_uuid']['obj_uuid_table'][id]
new_obj = new_db['config_db_uuid']['obj_uuid_table'][id]
except KeyError:
self._logger.warning('%s not found in obj_uuid_table', id)
continue
self.print_obj_diff(id, old_obj, new_obj)
def print_diff(self, detail):
indent = ' ' * 3
for name, result in self.diff.items():
if name in self.KEYSPACES:
print "keyspace: %s" % (name)
else:
print "zookeeper:"
for tbl_name, tbl in result.items():
removed = 0 if 'removed' not in tbl else len(tbl['removed'])
added = 0 if 'added' not in tbl else len(tbl['added'])
modified = 0 if 'modified' not in tbl else len(tbl['modified'])
print "%s%s (removed[-]: %d, added[+]: %d, modified[*]: %d)" \
% (indent, tbl_name, removed, added, modified)
if detail:
if removed:
for k in tbl['removed'].keys():
print "%s- %s" % (2*indent, str(k))
if added:
for k in tbl['added'].keys():
print "%s+ %s" % (2*indent, str(k))
if modified:
for k in tbl['modified'].keys():
print "%s- %s" % (2*indent, str(k))
def print_obj_diff(self, uuid, old_obj, new_obj):
indent = ' ' * 3
print "%s:" % uuid
for field in sorted(set(old_obj.keys()) | set(new_obj.keys())):
if field in old_obj and field not in new_obj:
mtime = ts_to_string(old_obj[field][1])
DB_show.print_obj_field(field, old_obj[field][0],
mtime=mtime,
detail=True, prefix='-')
elif field not in old_obj and field in new_obj:
mtime = ts_to_string(new_obj[field][1])
DB_show.print_obj_field(field, new_obj[field][0],
mtime=mtime,
detail=True, prefix='+')
elif old_obj[field][0] != new_obj[field][0]:
old_mtime = ts_to_string(old_obj[field][1])
new_mtime = ts_to_string(new_obj[field][1])
DB_show.print_obj_field(field, old_obj[field][0],
mtime=old_mtime,
detail=True, prefix='*[old]')
DB_show.print_obj_field(field, new_obj[field][0],
mtime=new_mtime,
detail=True, prefix='*[new]')
else:
mtime = ts_to_string(old_obj[field][1])
DB_show.print_obj_field(field, old_obj[field][0], detail=True)
class DB_show():
""" Class to show objects in DB tables"""
SUB_COMMAND = {
'config_db': [{
'fqn' : 'obj_fq_name_table',
'uuid' : 'obj_uuid_table',
'shared_obj': 'obj_shared_table'},
'config_db_uuid'],
'useragent': [{
'kv' : 'useragent_keyval_table'},
'useragent'],
'to_bgp': [ {
'sc_uuid': 'service_chain_uuid_table',
'sc_ip' : 'service_chain_ip_address_table',
'sc' : 'service_chain_table',
'rt' : 'route_target_table'},
'to_bgp_keyspace'],
'svc_mon': [ {
'si': 'service_instance_table',
'lb': 'loadbalancer_table',
'pl': 'pool_table'},
'svc_monitor_keyspace'],
'dm': [ {
'pnf': 'dm_pnf_resource_table',
'pr' : 'dm_pr_vn_ip_table'},
'dm_keyspace'],
'zk': [ {}, 'zookeeper' ]
}
OBJ_TYPES = [
'service_appliance_set' , 'virtual_router' , 'security_group' ,
'global_system_config' , 'network_policy' , 'qos_config' ,
'route_table' , 'interface_route_table', 'forwarding_class' ,
'service_appliance' , 'routing_policy' , 'network_ipam' ,
'config_node' , 'namespace' , 'logical_router' ,
'global_qos_config' , 'service_health_check' , 'bgp_router' ,
'domain' , 'service_instance' , 'loadbalancer_member' ,
'virtual_ip' , 'api_access_list' , 'qos_forwarding_class' ,
'discovery_service_assignment', 'project' , 'route_target' ,
'virtual_machine' , 'qos_queue' , 'virtual_machine_interface',
'database_node' , 'analytics_node' , 'floating_ip_pool' ,
'instance_ip' , 'access_control_list' , 'bgp_as_a_service' ,
'global_vrouter_config' , 'loadbalancer_pool' , 'port_tuple' ,
'service_template' , 'routing_instance' , 'virtual_network' ,
'floating_ip',
]
ZK_ENT_PROP = [ 'cZxid',
'mZxid',
'ctime',
'mtime',
'cversion',
'dataVersion',
'aclVersion',
'ephemeralOwner',
'dataLength',
'numChildren',
'pZxid']
def __init__(self, parser, json_file):
self._logger = logging.getLogger(__name__)
self.db_json_file = json_file
self._build_parser(parser)
def _load_db(self):
self.db = DB(json_file = self.db_json_file)
def _build_parser(self, parser):
sub_parsers = parser.add_subparsers()
for cmd in self.SUB_COMMAND.keys():
cmd_parser = sub_parsers.add_parser(cmd,
help='Show %s' % self.SUB_COMMAND[cmd][1])
if self.SUB_COMMAND[cmd][0]:
cmd_parser.add_argument(
'table', choices=self.SUB_COMMAND[cmd][0].keys(),
help=str(self.SUB_COMMAND[cmd][0])
)
cmd_parser.add_argument(
'search', nargs= '*',
help="search string")
cmd_parser.add_argument(
'-t', '--time', action='store_true', default=False,
help="Display creation/modification time")
cmd_parser.add_argument(
'-a', '--after', type=time_type,
default = "2000-01-01T00:00:00",
help = "show objects created after given time " +
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Configuration, Serializer, Deserializer
from .version import VERSION
from msrest.pipeline import ClientRawResponse
from msrest.exceptions import HttpOperationError
from . import models
class AzureOpcHistoryClientConfiguration(Configuration):
"""Configuration for AzureOpcHistoryClient
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Subscription credentials which uniquely identify
client subscription.
:type credentials: None
:param str base_url: Service URL
"""
def __init__(
self, credentials, base_url=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if not base_url:
base_url = 'http://localhost'
super(AzureOpcHistoryClientConfiguration, self).__init__(base_url)
self.add_user_agent('azureopchistoryclient/{}'.format(VERSION))
self.credentials = credentials
class AzureOpcHistoryClient(object):
"""Azure Industrial IoT OPC UA Historic Access Service
:ivar config: Configuration for client.
:vartype config: AzureOpcHistoryClientConfiguration
:param credentials: Subscription credentials which uniquely identify
client subscription.
:type credentials: None
:param str base_url: Service URL
"""
def __init__(
self, credentials, base_url=None):
self.config = AzureOpcHistoryClientConfiguration(credentials, base_url)
self._client = ServiceClient(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = 'v2'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
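    # Hypothetical usage sketch (credentials, endpoint id and request are placeholders):
    #   client = AzureOpcHistoryClient(credentials=my_credentials)
    #   response = client.history_read_raw("my-endpoint-id", read_request)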
def history_delete_values_at_times(
self, endpoint_id, request, custom_headers=None, raw=False, **operation_config):
"""Delete value history at specified times.
Delete value history using historic access.
The endpoint must be activated and connected and the module client
and server must trust each other.
:param endpoint_id: The identifier of the activated endpoint.
:type endpoint_id: str
:param request: The history update request
:type request:
~azure-iiot-opc-history.models.HistoryUpdateRequestApiModelDeleteValuesAtTimesDetailsApiModel
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: HistoryUpdateResponseApiModel or ClientRawResponse if
raw=true
:rtype: ~azure-iiot-opc-history.models.HistoryUpdateResponseApiModel
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.history_delete_values_at_times.metadata['url']
path_format_arguments = {
'endpointId': self._serialize.url("endpoint_id", endpoint_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json-patch+json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(request, 'HistoryUpdateRequestApiModelDeleteValuesAtTimesDetailsApiModel')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('HistoryUpdateResponseApiModel', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
history_delete_values_at_times.metadata = {'url': '/v2/delete/{endpointId}/values/pick'}
def history_delete_values(
self, endpoint_id, request, custom_headers=None, raw=False, **operation_config):
"""Delete historic values.
Delete historic values using historic access.
The endpoint must be activated and connected and the module client
and server must trust each other.
:param endpoint_id: The identifier of the activated endpoint.
:type endpoint_id: str
:param request: The history update request
:type request:
~azure-iiot-opc-history.models.HistoryUpdateRequestApiModelDeleteValuesDetailsApiModel
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: HistoryUpdateResponseApiModel or ClientRawResponse if
raw=true
:rtype: ~azure-iiot-opc-history.models.HistoryUpdateResponseApiModel
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.history_delete_values.metadata['url']
path_format_arguments = {
'endpointId': self._serialize.url("endpoint_id", endpoint_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json-patch+json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(request, 'HistoryUpdateRequestApiModelDeleteValuesDetailsApiModel')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('HistoryUpdateResponseApiModel', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
history_delete_values.metadata = {'url': '/v2/delete/{endpointId}/values'}
def history_delete_modified_values(
self, endpoint_id, request, custom_headers=None, raw=False, **operation_config):
"""Delete historic values.
Delete historic values using historic access.
The endpoint must be activated and connected and the module client
and server must trust each other.
:param endpoint_id: The identifier of the activated endpoint.
:type endpoint_id: str
:param request: The history update request
:type request:
~azure-iiot-opc-history.models.HistoryUpdateRequestApiModelDeleteModifiedValuesDetailsApiModel
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: HistoryUpdateResponseApiModel or ClientRawResponse if
raw=true
:rtype: ~azure-iiot-opc-history.models.HistoryUpdateResponseApiModel
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.history_delete_modified_values.metadata['url']
path_format_arguments = {
'endpointId': self._serialize.url("endpoint_id", endpoint_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json-patch+json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(request, 'HistoryUpdateRequestApiModelDeleteModifiedValuesDetailsApiModel')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('HistoryUpdateResponseApiModel', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
history_delete_modified_values.metadata = {'url': '/v2/delete/{endpointId}/values/modified'}
def history_delete_events(
self, endpoint_id, request, custom_headers=None, raw=False, **operation_config):
"""Delete historic events.
Delete historic events using historic access.
The endpoint must be activated and connected and the module client
and server must trust each other.
:param endpoint_id: The identifier of the activated endpoint.
:type endpoint_id: str
:param request: The history update request
:type request:
~azure-iiot-opc-history.models.HistoryUpdateRequestApiModelDeleteEventsDetailsApiModel
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: HistoryUpdateResponseApiModel or ClientRawResponse if
raw=true
:rtype: ~azure-iiot-opc-history.models.HistoryUpdateResponseApiModel
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.history_delete_events.metadata['url']
path_format_arguments = {
'endpointId': self._serialize.url("endpoint_id", endpoint_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json-patch+json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(request, 'HistoryUpdateRequestApiModelDeleteEventsDetailsApiModel')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('HistoryUpdateResponseApiModel', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
history_delete_events.metadata = {'url': '/v2/delete/{endpointId}/events'}
def history_read_raw(
self, endpoint_id, request, custom_headers=None, raw=False, **operation_config):
"""Read history using json details.
Read node history if available using historic access.
The endpoint must be activated and connected and the module client
and server must trust each other.
:param endpoint_id: The identifier of the activated endpoint.
:type endpoint_id: str
:param request: The history read request
:type request:
~azure-iiot-opc-history.models.HistoryReadRequestApiModelJToken
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: HistoryReadResponseApiModelJToken or ClientRawResponse if
raw=true
:rtype:
~azure-iiot-opc-history.models.HistoryReadResponseApiModelJToken or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.history_read_raw.metadata['url']
path_format_arguments = {
'endpointId': self._serialize.url("endpoint_id", endpoint_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json-patch+json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(request, 'HistoryReadRequestApiModelJToken')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('HistoryReadResponseApiModelJToken', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
history_read_raw.metadata = {'url': '/v2/history/read/{endpointId}'}
def history_read_raw_next(
self, endpoint_id, request, custom_headers=None, raw=False, **operation_config):
"""Read next batch of history as json.
Read next batch of node history values using historic access.
The endpoint must be activated and connected and the module client
and server must trust each other.
:param endpoint_id: The identifier of the activated endpoint.
:type endpoint_id: str
:param request: The history read next request
:type request:
~azure-iiot-opc-history.models.HistoryReadNextRequestApiModel
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: HistoryReadNextResponseApiModelJToken or ClientRawResponse if
raw=true
:rtype:
~azure-iiot-opc-history.models.HistoryReadNextResponseApiModelJToken
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.history_read_raw_next.metadata['url']
path_format_arguments = {
'endpointId': self._serialize.url("endpoint_id", endpoint_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
# SIPEC
# MARKUS MARKS
# MODEL ARCHITECTURES
import tensorflow as tf
from tensorflow.keras import regularizers, Model
from tensorflow.keras.applications import (
DenseNet121,
DenseNet201,
ResNet50,
ResNet101,
InceptionResNetV2,
Xception,
NASNetLarge,
InceptionV3,
EfficientNetB2,
EfficientNetB3,
EfficientNetB4,
EfficientNetB5,
EfficientNetB6,
EfficientNetB7,
)
from tensorflow.keras.layers import (
Conv2D,
BatchNormalization,
Flatten,
Dense,
Dropout,
Activation,
TimeDistributed,
LSTM,
GRU,
Input,
Bidirectional,
MaxPooling2D,
Conv1D,
SpatialDropout1D,
ZeroPadding2D,
concatenate,
GaussianNoise,
Conv2DTranspose,
UpSampling2D,
Reshape,
LeakyReLU,
)
from tensorflow.keras.models import Sequential
from tensorflow.python.keras.applications.efficientnet import EfficientNetB1
def posenet(
input_shape,
num_classes,
backbone="efficientnetb5",
fix_backbone=False,
gaussian_noise=0.05,
features=256,
bias=False,
):
"""Model that implements SIPEC:PoseNet architecture.
This model uses an EfficientNet backbone and deconvolves generated features into landmarks in imagespace.
It operates on single images and can be used in conjuntion with SIPEC:SegNet to perform top-down pose estimation.
Parameters
----------
input_shape : keras compatible input shape (W,H,Channels)
keras compatible input shape (features,)
num_classes : int
Number of joints/landmarks to detect.
backbone : str
Backbone/feature detector to use, default is EfficientNet5. Choose smaller/bigger backbone depending on GPU memory.
gaussian_noise : float
Kernel size of gaussian noise layers to use.
features : int
Number of feature maps to generate at each level.
bias : bool
Use bias for deconvolutional layers.
Returns
-------
keras.model
SIPEC:PoseNet
"""
if backbone == "efficientnetb5":
recognition_model = EfficientNetB5(
include_top=False,
input_shape=input_shape,
pooling=None,
weights="imagenet",
)
elif backbone == "efficientnetb7":
recognition_model = EfficientNetB7(
include_top=False,
input_shape=input_shape,
pooling=None,
weights="imagenet",
)
elif backbone == "efficientnetb1":
recognition_model = EfficientNetB1(
include_top=False,
input_shape=input_shape,
pooling=None,
weights="imagenet",
)
else:
raise NotImplementedError
new_input = Input(
batch_shape=(None, input_shape[0], input_shape[1], input_shape[2])
)
if fix_backbone:
for layer in recognition_model.layers:
layer.trainable = False
x = Conv2D(3, kernel_size=(1, 1), strides=(1, 1))(new_input)
x = recognition_model(x)
for i in range(4):
x = Conv2DTranspose(
features, kernel_size=(2, 2), strides=(2, 2), padding="valid", use_bias=bias
)(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = GaussianNoise(gaussian_noise)(x)
x = Conv2DTranspose(
features, kernel_size=(2, 2), strides=(2, 2), padding="valid", use_bias=bias
)(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2DTranspose(
num_classes, kernel_size=(1, 1), strides=(1, 1), padding="valid"
)(x)
x = Activation("sigmoid")(x)
model = Model(inputs=new_input, outputs=x)
model.summary()
return model
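# Illustrative usage sketch (input size and landmark count are assumptions
# for demonstration, not values from the SIPEC pipeline):
# pose_model = posenet(
#     input_shape=(256, 256, 3),  # assumed crop size, e.g. output of SIPEC:SegNet
#     num_classes=12,             # assumed number of landmarks
#     backbone="efficientnetb5",
# )
# pose_model.compile(optimizer="adam", loss="binary_crossentropy")  # loss choice is an assumption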
def classification_scratch(input_shape):
"""
Args:
input_shape:
"""
activation = "tanh"
dropout = 0.3
# conv model
model = Sequential()
model.add(
Conv2D(
64,
kernel_size=(4, 4),
strides=(4, 4),
padding="valid",
input_shape=input_shape,
)
)
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Conv2D(128, kernel_size=(4, 4), strides=(2, 2), padding="valid"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Conv2D(128, kernel_size=(2, 2), strides=(2, 2), padding="valid"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Conv2D(256, kernel_size=(2, 2), strides=(2, 2), padding="valid"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Conv2D(512, kernel_size=(2, 2), strides=(2, 2), padding="valid"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Flatten())
model.add(Dense(128, kernel_regularizer=regularizers.l2(0.01)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Dense(64, kernel_regularizer=regularizers.l2(0.01)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Dense(32, kernel_regularizer=regularizers.l2(0.01)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
# model.add(Flatten())
model.add(Dense(4))
model.add(Activation("softmax"))
return model
def classification_large(input_shape):
"""
Args:
input_shape:
"""
activation = "tanh"
dropout = 0.1
# conv model
model = Sequential()
model.add(
Conv2D(
64,
kernel_size=(4, 4),
strides=(4, 4),
padding="valid",
input_shape=input_shape,
)
)
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Conv2D(128, kernel_size=(4, 4), strides=(2, 2), padding="valid"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Conv2D(128, kernel_size=(2, 2), strides=(2, 2), padding="valid"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Conv2D(256, kernel_size=(2, 2), strides=(2, 2), padding="valid"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Conv2D(512, kernel_size=(2, 2), strides=(2, 2), padding="valid"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Conv2D(1024, kernel_size=(2, 2), strides=(2, 2), padding="valid"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Flatten())
model.add(Dense(128, kernel_regularizer=regularizers.l2(0.01)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Dense(64, kernel_regularizer=regularizers.l2(0.01)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Dense(32, kernel_regularizer=regularizers.l2(0.01)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
# model.add(Flatten())
model.add(Dense(4))
model.add(Activation("softmax"))
return model
def classification_small(input_shape, num_classes):
"""
Args:
input_shape:
num_classes:
"""
dropout = 0.33
# conv model
model = Sequential()
model.add(
Conv2D(
64,
kernel_size=(4, 4),
strides=(4, 4),
padding="valid",
input_shape=input_shape,
)
)
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Conv2D(128, kernel_size=(4, 4), strides=(2, 2), padding="valid"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Conv2D(256, kernel_size=(2, 2), strides=(2, 2), padding="valid"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Conv2D(512, kernel_size=(2, 2), strides=(2, 2), padding="valid"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Flatten())
model.add(Dense(128, kernel_regularizer=regularizers.l2(0.01)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Dense(64, kernel_regularizer=regularizers.l2(0.01)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Dense(32, kernel_regularizer=regularizers.l2(0.01)))
model.add(BatchNormalization())
model.add(Activation("relu"))
# model.add(Dropout(dropout))
# model.add(Flatten())
model.add(Dense(num_classes))
model.add(Activation("softmax"))
return model
def dlc_model_sturman(input_shape, num_classes):
"""Model that implements behavioral classification based on Deeplabcut generated features as in Sturman et al.
Reimplementation of the model used in the publication Sturman et al. that performs action recognition on top of pose estimation
Parameters
----------
input_shape : keras compatible input shape (W,H,Channels)
keras compatible input shape (features,)
num_classes : int
Number of behaviors to classify.
Returns
-------
keras.model
Sturman et al. model
"""
model = Sequential()
model.add(
Dense(
256, input_shape=(input_shape[-1],), kernel_regularizer=regularizers.l2(0.0)
)
)
model.add(Activation("relu"))
model.add(Dropout(0.4))
model.add(Dense(128, kernel_regularizer=regularizers.l2(0.0)))
model.add(Activation("relu"))
model.add(Dropout(0.3))
# TODO: parametrize # behaviors
model.add(Dense(num_classes))
model.add(Activation("softmax"))
return model
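# Illustrative usage sketch (feature dimensionality and behaviour count are
# placeholders, not values from Sturman et al.):
# sturman_model = dlc_model_sturman(input_shape=(36,), num_classes=8)
# sturman_model.compile(optimizer="adam", loss="categorical_crossentropy")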
def dlc_model(input_shape, num_classes):
"""Model for classification on top of pose estimation.
Classification model for behavior, operating on pose estimation. This model has more free parameters than Sturman et al.
Parameters
----------
input_shape : tuple
    Keras-compatible input shape (features,).
num_classes : int
Number of behaviors to classify.
Returns
-------
keras.model
behavior (from pose estimates) model
"""
dropout = 0.3
model = Sequential()
model.add(Dense(256, input_shape=(input_shape[-1],)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Dense(128))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
model.add(Dense(64))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(dropout))
# TODO: parametrize # behaviors
model.add(Dense(num_classes))
model.add(Activation("softmax"))
return model
def recurrent_model_old(
recognition_model, recurrent_input_shape, classes=4, recurrent_dropout=None
):
"""
Args:
recognition_model:
recurrent_input_shape:
classes:
recurrent_dropout:
"""
input_sequences = Input(shape=recurrent_input_shape)
sequential_model_helper = TimeDistributed(recognition_model)(input_sequences)
if recurrent_dropout:
# TODO: adjust bidirectional
k = LSTM(units=128, return_sequences=True, recurrent_dropout=recurrent_dropout)(
sequential_model_helper
)
k = LSTM(units=64, return_sequences=True, recurrent_dropout=recurrent_dropout)(
k
)
k = LSTM(units=32, return_sequences=False, recurrent_dropout=recurrent_dropout)(
k
)
else:
# As of TF 2, one can just use LSTM and there is no CuDNNLSTM
k = Bidirectional(LSTM(units=128, return_sequences=True))(
sequential_model_helper
)
k = Bidirectional(LSTM(units=64, return_sequences=True))(k)
k = Bidirectional(LSTM(units=32, return_sequences=False))(k)
dout = 0.3
k = Dense(256)(k)
k = Activation("relu")(k)
k = Dropout(dout)(k)
k = Dense(128)(k)
k = Activation("relu")(k)
k = Dropout(dout)(k)
k = Dense(64)(k)
k = Dropout(dout)(k)
k = Activation("relu")(k)
k = Dense(32)(k)
k = Activation("relu")(k)
# TODO: modelfy me!
k = Dense(classes)(k)
k = Activation("softmax")(k)
sequential_model = Model(inputs=input_sequences, outputs=k)
return sequential_model
def recurrent_model_tcn(
recognition_model,
recurrent_input_shape,
classes=4,
):
"""Recurrent architecture for classification of temporal sequences of images based on temporal convolution architecture (TCN).
This architecture is used for BehaviorNet in SIPEC.
Parameters
----------
recognition_model : keras.model
Pretrained recognition model that extracts features for individual frames.
recurrent_input_shape : np.ndarray - (Time, Width, Height, Channels)
Shape of the images over time.
classes : int
Number of behaviors to recognise.
Returns
-------
keras.model
BehaviorNet
"""
input_sequences = Input(shape=recurrent_input_shape)
sequential_model_helper = TimeDistributed(recognition_model)(input_sequences)
k = BatchNormalization()(sequential_model_helper)
# TODO: config me!
filters = 64
kernel_size = 2
# dout = 0.01
act_fcn = "relu"
k = Conv1D(
filters,
kernel_size=kernel_size,
padding="same",
dilation_rate=1,
kernel_initializer="he_normal",
)(k)
k = BatchNormalization()(k)
# k = Activation(LeakyReLU(alpha=0.3))(k)
# k = Activation(Activation('relu'))(k)
# k = wave_net_activation(k)
k = Activation(act_fcn)(k)
# k = SpatialDropout1D(rate=dout)(k)
k = Conv1D(
filters,
kernel_size=kernel_size,
padding="same",
dilation_rate=2,
kernel_initializer="he_normal",
)(k)
k = BatchNormalization()(k)
# k = Activation(LeakyReLU(alpha=0.3))(k)
# k = Activation(Activation('relu'))(k)
# k = wave_net_activation(k)
k = Activation(act_fcn)(k)
# k = SpatialDropout1D(rate=dout)(k)
k = Conv1D(
filters,
kernel_size=kernel_size,
padding="same",
dilation_rate=4,
kernel_initializer="he_normal",
)(k)
k = BatchNormalization()(k)
# k = Activation(LeakyReLU(alpha=0.3))(k)
# k = Activation(Activation('relu'))(k)
# k = wave_net_activation(k)
k = Activation(act_fcn)(k)
# k = SpatialDropout1D(rate=dout)(k)
k = Conv1D(
1,
kernel_size=1,
padding="same",
dilation_rate=1,
kernel_initializer="he_normal",
)(k)
k = Activation(Activation("relu"))(k)
k = Flatten()(k)
k = Dense(64)(k)
k = Activation(LeakyReLU(alpha=0.3))(k)
# k = Dropout(dout)(k)
k = Dense(32)(k)
k = Activation(LeakyReLU(alpha=0.3))(k)
# k = Dropout(dout)(k)
k = Dense(16)(k)
k = Activation(LeakyReLU(alpha=0.3))(k)
k = Dense(classes)(k)
k = Activation("softmax")(k)
sequential_model = Model(inputs=input_sequences, outputs=k)
return sequential_model
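# Illustrative usage sketch (all shapes are placeholders): a pretrained
# per-frame recognition model is wrapped with TimeDistributed and the TCN
# head above to classify short image sequences.
# frame_model = classification_small(input_shape=(75, 75, 3), num_classes=64)
# behavior_net = recurrent_model_tcn(frame_model, recurrent_input_shape=(8, 75, 75, 3), classes=4)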
def recurrent_model_lstm(
recognition_model, recurrent_input_shape, classes=4, recurrent_dropout=None
):
"""Recurrent architecture for classification of temporal sequences of images based on LSTMs or GRUs.
This architecture is used for IdNet in SIPEC.
Parameters
----------
recognition_model : keras.model
Pretrained recognition model that extracts features for individual frames.
recurrent_input_shape : np.ndarray - (Time, Width, Height, Channels)
Shape of the images over time.
classes : int
Number of classes (e.g. individual identities) to recognise.
recurrent_dropout : float
Recurrent dropout factor to use.
Returns
-------
keras.model
IdNet
"""
input_sequences = Input(shape=recurrent_input_shape)
sequential_model_helper = TimeDistributed(recognition_model)(input_sequences)
k = BatchNormalization()(sequential_model_helper)
dout = 0.2
if recurrent_dropout:
# TODO: adjust bidirectional
k = LSTM(units=128, return_sequences=True, recurrent_dropout=recurrent_dropout)(
k
)
k = LSTM(units=64, return_sequences=True, recurrent_dropout=recurrent_dropout)(
k
)
k = LSTM(units=32, return_sequences=False, recurrent_dropout=recurrent_dropout)(
k
)
else:
# As of TF 2, one can just use GRU and there is no CuDNNGRU
k = Bidirectional(GRU(units=128, return_sequences=True))(k)
k = Activation(LeakyReLU(alpha=0.3))(k)
k = Bidirectional(GRU(units=64, return_sequences=True))(k)
k = Activation(LeakyReLU(alpha=0.3))(k)
k = Bidirectional(GRU(units=32, return_sequences=False))(k)
k = Activation(LeakyReLU(alpha=0.3))(k)
# k = Dense(256)(k)
# k = Activation('relu')(k)
# | |
# muts per sample (combine a single mut affecting several motifs in the same sample; groupBy mut_pos and sample_id)
#min_indiv_score_to_consider = mean_and_sd_from_the_simulations_indiv_sites_dict['mean']#+(1.96*mean_and_sd_from_the_simulations_indiv_sites_dict['std'])
#if min_indiv_score_to_consider ==0:
min_indiv_score_to_consider = 0#1.47 #:avg of single motifs genomewide (sanger sim) (FIX: make this dynamic)
if 'mean' in mean_and_sd_from_the_simulations_indiv_sites_dict.keys():
if mean_and_sd_from_the_simulations_indiv_sites_dict['mean'] >= 0 and mean_and_sd_from_the_simulations_indiv_sites_dict['mean']!='':
min_indiv_score_to_consider = mean_and_sd_from_the_simulations_indiv_sites_dict['mean']
#use the avg score dist from the given simulation to filterout muts with a lower score
if not use_estimates_from_simulation_set:
min_indiv_score_to_consider = 0
awk_stm = """awk 'BEGIN{FS=OFS="\t"}{""" + skip_line + awk_cond_st + """print $1,$2,$3,$4">"$5,$6,$9,$10":"$11"-"$12"#"$13"#"$15"#"$16"#"$17"#"$44,$26,$27,$44}' """ + annotated_mutations_final_output_file_ranked
awk_stm += " | sort -k1,1 -k2,2n -k3,3n -k6,6 -k5,5 " + """ | groupBy -g 1,2,3,6 -c 4,5,7,8,9,10 -o distinct,distinct,distinct,distinct,distinct,mean | awk 'BEGIN{FS=OFS="\t"}{gsub(",","|"); if($10>""" + str(min_indiv_score_to_consider) + """) print}' | sort -k1,1 -k2,2n -k3,3n """
os.system(awk_stm + " > " + annotated_mutations_final_output_file_scored_merged+"_unifiedpersample")
awk_stm=""
elements_muts_temp = '.'.join(elements_file.split('/')[-1].split('.')[0:-1])+'_'+annotated_mutations_final_output_file_scored_merged+"_unifiedpersample_temp"
if not only_unify_per_sample:
if elements_file!="none" and os.path.exists(elements_file):
if os.path.exists(annotated_mutations_final_output_file_scored_merged+"_unifiedpersample"):
if os.stat(annotated_mutations_final_output_file_scored_merged+"_unifiedpersample").st_size > 2:
number_cols_in_elements_file = 4
with open(elements_file, 'r') as elements_file_read:#get number of lines from the first line of the elements file; only the first four cols (pos, name) are used here
number_cols_in_elements_file = len(elements_file_read.readline().strip().split('\t'))
muts_unified_per_sample_obj = BedTool(annotated_mutations_final_output_file_scored_merged+"_unifiedpersample")
elements_file_obj = BedTool(elements_file)
elements_file_obj.intersect(muts_unified_per_sample_obj, wo =True, split=bed12_format_bool).saveas(elements_muts_temp)
#combine muts cross samples and blocks of the given element (sum of scores)
awk_stm = ("cut -f1-4," + str(number_cols_in_elements_file+1) + "- " + elements_muts_temp + """ | awk 'BEGIN{FS=OFS="\t"}{print $1,$2,$3,$4,$8,$9,$10,$5":"$6"-"$7"@"$8"@"$9"@"$10"@"$11"@"$12"@"$13"@"$14,$12,$13,$14}' | sort -k1,1 -k2,2n -k3,3n -k4,4 | groupBy -g 1,2,3,4 -c 5,5,6,7,8,9,10,11 -o collapse,count,collapse,distinct,collapse,distinct,distinct,sum""")
#Input: element_chr, element_start,element_end, elementID, mut_chr, start, end, sampleID, refalt, tumorType, info, chromatinDomain, repliDomain, score
else:
print('no muts have a score larger than: ' + str(min_indiv_score_to_consider))
#intermediate (prepared for groupBy): element_chr, element_start,element_end, elementID, sampleID, tumorType, chromatin_label, repliLabel, score, mut_chr:start-end@sampleID@refalt@tumorType@info@chromatinDomain@repliDomain@score
#final statement: element_chr, element_start,element_end, elementID, sampleID, sample_count, refToalt, tumorTypes, chromatin_label, repliLabel, sum_score, mut_chr:start-end@sampleID@refalt@tumorType@info@chromatinDomain@repliDomain@score
else:
#final statement: m_chr, m_start, m_end, mID, sampleID, sample_count, refToalt, tumorTypes, motifs_info, chromatin_label, repliLabel, scores, sum_score
awk_stm = """mergeBed -i """ + annotated_mutations_final_output_file_scored_merged+"_unifiedpersample" + """ -d """ + str(window_overlap_to_merge) + """ -c 4,4,5,6,7,8,9,10,10 -o collapse,count,collapse,collapse,collapse,distinct,distinct,collapse,sum | awk 'BEGIN{FS=OFS="\t"}{split($6,nucl_split,">"); if(length(nucl_split[1])==length(nucl_split[2]) && ($3-$2+1 > length(nucl_split[1])) && nucl_split[1]!="-" && nucl_split[2]!="-"){$2=$2+1; $3=$3-1;} print $1,$2,$3,$1":"$2"-"$3,$4,$5,$6,$7,$8,$9,$10,$11,$12}'""" #mergeBed adds one base to the start pos and one to the end, so they have to be extracted again
#print awk_stm + " > " + annotated_mutations_final_output_file_scored_merged
os.system(awk_stm + " > " + annotated_mutations_final_output_file_scored_merged)
if os.path.exists(elements_muts_temp):
os.remove(elements_muts_temp)
if os.path.exists(annotated_mutations_final_output_file_scored_merged+"_unifiedpersample"):
os.remove(annotated_mutations_final_output_file_scored_merged+"_unifiedpersample")
return annotated_mutations_final_output_file_scored_merged
def process_annotated_scored_mutations_rankedMutsbyPercentile(annotated_mutations_final_output_file_ranked, annotated_mutations_final_output_file_scored_merged, window_overlap_to_merge = 20):
col_names_to_report = ['MutChr', 'MutStart', 'MutStop', 'Ref', 'Alt', 'Cancer-project', 'Mut_score', 'vcf_info', 'sample_file_id',
'MotifChr', 'MotifStart', 'MotifEnd', 'Motif_name', 'Motif_score', 'Motif_strand', 'Entropy_diff', 'MutMotif_position',
'normal_expression_level_of_this_motif', 'tumor_expression_level_of_this_motif',
'same_factor_overlaps', 'all_factors_same_cell_overlaps', 'dnase_overlap', 'contactingdomain_overlap', 'loopdomain_overlap', 'cage_overlap',
'chromhmm_overlap_to_report', 'replicationtiming_label_overlap_to_report','WAScore', 'rank']
col_numbers_to_report = []
col_number_for_filtering = 63
filter_value = 0.95
for c in col_names_to_report:
if c in col_names:
#get col number for awk
col_numbers_to_report.append('$' + str(col_names.index(c)+1))
else:
del col_names_to_report[col_names_to_report.index(c)]
col_names_to_sortBy = ['MutChr', 'MutStart', 'MutStop', 'Cancer-project', 'Mut_score', 'vcf_info', 'sample_file_id']
col_numbers_to_sortBy = []
numeric_columns_sortBy = ['MutStart', 'MutStop']
for c in col_names_to_sortBy:
if c in col_names:
#get col number for awk
if c in numeric_columns_sortBy:
col_numbers_to_sortBy.append('-k' + str(col_names.index(c)+1) + ',' + str(col_names.index(c)+1)+'n')
else:
col_numbers_to_sortBy.append('-k' + str(col_names.index(c)+1) + ',' + str(col_names.index(c)+1))
else:
del col_names_to_sortBy[col_names_to_sortBy.index(c)]
awk_stm = ("""awk 'BEGIN{FS=OFS="\t"}{if($""" + str(col_number_for_filtering) + ' >= ' + str(filter_value) + """){gsub(",","+"); print """ + ','.join(col_numbers_to_report[0:3]) + "," + col_numbers_to_report[8] + ',' + '"@"'.join(col_numbers_to_report[0:9]) + "," + col_numbers_to_report[12] + ',' + col_numbers_to_report[-2] + ',' + col_numbers_to_report[-1] + ',' + '"@"'.join(col_numbers_to_report[9::]) + """}}' """ + annotated_mutations_final_output_file_ranked +
" | sort -k1,1 -k2,2n -k3,3n -k4,4 " +
"""| groupBy -g 1,2,3,4 -c 5,6,7,8,9 -o distinct,collapse,max,max,collapse """ + #report each mutaton only once
" | sort -k1,1 -k2,2n -k3,3n | " + #merge muts withing a window
" mergeBed -d " + str(window_overlap_to_merge) + " -c 4,4,5,6,7,8,9 -o count,distinct,collapse,collapse,sum,sum,collapse | sort -k4,4nr | " +
""" awk 'BEGIN{FS=OFS="\t"}{if($2!=$3){$2=$2+1; $3=$3-1;} print}' > """ + #mergeBed adds one base to the start pos and one to the end, so they have to be extracted again
annotated_mutations_final_output_file_scored_merged)
os.system(awk_stm)
def get_mean_and_sd_from_simulation_sets(scored_simulation_input_file, mean_and_sd_output_file, score_index_simulation_file=-1):
mean_std_median_dict = {}
if os.path.exists(mean_and_sd_output_file):
print("reading mean and sd from an existsing fiel: " + mean_and_sd_output_file)
with open(mean_and_sd_output_file, 'r') as mean_and_sd_read:
lines = mean_and_sd_read.readlines()
for l in lines:
if len(l.split('\t'))==2:
mean_std_median_dict[l.strip().split('\t')[0]] = float(l.strip().split('\t')[1])
else:
scores_from_simulations = []
with open(scored_simulation_input_file, 'r') as scored_simulation_read:
l = scored_simulation_read.readline()
while l!="":
scores_from_simulations.append(float(l.split('\t')[score_index_simulation_file].strip()))
l = scored_simulation_read.readline()
mean_std_median_dict['mean'] = np.mean(scores_from_simulations)
mean_std_median_dict['std'] = np.std(scores_from_simulations)
mean_std_median_dict['median'] = np.median(scores_from_simulations)
if mean_and_sd_output_file!='none':
with open(mean_and_sd_output_file, 'w') as mean_and_sd_write:
for k in mean_std_median_dict.keys():
mean_and_sd_write.write(k + '\t' + str(mean_std_median_dict[k]) + '\n')
return mean_std_median_dict
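# Illustrative usage sketch (file names are placeholders): summarise the score
# distribution of a simulation set so that observed scores can later be
# compared against it.
# sim_stats = get_mean_and_sd_from_simulation_sets(
#     "simulated_muts_scored.tsv", "simulated_muts_mean_sd.txt",
#     score_index_simulation_file=-1)
# print(sim_stats['mean'], sim_stats['std'], sim_stats['median'])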
def get_pvalues_per_element_score(annotated_mutations_statcalc_output_file, annotated_mutations_statcalc_output_file_scores_significance, mean_and_sd_from_the_simulations, element_score_index_obs_file=-3):
if 'mean' in mean_and_sd_from_the_simulations.keys() and 'std' in mean_and_sd_from_the_simulations.keys():
with open(annotated_mutations_statcalc_output_file, 'r') as annotated_mutations_statcalc_read:
with open(annotated_mutations_statcalc_output_file_scores_significance, 'w') as annotated_mutations_statcalc_output_file_scores_significance_write:
read_line = annotated_mutations_statcalc_read.readline()
while read_line != "":
score_current_line = float(read_line.split('\t')[element_score_index_obs_file].strip())
z_score = (score_current_line - mean_and_sd_from_the_simulations['mean'])/mean_and_sd_from_the_simulations['std']
p_value = stats.norm.sf(z_score)
annotated_mutations_statcalc_output_file_scores_significance_write.write(read_line.strip() + '\t' + str(p_value) + '\n')
read_line = annotated_mutations_statcalc_read.readline()
else:
print("mean or std is not provided in the given file")
return annotated_mutations_statcalc_output_file_scores_significance
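# Worked example of the significance calculation above (numbers are
# hypothetical): with a simulation mean of 1.2 and std of 0.4, an observed
# element score of 2.0 gives z = (2.0 - 1.2) / 0.4 = 2.0 and a one-sided
# p-value of stats.norm.sf(2.0) ~= 0.0228.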
def get_matching_cells_tracks_per_tumor(list_of_cell_tracks, tumor_cells_dict):
tumor_cells_tracks_dict = {}
for tumor in tumor_cells_dict:
for cell_track in list_of_cell_tracks:
if cell_track.split('#')[0] in tumor_cells_dict[tumor]:
if tumor not in tumor_cells_tracks_dict:
tumor_cells_tracks_dict[tumor] = []
tumor_cells_tracks_dict[tumor].append(cell_track)
return tumor_cells_tracks_dict
def get_params(params_list):
params = {}
for arg in params_list:#priority is for the command line
if '=' in arg:
if len(arg.strip().split('='))==2:
if arg.split('=')[0] not in params.keys():
params[arg.strip().split('=')[0]] = arg.strip().split('=')[1]
if 'param_file' in params:
with open(params['param_file'], 'r') as params_infile:
params_from_file = params_infile.readlines()
for line in params_from_file:
if not line.startswith('//') and not line.startswith('#') and '=' in line:
if len(line.strip().split('='))==2:
if line.strip().split('=')[0] not in params.keys():
params[line.strip().split('=')[0]] = line.strip().split('=')[1].split('#')[0].split('//')[0]
return params
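# Illustrative usage sketch (argument names are placeholders): command-line
# "key=value" pairs take precedence over entries read from an optional
# parameter file.
# params = get_params(['prog', 'param_file=analysis_params.conf',
#                      'mutations_dir=muts/', 'run_training_arg=False'])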
def get_value(value):
    """Interpret a parameter string as a boolean flag."""
    return 'true' in value.lower() or 'yes' in value.lower()
if __name__ == '__main__':
params = get_params(sys.argv)
if len(params.keys())==0:
usage()
print("Given params", params)
run_training = get_value(params['run_training_arg'])
take_abs_entropy_diff = get_value(params['take_abs_entropy_diff_arg'])
use_gene_expression_for_scoring = get_value(params['use_gene_expression_for_scoring_arg'])
header = get_value(params['header_param'])
rank_scores = get_value(params['rank_scores_param'])
compute_significance = get_value(params['compute_significance_param'])
compute_score_sig = get_value(params['compute_score_sig_param'])
run_in_parallele = get_value(params['run_in_parallele_param'])
remove_temp_files = get_value(params['remove_temp_files'])
use_estimates_from_simulation_set = get_value(params['use_estimates_from_simulation_set'])
only_unify_per_sample = False
retrieve_estimates_from_simulation_set = get_value(params['retrieve_estimates_from_simulation_set'])
bed12_format_bool = get_value(params['bed12_format_bool'])
col_names = get_col_names()
#if not run_training:
# col_names.append("final_mut_motif_tracks_score")
weights_per_param_dict = {}
if not run_training:
if os.path.exists(params['weights_per_param_dict_arg_file']):
with open(params['weights_per_param_dict_arg_file'], 'r') as weights_per_param_dict_arg_infile:
param_weights_from_input_file = weights_per_param_dict_arg_infile.readline().strip().split(',')
for element in param_weights_from_input_file:
if len(element.split('='))==2:
if element.split('=')[0] not in weights_per_param_dict.keys():
weights_per_param_dict[element.strip().split('=')[0]] = float(element.strip().split('=')[1])
list_of_file_tracks_to_add_cell_tracks = []
list_of_file_tracks_to_add_cell_tracks.append(params['ContactingDomains_file_path'])
list_of_file_tracks_to_add_cell_tracks.append(params['LoopDomains_file_path'])
list_of_file_tracks_to_add_cell_tracks.append(params['ReplicationDomains_file_path'])
list_of_file_tracks_to_add_cell_tracks.append(params['CAGE_expr_file_path'])
Mutations_dir_list = []#if a directory is given then list its content otherwise put the file in the list
if os.path.isdir(params['mutations_dir']):
for f in os.listdir(params['mutations_dir']):
Mutations_dir_list.append(params['mutations_dir']+"/"+f)
elif os.path.isfile(params['mutations_dir']):#in such case mutations_dir is not a dir but a file
Mutations_dir_list = [params['mutations_dir']]
mean_and_sd_from_the_simulations_indiv_sites_dict = {'mean': 0, 'std': 0, 'median': 0}
if not os.path.exists(params['annotated_mutations_final_output_file']):
if use_estimates_from_simulation_set and not retrieve_estimates_from_simulation_set:
if os.path.exists(params['scored_simulation_input_file']+"_withoutfilterunifiedpersample") and params['mean_and_sd_output_file']!='none':
print('Getting mean and Std from: ' + params['scored_simulation_input_file']+"_withoutfilterunifiedpersample")
mean_and_sd_from_the_simulations_indiv_sites_dict = get_mean_and_sd_from_simulation_sets(params['scored_simulation_input_file']+"_withoutfilterunifiedpersample", params['mean_and_sd_output_file']+"_withoutfilterunifiedpersample", score_index_simulation_file=int(params['score_index_simulation_file']))
else:
print("using the default values for mean and sd (0,0) for single sites beacuse the score_per_site file doesn't exist")
list_of_cell_tracks, list_tf_names_from_tracks, assay_type_cell_tracks_dict = retreive_cell_elment_datasets(params['CellInfo_target_dir'], list_of_file_tracks_to_add_cell_tracks)
tumor_cells_dict = map_cellNames_to_originTypes(params['TumorCellInfo_matches_dict'])
tissue_cells_dict = map_cellNames_to_originTypes(params['TissueCellInfo_matches_dict'])
motifTFName_TFNames_matches_dict = retreive_TFFamilyName_for_motifNames(params['TF_family_matches_file'])
tumor_cells_tracks_dict = get_matching_cells_tracks_per_tumor(list_of_cell_tracks, tumor_cells_dict)
#use list_of_cell_tracks to gather expression level of each TF in each cell
#gene expression values from the normal samples input file
normal_expression_per_cell_per_TF = {}
tf_names_to_extract_gene_expression_for = []  # list_tf_names_from_tracks; get names of TFs from the TFFamily file and the dirs containing ChIP-seq datasets
for k in motifTFName_TFNames_matches_dict.keys():
tf_names_to_extract_gene_expression_for.append(k)
tf_names_to_extract_gene_expression_for = list(set(tf_names_to_extract_gene_expression_for))
if os.path.exists(params['normal_gene_expression_inputfile']) and os.path.exists(params['metadata_samples_normal_gene_expression_file']):
dict_tissue_type_sampleIDs = parse_GTEx_metadafile(params['metadata_samples_normal_gene_expression_file'])
origin_gene_expression_values = get_expression_level_per_originType_per_TF(tf_names_to_extract_gene_expression_for, dict_tissue_type_sampleIDs, params['normal_gene_expression_inputfile'])
print("Getting expression per cell#TF in normal samples")
origin_gene_expression_values_outputfile = params['normal_gene_expression_inputfile'] + "_perCell_perTF"
normal_expression_per_cell_per_TF = get_expression_level_per_cell_per_TF(tf_names_to_extract_gene_expression_for, origin_gene_expression_values, tissue_cells_dict, origin_gene_expression_values_outputfile)
#gene expression values from the tumor samples input file
tumor_expression_per_cell_per_TF = {}
if os.path.exists(params['tumor_gene_expression_inputfile']) and os.path.exists(params['metadata_samples_tumor_gene_expression_file']):
dict_tissue_type_sampleIDs = parse_GTEx_metadafile(params['metadata_samples_tumor_gene_expression_file'])
origin_gene_expression_values = get_expression_level_per_originType_per_TF(tf_names_to_extract_gene_expression_for, dict_tissue_type_sampleIDs, params['tumor_gene_expression_inputfile'])
print("Getting expression per cell#TF in tumor samples")
origin_gene_expression_values_outputfile = params['normal_gene_expression_inputfile'] + "_perCell_perTF"
tumor_expression_per_cell_per_TF = get_expression_level_per_cell_per_TF(tf_names_to_extract_gene_expression_for, origin_gene_expression_values, tumor_cells_dict, origin_gene_expression_values_outputfile)
mutations_motifs_scored_all_chromatin_makrs_all_cells_grouped_annotated_files = []
if not os.path.exists(params['all_chromatin_makrs_all_cells_combined_dir_path']):
dict_cell_lines_info = parse_cellinfodict_to_populate_data(params['CellInfo_dict_file'], cell_names_start_with="#")
populate_cellinfo_dirs(dict_cell_lines_info, params['CellInfo_target_dir'])
os.mkdir(params['all_chromatin_makrs_all_cells_combined_dir_path'])
print("Generating chromatin data for all the cells")
os.system("cat " + params['CellInfo_target_dir'] + "/*/ChIP-seq/*ChIP-seq.bed4 " | |
# KEYBDINPUT, SendInput, WM_KEYDOWN, and WM_KEYUP
(0xE8, 0), # Unassigned
# (0xE9-F5, 0), # OEM specific
(0xF6, 0), # Attn key
(0xF7, 0), # CrSel key
(0xF8, 0), # ExSel key
(0xF9, 222), # Erase EOF key
(0xFA, 207), # Play key
(0xFB, 0x174), # Zoom key
(0xFC, 0), # Reserved
(0xFD, 0x19b), # PA1 key
(0xFE, 0x163), # Clear key
(0xFF, 185)
)
MAC_EVENT_CODES = (
# NSLeftMouseDown Quartz.kCGEventLeftMouseDown
(1, ("Key", 0x110, 1, 589825)),
# NSLeftMouseUp Quartz.kCGEventLeftMouseUp
(2, ("Key", 0x110, 0, 589825)),
# NSRightMouseDown Quartz.kCGEventRightMouseDown
(3, ("Key", 0x111, 1, 589826)),
# NSRightMouseUp Quartz.kCGEventRightMouseUp
(4, ("Key", 0x111, 0, 589826)),
(5, (None, 0, 0, 0)), # NSMouseMoved Quartz.kCGEventMouseMoved
(6, (None, 0, 0, 0)), # NSLeftMouseDragged Quartz.kCGEventLeftMouseDragged
# NSRightMouseDragged Quartz.kCGEventRightMouseDragged
(7, (None, 0, 0, 0)),
(8, (None, 0, 0, 0)), # NSMouseEntered
(9, (None, 0, 0, 0)), # NSMouseExited
(10, (None, 0, 0, 0)), # NSKeyDown
(11, (None, 0, 0, 0)), # NSKeyUp
(12, (None, 0, 0, 0)), # NSFlagsChanged
(13, (None, 0, 0, 0)), # NSAppKitDefined
(14, (None, 0, 0, 0)), # NSSystemDefined
(15, (None, 0, 0, 0)), # NSApplicationDefined
(16, (None, 0, 0, 0)), # NSPeriodic
(17, (None, 0, 0, 0)), # NSCursorUpdate
(22, (None, 0, 0, 0)), # NSScrollWheel Quartz.kCGEventScrollWheel
(23, (None, 0, 0, 0)), # NSTabletPoint Quartz.kCGEventTabletPointer
(24, (None, 0, 0, 0)), # NSTabletProximity Quartz.kCGEventTabletProximity
(25, (None, 0, 0, 0)), # NSOtherMouseDown Quartz.kCGEventOtherMouseDown
(25.2, ("Key", 0x112, 1, 589827)), # BTN_MIDDLE
(25.3, ("Key", 0x113, 1, 589828)), # BTN_SIDE
(25.4, ("Key", 0x114, 1, 589829)), # BTN_EXTRA
(26, (None, 0, 0, 0)), # NSOtherMouseUp Quartz.kCGEventOtherMouseUp
(26.2, ("Key", 0x112, 0, 589827)), # BTN_MIDDLE
(26.3, ("Key", 0x113, 0, 589828)), # BTN_SIDE
(26.4, ("Key", 0x114, 0, 589829)), # BTN_EXTRA
(27, (None, 0, 0, 0)), # NSOtherMouseDragged
(29, (None, 0, 0, 0)), # NSEventTypeGesture
(30, (None, 0, 0, 0)), # NSEventTypeMagnify
(31, (None, 0, 0, 0)), # NSEventTypeSwipe
(18, (None, 0, 0, 0)), # NSEventTypeRotate
(19, (None, 0, 0, 0)), # NSEventTypeBeginGesture
(20, (None, 0, 0, 0)), # NSEventTypeEndGesture
(27, (None, 0, 0, 0)), # Quartz.kCGEventOtherMouseDragged
(32, (None, 0, 0, 0)), # NSEventTypeSmartMagnify
(33, (None, 0, 0, 0)), # NSEventTypeQuickLook
(34, (None, 0, 0, 0)), # NSEventTypePressure
)
MAC_KEYS = (
(0x00, 30), # kVK_ANSI_A
(0x01, 31), # kVK_ANSI_S
(0x02, 32), # kVK_ANSI_D
(0x03, 33), # kVK_ANSI_F
(0x04, 35), # kVK_ANSI_H
(0x05, 34), # kVK_ANSI_G
(0x06, 44), # kVK_ANSI_Z
(0x07, 45), # kVK_ANSI_X
(0x08, 46), # kVK_ANSI_C
(0x09, 47), # kVK_ANSI_V
(0x0B, 48), # kVK_ANSI_B
(0x0C, 16), # kVK_ANSI_Q
(0x0D, 17), # kVK_ANSI_W
(0x0E, 18), # kVK_ANSI_E
(0x0F, 19), # kVK_ANSI_R
(0x10, 21), # kVK_ANSI_Y
(0x11, 20), # kVK_ANSI_T
(0x12, 2), # kVK_ANSI_1
(0x13, 3), # kVK_ANSI_2
(0x14, 4), # kVK_ANSI_3
(0x15, 5), # kVK_ANSI_4
(0x16, 7), # kVK_ANSI_6
(0x17, 6), # kVK_ANSI_5
(0x18, 13), # kVK_ANSI_Equal
(0x19, 10), # kVK_ANSI_9
(0x1A, 8), # kVK_ANSI_7
(0x1B, 12), # kVK_ANSI_Minus
(0x1C, 9), # kVK_ANSI_8
(0x1D, 11), # kVK_ANSI_0
(0x1E, 27), # kVK_ANSI_RightBracket
(0x1F, 24), # kVK_ANSI_O
(0x20, 22), # kVK_ANSI_U
(0x21, 26), # kVK_ANSI_LeftBracket
(0x22, 23), # kVK_ANSI_I
(0x23, 25), # kVK_ANSI_P
(0x25, 38), # kVK_ANSI_L
(0x26, 36), # kVK_ANSI_J
(0x27, 40), # kVK_ANSI_Quote
(0x28, 37), # kVK_ANSI_K
(0x29, 39), # kVK_ANSI_Semicolon
(0x2A, 43), # kVK_ANSI_Backslash
(0x2B, 51), # kVK_ANSI_Comma
(0x2C, 53), # kVK_ANSI_Slash
(0x2D, 49), # kVK_ANSI_N
(0x2E, 50), # kVK_ANSI_M
(0x2F, 52), # kVK_ANSI_Period
(0x32, 41), # kVK_ANSI_Grave
(0x41, 83), # kVK_ANSI_KeypadDecimal
(0x43, 55), # kVK_ANSI_KeypadMultiply
(0x45, 78), # kVK_ANSI_KeypadPlus
(0x47, 69), # kVK_ANSI_KeypadClear
(0x4B, 98), # kVK_ANSI_KeypadDivide
(0x4C, 96), # kVK_ANSI_KeypadEnter
(0x4E, 74), # kVK_ANSI_KeypadMinus
(0x51, 117), # kVK_ANSI_KeypadEquals
(0x52, 82), # kVK_ANSI_Keypad0
(0x53, 79), # kVK_ANSI_Keypad1
(0x54, 80), # kVK_ANSI_Keypad2
(0x55, 81), # kVK_ANSI_Keypad3
(0x56, 75), # kVK_ANSI_Keypad4
(0x57, 76), # kVK_ANSI_Keypad5
(0x58, 77), # kVK_ANSI_Keypad6
(0x59, 71), # kVK_ANSI_Keypad7
(0x5B, 72), # kVK_ANSI_Keypad8
(0x5C, 73), # kVK_ANSI_Keypad9
(0x24, 28), # kVK_Return
(0x30, 15), # kVK_Tab
(0x31, 57), # kVK_Space
(0x33, 111), # kVK_Delete
(0x35, 1), # kVK_Escape
(0x37, 125), # kVK_Command
(0x38, 42), # kVK_Shift
(0x39, 58), # kVK_CapsLock
(0x3A, 56), # kVK_Option
(0x3B, 29), # kVK_Control
(0x3C, 54), # kVK_RightShift
(0x3D, 100), # kVK_RightOption
(0x3E, 126), # kVK_RightControl
(0x36, 126), # Right Meta
(0x3F, 0x1d0), # kVK_Function
(0x40, 187), # kVK_F17
(0x48, 115), # kVK_VolumeUp
(0x49, 114), # kVK_VolumeDown
(0x4A, 113), # kVK_Mute
(0x4F, 188), # kVK_F18
(0x50, 189), # kVK_F19
(0x5A, 190), # kVK_F20
(0x60, 63), # kVK_F5
(0x61, 64), # kVK_F6
(0x62, 65), # kVK_F7
(0x63, 61), # kVK_F3
(0x64, 66), # kVK_F8
(0x65, 67), # kVK_F9
(0x67, 87), # kVK_F11
(0x69, 183), # kVK_F13
(0x6A, 186), # kVK_F16
(0x6B, 184), # kVK_F14
(0x6D, 68), # kVK_F10
(0x6F, 88), # kVK_F12
(0x71, 185), # kVK_F15
(0x72, 138), # kVK_Help
(0x73, 102), # kVK_Home
(0x74, 104), # kVK_PageUp
(0x75, 111), # kVK_ForwardDelete
(0x76, 62), # kVK_F4
(0x77, 107), # kVK_End
(0x78, 60), # kVK_F2
(0x79, 109), # kVK_PageDown
(0x7A, 59), # kVK_F1
(0x7B, 105), # kVK_LeftArrow
(0x7C, 106), # kVK_RightArrow
(0x7D, 108), # kVK_DownArrow
(0x7E, 103), # kVK_UpArrow
(0x0A, 170), # kVK_ISO_Section
(0x5D, 124), # kVK_JIS_Yen
(0x5E, 92), # kVK_JIS_Underscore
(0x5F, 95), # kVK_JIS_KeypadComma
(0x66, 94), # kVK_JIS_Eisu
(0x68, 90) # kVK_JIS_Kana
)
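# Illustrative lookup sketch (not part of the library API): MAC_KEYS maps
# macOS virtual key codes to Linux/evdev key codes, so a dict built from the
# pairs above translates Quartz/AppKit key events into evdev-style codes.
# mac_to_evdev = dict(MAC_KEYS)
# assert mac_to_evdev[0x00] == 30  # kVK_ANSI_A -> KEY_A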
# We have yet to support force feedback but probably should
# eventually:
FORCE_FEEDBACK = () # Motor in gamepad
FORCE_FEEDBACK_STATUS = () # Status of motor
POWER = () # Power switch
# These two are internal workings of evdev we probably will never care
# about.
MAX = ()
CURRENT = ()
EVENT_MAP = (
('types', EVENT_TYPES),
('type_codes', ((value, key) for key, value in EVENT_TYPES)),
('wincodes', WINCODES),
('specials', SPECIAL_DEVICES),
('xpad', XINPUT_MAPPING),
('Sync', SYNCHRONIZATION_EVENTS),
('Key', KEYS_AND_BUTTONS),
('Relative', RELATIVE_AXES),
('Absolute', ABSOLUTE_AXES),
('Misc', MISC_EVENTS),
('Switch', SWITCH_EVENTS),
('LED', LEDS),
('LED_type_codes', LED_TYPE_CODES),
('Sound', SOUNDS),
('Repeat', AUTOREPEAT_VALUES),
('ForceFeedback', FORCE_FEEDBACK),
('Power', POWER),
('ForceFeedbackStatus', FORCE_FEEDBACK_STATUS),
('Max', MAX),
('Current', CURRENT))
# Evdev style paths for the Mac
APPKIT_KB_PATH = "/dev/input/by-id/usb-AppKit_Keyboard-event-kbd"
QUARTZ_MOUSE_PATH = "/dev/input/by-id/usb-Quartz_Mouse-event-mouse"
APPKIT_MOUSE_PATH = "/dev/input/by-id/usb-AppKit_Mouse-event-mouse"
# Now comes all the structs we need to parse the infomation coming
# from Windows.
class KBDLLHookStruct(ctypes.Structure):
"""Contains information about a low-level keyboard input event.
For full details see Microsoft's documentation:
https://msdn.microsoft.com/en-us/library/windows/desktop/
ms644967%28v=vs.85%29.aspx
"""
# pylint: disable=too-few-public-methods
_fields_ = [("vk_code", DWORD),
("scan_code", DWORD),
("flags", DWORD),
("time", ctypes.c_int)]
class MSLLHookStruct(ctypes.Structure):
"""Contains information about a low-level mouse input event.
For full details see Microsoft's documentation:
https://msdn.microsoft.com/en-us/library/windows/desktop/
ms644970%28v=vs.85%29.aspx
"""
# pylint: disable=too-few-public-methods
_fields_ = [("x_pos", ctypes.c_long),
("y_pos", ctypes.c_long),
('reserved', ctypes.c_short),
('mousedata', ctypes.c_short),
("flags", DWORD),
("time", DWORD),
("extrainfo", ctypes.c_ulong)]
class XinputGamepad(ctypes.Structure):
"""Describes the current state of the Xbox 360 Controller.
For full details see Microsoft's documentation:
https://msdn.microsoft.com/en-us/library/windows/desktop/
microsoft.directx_sdk.reference.xinput_gamepad%28v=vs.85%29.aspx
"""
# pylint: disable=too-few-public-methods
_fields_ = [
('buttons', ctypes.c_ushort), # wButtons
('left_trigger', ctypes.c_ubyte), # bLeftTrigger
('right_trigger', ctypes.c_ubyte), # bLeftTrigger
('l_thumb_x', ctypes.c_short), # sThumbLX
('l_thumb_y', ctypes.c_short), # sThumbLY
('r_thumb_x', ctypes.c_short), # sThumbRx
('r_thumb_y', ctypes.c_short), # sThumbRy
]
class XinputState(ctypes.Structure):
"""Represents the state of a controller.
For full details see Microsoft's documentation:
https://msdn.microsoft.com/en-us/library/windows/desktop/
microsoft.directx_sdk.reference.xinput_state%28v=vs.85%29.aspx
"""
# pylint: disable=too-few-public-methods
_fields_ = [
('packet_number', ctypes.c_ulong), # dwPacketNumber
('gamepad', XinputGamepad), # Gamepad
]
class XinputVibration(ctypes.Structure):
"""Specifies motor speed levels for the vibration function of a
controller.
For full details see Microsoft's documentation:
https://msdn.microsoft.com/en-us/library/windows/desktop/
microsoft.directx_sdk.reference.xinput_vibration%28v=vs.85%29.aspx
"""
# pylint: disable=too-few-public-methods
_fields_ = [("wLeftMotorSpeed", ctypes.c_ushort),
("wRightMotorSpeed", ctypes.c_ushort)]
if sys.version_info.major == 2:
# pylint: disable=redefined-builtin
class PermissionError(IOError):
"""Raised when trying to run an operation without the adequate access
rights - for example filesystem permissions. Corresponds to errno
EACCES and EPERM."""
class UnpluggedError(RuntimeError):
"""The device requested is not plugged in."""
pass
class NoDevicePath(RuntimeError):
"""No evdev device path was given."""
pass
class UnknownEventType(IndexError):
"""We don't know what this event is."""
pass
class UnknownEventCode(IndexError):
"""We don't know what this event is."""
pass
class InputEvent(object): # pylint: disable=useless-object-inheritance
"""A user event."""
# pylint: disable=too-few-public-methods
def __init__(self,
device,
event_info):
self.device = device
self.timestamp = event_info["timestamp"]
self.code = event_info["code"]
self.state = event_info["state"]
self.ev_type = event_info["ev_type"]
class BaseListener(object): # pylint: disable=useless-object-inheritance
"""Loosely emulate Evdev keyboard behaviour on other platforms.
Listen (hook in Windows terminology) for key events then buffer
them in a pipe.
"""
def __init__(self, pipe, events=None, codes=None):
self.pipe = pipe
self.events = events if events else []
self.codes = codes if codes else None
self.app = None
self.timeval = None
| |
sv_g)
dH = H1 - H_ref_LS
H_gas = 1e-3*dH*MW_g #property_mass_to_molar(dH, MW_g)
return H_gas
def S_model_g(T, P, zs):
MW_g, sv_g = 0.0, 0.0
for i in cmps:
MW_g += MWs[i]*zs[i]
sv_g += n_atoms[i]*zs[i]
sv_g /= MW_g
S_ref_LS = Lastovka_Shaw_integral_over_T(T_ref, sv_g)
S1 = Lastovka_Shaw_integral_over_T(T, sv_g)
dS = S1 - S_ref_LS
S_gas = 1e-3*dS*MW_g
return S_gas
def H_model_l(T, P, zs):
MW_l, sv_l, Tc_l, omega_l = 0.0, 0.0, 0.0, 0.0
for i in cmps:
MW_l += MWs[i]*zs[i]
sv_l += n_atoms[i]*zs[i]
Tc_l += Tcs[i]*zs[i]
omega_l += omegas[i]*zs[i]
sv_l /= MW_l
H_ref_DS = Dadgostar_Shaw_integral(T_ref, sv_l)
H1 = Dadgostar_Shaw_integral(T, sv_l)
Hvap = SMK(T, Tc_l, omega_l)
dH = H1 - H_ref_DS
H_liq = 1e-3*dH*MW_l #property_mass_to_molar(dH, MW_l)
return (H_liq - Hvap)
def S_model_l(T, P, zs):
MW_l, sv_l, Tc_l, omega_l = 0.0, 0.0, 0.0, 0.0
for i in cmps:
MW_l += MWs[i]*zs[i]
sv_l += n_atoms[i]*zs[i]
Tc_l += Tcs[i]*zs[i]
omega_l += omegas[i]*zs[i]
sv_l /= MW_l
S_ref_DS = Dadgostar_Shaw_integral_over_T(T_ref, sv_l)
S1 = Dadgostar_Shaw_integral_over_T(T, sv_l)
Hvap = SMK(T, Tc_l, omega_l)
dS = S1 - S_ref_DS
S_liq = 1e-3*dS*MW_l
return (S_liq - Hvap/T)
elif method == IDEAL_WILSON:
HeatCapacityGases = correlations.HeatCapacityGases
EnthalpyVaporizations = correlations.EnthalpyVaporizations
def flash_model(T, P, zs):
_, _, VF, xs, ys = flash_wilson(zs, constants.Tcs, constants.Pcs, constants.omegas, T=T, P=P)
return VF, xs, ys
def H_model_g(T, P, zs):
H_calc = 0.
for i in cmps:
H_calc += zs[i]*HeatCapacityGases[i].T_dependent_property_integral(T_ref, T)
return H_calc
def S_model_g(T, P, zs):
S_calc = 0.
for i in cmps:
S_calc += zs[i]*HeatCapacityGases[i].T_dependent_property_integral_over_T(T_ref, T)
return S_calc
def H_model_l(T, P, zs):
H_calc = 0.
for i in cmps:
H_calc += zs[i]*(HeatCapacityGases[i].T_dependent_property_integral(T_ref, T) - EnthalpyVaporizations[i](T))
return H_calc
def S_model_l(T, P, zs):
S_calc = 0.
T_inv = 1.0/T
for i in cmps:
S_calc += zs[i]*(HeatCapacityGases[i].T_dependent_property_integral_over_T(T_ref, T) - T_inv*EnthalpyVaporizations[i](T))
return S_calc
try:
# All three variables P, T, V are positive but can grow unbounded, so
# for the secant method, only set the one variable
if iter_T:
guess = 298.15
elif iter_P:
guess = 101325.0
elif iter_V:
guess = 0.024465403697038125
val = secant(err, guess, xtol=xtol, ytol=ytol,
maxiter=maxiter, bisection=True, low=min_bound, require_xtol=False)
return val, info[0], info[1], info[2]
except (UnconvergedError,) as e:
val = brenth(err, min_bound, max_bound, xtol=xtol, ytol=ytol, maxiter=maxiter)
return val, info[0], info[1], info[2]
global cm_flash
cm_flash = None
def cm_flash_tol():
global cm_flash
if cm_flash is not None:
return cm_flash
from matplotlib.colors import ListedColormap
N = 100
vals = np.zeros((N, 4))
vals[:, 3] = np.ones(N)
# Grey for 1e-10 to 1e-7
low = 40
vals[:low, 0] = np.linspace(100/256, 1, low)[::-1]
vals[:low, 1] = np.linspace(100/256, 1, low)[::-1]
vals[:low, 2] = np.linspace(100/256, 1, low)[::-1]
# green 1e-6 to 1e-5
ok = 50
vals[low:ok, 1] = np.linspace(100/256, 1, ok-low)[::-1]
# Blue 1e-5 to 1e-3
mid = 70
vals[ok:mid, 2] = np.linspace(100/256, 1, mid-ok)[::-1]
# Red 1e-3 and higher
vals[mid:101, 0] = np.linspace(100/256, 1, 100-mid)[::-1]
newcmp = ListedColormap(vals)
cm_flash = newcmp
return cm_flash
empty_flash_conv = {'iterations': 0, 'err': 0.0, 'stab_guess_name': None}
one_in_list = [1.0]
empty_list = []
class Flash(object):
r'''Base class for performing flash calculations. All Flash objects need
to inherit from this, and common methods can be added to it.
Attributes
----------
T_MIN_FIXED : float
Absolute minimum temperature to search for a valid flash, [K]
T_MAX_FIXED : float
Absolute maximum temperature to search for a valid flash, [K]
P_MIN_FIXED : float
Absolute minimum pressure to search for a valid flash, [Pa]
P_MAX_FIXED : float
Absolute maximum pressure to search for a valid flash, [Pa]
'''
T_MIN_FIXED = Phase.T_MIN_FIXED
T_MAX_FIXED = Phase.T_MAX_FIXED
P_MIN_FIXED = Phase.P_MIN_FIXED
P_MAX_FIXED = Phase.P_MAX_FIXED
def flash(self, zs=None, T=None, P=None, VF=None, SF=None, V=None, H=None,
S=None, G=None, U=None, A=None, solution=None, hot_start=None,
retry=False, dest=None):
r'''Method to perform a flash calculation and return the result as an
:obj:`EquilibriumState <thermo.equilibrium.EquilibriumState>` object.
This generic interface allows flashes with any combination of valid
specifications; if a flash is unimplemented, an error will be raised.
Parameters
----------
zs : list[float], optional
Mole fractions of each component, required unless there is only
one component, [-]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
VF : float, optional
Vapor fraction, [-]
SF : float, optional
Solid fraction, [-]
V : float, optional
Molar volume of the overall bulk, [m^3/mol]
H : float, optional
Molar enthalpy of the overall bulk, [J/mol]
S : float, optional
Molar entropy of the overall bulk, [J/(mol*K)]
G : float, optional
Molar Gibbs free energy of the overall bulk, [J/mol]
U : float, optional
Molar internal energy of the overall bulk, [J/mol]
A : float, optional
Molar Helmholtz energy of the overall bulk, [J/mol]
solution : str or int, optional
When multiple solutions exist, if more than one is found they will
be sorted by T (and then P) increasingly; this number will index
into the multiple solution array. Negative indexing is supported.
'high' is an alias for 0, and 'low' an alias for -1. Setting this
parameter may make a flash slower because in some cases more checks
are performed. [-]
hot_start : :obj:`EquilibriumState <thermo.equilibrium.EquilibriumState>`
A previously converged flash or initial guessed state from which
the flash can begin; this parameter can save time in some cases,
[-]
retry : bool
Usually for flashes like UV or PH, there are multiple sets of
possible iteration variables. For the UV case, the preferred
iteration variable is P, so each iteration a PV solve is done on
the phase; but equally the flash can be done iterating on
`T`, where a TV solve is done on the phase each iteration.
Depending on the tolerances, the flash type, the thermodynamic
consistency of the phase, and other factors, it is possible the
flash can fail. If `retry` is set to True, the alternate variable
set will be iterated as a backup if the first flash fails. [-]
dest : None or :obj:`EquilibriumState <thermo.equilibrium.EquilibriumState>` or :obj:`EquilibriumStream <thermo.stream.EquilibriumStream>`
What type of object the flash result is set into; leave as None to
obtain the normal `EquilibriumState` results, [-]
Returns
-------
results : :obj:`EquilibriumState <thermo.equilibrium.EquilibriumState>`
Equilibrium object containing the state of the phases after the
flash calculation [-]
Notes
-----
Examples
--------
'''
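# Illustrative call sketch (flasher construction and component data are
# assumed to be set up elsewhere; numbers are placeholders):
# state = flasher.flash(zs=[0.5, 0.5], T=300.0, P=1e5)       # TP flash
# state2 = flasher.flash(zs=[0.5, 0.5], P=1e5, H=state.H())  # PH flash reusing the bulk enthalpy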
if zs is None:
if self.N == 1:
zs = [1.0]
else:
raise ValueError("Composition missing for flash")
constants, correlations = self.constants, self.correlations
settings = self.settings
if dest is None:
dest = EquilibriumState
# if self.N > 1 and 0:
# for zi in zs:
# if zi == 1.0:
# # Does not work - phases expect multiple components mole fractions
# return self.flash_pure.flash(zs=zs, T=T, P=P, VF=VF, SF=SF,
# V=V, H=H, S=S, U=U, G=G, A=A,
# solution=solution, retry=retry,
# hot_start=hot_start)
T_spec = T is not None
P_spec = P is not None
V_spec = V is not None
H_spec = H is not None
S_spec = S is not None
U_spec = U is not None
# Normally multiple solutions
A_spec = A is not None
G_spec = G is not None
HSGUA_spec_count = H_spec + S_spec + G_spec + U_spec + A_spec
VF_spec = VF is not None
SF_spec = SF is not None
flash_specs = {'zs': zs}
if T_spec:
flash_specs['T'] = T
if P_spec:
flash_specs['P'] = P
if V_spec:
flash_specs['V'] = V
if H_spec:
flash_specs['H'] = H
if S_spec:
flash_specs['S'] = S
if U_spec:
flash_specs['U'] = U
if G_spec:
flash_specs['G'] = G
if A_spec:
flash_specs['A'] = A
if VF_spec:
flash_specs['VF'] = VF
if SF_spec:
flash_specs['SF'] = SF
if ((T_spec and (P_spec or V_spec)) or (P_spec and V_spec)):
g, ls, ss, betas, flash_convergence = self.flash_TPV(T=T, P=P, V=V, zs=zs, solution=solution, hot_start=hot_start)
if g is not None:
id_phases = [g] + ls + ss
else:
id_phases = ls + ss
g, ls, ss, betas = identify_sort_phases(id_phases, betas, constants,
correlations, settings=settings,
skip_solids=self.skip_solids)
a_phase = id_phases[0]
return dest(a_phase.T, a_phase.P, zs, gas=g, liquids=ls, solids=ss,
betas=betas, flash_specs=flash_specs,
flash_convergence=flash_convergence,
constants=constants, correlations=correlations,
flasher=self)
elif T_spec and VF_spec:
# All dew/bubble are the same with 1 component
Psat, ls, g, iterations, err = self.flash_TVF(T, VF=VF, zs=zs, hot_start=hot_start)
if type(ls) is not list:
ls = [ls]
flash_convergence = {'iterations': iterations, 'err': err}
return dest(T, Psat, zs, gas=g, | |
# Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
import paddle.nn as nn
from paddle.fluid.framework import Variable, in_dygraph_mode
from paddle.fluid import core
class AnchorGenerator(nn.Layer):
"""
Compute anchors in the standard ways described in
"Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks".
Attributes:
anchor_size (list[list[float]] | list[float]):
If ``anchor_size`` is list[list[float]], ``anchor_size[i]`` is the list of anchor sizes
(i.e. sqrt of anchor area) to use for the i-th feature map.
If ``anchor_size`` is list[float], ``anchor_size`` is used for all feature maps.
Anchor anchor_size are given in absolute lengths in units of
the input image; they do not dynamically scale if the input image size changes.
aspect_ratios (list[list[float]] or list[float]): list of aspect ratios
(i.e. height / width) to use for anchors. Same "broadcast" rule for `sizes` applies.
strides (list[int]): stride of each input feature.
offset (float): Relative offset between the center of the first anchor and the top-left
corner of the image. Value has to be in [0, 1).
Recommend to use 0.5, which means half stride.
"""
def __init__(self,
anchor_sizes = [[32], [64], [128], [256], [512]],
aspect_ratios = [0.5, 1.0, 2.0],
strides = [4, 8, 16, 32, 64],
offset = 0.5):
super(AnchorGenerator, self).__init__()
self.anchor_sizes = anchor_sizes
self.aspect_ratios = aspect_ratios
self.strides = strides
self.offset = offset
self.base_anchors = self._compute_anchors()
assert 0. <= self.offset <= 1.0
def generate_anchors(self,
sizes = [32, 64, 128, 256, 512],
aspect_ratios = [0.5, 1.0, 2.0]):
"""
Generate a tensor storing canonical anchor boxes, which are all anchor
boxes of different sizes and aspect_ratios centered at (0, 0).
We can later build the set of anchors for a full feature map by
shifting and tiling these tensors (see `meth:_grid_anchors`).
Args:
sizes (list[float] | tuple[float]):
aspect_ratios (list[float] | tuple[float]]):
Returns:
Tensor of shape (len(sizes) * len(aspect_ratios), 4) storing anchor boxes
in xyxy format.
"""
anchors = []
for size in sizes:
area = size ** 2.0
for ratio in aspect_ratios:
w = math.sqrt(area / ratio)
h = ratio * w
x0, y0, x1, y1 = -w / 2.0, -h / 2.0, w / 2.0, h / 2.0
anchors.append([x0, y0, x1, y1])
return paddle.to_tensor(anchors, dtype='float32')
def _broadcast_params(self, params, num_features):
if not isinstance(params[0], (list, tuple)):
return [params] * num_features
if len(params) == 1:
return params * num_features
return params
def _compute_anchors(self):
sizes = self._broadcast_params(self.anchor_sizes, len(self.strides))
aspect_ratios = self._broadcast_params(self.aspect_ratios, len(self.strides))
base_anchors = [self.generate_anchors(s, a) for s, a in zip(sizes, aspect_ratios)]
[self.register_buffer(t.name, t, persistable=False) for t in base_anchors]
return base_anchors
def _grid_anchors(self, grid_sizes):
anchors = []
for grid_size, stride, base_anchor in zip(grid_sizes, self.strides, self.base_anchors):
grid_h, grid_w = grid_size
grid_x = paddle.arange(
self.offset * stride, grid_w * stride, step = stride, dtype='float32'
)
grid_y = paddle.arange(
self.offset * stride, grid_h * stride, step = stride, dtype='float32'
)
grid_y, grid_x = paddle.meshgrid(grid_y, grid_x)
grid_x = grid_x.reshape([-1])
grid_y = grid_y.reshape([-1])
grid_coord = paddle.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
anchors.append((grid_coord.unsqueeze(1) + base_anchor.unsqueeze(0)).reshape([-1, 4]))
return anchors
def forward(self, feats):
grid_sizes = [feat.shape[-2:] for feat in feats]
anchor_over_all_feat_maps = self._grid_anchors(grid_sizes)
return anchor_over_all_feat_maps
@property
def num_anchors(self):
return [len(num_a) for num_a in self.base_anchors][0]
# feats = []
# h, w = 800, 800
# for i in range(4):
#     feats.append(paddle.rand([4, 256, h // (2 ** (i + 2)), w // (2 ** (i + 2))]))
# anchorgenerator = AnchorGenerator()
# res = anchorgenerator(feats)
# print(anchorgenerator.num_anchors)
# print(res)
def generate_proposals(scores,
bbox_deltas,
im_shape,
anchors,
variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
eta=1.0,
pixel_offset=False,
return_rois_num=False,
name=None):
"""
**Generate proposal Faster-RCNN**
This operation proposes RoIs according to each box's probability of
being a foreground object; the boxes themselves are computed from the
anchors. The bbox_deltas and the objectness scores are the output of
the RPN. The final proposals can be used to train the detection net.
For generating proposals, this operation performs following steps:
1. Transposes and reshapes scores and bbox_deltas to shapes
(H*W*A, 1) and (H*W*A, 4)
2. Calculate box locations as proposals candidates.
3. Clip boxes to image
4. Remove predicted boxes with small area.
5. Apply NMS to get final proposals as output.
Args:
scores (tensor): A 4-D Tensor with shape [N, A, H, W] represents
the probability for each box to be an object.
N is batch size, A is number of anchors, H and W are height and
width of the feature map. The data type must be float32.
bbox_deltas (tensor): A 4-D Tensor with shape [N, 4*A, H, W]
represents the difference between predicted box location and
anchor location. The data type must be float32.
im_shape (tensor): A 2-D Tensor with shape [N, 2] represents H, W, the
origin image size or input size. The data type can be float32 or
float64.
anchors (tensor): A 4-D Tensor represents the anchors with a layout
of [H, W, A, 4] or [H * W * A, 4]. H and W are height and width of the feature map,
A is the number of anchors at each position. Each anchor is
in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
variances (tensor): A 4-D Tensor. The expanded variances of anchors with a layout of
[H, W, num_priors, 4]. Each variance is in (xcenter, ycenter, w, h) format.
The data type must be float32.
pre_nms_top_n (int): Number of total bboxes to be kept per image before NMS.
`6000` by default.
post_nms_top_n (int): Number of total bboxes to be kept per image after NMS.
`1000` by default.
nms_thresh (float): Threshold in NMS. The data type must be float32. `0.5` by default.
min_size (float): Remove predicted boxes with either height or
width < min_size. The data type must be float32. `0.1` by default.
eta (float): Used in adaptive NMS; if `adaptive_threshold > 0.5`,
then `adaptive_threshold = adaptive_threshold * eta` on each iteration.
return_rois_num (bool): When set to True, a 1-D Tensor with shape [N, ] is also returned, holding the
number of RoIs for each image in the batch, where N is the number of images. For example, the values
[4, 5] mean the first image has 4 RoIs and the second has 5. Only used in R-CNN models.
`False` by default.
name (str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set it;
None by default.
Returns:
tuple:
A tuple with format ``(rpn_rois, rpn_roi_probs)``.
- **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` where ``N`` is the number of RoIs.
The data type is the same as ``scores``.
- **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` where ``N`` is the number of RoIs.
The data type is the same as ``scores``.
"""
assert in_dygraph_mode()
assert return_rois_num, "return_rois_num should be True in dygraph mode."
attrs = ('pre_nms_topN', pre_nms_top_n, 'post_nms_topN', post_nms_top_n,
'nms_thresh', nms_thresh, 'min_size', min_size, 'eta', eta,
'pixel_offset', pixel_offset)
rpn_rois, rpn_roi_probs, rpn_rois_num = core.ops.generate_proposals_v2(
scores, bbox_deltas, im_shape, anchors, variances, *attrs)
return rpn_rois, rpn_roi_probs, rpn_rois_num
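# A minimal usage sketch for generate_proposals (commented out so the module
# still imports cleanly). The single-level setup and all shapes below are
# illustrative assumptions, not taken from the surrounding code: one feature
# map of size 50x50 with A=3 anchors per location and a batch of N=2 images.
# Running it requires Paddle in dygraph mode, as asserted above.
# import paddle
# N, A, H, W = 2, 3, 50, 50
# scores = paddle.rand([N, A, H, W])
# bbox_deltas = paddle.rand([N, 4 * A, H, W])
# im_shape = paddle.to_tensor([[800., 800.], [800., 800.]])
# anchors = paddle.rand([H, W, A, 4])
# variances = paddle.ones([H, W, A, 4])
# rois, roi_probs, rois_num = generate_proposals(
#     scores, bbox_deltas, im_shape, anchors, variances,
#     pre_nms_top_n=6000, post_nms_top_n=1000, return_rois_num=True)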
class ProposalGenerator(object):
"""
For each feature map, select the `pre_nms_topk` highest scoring proposals,
apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk`
highest scoring proposals among all the feature maps for each image.
Attributes:
pre_nms_top_n (int): number of top k scoring proposals to keep before applying NMS.
When RPN is run on multiple feature maps (as in FPN) this number is
be str"
assert self._body is not None, "body must not be None"
@property
def ledger_id(self) -> str:
"""Get the id of the ledger on which the terms are to be settled."""
return self._ledger_id
@property
def body(self):
"""Get the body."""
return self._body
@staticmethod
def encode(state_protobuf_object, state_object: "State") -> None:
"""
Encode an instance of this class into the protocol buffer object.
The protocol buffer object in the state_protobuf_object argument must be matched with the instance of this class in the 'state_object' argument.
:param state_protobuf_object: the protocol buffer object whose type corresponds with this class.
:param state_object: an instance of this class to be encoded in the protocol buffer object.
:return: None
"""
state_bytes = pickle.dumps(state_object) # nosec
state_protobuf_object.state_bytes = state_bytes
@classmethod
def decode(cls, state_protobuf_object) -> "State":
"""
Decode a protocol buffer object that corresponds with this class into an instance of this class.
A new instance of this class must be created that matches the protocol buffer object in the 'state_protobuf_object' argument.
:param state_protobuf_object: the protocol buffer object whose type corresponds with this class.
:return: A new instance of this class that matches the protocol buffer object in the 'state_protobuf_object' argument.
"""
state = pickle.loads(state_protobuf_object.state_bytes) # nosec
return state
def __eq__(self, other):
return (
isinstance(other, State)
and self.ledger_id == other.ledger_id
and self.body == other.body
)
def __str__(self):
return "State: ledger_id={}, body={}".format(self.ledger_id, self.body)
class Terms:
"""Class to represent the terms of a multi-currency & multi-token ledger transaction."""
def __init__(
self,
ledger_id: str,
sender_address: Address,
counterparty_address: Address,
amount_by_currency_id: Dict[str, int],
quantities_by_good_id: Dict[str, int],
nonce: str,
is_sender_payable_tx_fee: bool = True,
fee_by_currency_id: Optional[Dict[str, int]] = None,
is_strict: bool = False,
**kwargs,
):
"""
Instantiate terms of a transaction.
:param ledger_id: the ledger on which the terms are to be settled.
:param sender_address: the sender address of the transaction.
:param counterparty_address: the counterparty address of the transaction.
:param amount_by_currency_id: the amount by the currency of the transaction.
:param quantities_by_good_id: a map from good id to the quantity of that good involved in the transaction.
:param is_sender_payable_tx_fee: whether the sender or counterparty pays the tx fee.
:param nonce: nonce to be included in transaction to discriminate otherwise identical transactions.
:param fee_by_currency_id: the fee associated with the transaction.
:param is_strict: whether or not terms must have quantities and amounts of opposite signs.
"""
self._ledger_id = ledger_id
self._sender_address = sender_address
self._counterparty_address = counterparty_address
self._amount_by_currency_id = amount_by_currency_id
self._quantities_by_good_id = quantities_by_good_id
self._is_sender_payable_tx_fee = is_sender_payable_tx_fee
self._nonce = nonce
self._fee_by_currency_id = (
fee_by_currency_id if fee_by_currency_id is not None else {}
)
self._is_strict = is_strict
self._kwargs = kwargs if kwargs is not None else {}
self._check_consistency()
(
good_ids,
sender_supplied_quantities,
counterparty_supplied_quantities,
) = self._get_lists()
self._good_ids = good_ids
self._sender_supplied_quantities = sender_supplied_quantities
self._counterparty_supplied_quantities = counterparty_supplied_quantities
self._sender_hash = self.get_hash(
self.ledger_id,
sender_address=self.sender_address,
counterparty_address=self.counterparty_address,
good_ids=self.good_ids,
sender_supplied_quantities=self.sender_supplied_quantities,
counterparty_supplied_quantities=self.counterparty_supplied_quantities,
sender_payable_amount=self.sender_payable_amount,
counterparty_payable_amount=self.counterparty_payable_amount,
nonce=self.nonce,
)
self._counterparty_hash = self.get_hash(
self.ledger_id,
sender_address=self.counterparty_address,
counterparty_address=self.sender_address,
good_ids=self.good_ids,
sender_supplied_quantities=self.counterparty_supplied_quantities,
counterparty_supplied_quantities=self.sender_supplied_quantities,
sender_payable_amount=self.counterparty_payable_amount,
counterparty_payable_amount=self.sender_payable_amount,
nonce=self.nonce,
)
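# Illustrative construction of a Terms instance (commented out); the addresses
# and ids are made up. With is_strict=True the consistency check below requires
# all amounts to share one sign and all quantities the opposite sign, which
# this example satisfies: the sender pays 2 ETH and receives 10 units of good_1.
# terms = Terms(
#     ledger_id="ethereum",
#     sender_address="0xSenderAddress",
#     counterparty_address="0xCounterpartyAddress",
#     amount_by_currency_id={"ETH": -2},
#     quantities_by_good_id={"good_1": 10},
#     nonce="some_unique_nonce",
#     is_strict=True,
# )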
def _check_consistency(self) -> None:
"""Check consistency of the object."""
assert isinstance(self._ledger_id, str), "ledger_id must be str"
assert isinstance(self._sender_address, str), "sender_address must be str"
assert isinstance(
self._counterparty_address, str
), "counterparty_address must be str"
assert isinstance(self._amount_by_currency_id, dict) and all(
[
isinstance(key, str) and isinstance(value, int)
for key, value in self._amount_by_currency_id.items()
]
), "amount_by_currency_id must be a dictionary with str keys and int values."
assert isinstance(self._quantities_by_good_id, dict) and all(
[
isinstance(key, str) and isinstance(value, int)
for key, value in self._quantities_by_good_id.items()
]
), "quantities_by_good_id must be a dictionary with str keys and int values."
assert isinstance(
self._is_sender_payable_tx_fee, bool
), "is_sender_payable_tx_fee must be bool"
assert isinstance(self._nonce, str), "nonce must be str"
assert self._fee_by_currency_id is None or (
isinstance(self._fee_by_currency_id, dict)
and all(
[
isinstance(key, str) and isinstance(value, int) and value >= 0
for key, value in self._fee_by_currency_id.items()
]
)
), "fee must be None or Dict[str, int] with positive fees only."
assert all(
[
key in self._amount_by_currency_id
for key in self._fee_by_currency_id.keys()
]
), "Fee dictionary has keys which are not present in amount dictionary."
if self._is_strict:
is_pos_amounts = all(
[amount >= 0 for amount in self._amount_by_currency_id.values()]
)
is_neg_amounts = all(
[amount <= 0 for amount in self._amount_by_currency_id.values()]
)
is_pos_quantities = all(
[quantity >= 0 for quantity in self._quantities_by_good_id.values()]
)
is_neg_quantities = all(
[quantity <= 0 for quantity in self._quantities_by_good_id.values()]
)
assert (is_pos_amounts and is_neg_quantities) or (
is_neg_amounts and is_pos_quantities
), "quantities and amounts do not constitute valid terms. All quantities must be of same sign. All amounts must be of same sign. Quantities and amounts must be of different sign."
@property
def id(self) -> str:
"""Get hash of the terms."""
return self.sender_hash
@property
def sender_hash(self) -> str:
"""Get the sender hash."""
return self._sender_hash
@property
def counterparty_hash(self) -> str:
"""Get the sender hash."""
return self._counterparty_hash
@property
def ledger_id(self) -> str:
"""Get the id of the ledger on which the terms are to be settled."""
return self._ledger_id
@property
def sender_address(self) -> Address:
"""Get the sender address."""
return self._sender_address
@property
def counterparty_address(self) -> Address:
"""Get the counterparty address."""
return self._counterparty_address
@counterparty_address.setter
def counterparty_address(self, counterparty_address: Address) -> None:
"""Set the counterparty address."""
assert isinstance(counterparty_address, str), "counterparty_address must be str"
self._counterparty_address = counterparty_address
@property
def amount_by_currency_id(self) -> Dict[str, int]:
"""Get the amount by currency id."""
return copy.copy(self._amount_by_currency_id)
@property
def is_sender_payable_tx_fee(self) -> bool:
"""Bool indicating whether the tx fee is paid by sender or counterparty."""
return self._is_sender_payable_tx_fee
@property
def is_single_currency(self) -> bool:
"""Check whether a single currency is used for payment."""
return (
len(self._amount_by_currency_id) == 1 and len(self._fee_by_currency_id) <= 1
)
@property
def is_empty_currency(self) -> bool:
"""Check whether a single currency is used for payment."""
return len(self._amount_by_currency_id) == 0
@property
def currency_id(self) -> str:
"""Get the amount the sender must pay."""
assert self.is_single_currency, "More than one currency id, cannot get id."
value = next(iter(self._amount_by_currency_id.keys()))
return value
@property
def sender_payable_amount(self) -> int:
"""Get the amount the sender must pay."""
assert (
self.is_single_currency or self.is_empty_currency
), "More than one currency id, cannot get amount."
value = (
next(iter(self._amount_by_currency_id.values()))
if not self.is_empty_currency
else 0
)
payable = -value if value <= 0 else 0
return payable
@property
def sender_payable_amount_incl_fee(self) -> int:
"""Get the amount the sender must pay inclusive fee."""
assert (
self.is_single_currency or self.is_empty_currency
), "More than one currency id, cannot get amount."
payable = self.sender_payable_amount
if self.is_sender_payable_tx_fee and len(self._fee_by_currency_id) == 1:
payable += next(iter(self._fee_by_currency_id.values()))
return payable
@property
def counterparty_payable_amount(self) -> int:
"""Get the amount the counterparty must pay."""
assert (
self.is_single_currency or self.is_empty_currency
), "More than one currency id, cannot get amount."
value = (
next(iter(self._amount_by_currency_id.values()))
if not self.is_empty_currency
else 0
)
payable = value if value >= 0 else 0
return payable
@property
def counterparty_payable_amount_incl_fee(self) -> int:
"""Get the amount the counterparty must pay."""
assert (
self.is_single_currency or self.is_empty_currency
), "More than one currency id, cannot get amount."
payable = self.counterparty_payable_amount
if not self.is_sender_payable_tx_fee and len(self._fee_by_currency_id) == 1:
payable += next(iter(self._fee_by_currency_id.values()))
return payable
@property
def quantities_by_good_id(self) -> Dict[str, int]:
"""Get the quantities by good id."""
return copy.copy(self._quantities_by_good_id)
@property
def good_ids(self) -> List[str]:
"""Get the (ordered) good ids."""
return self._good_ids
@property
def sender_supplied_quantities(self) -> List[int]:
"""Get the (ordered) quantities supplied by the sender."""
return self._sender_supplied_quantities
@property
def counterparty_supplied_quantities(self) -> List[int]:
"""Get the (ordered) quantities supplied by the counterparty."""
return self._counterparty_supplied_quantities
@property
def nonce(self) -> str:
"""Get the nonce."""
return self._nonce
@property
def has_fee(self) -> bool:
"""Check if fee is set."""
return self.fee_by_currency_id != {}
@property
def fee(self) -> int:
"""Get the fee."""
assert self.has_fee, "fee_by_currency_id not set."
assert (
len(self.fee_by_currency_id) == 1
), "More than one currency id, cannot get fee."
return next(iter(self._fee_by_currency_id.values()))
@property
def sender_fee(self) -> int:
"""Get the sender fee."""
value = self.fee if self.is_sender_payable_tx_fee else 0
return value
@property
def counterparty_fee(self) -> int:
"""Get the counterparty fee."""
value = 0 if self.is_sender_payable_tx_fee else self.fee
return value
@property
def fee_by_currency_id(self) -> Dict[str, int]:
"""Get fee by currency."""
return copy.copy(self._fee_by_currency_id)
@property
def kwargs(self) -> Dict[str, Any]:
"""Get the kwargs."""
return self._kwargs
def _get_lists(self) -> Tuple[List[str], List[int], List[int]]:
ordered = collections.OrderedDict(sorted(self.quantities_by_good_id.items()))
good_ids = [] # type: List[str]
sender_supplied_quantities = []
auction: address
:param amount: uint128
:param nonce: uint128
"""
return self.C_.call_getter_raw('revealBid', {'auction': auction, 'amount': amount, 'nonce': nonce}, expect_ec=ts4_expect_ec)
def M_revealBid(self, auction, amount, nonce, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4User.revealBid method call
:param auction: address
:param amount: uint128
:param nonce: uint128
"""
_r_ = self.C_.call_method('revealBid', {'auction': auction, 'amount': amount, 'nonce': nonce}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_revealBid(self, auction, amount, nonce, ts4_expect_ec=0):
"""
Wrapper for D4User.revealBid signed method call
:param auction: address
:param amount: uint128
:param nonce: uint128
"""
_r_ = self.C_.call_method_signed('revealBid', {'auction': auction, 'amount': amount, 'nonce': nonce}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
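# A hedged sketch of how these generated wrappers are typically exercised from
# a tonos-ts4 test (commented out). `user`, `auction_addr`, `start_time` and
# `user_addr` are placeholders for objects created elsewhere in the test setup;
# only the wrapper methods themselves are defined in this file.
# user.M_revealBid(auction_addr, amount=10, nonce=42)                   # plain external call
# user.S_revealBid(auction_addr, amount=10, nonce=42)                   # signed call
# h = user.G_utilBidHash(auction_addr, start_time, user_addr, 10, 42)   # local getter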
def bidRevealComplete(self, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4User.bidRevealComplete
:rtype:
"""
if ts4_sign:
return self.S_bidRevealComplete(ts4_expect_ec=ts4_expect_ec)
else:
return self.M_bidRevealComplete(ts4_expect_ec=ts4_expect_ec)
def G_bidRevealComplete(self, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4User.bidRevealComplete getter
:rtype:
"""
return self.C_.call_getter('bidRevealComplete', {}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_bidRevealComplete(self, ts4_expect_ec=0):
"""
Wrapper for D4User.bidRevealComplete raw getter
:rtype:
"""
return self.C_.call_getter_raw('bidRevealComplete', {}, expect_ec=ts4_expect_ec)
def M_bidRevealComplete(self, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4User.bidRevealComplete method call
"""
_r_ = self.C_.call_method('bidRevealComplete', {}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_bidRevealComplete(self, ts4_expect_ec=0):
"""
Wrapper for D4User.bidRevealComplete signed method call
"""
_r_ = self.C_.call_method_signed('bidRevealComplete', {}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def finalize(self, auction, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4User.finalize
:rtype:
:param auction: address
"""
if ts4_sign:
return self.S_finalize(auction, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_finalize(auction, ts4_expect_ec=ts4_expect_ec)
def G_finalize(self, auction, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4User.finalize getter
:rtype:
:param auction: address
"""
return self.C_.call_getter('finalize', {'auction': auction}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_finalize(self, auction, ts4_expect_ec=0):
"""
Wrapper for D4User.finalize raw getter
:rtype:
:param auction: address
"""
return self.C_.call_getter_raw('finalize', {'auction': auction}, expect_ec=ts4_expect_ec)
def M_finalize(self, auction, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4User.finalize method call
:param auction: address
"""
_r_ = self.C_.call_method('finalize', {'auction': auction}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_finalize(self, auction, ts4_expect_ec=0):
"""
Wrapper for D4User.finalize signed method call
:param auction: address
"""
_r_ = self.C_.call_method_signed('finalize', {'auction': auction}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def utilBidHash(self, auction, startTime, user, amount, nonce, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4User.utilBidHash
:rtype: uint256
:param auction: address
:param startTime: uint32
:param user: address
:param amount: uint128
:param nonce: uint128
"""
return self.G_utilBidHash(auction, startTime, user, amount, nonce, ts4_expect_ec=ts4_expect_ec)
def G_utilBidHash(self, auction, startTime, user, amount, nonce, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4User.utilBidHash getter
:rtype: uint256
:param auction: address
:param startTime: uint32
:param user: address
:param amount: uint128
:param nonce: uint128
"""
return self.C_.call_getter('utilBidHash', {'auction': auction, 'startTime': startTime, 'user': user, 'amount': amount, 'nonce': nonce}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_utilBidHash(self, auction, startTime, user, amount, nonce, ts4_expect_ec=0):
"""
Wrapper for D4User.utilBidHash raw getter
:rtype: uint256
:param auction: address
:param startTime: uint32
:param user: address
:param amount: uint128
:param nonce: uint128
"""
return self.C_.call_getter_raw('utilBidHash', {'auction': auction, 'startTime': startTime, 'user': user, 'amount': amount, 'nonce': nonce}, expect_ec=ts4_expect_ec)
def M_utilBidHash(self, auction, startTime, user, amount, nonce, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4User.utilBidHash method call
:param auction: address
:param startTime: uint32
:param user: address
:param amount: uint128
:param nonce: uint128
"""
_r_ = self.C_.call_method('utilBidHash', {'auction': auction, 'startTime': startTime, 'user': user, 'amount': amount, 'nonce': nonce}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_utilBidHash(self, auction, startTime, user, amount, nonce, ts4_expect_ec=0):
"""
Wrapper for D4User.utilBidHash signed method call
:param auction: address
:param startTime: uint32
:param user: address
:param amount: uint128
:param nonce: uint128
"""
_r_ = self.C_.call_method_signed('utilBidHash', {'auction': auction, 'startTime': startTime, 'user': user, 'amount': amount, 'nonce': nonce}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def queryCert(self, target, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4User.queryCert
:rtype:
:param target: address
"""
if ts4_sign:
return self.S_queryCert(target, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_queryCert(target, ts4_expect_ec=ts4_expect_ec)
def G_queryCert(self, target, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4User.queryCert getter
:rtype:
:param target: address
"""
return self.C_.call_getter('queryCert', {'target': target}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_queryCert(self, target, ts4_expect_ec=0):
"""
Wrapper for D4User.queryCert raw getter
:rtype:
:param target: address
"""
return self.C_.call_getter_raw('queryCert', {'target': target}, expect_ec=ts4_expect_ec)
def M_queryCert(self, target, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4User.queryCert method call
:param target: address
"""
_r_ = self.C_.call_method('queryCert', {'target': target}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_queryCert(self, target, ts4_expect_ec=0):
"""
Wrapper for D4User.queryCert signed method call
:param target: address
"""
_r_ = self.C_.call_method_signed('queryCert', {'target': target}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def queryAuct(self, target, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4User.queryAuct
:rtype:
:param target: address
"""
if ts4_sign:
return self.S_queryAuct(target, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_queryAuct(target, ts4_expect_ec=ts4_expect_ec)
def G_queryAuct(self, target, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4User.queryAuct getter
:rtype:
:param target: address
"""
return self.C_.call_getter('queryAuct', {'target': target}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_queryAuct(self, target, ts4_expect_ec=0):
"""
Wrapper for D4User.queryAuct raw getter
:rtype:
:param target: address
"""
return self.C_.call_getter_raw('queryAuct', {'target': target}, expect_ec=ts4_expect_ec)
def M_queryAuct(self, target, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4User.queryAuct method call
:param target: address
"""
_r_ = self.C_.call_method('queryAuct', {'target': target}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_queryAuct(self, target, ts4_expect_ec=0):
"""
Wrapper for D4User.queryAuct signed method call
:param target: address
"""
_r_ = self.C_.call_method_signed('queryAuct', {'target': target}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def forgetCert(self, target, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4User.forgetCert
:rtype:
:param target: address
"""
if ts4_sign:
return self.S_forgetCert(target, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_forgetCert(target, ts4_expect_ec=ts4_expect_ec)
def G_forgetCert(self, target, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4User.forgetCert getter
:rtype:
:param target: address
"""
return self.C_.call_getter('forgetCert', {'target': target}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_forgetCert(self, target, ts4_expect_ec=0):
"""
Wrapper for D4User.forgetCert raw getter
:rtype:
:param target: address
"""
return self.C_.call_getter_raw('forgetCert', {'target': target}, expect_ec=ts4_expect_ec)
def M_forgetCert(self, target, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4User.forgetCert method call
:param target: address
"""
_r_ = self.C_.call_method('forgetCert', {'target': target}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_forgetCert(self, target, ts4_expect_ec=0):
"""
Wrapper for D4User.forgetCert signed method call
:param target: address
"""
_r_ = self.C_.call_method_signed('forgetCert', {'target': target}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def forgetAuct(self, target, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4User.forgetAuct
:rtype:
:param target: address
"""
if ts4_sign:
return self.S_forgetAuct(target, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_forgetAuct(target, ts4_expect_ec=ts4_expect_ec)
def G_forgetAuct(self, target, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4User.forgetAuct getter
:rtype:
:param target: address
"""
return self.C_.call_getter('forgetAuct', {'target': target}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_forgetAuct(self, target, ts4_expect_ec=0):
"""
Wrapper for D4User.forgetAuct raw getter
:rtype:
:param target: address
"""
return self.C_.call_getter_raw('forgetAuct', {'target': target}, expect_ec=ts4_expect_ec)
def M_forgetAuct(self, target, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4User.forgetAuct method call
:param target: address
"""
_r_ = self.C_.call_method('forgetAuct', {'target': target}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_forgetAuct(self, target, ts4_expect_ec=0):
"""
Wrapper for D4User.forgetAuct signed method call
:param target: address
"""
_r_ = self.C_.call_method_signed('forgetAuct', {'target': target}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def queryCertCallback(self, info, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4User.queryCertCallback
:rtype:
:param info: tuple
"""
if ts4_sign:
return self.S_queryCertCallback(info, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_queryCertCallback(info, ts4_expect_ec=ts4_expect_ec)
def G_queryCertCallback(self, info, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4User.queryCertCallback getter
:rtype:
:param info: tuple
"""
return self.C_.call_getter('queryCertCallback', {'info': info}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_queryCertCallback(self, info, ts4_expect_ec=0):
"""
Wrapper for D4User.queryCertCallback raw getter
:rtype:
:param info: tuple
"""
return self.C_.call_getter_raw('queryCertCallback', {'info': info}, expect_ec=ts4_expect_ec)
def M_queryCertCallback(self, info, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4User.queryCertCallback method call
:param info: tuple
"""
_r_ = self.C_.call_method('queryCertCallback', {'info': info}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_queryCertCallback(self, info, ts4_expect_ec=0):
"""
Wrapper for D4User.queryCertCallback signed method call
:param info: tuple
"""
_r_ = self.C_.call_method_signed('queryCertCallback', {'info': info}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def queryAuctCallback(self, info, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4User.queryAuctCallback
:rtype:
:param info: tuple
"""
if ts4_sign:
return self.S_queryAuctCallback(info, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_queryAuctCallback(info, ts4_expect_ec=ts4_expect_ec)
def G_queryAuctCallback(self, info, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4User.queryAuctCallback getter
:rtype:
:param info: tuple
"""
return self.C_.call_getter('queryAuctCallback', {'info': info}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_queryAuctCallback(self, info, ts4_expect_ec=0):
"""
Wrapper for D4User.queryAuctCallback raw getter
:rtype:
:param info: tuple
"""
return self.C_.call_getter_raw('queryAuctCallback', {'info': info}, expect_ec=ts4_expect_ec)
def M_queryAuctCallback(self, info, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4User.queryAuctCallback method call
:param info: tuple
"""
_r_ = self.C_.call_method('queryAuctCallback', {'info':
<filename>scipy/linalg/_matfuncs_inv_ssq.py
"""
Matrix functions that use Pade approximation with inverse scaling and squaring.
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy.linalg._matfuncs_sqrtm import SqrtmError, _sqrtm_triu
from scipy.linalg.decomp_schur import schur, rsf2csf
from scipy.linalg.matfuncs import funm
from scipy.linalg import svdvals, solve_triangular
from scipy.sparse.linalg.interface import LinearOperator
from scipy.sparse.linalg import onenormest
import scipy.special
__all__ = ['logm', 'fractional_matrix_power']
class LogmRankWarning(UserWarning):
pass
class LogmExactlySingularWarning(LogmRankWarning):
pass
class LogmNearlySingularWarning(LogmRankWarning):
pass
class LogmError(np.linalg.LinAlgError):
pass
class FractionalMatrixPowerError(np.linalg.LinAlgError):
pass
def _count_nonzero(x):
"""np.count_nonzero not available in numpy 1.5.x"""
return np.sum(x != 0)
#TODO renovate or move this class when scipy operators are more mature
class _MatrixM1PowerOperator(LinearOperator):
"""
A representation of the linear operator (A - I)^p.
"""
def __init__(self, A, p):
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if p < 0 or p != int(p):
raise ValueError('expected p to be a non-negative integer')
self._A = A
self._p = p
self.ndim = A.ndim
self.shape = A.shape
def matvec(self, x):
for i in range(self._p):
x = self._A.dot(x) - x
return x
def rmatvec(self, x):
for i in range(self._p):
x = x.dot(self._A) - x
return x
def matmat(self, X):
for i in range(self._p):
X = self._A.dot(X) - X
return X
@property
def T(self):
return _MatrixM1PowerOperator(self._A.T, self._p)
#TODO renovate or move this function when scipy operators are more mature
def _onenormest_m1_power(A, p,
t=2, itmax=5, compute_v=False, compute_w=False):
"""
Efficiently estimate the 1-norm of (A - I)^p.
Parameters
----------
A : ndarray
Matrix whose 1-norm of a power is to be computed.
p : int
Non-negative integer power.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
return onenormest(_MatrixM1PowerOperator(A, p),
t=t, itmax=itmax, compute_v=compute_v, compute_w=compute_w)
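# A small sanity-check sketch (commented out). For a tiny matrix, (A - I)^p can
# be formed exactly, so the estimate can be compared against the exact 1-norm;
# the numbers are illustrative only.
# A = np.array([[2.0, 1.0],
#               [0.0, 3.0]])
# est = _onenormest_m1_power(A, 2)
# exact = np.linalg.norm(np.linalg.matrix_power(A - np.eye(2), 2), 1)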
def _unwindk(z):
"""
Compute the scalar unwinding number.
Uses Eq. (5.3) in [1]_, and should be equal to (z - log(exp(z))) / (2 pi i).
Note that this definition differs in sign from the original definition
in equations (5, 6) in [2]_. The sign convention is justified in [3]_.
Parameters
----------
z : complex
A complex number.
Returns
-------
unwinding_number : integer
The scalar unwinding number of z.
References
----------
.. [1] <NAME> and <NAME> (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
.. [2] <NAME> and <NAME>,
"The unwinding number." Newsletter ACM SIGSAM Bulletin
Volume 30, Issue 2, June 1996, Pages 28-35.
.. [3] <NAME> and <NAME> and <NAME> and
<NAME> and <NAME>,
"Reasoning about the elementary functions of complex analysis"
Annals of Mathematics and Artificial Intelligence,
36: 303-318, 2002.
"""
return int(np.ceil((z.imag - np.pi) / (2*np.pi)))
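# Worked examples (commented out): with this sign convention the unwinding
# number is ceil((Im(z) - pi) / (2*pi)), so for instance
# _unwindk(0j)              -> 0   (Im(z) in (-pi, pi] unwinds to 0)
# _unwindk(2j * np.pi)      -> 1   (one extra turn above the principal branch)
# _unwindk(-2j * np.pi)     -> -1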
def _briggs_helper_function(a, k):
"""
Computes r = a^(1 / (2^k)) - 1.
This is algorithm (2) of [1]_.
The purpose is to avoid a danger of subtractive cancellation.
For more computational efficiency it should probably be cythonized.
Parameters
----------
a : complex
A complex number preferably belonging to the closed negative real axis.
k : integer
A nonnegative integer.
Returns
-------
r : complex
The value r = a^(1 / (2^k)) - 1 computed with less cancellation.
Notes
-----
The algorithm as written in the publication does not handle k=0 or k=1
correctly, so these are special-cased in this implementation.
This function is intended to not allow `a` to belong to the closed
negative real axis, but this constraint is relaxed.
References
----------
.. [1] <NAME>. Al-Mohy (2012)
"A more accurate Briggs method for the logarithm",
Numerical Algorithms, 59 : 393--402.
"""
if k < 0 or int(k) != k:
raise ValueError('expected a nonnegative integer k')
if k == 0:
return a - 1
elif k == 1:
return np.sqrt(a) - 1
else:
k_hat = k
if np.angle(a) >= np.pi / 2:
a = np.sqrt(a)
k_hat = k - 1
z0 = a - 1
a = np.sqrt(a)
r = 1 + a
for j in range(1, k_hat):
a = np.sqrt(a)
r = r * (1 + a)
r = z0 / r
return r
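# Quick check of the Briggs helper (commented out): for a = 16 and k = 2 the
# exact value is 16**(1/4) - 1 = 1, and the helper should agree to roundoff.
# r = _briggs_helper_function(16.0 + 0j, 2)
# assert abs(r - 1.0) < 1e-12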
def _fractional_power_superdiag_entry(l1, l2, t12, p):
"""
Compute a superdiagonal entry of a fractional matrix power.
This is Eq. (5.6) in [1]_.
Parameters
----------
l1 : complex
A diagonal entry of the matrix.
l2 : complex
A diagonal entry of the matrix.
t12 : complex
A superdiagonal entry of the matrix.
p : float
A fractional power.
Returns
-------
f12 : complex
A superdiagonal entry of the fractional matrix power.
Notes
-----
Some amount of care has been taken to return a real number
if all of the inputs are real.
References
----------
.. [1] <NAME> and <NAME> (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
"""
if l1 == l2:
f12 = t12 * p * l1**(p-1)
elif abs(l1) < abs(l2) / 2 or abs(l2) < abs(l1) / 2:
f12 = t12 * ((l2**p) - (l1**p)) / (l2 - l1)
else:
# This is Eq. (5.5) in [1].
z = (l2 - l1) / (l2 + l1)
log_l1 = np.log(l1)
log_l2 = np.log(l2)
arctanh_z = np.arctanh(z)
tmp_a = t12 * np.exp((p/2)*(log_l2 + log_l1))
tmp_u = _unwindk(log_l2 - log_l1)
if tmp_u:
tmp_b = p * (arctanh_z + np.pi * 1j * tmp_u)
else:
tmp_b = p * arctanh_z
tmp_c = 2 * np.sinh(tmp_b) / (l2 - l1)
f12 = tmp_a * tmp_c
return f12
def _logm_superdiag_entry(l1, l2, t12):
"""
Compute a superdiagonal entry of a matrix logarithm.
This is Eq. (11.28) in [1]_.
Parameters
----------
l1 : complex
A diagonal entry of the matrix.
l2 : complex
A diagonal entry of the matrix.
t12 : complex
A superdiagonal entry of the matrix.
Returns
-------
f12 : complex
A superdiagonal entry of the matrix logarithm.
Notes
-----
Some amount of care has been taken to return a real number
if all of the inputs are real.
References
----------
.. [1] <NAME> (2008)
"Functions of Matrices: Theory and Computation"
ISBN 978-0-898716-46-7
"""
if l1 == l2:
f12 = t12 / l1
elif abs(l1) < abs(l2) / 2 or abs(l2) < abs(l1) / 2:
f12 = t12 * (np.log(l2) - np.log(l1)) / (l2 - l1)
else:
z = (l2 - l1) / (l2 + l1)
ua = _unwindk(np.log(l2) - np.log(l1))
ub = _unwindk(np.log(1+z) - np.log(1-z))
u = ua + ub
if u:
f12 = t12 * (2*np.arctanh(z) + 2*np.pi*1j*(ua + ub)) / (l2 - l1)
else:
f12 = t12 * 2 * np.arctanh(z) / (l2 - l1)
return f12
def _inverse_squaring_helper(T0, theta):
"""
A helper function for inverse scaling and squaring for Pade approximation.
Parameters
----------
T0 : (N, N) array_like upper triangular
Matrix involved in inverse scaling and squaring.
theta : indexable
The values theta[1] .. theta[7] must be available.
They represent bounds related to Pade approximation, and they depend
on the matrix function which is being computed.
For example, different values of theta are required for
matrix logarithm than for fractional matrix power.
Returns
-------
R : (N, N) array_like upper triangular
Composition of zero or more matrix square roots of T0, minus I.
s : non-negative integer
Number of square roots taken.
m : positive integer
'and':
credit = credit[:-3].strip()
# Remove surrounding parentheses
parentheses_match = re.search(r'^\(.*\)$', credit)
if parentheses_match:
credit = credit.strip('()')
# Final catch for empty credit
if len(credit.strip()) == 0:
credit = None
yield CreditScrape(
name_id=name_id,
title_id=title_id,
job_title=curr_title,
credit=credit,
episode_count=episode_count,
episode_year_start=episode_year_start,
episode_year_end=episode_year_end
)
found_title = False # only because we use continue when set to True for now...
def get_full_credits(self, title_id, include_episodes=False):
"""Scrapes the full list of credited people for a title.
Will scrape all the cast and crew for a title by returning both
:obj:`~.scraper.PyMDbScraper.get_full_cast` and :obj:`~.scraper.PyMDbScraper.get_full_crew` as a single generator.
An optional argument `include_episodes` will also scrape each episode an actor is in
if the title is a TV series.
Args:
title_id (:obj:`str`): The title's ID used by IMDb prefixed with `tt`.
include_episodes (:obj:`bool`, optional): Specify if individual episodes of a
TV series should also be scraped.
Yields:
:class:`~.models.title.CreditScrape`: An object for each credited crew member in the title.
Raises:
HTTPError: If the request failed.
"""
for cast_member in self.get_full_cast(title_id, include_episodes=include_episodes):
yield cast_member
for crew_member in self.get_full_crew(title_id):
yield crew_member
def get_full_credits_as_dict(self, title_id, include_episodes=False):
"""Scrapes the full list of credited people for a title into a dictionary.
Builds a dictionary with `job_title` as key of lists of :class:`~.models.title.CreditScrape`
objects. Uses the results of the :obj:`~.scraper.PyMDbScraper.get_full_credits` method
to gather the objects.
An optional argument `include_episodes` will also scrape each episode an actor is in
if the title is a TV series.
Args:
title_id (:obj:`str`): The title's ID used by IMDb prefixed with `tt`.
include_episodes (:obj:`bool`, optional): Specify if individual episodes of a
TV series should also be scraped.
Returns:
:obj:`dict` of :obj:`list` of :class:`~.models.title.CreditScrape`: A dictionary where
each key is a :obj:`str` of a `job_title` and the value is a :obj:`list` of
:class:`~.models.title.CreditScrape` objects whose `job_title` value is the same
as the key.
Raises:
HTTPError: If the request failed.
"""
full_credits = defaultdict(list)
for credit in self.get_full_credits(title_id, include_episodes):
full_credits[credit.job_title].append(credit)
return full_credits
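# Hedged usage sketch (commented out). The title ID, the assumption that the
# scraper can be constructed without arguments, and the 'director' key are all
# illustrative rather than taken from the surrounding code.
# scraper = PyMDbScraper()
# credits_by_job = scraper.get_full_credits_as_dict('tt0120737')
# for credit in credits_by_job.get('director', []):
#     print(credit.name_id, credit.title_id)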
def get_name(self, name_id, include_known_for_titles=False):
"""Scrapes detailed information from a person's personal IMDb web page.
Will scrape detailed information on a person's IMDb `bio` page into a new
`NameScrape` object.
Args:
name_id (:obj:`str`): The person's ID used by IMDb prefixed with `nm`.
include_known_for_titles (:obj:`bool`, optional): Determines if a second request should
be sent to get the known for titles on a person's default IMDb page.
Returns:
:class:`~.models.name.NameScrape`: An object with the person's information.
Raises:
HTTPError: If the request failed.
"""
request = f'https://www.imdb.com/name/{name_id}/bio'
tree = self._get_tree(request)
display_name = None
known_for_titles = []
birth_date = None
birth_city = None
death_date = None
death_city = None
death_cause = None
birth_name = None
nicknames = []
height = None
display_name_node = tree.css_first('div#main > div:nth-of-type(1) > div:nth-of-type(1) > div > h3 > a')
if display_name_node:
display_name = display_name_node.text().strip()
bio_node = tree.css_first('div#bio_content')
if bio_node:
overview_node = bio_node.css_first('table#overviewTable')
if overview_node:
for row_node in overview_node.css('tr'):
label_node = row_node.css_first('td.label')
if label_node:
label = label_node.text().lower().strip()
if label == 'born':
birth_date_node = row_node.css_first('td > time')
if birth_date_node and 'datetime' in birth_date_node.attributes:
birth_date = birth_date_node.attributes['datetime']
birth_city_node = row_node.css_first('td > a')
if birth_city_node:
birth_city = birth_city_node.text().strip()
elif label == 'died':
death_date_node = row_node.css_first('td > time')
if death_date_node and 'datetime' in death_date_node.attributes:
death_date = death_date_node.attributes['datetime']
death_city_node = row_node.css_first('td > a')
if death_city_node:
death_city = death_city_node.text().strip()
death_cause_node = row_node.css_first('td ~ td')
if death_cause_node:
death_cause_match = re.search(r'\(.*\)', death_cause_node.text())
if death_cause_match:
death_cause = death_cause_match.group(0).strip('()')
elif label == 'birth name':
birth_name_node = row_node.css_first('td ~ td')
if birth_name_node:
birth_name = birth_name_node.text().strip()
elif label == 'nicknames':
nicknames_node = row_node.css_first('td ~ td')
if nicknames_node:
nicknames = split_by_br(re.sub(r'</*td>', '', nicknames_node.html).strip())
elif label == 'height':
height_node = row_node.css_first('td ~ td')
if height_node:
height_match = re.search(r'\(\d+\.*\d*', height_node.text().strip())
if height_match:
height = height_match.group(0).strip('(')
if include_known_for_titles:
known_for_titles_request = f'https://www.imdb.com/name/{name_id}/'
known_for_titles_tree = self._get_tree(known_for_titles_request)
known_for_titles_node = known_for_titles_tree.css_first('#knownfor, #knownfor-stacked')
if known_for_titles_node:
for known_for_title_node in known_for_titles_node.css('.knownfor-title'):
known_for_title_id = get_title_id(known_for_title_node.css_first('a'))
if known_for_title_id:
known_for_titles.append(known_for_title_id)
return NameScrape(
name_id=name_id,
display_name=display_name,
known_for_titles=known_for_titles,
birth_name=birth_name,
birth_date=birth_date,
birth_city=birth_city,
death_date=death_date,
death_city=death_city,
death_cause=death_cause,
nicknames=nicknames,
height=height
)
def get_name_credits(self, name_id, include_episodes=False):
"""Scrapes all title credits a person is included in.
Scrapes the `full filmography` from a person's IMDb page to get each
title they are credited in, and what category that credit is under.
An optional argument `include_episodes` will also scrape each episode
an actor is in if the title is a TV series. Each credit is created
with a new `NameCreditScrape` object.
Args:
name_id (:obj:`str`): The person's ID used by IMDb prefixed with `nm`.
include_episodes (:obj:`bool`, optional): Specify if individual episodes of a TV series
should also be scraped.
Yields:
:class:`~.models.name.NameCreditScrape`: An object for each credit in the person's filmography.
Raises:
HTTPError: If a request failed.
"""
request = f'https://www.imdb.com/name/{name_id}/'
tree = self._get_tree(request)
filmography_node = tree.css_first('div#filmography')
if not filmography_node:
return None
for row_node in filmography_node.css('div.filmo-row'):
category, title_id = row_node.id.split('-')
category = '_'.join(category.split()).lower()
start_year = None
end_year = None
title_info = None
role = None
years_node = row_node.css_first('span.year_column')
if years_node:
years = years_node.text().strip()
if len(years) > 0:
if '-' in years:
start_year, end_year = years.split('-')
else:
start_year = years
info = split_by_br(row_node.html)
if len(info) > 1:
title_info, role = info
role = re.sub(r'<.*?>', '', remove_tags_and_content(role, 'div')).strip()
if include_episodes and row_node.css_first('div.filmo-episodes'):
# Send AJAX request if a "show all" link exists
more_episodes_node = row_node.css_first(
f'div#more-episodes-{title_id}-{category} ~ div.filmo-episodes'
)
episode_nodes = row_node
if more_episodes_node:
onclick_node = more_episodes_node.css_first('div > a')
ref_marker = get_ref_marker(onclick_node)
category_req = get_category(onclick_node)
request = f'https://www.imdb.com/name/{name_id}/episodes/_ajax?title={title_id}' + \
f'&category={category_req}&ref_marker={ref_marker}&start_index=0'
try:
episode_nodes = self._get_tree(request)
except requests.exceptions.HTTPError as e:
# Some AJAX calls seem to 404, so ignore them and remove the "show all" link
if e.response.status_code == 404:
more_episodes_node.decompose()
else:
raise e
episode_nodes = episode_nodes.css('div.filmo-episodes')
for episode_node in episode_nodes:
episode_info_node = episode_node.css_first('a')
episode_id = None
if episode_info_node:
episode_id = get_title_id(episode_info_node)
episode_info = episode_node.text().split('...')
episode_year = None
episode_role = None
if len(episode_info) > 1:
year_info = episode_info[0]
episode_role = '...'.join(episode_info[1:]).strip()
if len(episode_role) == 0:
episode_role = None
else:
year_info, = episode_info
year_info_match = re.search(r'\([\d]{4}\)', year_info)
if year_info_match:
episode_year = year_info_match.group(0).strip('()')
yield NameCreditScrape(
name_id=name_id,
title_id=episode_id,
category=category,
start_year=episode_year,
end_year=None,
role=episode_role,
title_notes=[]
)
else:
title_info, = info
title_info = re.sub(r'(<\s*a.*?>|<.*?a\s*>)', '', title_info)
title_notes = [note.strip('()') for note in re.findall(r'\(.*?\)', title_info)]
if role is not None and len(role) == 0:
role = None
yield NameCreditScrape(
name_id=name_id,
title_id=title_id,
category=category,
start_year=trim_year(start_year),
end_year=trim_year(end_year),
role=role,
title_notes=title_notes
)
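# Hedged usage sketch for get_name_credits (commented out); the name ID is
# illustrative, `scraper` is the instance from the sketch above, and
# include_episodes stays False to avoid the per-episode AJAX requests
# described in the docstring.
# for name_credit in scraper.get_name_credits('nm0000123'):
#     print(name_credit.category, name_credit.title_id, name_credit.start_year)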
def get_company(self, company_id):
"""Scrapes all titles a company is credited for on IMDb.
Will scrape all titles listed under a company on IMDb by going through each page
in IMDb's `company search`. This only gives the year(s) the company was involved with
each title and `notes` for each listed on IMDb.
Args:
company_id (:obj:`str`): The company's ID used by IMDb prefixed with `co`.
Yields:
:class:`~.models.company.CompanyScrape`: An object for each title the company is credited for.
Raises:
HTTPError: If a request failed.
InvalidCompanyId: If an invalid company ID was given.
"""
index = 1
finding_titles = True
while finding_titles:
request = f'https://www.imdb.com/search/title/?companies={company_id}&view=simple&start={index}'
try:
tree = self._get_tree(request)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
finding_titles = False
else:
raise e
# Check if this was a valid company ID
company_title_node = tree.css_first('div.article > h1.header')
if company_title_node:
company_title = company_title_node.text().replace('(Sorted by Popularity Ascending)', '').strip()
if len(company_title) == 0:
raise InvalidCompanyId(f'Invalid company ID: {company_id}')
title_list_node = tree.css_first('div.lister-list')
if not title_list_node:
finding_titles = False
else:
for title_info_node in title_list_node.css('span.lister-item-header'):
title_id = None
start_year = None
end_year = None
notes = None
year_info_node = None
# Check if this is a TV episode
episode_node = title_info_node.css_first('small')
if episode_node and 'Episode' in episode_node.text():
episode_link_node = title_info_node.css_first('small ~ a')
title_id = get_title_id(episode_link_node)
year_info_node = title_info_node.css_first('small ~ a ~ span.lister-item-year')
else:
title_info_node = title_info_node.css_first('span.lister-item-index ~ span')
if title_info_node:
title_link_node = title_info_node.css_first('a')
title_id = get_title_id(title_link_node)
year_info_node = title_info_node.css_first('span.lister-item-year')
if year_info_node:
year_info_text = year_info_node.text().strip('()')
years_match = re.search(r'(\d|–|-)+', year_info_text)
notes_match = re.search(r'([A-Za-z]+\s*)+', year_info_text)
if years_match:
year_info = re.sub(r'[–\-]+', '\t', years_match.group(0)).split('\t')
if len(year_info) > 1:
start_year, end_year = year_info
# Handle shows that are still on-air (ex: '2005- ')
if len(end_year.strip()) == 0:
end_year = None
else:
start_year, = year_info
if notes_match:
notes
<reponame>ChenHuaYou/cONNXr
import re
import itertools
def format_text(prefix, start, texts):
output = []
curr = []
if start:
curr.append(start)
linebreaks = 0
for text in texts:
lines = []
length = len(prefix)
if start:
length += len(start)
# split text into words by splitting on space and remove empty splits ("")
# then split on newline boundaries, but keep empty splits ("\n\n")
words = [w.split("\n") for w in text.strip().split(" ") if w != ""]
words = list(itertools.chain(*words))
for w in words:
if w.strip() == "":
if linebreaks == 0:
linebreaks += 1
continue
if linebreaks >= 2:
# we already did 2 line breaks, skip this one
continue
# empty split, caused by "\n\n", should cause single line break
linebreaks += 1
length = len(prefix)
lines.append(prefix + " ".join(curr))
curr = []
if start:
length += len(start)
curr.append(" "*len(start))
continue
else:
linebreaks = 0
if length + len(w) < 79:
# keep adding words
length += len(w) + 1
curr.append(w)
continue
# line is full, do line break
length = len(prefix) + len(w)
lines.append(prefix + " ".join(curr))
curr = []
if start:
length += len(start)
curr.append(" "*len(start))
curr.append(w)
lines.append(prefix + " ".join(curr))
curr = []
if start:
curr.append(" "*len(start))
output.append("\n".join(lines))
return "\n".join(output)
class OnnxType(dict):
_onnxTensorDataType = {
"float": "ONNX__TENSOR_PROTO__DATA_TYPE__FLOAT",
"uint8": "ONNX__TENSOR_PROTO__DATA_TYPE__UINT8",
"int8": "ONNX__TENSOR_PROTO__DATA_TYPE__INT8",
"uint16": "ONNX__TENSOR_PROTO__DATA_TYPE__UINT16",
"int16": "ONNX__TENSOR_PROTO__DATA_TYPE__INT16",
"int32": "ONNX__TENSOR_PROTO__DATA_TYPE__INT32",
"int64": "ONNX__TENSOR_PROTO__DATA_TYPE__INT64",
"string": "ONNX__TENSOR_PROTO__DATA_TYPE__STRING",
"bool": "ONNX__TENSOR_PROTO__DATA_TYPE__BOOL",
"float16": "ONNX__TENSOR_PROTO__DATA_TYPE__FLOAT16",
"double": "ONNX__TENSOR_PROTO__DATA_TYPE__DOUBLE",
"uint32": "ONNX__TENSOR_PROTO__DATA_TYPE__UINT32",
"uint64": "ONNX__TENSOR_PROTO__DATA_TYPE__UINT64",
"complex64": "ONNX__TENSOR_PROTO__DATA_TYPE__COMPLEX64",
"complex128": "ONNX__TENSOR_PROTO__DATA_TYPE__COMPLEX128",
"bfloat16": "ONNX__TENSOR_PROTO__DATA_TYPE__BFLOAT16",
}
class _Scanner:
_tokens = {
re.compile(r"tensor") : "tensor" ,
re.compile(r"map") : "map" ,
re.compile(r"seq") : "seq" ,
re.compile(r"\(") : "(" ,
re.compile(r"\)") : ")" ,
re.compile(r"float") : "float" ,
re.compile(r"uint8") : "uint8" ,
re.compile(r"int8") : "int8" ,
re.compile(r"uint16") : "uint16" ,
re.compile(r"int16") : "int16" ,
re.compile(r"int32") : "int32" ,
re.compile(r"int64") : "int64" ,
re.compile(r"string") : "string" ,
re.compile(r"bool") : "bool" ,
re.compile(r"float16") : "float16" ,
re.compile(r"double") : "double" ,
re.compile(r"uint32") : "uint32" ,
re.compile(r"uint64") : "uint64" ,
re.compile(r"complex64") : "complex64" ,
re.compile(r"complex128") : "complex128",
re.compile(r"bfloat16") : "bfloat16" ,
re.compile(r",") : "," ,
re.compile(r"\s+") : None ,
}
def __init__(self, string):
self.string = string
self.tokens = self.tokenize(string)
def tokenize(self, string):
pos = 0
tokens = []
while string[pos:]:
allMatches = map(lambda x: (x[0].match(string[pos:]), x[1]), self._tokens.items())
validMatches = filter(lambda x: x[0], allMatches)
try:
longestMatch = max( validMatches, key=lambda x: x[0].end())
except:
raise SyntaxError(f"no token matches: '{string[pos:]}'")
else:
pos += longestMatch[0].end()
if longestMatch[1]:
tokens.append(longestMatch[1])
return tokens
def consume(self, expected_token = None):
if not expected_token:
return self.pop()
if not self.peek(expected_token):
raise SyntaxError(
f"expected '{expected_token}', but got '{self.peek()}'")
return self.pop()
def peek(self, expected_token=None):
token = self.tokens[0]
if expected_token:
return token == expected_token
else:
return token
def pop(self):
return self.tokens.pop(0)
def onToken(self, token2function, consume=False):
for token, function in token2function.items():
if self.peek(token):
if consume:
self.pop()
return function()
tokens = ", ".join([f"'{t}'" for t in token2function.keys()])
raise SyntaxError(f"expected one of {tokens}, but got '{self.peek()}'")
def __repr__(self):
return f"OnnxType._Scanner({self.string.__repr__()})"
class _Parser:
_terminals = {
"float": lambda: "float",
"uint8": lambda: "uint8",
"int8": lambda: "int8",
"uint16": lambda: "uint16",
"int16": lambda: "int16",
"int32": lambda: "int32",
"int64": lambda: "int64",
"string": lambda: "string",
"bool": lambda: "bool",
"float16": lambda: "float16",
"double": lambda: "double",
"uint32": lambda: "uint32",
"uint64": lambda: "uint64",
"complex64": lambda: "complex64",
"complex128": lambda: "complex128",
"bfloat16": lambda: "bfloat16",
}
def __init__(self, scanner):
self.scanner = scanner
def __repr__(self):
return f"OnnxType._Parser({self.scanner.__repr__()})"
def _rule_tensor(self):
self.scanner.consume('(')
result = self.scanner.onToken(self._terminals, consume=True)
self.scanner.consume(')')
return {"tensor": result}
def _rule_map(self):
rules = {
"tensor": self._rule_tensor,
"map": self._rule_map,
"seq": self._rule_seq,
}
rules.update(self._terminals)
self.scanner.consume('(')
key = self.scanner.onToken(self._terminals, consume=True)
self.scanner.consume(',')
value = self.scanner.onToken(rules, consume=True)
self.scanner.consume(')')
return {"map": (key, value)}
def _rule_seq(self):
rules = {
"tensor": self._rule_tensor,
"map": self._rule_map,
"seq": self._rule_seq
}
rules.update(self._terminals)
self.scanner.consume('(')
result = self.scanner.onToken(rules, consume=True)
self.scanner.consume(')')
return {"seq": result}
def parse(self):
rules = {
"tensor": self._rule_tensor,
"map": self._rule_map,
"seq": self._rule_seq,
}
return self.scanner.onToken(rules, consume=True)
def __init__(self, typeStr):
        super().__init__()
self.original = typeStr
scanner = self._Scanner(typeStr)
parser = self._Parser(scanner)
self.update(parser.parse())
def __str__(self):
return self._text_walkParseTree(self)
def __repr__(self):
return f"OnnxType({self.original.__repr__()})"
def _text_walkParseTree(self, node):
if isinstance(node,str):
return node.replace("_","")
elif isinstance(node,dict):
subresults = []
for key,val in node.items():
subresults.append(key + "_" + self._text_walkParseTree(val))
return "__".join(subresults)
elif isinstance(node,tuple):
return "__".join([ self._text_walkParseTree(t) for t in node ])
else:
            raise TypeError(f"unknown parseTree item: '{node}'")
def onnxTensorDataTypes(self):
results = []
self._onnxTensorDataType_walkParseTree(self, results)
return list(filter(None,results))
def _onnxTensorDataType_walkParseTree(self, node, results):
if isinstance(node,str):
results.append(None)
elif isinstance(node,dict):
for key,val in node.items():
if key == "tensor":
results.append(self._onnxTensorDataType[val])
else:
self._onnxTensorDataType_walkParseTree(val,results)
elif isinstance(node,tuple):
for val in node:
self._onnxTensorDataType_walkParseTree(val, results)
else:
            raise TypeError(f"unknown parseTree item: '{node}'")
def __hash__(self):
return self.original.__hash__()
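# Hypothetical usage sketch: OnnxType parses an ONNX type string into a nested
# dict and exposes a C-friendly name plus the TensorProto enum constants it uses.
#
#   t = OnnxType("seq(tensor(float))")
#   str(t)                   # -> "seq_tensor_float"
#   t.onnxTensorDataTypes()  # -> ["ONNX__TENSOR_PROTO__DATA_TYPE__FLOAT"]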
class OnnxTypeList(list):
def __init__(self, typeList):
        super().__init__()
types = []
types.extend(typeList)
types.sort()
self.extend([OnnxType(t) for t in types])
def __str__(self):
return ", ".join([f"{t}" for t in self])
def __repr__(self):
types = ", ".join([t.original.__repr__() for t in self])
return f"OnnxTypeList([{types}])"
class OnnxConstraint():
def __init__(self, constraint, input=False, output=False):
if isinstance(constraint, dict):
self.types = constraint['types']
self.description = constraint['description']
self.name = constraint['name']
self.input = constraint['input']
self.output = constraint['output']
else:
self.types = OnnxTypeList(constraint.allowed_type_strs)
self.description = constraint.description
self.name = constraint.type_param_str
self.input = input
self.output = output
def text(self, prefix=""):
lines = []
lines.append(f"{prefix}Constraint {self.name}:")
lines.append(format_text(prefix + " ", "", [self.description]))
lines.append(format_text(prefix + " ", "Allowed Types:", [str(self.types)] ))
return "\n".join(lines)
def __repr__(self):
return f"OnnxConstraint({self.__dict__.__repr__()})"
class OnnxConstraints(dict):
def __init__(self, schema):
        super().__init__()
constraints = {c.type_param_str for c in schema.type_constraints}
inputs = {i.typeStr for i in schema.inputs if i.typeStr in constraints}
outputs = {o.typeStr for o in schema.outputs if o.typeStr in constraints}
for constraint in schema.type_constraints:
self[constraint.type_param_str] = OnnxConstraint(constraint, input=constraint.type_param_str in inputs, output=constraint.type_param_str in outputs)
def typePermutations(self, filterInput=False, filterOutput=False):
return list(filter(None,(self.typePermutationText(p) for p in self.typePermutationsTuple(filterInput,filterOutput))))
def typePermutationText(self, permutation):
return "__".join([ f"{x[0]}_{x[1]}" for x in permutation ])
def typePermutationsTuple(self, filterInput=False, filterOutput=False):
        # bool(x) ** bool(y) is 0 only for 0 ** 1, i.e. it evaluates "y implies x":
        # keep constraints that are used as an input/output whenever that filter is set
values = filter(lambda x: (x.input ** filterInput) and (x.output ** filterOutput), self.values())
tuples = [list(map(lambda x: (c.name,x), c.types)) for c in values]
return itertools.product(*tuples)
def typePermutationsMap(self, filterInput=False, filterOutput=False):
result = {}
for permutation in self.typePermutationsTuple(filterInput, filterOutput):
tmp = result
constraints = []
for constraint in permutation:
constraints.append(constraint)
tmp = tmp.setdefault(tuple(constraints), {})
return result
def text(self, prefix=""):
paragraphs = [ c.text(prefix) for c in self.values() ]
return f"\n{prefix}\n".join(paragraphs)
def __str__(self):
return self.text()
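# Hypothetical usage sketch (assumes the onnx Python package is available):
#
#   from onnx import defs
#   constraints = OnnxConstraints(defs.get_schema("Relu"))
#   constraints.typePermutations(filterInput=True)
#   # -> one "<constraint>_<type>" string per allowed combination of the
#   #    constraints that actually appear on the operator's inputs,
#   #    e.g. "T_tensor_float".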
class OnnxAttribute():
_onnxAttributeDataType = {
"UNDEFINED" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__UNDEFINED",
"FLOAT" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__FLOAT",
"INT" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__INT",
"STRING" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__STRING",
"TENSOR" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__TENSOR",
"GRAPH" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__GRAPH",
"SPARSE_TENSOR" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__SPARSE_TENSOR",
"FLOATS" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__FLOATS",
"INTS" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__INTS",
"STRINGS" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__STRINGS",
"TENSORS" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__TENSORS",
"GRAPHS" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__GRAPHS",
"SPARSE_TENSORS" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__SPARSE_TENSORS",
}
_onnxAttributeDataTypeCDecl = {
"FLOAT" : [("f","float","{name}")],
"INT" : [("i","int64_t","{name}")],
"STRING" : [("s","char*","{name}")],
"TENSOR" : [("t","Onnx__TensorProto*","{name}")],
"GRAPH" : [("g","Onnx__GraphProto*","{name}")],
"SPARSE_TENSOR" : [("sparse_tensor","Onnx__SparseTensorProto*","{name}")],
"FLOATS" : [("n_floats","size_t","n_{name}"),("floats","float*","{name}")],
"INTS" : [("n_ints","size_t","n_{name}"),("ints","int64_t*","{name}")],
"STRINGS" : [("n_strings","size_t","n_{name}"),("strings","char**","{name}")],
"TENSORS" : [("n_tensors","size_t","n_{name}"),("tensors","Onnx__TensorProto**","{name}")],
"GRAPHS" : [("n_graphs","size_t","n_{name}"),("graphs","Onnx__GraphProto**","{name}")],
"SPARSE_TENSORS" : [("n_sparse_tensors","size_t","n_{name}"),("sparse_tensors","Onnx__SparseTensorProto**","{name}")],
}
def __init__(self, name, attribute):
self.name = name
if isinstance(attribute, dict):
self.optional = attribute['optional']
self.type = attribute['type']
self.description = attribute['description']
else:
self.optional = not attribute.required
self.type = attribute.type.name
self.description = attribute.description
def text(self, prefix=""):
lines = []
lines.append(f"{prefix}Attribute {self.type} {self.name} {'(optional)'*self.optional}:")
lines.append(format_text(prefix + " ", None, [self.description]))
return "\n".join(lines)
def onnxAttributeDataType(self):
return self._onnxAttributeDataType[self.type]
    def onnxAttributeDataTypeCDecl(self):
        result = []
        for decls in self._onnxAttributeDataTypeCDecl[self.type]:
            # materialise each declaration as a concrete tuple rather than a generator
            result.append(tuple(s.format(name=self.name) for s in decls))
        return result
def __repr__(self):
attribute = self.__dict__.copy()
del attribute['name']
return f"OnnxAttribute({self.name.__repr__()}, {attribute.__repr__()})"
def __str__(self):
return self.text()
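# Hypothetical usage sketch (the attribute name and dict below are made up):
#
#   attr = OnnxAttribute("kernel_shape",
#                        {"optional": True, "type": "INTS", "description": "..."})
#   attr.onnxAttributeDataTypeCDecl()
#   # -> [("n_ints", "size_t", "n_kernel_shape"),
#   #     ("ints", "int64_t*", "kernel_shape")]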
class OnnxAttributeList(list):
def __init__(self, schema):
        super().__init__()
for name,attribute in schema.attributes.items():
self.append(OnnxAttribute(name, attribute))
def text(self, prefix=""):
paragraphs = [ a.text(prefix) for a in self ]
return f"\n{prefix}\n".join(paragraphs)
def __str__(self):
return self.text()
class OnnxInput():
def __init__(self, input):
if isinstance(input, dict):
self.name = input['name']
self.description = input['description']
self.isHomogeneous = input['isHomogeneous']
self.optional = input['optional']
self.variadic = input['variadic']
self.constraint = input['constraint']
self.types = input['types']
else:
self.name = input.name
self.description = input.description.strip()
self.isHomogeneous = input.isHomogeneous
self.optional = (input.option.name == "Optional")
self.variadic = (input.option.name == "Variadic")
self.constraint = input.typeStr
self.types = OnnxTypeList(input.types)
def text(self, prefix=""):
lines = []
lines.append(f"{prefix}Input {self.constraint} {self.name}:")
lines.append(format_text(prefix + " ", "", [self.description]))
lines.append(format_text(prefix + " ", "Allowed Types:", [str(self.types)] ))
return "\n".join(lines)
def __repr__(self):
return f"OnnxInput({self.__dict__.__repr__()})"
def __str__(self):
return self.text()
class OnnxInputList(list):
def __init__(self, schema):
        super().__init__()
self.extend([ OnnxInput(i) for i in schema.inputs])
def text(self, prefix=""):
paragraphs = [ i.text(prefix) for i in self ]
        return f"\n{prefix}\n".join(paragraphs)
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
This file is part of the Smart Developer Hub Project:
http://www.smartdeveloperhub.org
Center for Open Middleware
http://www.centeropenmiddleware.com/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2015 Center for Open Middleware.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
from sdh.metrics.org import app, st as store
from sdh.metrics.server import ORG, SCM, CI, APIError
import calendar
from datetime import datetime
__author__ = '<NAME>'
def get_average_list(l):
return reduce(lambda x, y: x + y, l) / len(l)
def get_correct_kwargs(kwargs):
args = {
'begin': 0 if kwargs.get('begin') is None else kwargs.get('begin'),
'end': calendar.timegm(datetime.now().timetuple())
if kwargs.get('end') is None else kwargs.get('end'),
'max': 0 if kwargs.get('max') is None else kwargs.get('max')
}
if args['max'] == 0:
args['step'] = 86400
else:
args['step'] = (args.get('end') - args.get('begin')) / args.get('max')
return args
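# Hypothetical illustration: with no explicit window the helper spans from 0 to
# "now" in daily (86400 s) steps; when 'max' is given the step is derived from
# the window instead, e.g.
#
#   get_correct_kwargs({'begin': 1000, 'end': 2000, 'max': 10})
#   # -> {'begin': 1000, 'end': 2000, 'max': 10, 'step': 100}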
def detect_overlap_date(a_begin, a_end, b_begin, b_end):
return (
        (int(a_begin) <= int(b_begin)) and (int(a_end) >= int(b_end))    # a contains b
    ) or (
        (int(a_begin) >= int(b_begin)) and (int(a_end) <= int(b_end))    # b contains a
    ) or (
        (int(a_begin) <= int(b_begin)) and (int(b_begin) <= int(a_end))  # b starts inside a
    ) or (
        (int(a_begin) <= int(b_end)) and (int(b_end) <= int(a_end))      # b ends inside a
    )
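# Hypothetical illustration: two closed intervals overlap when one contains the
# other or when either endpoint of b falls inside a, e.g.
#
#   detect_overlap_date(0, 10, 5, 15)   # True  (partial overlap)
#   detect_overlap_date(0, 10, 20, 30)  # False (disjoint)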
def detect_project_repositories_overlap(uri, args):
temp_frame = store.get_project_temporal_frame(uri)
return detect_overlap_date(
args.get('begin'), args.get('end'),
temp_frame.get('first_commit'), temp_frame.get('last_commit')
)
def get_external_position_metric(uid, endpoint, position, aggregate, args, flag):
try:
pr = get_position_products(uid, args, position, flag)
pr_res = []
if args['begin'] == 0:
args['begin'] = None
tmp_arg = args
if flag:
if aggregate == 'sum':
tmp_frame = store.get_specific_products_temporal_frame(pr)
tmp_arg['begin'] = tmp_frame.get('first_commit')
tmp_arg['end'] = tmp_frame.get('last_commit')
pr_res = map(
lambda x: app.request_metric(endpoint, prid=x.get('id'), **tmp_arg), pr
)
else:
for k in pr:
pr_temp_frame = store.get_product_temporal_frame(k.get('uri'))
tmp_arg['begin'] = pr_temp_frame.get('first_commit')
tmp_arg['end'] = pr_temp_frame.get('last_commit')
pr_res.append(app.request_metric(endpoint, prid=k.get('id'), **tmp_arg))
else:
pr_res = map(lambda k: app.request_metric(endpoint, prid=k.get('id'), **tmp_arg), pr)
if len(pr_res):
context = pr_res[0][0]
else:
context = args
v = zip(*map(lambda x: x[1], pr_res))
if aggregate == 'avg':
res = [get_average_list(x) for x in v]
else:
res = [sum(x) for x in v]
return context, res
except (EnvironmentError, AttributeError) as e:
raise APIError(e.message)
return args, []
def get_position_repositories(uid, args, position, flag_total, only_uris):
positions_id = store.get_all_members_id(position)
if uid not in positions_id:
return []
else:
projects = store.get_all_member_projects(positions_id[uid])
res_prj = set()
res = []
for x in projects:
repos = store.get_all_project_repositories(x)
if not flag_total:
for k in repos:
rep_info = store.db.hgetall(k)
if detect_overlap_date(
args.get('begin'), args.get('end'),
rep_info.get('first_commit'), rep_info.get('last_commit')
):
res_prj.add(k)
if only_uris:
return res_prj
else:
[res.append({
'id': store.db.hgetall(x).get('id'),
'uri': x
}) for x in res_prj]
return res
def get_position_projects(uid, args, position, flag_total, only_uris):
positions_id = store.get_all_members_id(position)
if uid not in positions_id:
return []
else:
projects = store.get_all_member_projects(positions_id[uid])
if not flag_total:
res_prj = set()
for x in projects:
if detect_project_repositories_overlap(x, args):
res_prj.add(x)
projects = list(res_prj)
res = []
if only_uris:
return projects
else:
[res.append({
'id': store.db.get(x),
'uri': x
}) for x in projects]
return res
def get_position_products(uid, args, position, flag_total):
pr = get_position_projects(uid, args, position, flag_total, False)
pro = set()
res = []
for x in pr:
pro = pro.union(set(store.get_all_project_products(x.get('uri'))))
[res.append({
'id': store.db.get(x),
'uri': x
}) for x in pro]
return res
def get_position_position(uid, args, fil, position, flag_total):
pr = set(get_position_projects(uid, args, fil, flag_total, True))
members = store.get_all_members(position)
members_dir = set()
res = []
for x in members:
if len(pr.intersection(set(store.get_all_member_projects(x)))) > 0:
members_dir.add(x)
[res.append({
'id': store.db.hgetall(x).get("id"),
'uri': x
}) for x in members_dir]
return res
def get_director_position(uid, args, position, flag_total):
return get_position_position(uid, args, 'directors', position, flag_total)
def get_pmanager_position(uid, args, position, flag_total):
return get_position_position(uid, args, 'productmanagers', position, flag_total)
def get_project_roles(pjid, args, role, flag_total):
projects_id = store.get_all_projects_id()
if pjid not in projects_id:
return []
else:
if not flag_total and not detect_project_repositories_overlap(projects_id[pjid], args):
return []
if role == "softwaredeveloper":
tmp_arg = args
if not flag_total:
pr_temp_frame = store.get_project_temporal_frame(projects_id[pjid])
tmp_arg['begin'] = pr_temp_frame.get('first_commit')
tmp_arg['end'] = pr_temp_frame.get('last_commit')
co, res = app.request_view('project-developers', pjid=pjid, **tmp_arg)
return res
else:
res = set()
users_id = store.get_all_members(role)
for x in users_id:
pr_res = store.get_all_member_projects(x)
if projects_id[pjid] in pr_res:
res.add(x)
res_set = []
[res_set.append({
'id': store.db.hgetall(x).get("id"),
'uri': x
}) for x in res]
return res_set
def get_director_roles(uid, args, role, flag_total):
return get_position_position(uid, args, 'directors', role, flag_total)
def get_pmanager_roles(uid, args, role, flag_total):
return get_position_position(uid, args, 'productmanagers', role, flag_total)
def helper_get_director_pmanagers(uid, **kwargs):
flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
args = get_correct_kwargs(kwargs)
return args, get_director_position(uid, args, 'productmanagers', flag_total)
def helper_get_director_architects(uid, **kwargs):
flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
args = get_correct_kwargs(kwargs)
return args, get_director_position(uid, args, 'architects', flag_total)
def helper_get_pmanager_architects(uid, **kwargs):
flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
args = get_correct_kwargs(kwargs)
return args, get_pmanager_position(uid, args, 'architects', flag_total)
def helper_get_position_developers(uid, position, **kwargs):
flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
args = get_correct_kwargs(kwargs)
try:
res = set()
pr = get_position_products(uid, args, position, flag_total)
devs = map(lambda k: app.request_view('product-developers', prid=k.get('id'), **kwargs), pr)
[[res.add(j.get('uri')) for j in x] for x in map(lambda x: x[1], devs)]
res_devs = []
[res_devs.append({
"id": store.db.hgetall(x).get("id"),
"uri": x
}) for x in res]
return args, res_devs
except (EnvironmentError, AttributeError) as e:
raise APIError(e.message)
return args, []
@app.view('/product-projects', target=ORG.Project, parameters=[ORG.Product],
id='product-projects', title='Projects of Product')
def get_product_projects(prid, **kwargs):
flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
args = get_correct_kwargs(kwargs)
products_id = store.get_all_products_id()
if prid not in products_id:
return args, []
else:
projects = store.get_all_product_projects(products_id[prid])
if not flag_total:
res_prj = set()
for x in projects:
if detect_project_repositories_overlap(x, args):
res_prj.add(x)
projects = list(res_prj)
res = []
[res.append({
'id': store.db.get(x),
'uri': x
}) for x in projects]
return args, res
@app.view('/project-repositories', target=SCM.Repository, parameters=[ORG.Project],
id='project-repositories', title='Repositories of Project')
def get_project_repositories(pjid, **kwargs):
flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
args = get_correct_kwargs(kwargs)
projects_id = store.get_all_projects_id()
if pjid not in projects_id:
return args, []
else:
repos = store.get_all_project_repositories(projects_id[pjid])
if not flag_total:
res_prj = set()
for k in repos:
rep_info = store.db.hgetall(k)
if detect_overlap_date(
args.get('begin'), args.get('end'),
rep_info.get('first_commit'), rep_info.get('last_commit')
):
res_prj.add(k)
repos = res_prj
res = []
[res.append({
'id': store.db.hgetall(x).get('id'),
'uri': x
}) for x in repos]
return args, res
@app.metric('/total-project-stakeholders', parameters=[ORG.Project],
id='project-stakeholders', title='Stakeholders of Project')
def get_total_project_stakeholders(pjid, **kwargs):
flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
args = get_correct_kwargs(kwargs)
return args, [len(get_project_roles(pjid, args, 'stakeholder', flag_total))]
@app.view('/project-stakeholders', target=ORG.Person, parameters=[ORG.Project],
id='project-stakeholders', title='Stakeholders of Project')
def get_project_stakeholders(pjid, **kwargs):
flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
args = get_correct_kwargs(kwargs)
return args, get_project_roles(pjid, args, 'stakeholder', flag_total)
@app.metric('/total-project-swarchitects', parameters=[ORG.Project],
id='project-swarchitects', title='Software Architects of Project')
def get_total_project_swarchitects(pjid, **kwargs):
flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
args = get_correct_kwargs(kwargs)
return args, [len(get_project_roles(pjid, args, 'softwarearchitect', flag_total))]
@app.view('/project-swarchitects', target=ORG.Person, parameters=[ORG.Project],
id='project-swarchitects', title='Software Architects of Project')
def get_project_swarchitects(pjid, **kwargs):
flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
args = get_correct_kwargs(kwargs)
return args, get_project_roles(pjid, args, 'softwarearchitect', flag_total)
@app.metric('/total-project-pjmanagers', parameters=[ORG.Project],
id='project-pjmanagers', title='Project Managers of Project')
def get_total_project_pjmanagers(pjid, **kwargs):
flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
args = get_correct_kwargs(kwargs)
return args, [len(get_project_roles(pjid, args, 'projectmanager', flag_total))]
@app.view('/project-pjmanagers', target=ORG.Person, parameters=[ORG.Project],
id='project-pjmanagers', title='Project Managers of Project')
def get_project_pjmanagers(pjid, **kwargs):
flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
args = get_correct_kwargs(kwargs)
return args, get_project_roles(pjid, args, 'projectmanager', flag_total)
@app.metric('/total-project-swdevelopers', parameters=[ORG.Project],
id='project-swdevelopers', title='Software Developers of Project')
def get_total_project_swdevelopers(pjid, **kwargs):
flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
args = get_correct_kwargs(kwargs)
return args, [len(get_project_roles(pjid, args, 'softwaredeveloper', flag_total))]
@app.view('/project-swdevelopers', target=ORG.Person, parameters=[ORG.Project],
id='project-swdevelopers', title='Software Developers of Project')
def get_project_swdevelopers(pjid, **kwargs):
flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
args = get_correct_kwargs(kwargs)
return args, get_project_roles(pjid, args, 'softwaredeveloper', flag_total)
@app.metric('/total-director-repositories', parameters=[ORG.Person],
id='director-repositories', title='Repositories of Director')
def get_total_director_repositories(uid, **kwargs):
flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
args = get_correct_kwargs(kwargs)
return args, [len(get_position_repositories(uid, args, 'directors', flag_total, False))]
@app.view('/director-repositories', target=SCM.Repository, parameters=[ORG.Person],
id='director-repositories', title='Repositories of Director')
def get_director_repositories(uid, **kwargs):
flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
args = get_correct_kwargs(kwargs)
return args, get_position_repositories(uid, args, 'directors', flag_total, False)
@app.metric('/total-director-projects', parameters=[ORG.Person],
id='director-projects', title='Projects of Director')
def get_total_director_projects(uid, **kwargs):
flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
args = get_correct_kwargs(kwargs)
return args, [len(get_position_projects(uid, args, 'directors', flag_total, False))]
@app.view('/director-projects', target=ORG.Project, parameters=[ORG.Person],
id='director-projects', title='Projects of Director')
def get_director_projects(uid, **kwargs):
flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
args = get_correct_kwargs(kwargs)
    return args, get_position_projects(uid, args, 'directors', flag_total, False)
'''
Module for log displays and visualizations
Functions
four_plot(logs, top, base, depth=False)
four_plots(logs, x1, x2, x3, x4, top, base, depth=False)
three_plots(logs, x1, x2, x3, top, base, depth=False)
two_plots(logs, x1, x2, top, base, depth=False)
two_plot(logs, x1, x2, top, base, depth=False, scale=False)
one_plot(logs, x1, top, base, depth=False)
make_facies_log_plot(logs, x1, x2, x3, x4, x5, Depth=False)
compare_plots(logs, x1, x2, x3, x4, x5, Depth=False)
'''
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from .utils import process
import numpy as np
import warnings
warnings.filterwarnings('ignore')
def four_plot(logs, top, base, depth=False):
'''
Function to automatically plot well logs
Returns a plot of four logs(Gamma ray, Porosity, Density and Resistivity)
args::
logs: Dataframe object of well logs
depth: Set to false or leave as default to use dataframe index
Set to column title if column depth should be used
'''
logs = process(logs)
if depth == False:
logs['DEPTH'] = logs.index
logs = logs.reset_index(drop=True)
else:
depth = np.array(logs[depth])
logs = logs.reset_index(drop=True)
logs['DEPTH'] = depth
logs = logs.loc[(logs.DEPTH >= float(top)) & (logs.DEPTH <= float(base))]
try:
logs = logs.sort_values(by='DEPTH')
f, ax = plt.subplots(nrows=1, ncols=4, figsize=(12,10))
for i in range(len(ax)):
ax[i].set_ylim(top, base)
ax[i].invert_yaxis()
ax[i].grid()
ax[i].locator_params(axis='x', nbins=4)
        # Guard against NPHI columns whose maximum is NaN or infinite
        nphi_max = logs.NPHI.max()
        if not np.isfinite(nphi_max):
            nphi_max = 0.9
ax[0].plot(logs.GR, logs.DEPTH, color='black')
ax[1].plot(logs.NPHI, logs.DEPTH, color='c')
ax[2].plot(logs.RHOB, logs.DEPTH, color='blue')
ax[3].plot(logs.RT, logs.DEPTH, color='red')
ax[0].set_xlabel("GR (API)")
        ax[0].set_xlim(logs.GR.min(), logs.GR.max())
ax[0].set_ylabel("Depth(ft)")
ax[0].set_title(f"Plot of Depth Against GR")
ax[1].set_xlabel("NPHI (v/v)")
        ax[1].set_xlim(0, nphi_max)
ax[1].set_title(f"Plot of Depth Against Neutron Porosity")
ax[2].set_xlabel("RHOB (g/cm3)")
ax[2].set_xlim(logs.RHOB.min(),logs.RHOB.max())
ax[2].set_title(f"Plot of Depth Against Density")
ax[3].set_xlabel("RT (ohm.m)")
ax[3].set_xscale("log")
ax[3].set_xlim(logs.RT.min(), logs.RT.max())
ax[3].set_title(f"Plot of Depth Against Resistivity")
except NameError as err:
print(f'Depth column could not be located. {err}')
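# Hypothetical usage sketch (file name and column names are illustrative): the
# DataFrame is expected to carry GR, NPHI, RHOB and RT columns, with depth either
# in the index or in a named column.
#
#   import pandas as pd
#   logs = pd.read_csv("well.csv", index_col="DEPT")  # hypothetical file
#   four_plot(logs, top=5000, base=7000)              # depth taken from the index
#   four_plot(logs.reset_index(), 5000, 7000, depth="DEPT")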
def four_plots(logs, x1, x2, x3, x4, top, base, depth=False):
'''
Function to automatically plot well logs
Returns
--------
plot of four logs(x1, x2, x3, x4)
Arguments
----------
logs: Dataframe object of well logs
depth: Set to false or leave as default to use dataframe index
Set to column title if column depth should be used
'''
logs = process(logs)
#Setting the value of the y axis. Using index or property specified
if depth == False:
logs['DEPTH'] = logs.index
logs = logs.reset_index(drop=True)
else:
depth = np.array(logs[depth])
logs = logs.reset_index(drop=True)
logs['DEPTH'] = depth
logs = logs.loc[(logs.DEPTH >= float(top)) & (logs.DEPTH <= float(base))]
try:
logs = logs.sort_values(by='DEPTH')
#top = logs.DEPTH.min()
#bot = logs.DEPTH.max()
f, ax = plt.subplots(nrows=1, ncols=4, figsize=(10,10))
for i in range(len(ax)):
ax[i].set_ylim(top, base)
ax[i].invert_yaxis()
ax[i].grid()
ax[i].locator_params(axis='x', nbins=4)
ax[0].plot(logs[x1], logs.DEPTH, color='black')
ax[1].plot(logs[x2], logs.DEPTH, color='c')
ax[2].plot(logs[x3], logs.DEPTH, color='blue')
ax[3].plot(logs[x4], logs.DEPTH, color='red')
ax[0].set_xlabel(f"{x1} ")
if x1 == 'RT':
ax[0].set_xscale("log")
ax[0].set_xlim(logs[x1].min(), logs[x1].max())
ax[0].set_ylabel("Depth(ft)")
ax[0].set_title(f"Plot of Depth Against {x1}")
ax[1].set_xlabel(f"{x2} ")
if x2 == 'RT':
ax[1].set_xscale("log")
ax[1].set_xlim(logs[x2].min(),logs[x2].max())
ax[1].set_title(f"Plot of Depth Against {x2}")
ax[2].set_xlabel(f"{x3}")
if x3 == 'RT':
ax[2].set_xscale("log")
ax[2].set_xlim(logs[x3].min(),logs[x3].max())
ax[2].set_title(f"Plot of Depth Against {x3}")
        ax[3].set_xlabel(f"{x4}")
        if x4 == 'RT':
            ax[3].set_xscale("log")
        ax[3].set_xlim(logs[x4].min(), logs[x4].max())
        ax[3].set_title(f"Plot of Depth Against {x4}")
except NameError as err:
print(f'Depth column could not be located. {err}')
def three_plots(logs, x1, x2, x3, top, base, depth=False):
'''
Function to automatically plot well logs
Returns
-------
plot of three logs(x1, x2, x3)
Arguments
---------
logs: Dataframe object of well logs
depth: Set to false or leave as default to use dataframe index
Set to column title if column depth should be used
'''
logs = process(logs)
#Setting the value of the y axis. Using index or property specified
if depth == False:
logs['DEPTH'] = logs.index
logs = logs.reset_index(drop=True)
else:
depth = np.array(logs[depth])
logs = logs.reset_index(drop=True)
logs['DEPTH'] = depth
logs = logs.loc[(logs.DEPTH >= float(top)) & (logs.DEPTH <= float(base))]
try:
logs = logs.sort_values(by='DEPTH')
#top = logs.DEPTH.min()
#bot = logs.DEPTH.max()
f, ax = plt.subplots(nrows=1, ncols=3, figsize=(10,10))
for i in range(len(ax)):
ax[i].set_ylim(top, base)
ax[i].invert_yaxis()
ax[i].grid()
ax[i].locator_params(axis='x', nbins=4)
ax[0].plot(logs[x1], logs.DEPTH, color='black')
ax[1].plot(logs[x2], logs.DEPTH, color='c')
ax[2].plot(logs[x3], logs.DEPTH, color='blue')
ax[0].set_xlabel(f"{x1} ")
if x1 == 'RT':
ax[0].set_xscale("log")
ax[0].set_xlim(logs[x1].min(), logs[x1].max())
ax[0].set_ylabel("Depth(ft)")
ax[0].set_title(f"Plot of Depth Against {x1}")
ax[1].set_xlabel(f"{x2} ")
if x2 == 'RT':
ax[1].set_xscale("log")
ax[1].set_xlim(logs[x2].min(),logs[x2].max())
ax[1].set_title(f"Plot of Depth Against {x2}")
ax[2].set_xlabel(f"{x3}")
if x3 == 'RT':
ax[2].set_xscale("log")
ax[2].set_xlim(logs[x3].min(),logs[x3].max())
ax[2].set_title(f"Plot of Depth Against {x3}")
except NameError as err:
print(f'Depth column could not be located. {err}')
def two_plots(logs, x1, x2, top, base, depth=False):
'''
Function to automatically plot well logs
Returns a plot of two logs(x1, x2)
args::
logs: Dataframe object of well logs
depth: Set to false or leave as default to use dataframe index
Set to column title if column depth should be used
'''
logs = process(logs)
#Setting the value of the y axis. Using index or property specified
if depth == False:
logs['DEPTH'] = logs.index
logs = logs.reset_index(drop=True)
else:
depth = np.array(logs[depth])
logs = logs.reset_index(drop=True)
logs['DEPTH'] = depth
#logs = logs.loc[(logs.DEPTH >= float(top)) & (logs.DEPTH <= float(base))]
try:
logs = logs.sort_values(by='DEPTH')
f, ax = plt.subplots(nrows=1, ncols=2, figsize=(8,10))
for i in range(len(ax)):
ax[i].set_ylim(top, base)
ax[i].invert_yaxis()
ax[i].grid()
ax[i].locator_params(axis='x', nbins=4)
ax[0].plot(logs[x1], logs.DEPTH, color='black')
ax[1].plot(logs[x2], logs.DEPTH, color='c')
ax[0].set_xlabel(f"{x1} ")
if x1 == 'RT':
ax[0].set_xscale("log")
ax[0].set_xlim(logs[x1].min(), logs[x1].max())
ax[0].set_ylabel("Depth(ft)")
ax[0].set_title(f"Plot of Depth Against {x1}")
ax[1].set_xlabel(f"{x2} ")
if x2 == 'RT':
ax[1].set_xscale("log")
ax[1].set_xlim(logs[x2].min(),logs[x2].max())
ax[1].set_title(f"Plot of Depth Against {x2}")
except NameError as err:
print(f'Depth column could not be located. {err}')
def two_plot(logs, x1, x2, top, base, depth=False, scale=False):
    '''
    Function to automatically plot well logs
    Returns a plot of two logs(x1, x2)
    args::
    logs: Dataframe object of well logs
    depth: Set to false or leave as default to use dataframe index
    Set to column title if column depth should be used
    scale: Set to True to share one x-axis range between both logs,
    leave as False (default) to scale each log independently
    '''
logs = process(logs)
#Setting the value of the y axis. Using index or property specified
if depth == False:
logs['DEPTH'] = logs.index
logs = logs.reset_index(drop=True)
else:
depth = np.array(logs[depth])
logs = logs.reset_index(drop=True)
logs['DEPTH'] = depth
logs = logs.loc[(logs.DEPTH >= float(top)) & (logs.DEPTH <= float(base))]
if scale == True:
try:
logs = logs.sort_values(by='DEPTH')
f, ax = plt.subplots(nrows=1, ncols=2, figsize=(8,10))
for i in range(len(ax)):
ax[i].set_ylim(top, base)
ax[i].invert_yaxis()
ax[i].grid()
ax[i].locator_params(axis='x', nbins=4)
            # Common x-axis range across both tracks: smallest minimum to largest maximum
            x_min = min(logs[x1].min(), logs[x2].min())
            x_max = max(logs[x1].max(), logs[x2].max())
ax[0].plot(logs[x1], logs.DEPTH, color='black')
ax[1].plot(logs[x2], logs.DEPTH, color='c')
ax[0].set_xlabel(f"{x1} ")
if x1 == 'RT':
ax[0].set_xscale("log")
ax[0].set_xlim(x_min, x_max)
ax[0].set_ylabel("Depth(ft)")
ax[0].set_title(f"Plot of Depth Against {x1}")
ax[1].set_xlabel(f"{x2} ")
if x2 == 'RT':
ax[1].set_xscale("log")
ax[1].set_xlim(x_min, x_max)
ax[1].set_title(f"Plot of Depth Against {x2}")
except NameError as err:
print(f'Depth column could not be located. {err}')
elif scale == False:
try:
logs = logs.sort_values(by='DEPTH')
f, ax = plt.subplots(nrows=1, ncols=2, figsize=(8,10))
for i in range(len(ax)):
ax[i].set_ylim(top, base)
ax[i].invert_yaxis()
ax[i].grid()
ax[i].locator_params(axis='x', nbins=4)
ax[0].plot(logs[x1], logs.DEPTH, color='black')
ax[1].plot(logs[x2], logs.DEPTH, color='c')
ax[0].set_xlabel(f"{x1} ")
if x1 == 'RT':
ax[0].set_xscale("log")
ax[0].set_xlim(logs[x1].min(), logs[x1].max())
ax[0].set_ylabel("Depth(ft)")
ax[0].set_title(f"Plot of Depth Against {x1}")
ax[1].set_xlabel(f"{x2} ")
if x2 == 'RT':
ax[1].set_xscale("log")
ax[1].set_xlim(logs[x2].min(),logs[x2].max())
ax[1].set_title(f"Plot of Depth Against {x2}")
except NameError as err:
print(f'Depth column could not be located. {err}')
else:
        print('The scale argument takes True or False')
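# Hypothetical usage sketch (column names are whatever the DataFrame provides):
# with scale=True both tracks share one x-range, spanning the smallest minimum to
# the largest maximum of the two curves; with scale=False each track keeps its
# own range.
#
#   two_plot(logs, 'GR', 'NPHI', 5000, 7000, scale=False)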
def one_plot(logs, x1, top, base, depth=False):
'''
Function to automatically plot a single well log
args::
logs: Dataframe object of well logs
depth: Set to false or leave as default to use dataframe index
Set to column title if column depth should be used
'''
logs = process(logs)
#Setting the value of the y axis. Using index or property specified
if depth == False:
logs['DEPTH'] = logs.index
logs = logs.reset_index(drop=True)
else:
depth = np.array(logs[depth])
logs = logs.reset_index(drop=True)
logs['DEPTH'] = depth
logs = logs.loc[(logs.DEPTH >= float(top)) & (logs.DEPTH <= float(base))]
try:
logs = logs.sort_values(by='DEPTH')
f, ax = plt.subplots(nrows=1, ncols=1, figsize=(6,15))
        ax.plot(logs[x1], logs.DEPTH, color='black')
        ax.set_ylim(top, base)
ax.invert_yaxis()
ax.grid()
ax.locator_params(axis='x', nbins=4)
ax.set_xlabel(f"{x1}")
if x1 == 'RT':
ax.set_xscale("log")
ax.set_xlim(logs[x1].min(), logs[x1].max())
ax.set_ylabel("Depth(ft)")
ax.set_title(f"Plot of Depth Against {x1}")
except NameError as err:
print(f'Depth column could not be located. {err}')
logs[x1] = np.log10(logs[x1])
'''
The functions below are adapted and modified from the SEG 2015 tutorials on SEG's
github page "The Leading |