# Repository: umairwaheed/scrapy-do
#-------------------------------------------------------------------------------
# Author: <NAME> <<EMAIL>>
# Date: 26.11.2017
#
# Licensed under the 3-Clause BSD License, see the LICENSE file for details.
#-------------------------------------------------------------------------------
"""
A collection of utility classes and functions used throughout the project.
"""
import importlib
import OpenSSL
import time
import pem
import os
import re
from twisted.internet.protocol import ProcessProtocol
from twisted.internet.defer import Deferred
from twisted.internet.ssl import CertificateOptions
from twisted.internet import reactor, task
from distutils.spawn import find_executable
from datetime import datetime
from schedule import Job as SchJob
from schedule import IntervalError
#-------------------------------------------------------------------------------
def exc_repr(e):
"""
    Return a string representation of an exception together with the exception
    name.
"""
return "{}: {}".format(type(e).__name__, str(e))
#-------------------------------------------------------------------------------
def get_object(name):
"""
Retrieve an object from a module given its fully qualified name. For
example: `get_object('scrapy_do.webservice.Status')`.
"""
name = name.split('.')
object_name = name[-1]
module = importlib.import_module('.'.join(name[:-1]))
return getattr(module, object_name)
#-------------------------------------------------------------------------------
class TimeStamper:
"""
    Set the timestamp attribute of the object whenever the associated attribute
    is set.

    :Example:

    >>> class Test:
    ...     attr = TimeStamper('_attr')
    ...
    ...     def __init__(self, attr):
    ...         self._attr = attr
    ...         self.timestamp = datetime.now()
>>> test = Test('foo')
>>> test.attr
'foo'
>>> test.timestamp
datetime.datetime(2017, 12, 2, 23, 0, 56, 671634)
>>> test.attr = 'bar'
>>> test.timestamp
datetime.datetime(2017, 12, 2, 23, 1, 9, 688899)
"""
#---------------------------------------------------------------------------
def __init__(self, attr_name):
self.attr_name = attr_name
#---------------------------------------------------------------------------
def __get__(self, obj, obj_type):
return getattr(obj, self.attr_name)
#---------------------------------------------------------------------------
def __set__(self, obj, value):
obj.timestamp = datetime.now()
return setattr(obj, self.attr_name, value)
#-------------------------------------------------------------------------------
def _build_directive_map(job):
#---------------------------------------------------------------------------
# A list of valid directives
#---------------------------------------------------------------------------
directive_names = ['second', 'seconds', 'minute', 'minutes', 'hour',
'hours', 'day', 'days', 'week', 'weeks', 'monday',
'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday', 'at', 'to']
#---------------------------------------------------------------------------
# Get an appropriate setter reference
#---------------------------------------------------------------------------
def get_attr(obj, attr):
for obj in [obj] + obj.__class__.mro():
if attr in obj.__dict__:
ret = obj.__dict__[attr]
if isinstance(ret, property):
return lambda x: ret.__get__(x, type(x))
return ret
#---------------------------------------------------------------------------
# Build the dictionary of setters
#---------------------------------------------------------------------------
directive_map = {}
for d in directive_names:
directive_map[d] = get_attr(job, d)
return directive_map
#-------------------------------------------------------------------------------
def _parse_args(directive, directives):
#---------------------------------------------------------------------------
# Check the argument to "to"
#---------------------------------------------------------------------------
if directive == 'to':
arg = directives.pop()
try:
arg = int(arg)
except ValueError:
raise ValueError('The "to" directive expects an integer')
return [arg]
#---------------------------------------------------------------------------
# Check the argument to "at"
#---------------------------------------------------------------------------
if directive == 'at':
arg = directives.pop()
arg_split = arg.split(':')
if len(arg_split) != 2:
raise ValueError('The "at" directive expects a string like "12:34"')
try:
int(arg_split[0])
int(arg_split[1])
except ValueError:
raise ValueError('The "at" directive expects a string like "12:34"')
return [arg]
#---------------------------------------------------------------------------
# Nothing else accepts arguments
#---------------------------------------------------------------------------
return []
#-------------------------------------------------------------------------------
def _parse_spec(job, spec):
#---------------------------------------------------------------------------
# Check the directive
#---------------------------------------------------------------------------
directives = spec.lower().split()
if len(directives) < 2:
raise ValueError('Spec too short')
if directives[0] != 'every':
raise ValueError('Spec must start with "every"')
#---------------------------------------------------------------------------
# Set up the interval if necessary
#---------------------------------------------------------------------------
try:
interval = int(directives[1])
job.interval = interval
if len(directives) < 3:
raise ValueError("Spec to short")
directives = directives[2:]
except ValueError:
directives = directives[1:]
#---------------------------------------------------------------------------
# Parse the spec
#---------------------------------------------------------------------------
directive_map = _build_directive_map(job)
directives.reverse()
while directives:
directive = directives.pop()
if directive not in directive_map:
raise ValueError('Unknown directive: ' + directive)
args = _parse_args(directive, directives)
try:
directive_map[directive](job, *args)
except IntervalError as e:
raise ValueError(str(e))
return job
#-------------------------------------------------------------------------------
def schedule_job(scheduler, spec):
"""
Take a `schedule.Scheduler` object and an interval spec and convert it
to a `schedule.Job` registered with the scheduler. The spec can be
any string that can be translated to `schedule calls
<https://schedule.readthedocs.io/en/stable/>`_. For example: string
'every 2 to 3 minutes' corresponds to `schedule.every(2).to(3).minutes`.
:param scheduler: A `schedule.Scheduler`
:param spec: String containing the interval spec
:return: A `schedule.Job` registered with the scheduler
:raises ValueError: If the spec is not a valid sequence of `schedule`
method calls
"""
job = SchJob(1, scheduler)
try:
_parse_spec(job, spec)
except Exception:
scheduler.cancel_job(job)
raise
return job
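# Illustrative usage sketch (assumes an ``import schedule`` at the call site;
# the spec string mirrors the one documented in the docstring above):
#
#     scheduler = schedule.Scheduler()
#     job = schedule_job(scheduler, 'every 2 to 3 minutes')
#     # roughly equivalent to schedule.every(2).to(3).minutes
#     job.do(lambda: print('tick'))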
#-------------------------------------------------------------------------------
def arg_require_all(dict, args):
"""
Check if all of the args are present in the dict.
:raises KeyError: If any argument is missing from the dict.
"""
for arg in args:
if arg not in dict:
raise KeyError('Missing argument "{}"'.format(arg))
#-------------------------------------------------------------------------------
def arg_require_any(dict, args):
"""
Check if any of the args is in the dict.
:raises KeyError: If none of the args is present in the dict.
"""
for arg in args:
if arg in dict:
return
raise KeyError('Neither argument present: "{}"'.format(str(args)))
#-------------------------------------------------------------------------------
def twisted_sleep(time):
"""
Return a deferred that will be triggered after the specified amount of
time passes
"""
return task.deferLater(reactor, time, lambda: None)
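# Illustrative usage sketch; assumes the code runs inside a started twisted
# reactor:
#
#     from twisted.internet.defer import inlineCallbacks
#
#     @inlineCallbacks
#     def wait_and_report():
#         yield twisted_sleep(2.5)   # resumes roughly 2.5 seconds later
#         print('done waiting')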
#-------------------------------------------------------------------------------
class LoggedProcessProtocol(ProcessProtocol):
"""
An implementation of ProcessProtocol that forwards the program output
to logfiles. It creates files `job_name.out` and `job_name.err` and
redirects the standard output and standard error output of the
process to the respective file. If a log file is empty upon program
exit it is deleted. The :data:`finished <LoggedProcessProtocol.finished>`
    deferred is triggered upon process exit and called with its exit code.
:param job_name: Name of the job
:param log_dir: A directory to put the log files in
"""
#---------------------------------------------------------------------------
def __init__(self, job_name, log_dir):
self.finished = Deferred()
self.out_path = os.path.join(log_dir, job_name + '.out')
self.err_path = os.path.join(log_dir, job_name + '.err')
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
self.out_fd = os.open(self.out_path, flags, 0o644)
self.err_fd = os.open(self.err_path, flags, 0o644)
os.set_inheritable(self.out_fd, True)
os.set_inheritable(self.err_fd, True)
#---------------------------------------------------------------------------
def processExited(self, status):
"""
Callback called by `twisted` upon process exit.
"""
out_size = os.fstat(self.out_fd).st_size
err_size = os.fstat(self.err_fd).st_size
os.close(self.out_fd)
os.close(self.err_fd)
if out_size == 0:
os.remove(self.out_path)
if err_size == 0:
os.remove(self.err_path)
self.finished.callback(status.value.exitCode)
#-------------------------------------------------------------------------------
def run_process(cmd, args, job_name, log_dir, env=None, path=None):
"""
Run a process using :class:`LoggedProcessProtocol <LoggedProcessProtocol>`
:param cmd: Command to run
:param args: Argument passed to the command
:param job_name: Name of the job that will be used for the name of the
log files
:param log_dir: Directory where the log files will be stored
:param env: A dictionary with environment variables and their values
:param path: Program's working directory
:return: A tuple of an `IProcessTransport` object as returned
by twisted's `reactor.spawnProcess` and a deferred
called on program exit with the return code of the
process.
"""
cmd = find_executable(cmd)
args = [cmd] + args
pp = LoggedProcessProtocol(job_name, log_dir)
p = reactor.spawnProcess(pp, cmd, args, env=env, path=path,
childFDs={1: pp.out_fd, 2: pp.err_fd})
return p, pp.finished
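# Illustrative usage sketch; the command, spider name, job name, and log
# directory below are hypothetical:
#
#     proc, finished = run_process('scrapy', ['crawl', 'quotes'],
#                                  job_name='quotes-job', log_dir='/tmp/logs')
#     finished.addCallback(lambda exit_code: print('exited with', exit_code))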
#-------------------------------------------------------------------------------
def pprint_relativedelta(delta):
"""
    Return a string representation of a relativedelta object in a form
    similar to "1y 2m 3d 5h 6m 7s". Any component equal to zero is omitted,
    except for the seconds, which are always included.
"""
ret = ''
if delta.years:
ret += '{}y '.format(delta.years)
if delta.months:
ret += '{}m '.format(delta.months)
if delta.days:
ret += '{}d '.format(delta.days)
if delta.hours:
ret += '{}h '.format(delta.hours)
if delta.minutes:
ret += '{}m '.format(delta.minutes)
ret += '{}s'.format(delta.seconds)
return ret
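# Illustrative example of the output format:
#
#     >>> from dateutil.relativedelta import relativedelta
#     >>> pprint_relativedelta(relativedelta(days=3, hours=5, minutes=6, seconds=7))
#     '3d 5h 6m 7s'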
#-------------------------------------------------------------------------------
def load_cert_chain(t, data):
"""
Load X509 objects from all the certificates in the given PEM data.
:param t: format type; only :data:`OpenSSL.crypto.FILETYPE_PEM` is
supported; the parameter is here only to keep the same
function signature as the other similar functions in
pyOpenSSL
:param data: string containing certificate chain data in the PEM
format
:return: a list of X509 objects representing the certificates
"""
if t != OpenSSL.crypto.FILETYPE_PEM:
raise OpenSSL.crypto.Error('Only the PEM format is supported')
certs_pem = pem.parse(data.encode('utf-8'))
certs = []
for cert_pem in certs_pem:
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
str(cert_pem))
certs.append(cert)
return certs
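# Illustrative usage sketch; the file name is hypothetical:
#
#     with open('chain.pem') as f:
#         certs = load_cert_chain(OpenSSL.crypto.FILETYPE_PEM, f.read())
#     for cert in certs:
#         print(cert.get_subject().CN)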
#-------------------------------------------------------------------------------
class SSLCertOptions(CertificateOptions):
"""
This class implements an SSL context factory that remediates the problem
with the default factory not being able to handle arbitrary certificate
chains. It allows the user to pass file names instead of pyOpenSSL objects
    which hides quite a lot of complexity. Furthermore, any time an SSL
    context object is requested, it checks the mtime of the files to see if
    they have changed and reloads them if so. Doing
things this way allows you to renew your certificates without having
to restart the server. It's quite convenient if you use Let's Encrypt
as a CA which offers certificates with 90 days lifespan. The class
extends the functionality of the recommended `CertificateOptions` factory
and constructs it with the defaults, except for the parameters described
below.
:param key_file: A file containing the private key in either ASN.1 or PEM
format
:param cert_file: A file containing the certificate in either ASN.1 or PEM
format.
:param chain_file: A file containing any additional certificates in the
chain of trust in the PEM format
"""
#---------------------------------------------------------------------------
    def
# File: ma_option_vol/VIX_calculation.py
import numpy as np
import datetime as dt
from iv_calculation import days_till_expiration
class Option_Contract_Data():
'''
Defines data for an option contract on a given date
'''
def __init__(self, option_description, exp_date, strike_price, px_last, px_bid, px_ask):
self.option_description = option_description
self.exp_date = exp_date
self.strike_price = strike_price
self.px_last = px_last
self.px_bid = px_bid
self.px_ask = px_ask
if self.px_bid == 0:
self.px_mid = self.px_ask
elif self.px_ask == 0:
self.px_mid = self.px_bid
else:
self.px_mid = round((self.px_ask + self.px_bid)/2,5)
def __str__(self):
return '{} Last: {} Bid: {}, Ask: {}'.format(self.option_description, self.px_last, self.px_bid, self.px_ask)
def set_px_mid(self, value):
'''
sets the mid price for the option
'''
self.px_mid = value
def current_time_format():
'''
Returns the current time as a string in the form 'HH:MM (AM or PM)'
'''
now = dt.datetime.now()
return (dt.datetime.strftime(now, '%I:%M %p'))
class Near_Term():
'''
    A class containing near-term option contracts. Strikes are ordered from smallest to largest.
'''
#total number of minutes in a 365 day year
N365 = 525600
def __init__(self, option_dict, risk_free_rate, current_date=dt.datetime.today(),
current_time=current_time_format(), settlement_time='8:30 AM'):
        #assumes that the values for the keys of option_dict are lists of Option_Contract_Data objects
self.current_date = current_date
self.option_dict = self.sort_dict_by_strike(option_dict)
self.current_time= self.convert_time(current_time)
self.settlement_time = self.convert_time(settlement_time)
#each option has the same expiration date, so that information is just being passed from the first contract in the call list
self.exp_date = option_dict['call'][0].exp_date
self.R = risk_free_rate
self.T = self.time_to_expiration()
self.e_RT = np.exp(self.R * self.T)
self.F_strike = self.smallest_mid_price_diff()
self.F = self.forward_index_level()
        #both the call and the put list will contain the same strikes; in this case the call list is used to determine k0, the strike immediately below F
self.k0 = self.find_k0(option_list= self.option_dict['call'])
self.non_zero_bid_list = self.create_calculation_list()
self.variance = self.calculate_variance()
def sort_dict_by_strike(self, some_dict):
'''
Sorts the 'call' and 'put' list by strike
'''
#loop through all the keys in the dictionary, in this case, just 'call' and 'put':
for key in some_dict.keys():
self.sort_by_strike(some_dict[key])
#return the sorted dictionary
return some_dict
def sort_by_strike(self, alist):
'''
Given a list of Option_Contract_Data objects, they are sorted by strike.
This method is based on an insertion sort method
'''
for index in range(1,len(alist)):
current_object = alist[index]
current_strike = alist[index].strike_price
position = index
while position>0 and alist[position-1].strike_price>current_strike:
alist[position]=alist[position-1]
position = position-1
alist[position]=current_object
def convert_time(self,str_time):
'''
Given a string in the form 'HH:MM (AM or PM)' the appropriate 24 hour datetime object is returned.
'''
return dt.datetime.strptime(str_time, '%I:%M %p')
def convert_days_to_minutes(self, num):
'''
Given a number of days, the number of minutes is returned
Note: there are 1440 minutes in a single day
'''
return num* 1440
def minutes_till_exp(self, settlement_time):
'''
        Given the settlement time, the minutes from midnight until settlement on the settlement day are calculated
'''
return (settlement_time - self.convert_time('12:00 AM')).seconds / 60
def minutes_till_midnight(self, current_time):
'''
Given the current_time, the minutes till midnight are returned
'''
return (self.convert_time('12:00 AM')- current_time).seconds / 60
def time_to_expiration(self):
'''
        Given the current date and the expiration date, the minutes/year until expiration are returned
        m_current_day       Minutes remaining until midnight of the current day. Markets close at 4:00 PM ET
        m_settlement_day    Minutes from midnight until the expiration on the settlement day
                            expiration is 8:30 AM for standard monthly expirations
                            expiration is 3:00 PM for standard weekly expirations
        m_other_day         Total minutes in the days between the current date and the expiration date
'''
m_current_day = self.minutes_till_midnight(self.current_time)
m_settlement_day = self.minutes_till_exp(self.settlement_time)
m_other_day = self.convert_days_to_minutes(days_till_expiration(start_date=self.current_date, expiration_date=self.exp_date)-1)
return (m_current_day + m_settlement_day + m_other_day)/ self.N365
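    # Illustrative example (hypothetical numbers): with the current time at
    # 9:46 AM, an 8:30 AM settlement, and 25 calendar days to expiration:
    #   m_current_day    = 14h 14m           = 854 minutes
    #   m_settlement_day = 8h 30m            = 510 minutes
    #   m_other_day      = 24 days * 1440    = 34560 minutes
    #   T = (854 + 510 + 34560) / 525600     ~ 0.06835 years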
def smallest_mid_price_diff(self):
'''
Returns the strike with the smallest absolute difference between the price of its respective call and put
'''
#creates a list of (strike price, mid price differences) tuples for each strike and midprice in both call and put lists
same_strike_list = [(x.px_mid, y.px_mid, x.strike_price) for x in self.option_dict['call'] for y in self.option_dict['put'] if((x.strike_price == y.strike_price) and (x.px_mid !=y.px_mid !=0))]
#diff_list = [(round(np.abs(x.px_mid - y.px_mid),2), x.strike_price) for (x,y) in zip(self.option_dict['call'], self.option_dict['put'])]
diff_list = [(round(np.abs(item[0]-item[1]), 2),item[2]) for item in same_strike_list]
#import pdb;pdb.set_trace()
return min(diff_list)[1] #returns just the strike price from the tuple
def forward_index_level(self):
'''
strike_price strike price where the absolute difference between the call_price and put_price is smallest
call_price call price associated with the given strike_price
put_price put price associated with the given strike price
        risk_free_rate          the bond equivalent yield of the U.S. T-Bill maturing closest to the expiration date of the given option
time_to_expiration time to expiration in minutes
'''
call_price = self.get_mid_price_by_strike(strike_price= self.F_strike, call=True, put=False)
put_price = self.get_mid_price_by_strike(strike_price= self.F_strike, call=False, put=True)
return self.F_strike +self.e_RT*(call_price - put_price)
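    # Illustrative example (hypothetical numbers): if the strike with the
    # smallest call/put mid-price difference is 2000, with call mid 25.00,
    # put mid 27.50, R = 0.0003 and T = 0.0683, then
    #   F = 2000 + e^(0.0003 * 0.0683) * (25.00 - 27.50) ~ 1997.50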
def get_mid_price_by_strike(self, strike_price, call=True, put=False):
'''
will return the mid price of a given call or put based on the strike_price
'''
#if searching for a call price
if call:
#iterate through each option contract
for option in self.option_dict['call']:
#if the option's strike matches the one we're searching for, then return the options mid price
if option.strike_price == strike_price:
return option.px_mid
#if searching for a put price
if put:
#iterate through each option contract
for option in self.option_dict['put']:
#if the option's strike matches the one we're searching for, then return the options mid price
if option.strike_price == strike_price:
return option.px_mid
def find_k0(self, option_list):
'''
Given F, the forward_index_level, K0 is returned.
        K0 is defined as the strike immediately below F.
'''
#creates a list of strike prices if the strike price is less than the forward level, F
#uses the call list, but both call and puts have the same strikes
below_F = [x.strike_price for x in option_list if x.strike_price < self.F ]
#return the largest strike in the list, which will be the closest strike below F
return max(below_F)
def create_calculation_list(self):
'''
        Creates a list of options to be included in the variance calculation. Options are centered around the at-the-money strike K0.
        Calls with strike prices > K0 are included, and puts with strike prices <= K0 are included.
The mid price for the K0 strike is determined to be the average of the call and put with strike price of K0. When searching for options
to include, if two consecutive options are found to have 0 bid values, then no further options are considered beyond what has already been included
'''
#list of call options if their strikes are greater than self.K0
initial_call_list = [x for x in self.option_dict['call'] if x.strike_price > self.k0]
#list of put options if their strikes are less than or equal to self.K0
initial_put_list = [x for x in self.option_dict['put'] if x.strike_price <= self.k0]
#combining the call and put list, while removing zero bid options
combined_option_list = self.remove_zero_bid(option_list= initial_put_list[::-1]) + self.remove_zero_bid(option_list= initial_call_list)
#sort the combined_option_list
self.sort_by_strike(combined_option_list)
#go through the combined_option_list, and set the mid price of the k0 option to the average of the call and put mid price.
for option in combined_option_list:
if option.strike_price == self.k0:
                #get the mid price for the call with strike of k0
                call_price = self.get_mid_price_by_strike(strike_price=self.k0, call=True, put=False)
                #get the mid price for the put with strike of k0
put_price = self.get_mid_price_by_strike(strike_price=self.k0, call=False, put=True)
#calculate the mean
mean_px_mid = (call_price + put_price)/2
#set the px_mid of the given option.
option.set_px_mid(value= mean_px_mid)
return combined_option_list
def remove_zero_bid(self, option_list):
'''
Goes through an option list and adds non zero bid options to a new list.
If two consecutive zero bid options are found, no further options are considered
'''
final_list = []
        #iterate through every item in the given list
for (index, item) in enumerate(option_list):
#import pdb; pdb.set_trace()
#if the bid price does not equal zero, then add the option to the final list
if item.px_bid != 0:
final_list.append(item)
else:
#if the last item in the list is 0
if item == option_list[-1] and item.px_bid == 0:
continue
#if an item in the list is zero and followed by a zero
elif item.px_bid == option_list[index+1].px_bid ==0:
break
return final_list
def delta_k(self, strike_high, strike_low, end_point=False):
'''
Calculates the interval between the two given strike prices. By default, strike_high, and strike_low
are assumed to be one strike above and below the strike delta_k is being calculated for. If delta_k is
being determined for either the start or end of a list of strikes, then return the difference of strike_high and strike_low
'''
if end_point:
return (strike_high - strike_low)
else:
return (strike_high - strike_low)/2
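    # Illustrative example: for consecutive strikes 100, 105 and 110, delta_k
    # for the middle strike 105 is (110 - 100)/2 = 5, while for an end point it
    # is the distance to the single adjacent strike, e.g. 105 - 100 = 5.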
def option_contribution(self, strike_price, delta_strike, mid_price):
'''
        strike_price    :given strike price, either an integer or a float
        delta_strike    :the strike interval (delta_k) around the given strike price
        mid_price       :mid price for the option with the given strike price
'''
return (delta_strike/strike_price**2)*(self.e_RT)*mid_price
def sum_option_contribution(self):
'''
        Loops through each option and calculates that option's contribution to the formula
'''
sum_contribution = 0
#loop through every option that was added to the non_zero_bid_list
for (index,option) in enumerate(self.non_zero_bid_list):
#calculate delta_k
#first contract, which has the lowest strike
if option == self.non_zero_bid_list[0]:
delta_k = self.delta_k(strike_high=self.non_zero_bid_list[index+1].strike_price, strike_low= self.non_zero_bid_list[index].strike_price, end_point=True)
#last contract, which has the highest strike
elif option == self.non_zero_bid_list[-1]:
delta_k = self.delta_k(strike_high=self.non_zero_bid_list[index].strike_price, strike_low= self.non_zero_bid_list[index-1].strike_price, end_point=True)
else:
delta_k = self.delta_k(strike_high=self.non_zero_bid_list[index+1].strike_price, strike_low= self.non_zero_bid_list[index-1].strike_price)
sum_contribution += self.option_contribution(strike_price = option.strike_price, delta_strike= delta_k, mid_price= option.px_mid)
return sum_contribution
def forward_contribution(self):
'''
Returns the forward contribution for the given option chain
'''
return (1/self.T)*((self.F/self.k0)-1)**2
def calculate_variance(self):
'''
returns the Variance for the entire options chain
'''
return (2/self.T)* self.sum_option_contribution() - self.forward_contribution()
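    # The two methods combined above implement the variance of the option
    # strip, which has the general form
    #
    #   sigma^2 = (2/T) * sum_i (dK_i / K_i^2) * e^(R*T) * Q(K_i)
    #             - (1/T) * (F/K0 - 1)^2
    #
    # where Q(K_i) is the mid price of the option included at strike K_i.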
class Next_Term(Near_Term):
'''
Inherits everything from the Near_Term class
'''
pass
class VIX_Calculation(object):
'''
Given a Near_Term and Next_Term object, the VIX volatility calculation is performed
'''
#total number of minutes in a 365 day year
N365 = 525600
#total number of minutes in a 30 day period
N30 = 43200
def __init__(self, Near_Term, Next_Term):
self.T1 = Near_Term.T
self.T2 = Next_Term.T
self.N_T1 = self.T1 * self.N365
self.N_T2 = self.T2 * self.N365
self.w1 = self.calculate_weight1()
self.w2 = self.calculate_weight2()
self.var1 = Near_Term.variance
        self.var2 = Next_Term.variance
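        # The weights w1/w2 above are assumed to implement the standard 30-day
        # interpolation between the two terms, i.e. the final index is
        #   VIX = 100 * sqrt( (T1*var1*w1 + T2*var2*w2) * N365/N30 )
        # with w1 = (N_T2 - N30)/(N_T2 - N_T1) and w2 = (N30 - N_T1)/(N_T2 - N_T1).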
# spicierModbus2mqtt - Modbus TCP/RTU to MQTT bridge (and vice versa)
# https://github.com/mbs38/spicierModbus2mqtt
#
# Written in 2018 by <NAME> <<EMAIL>>
#
#
# Provided under the terms of the MIT license.
# Contains a bunch of code taken from:
# modbus2mqtt - Modbus master with MQTT publishing
# Written and (C) 2015 by <NAME> <<EMAIL>>
# Provided under the terms of the MIT license.
# Main improvements over modbus2mqtt:
# - more abstraction when writing to coils/registers using mqtt. Writing is now
# possible without having to know slave id, reference, function code etc.
# - specific coils/registers can be made read only
# - multiple slave devices on one bus are now supported
# - polling speed has been increased significantly. With modbus RTU @ 38400 baud
# more than 80 transactions per second have been achieved.
# - switched over to pymodbus which is in active development
# Requires:
# - Eclipse Paho for Python - http://www.eclipse.org/paho/clients/python/
# - pymodbus - https://github.com/riptideio/pymodbus
import argparse
import time
import socket
import paho.mqtt.client as mqtt
import serial
import io
import sys
import csv
import signal
import random
import ssl
import addToHomeAssistant
from pymodbus.pdu import ModbusRequest
from pymodbus.client.sync import ModbusSerialClient as SerialModbusClient
from pymodbus.client.sync import ModbusTcpClient as TCPModbusClient
from pymodbus.transaction import ModbusRtuFramer
version="0.3"
parser = argparse.ArgumentParser(description='Bridge between ModBus and MQTT')
parser.add_argument('--mqtt-host', default='localhost', help='MQTT server address. Defaults to "localhost"')
parser.add_argument('--mqtt-port', default=None, type=int, help='Defaults to 8883 for TLS or 1883 for non-TLS')
parser.add_argument('--mqtt-topic', default='modbus/', help='Topic prefix to be used for subscribing/publishing. Defaults to "modbus/"')
parser.add_argument('--mqtt-user', default=None, help='Username for authentication (optional)')
parser.add_argument('--mqtt-pass', default="", help='Password for authentication (optional)')
parser.add_argument('--mqtt-use-tls', action='store_true')
parser.add_argument('--mqtt-insecure', action='store_true')
parser.add_argument('--mqtt-cacerts', default=None, help="Path to keychain including ")
parser.add_argument('--mqtt-tls-version', default=None, help='TLS protocol version, can be one of tlsv1.2 tlsv1.1 or tlsv1')
parser.add_argument('--rtu',help='pyserial URL (or port name) for RTU serial port')
parser.add_argument('--rtu-baud', default='19200', type=int, help='Baud rate for serial port. Defaults to 19200')
parser.add_argument('--rtu-parity', default='even', choices=['even','odd','none'], help='Parity for serial port. Defaults to even')
parser.add_argument('--tcp', help='Act as a Modbus TCP master, connecting to host TCP')
parser.add_argument('--tcp-port', default='502', type=int, help='Port for MODBUS TCP. Defaults to 502')
parser.add_argument('--set-modbus-timeout',default='1',type=float, help='Response time-out for MODBUS devices')
parser.add_argument('--config', required=True, help='Configuration file. Required!')
parser.add_argument('--verbosity', default='3', type=int, help='Verbose level, 0=silent, 1=errors only, 2=connections, 3=mb writes, 4=all')
parser.add_argument('--autoremove',action='store_true',help='Automatically remove poller if modbus communication has failed three times.')
parser.add_argument('--add-to-homeassistant',action='store_true',help='Add devices to Home Assistant using Home Assistant\'s MQTT-Discovery')
parser.add_argument('--set-loop-break',default='0.01',type=float, help='Set pause in main polling loop. Defaults to 10ms.')
args=parser.parse_args()
verbosity=args.verbosity
addToHass=False
addToHass=args.add_to_homeassistant
class Control:
def __init__(self):
self.runLoop = True
def stopLoop(self):
self.runLoop = False
control = Control()
globaltopic=args.mqtt_topic
if not globaltopic.endswith("/"):
globaltopic+="/"
if verbosity>=0:
print('Starting spiciermodbus2mqtt V%s with topic prefix \"%s\"' %(version, globaltopic))
master=None
def signal_handler(signal, frame):
print('Exiting ' + sys.argv[0])
control.stopLoop()
signal.signal(signal.SIGINT, signal_handler)
deviceList=[]
referenceList=[]
class Device:
def __init__(self,name,slaveid):
self.name=name
self.occupiedTopics=[]
self.writableReferences=[]
self.slaveid=slaveid
if verbosity>=2:
print('Added new device \"'+self.name+'\"')
class Poller:
def __init__(self,topic,rate,slaveid,functioncode,reference,size,dataType):
self.topic=topic
self.rate=float(rate)
self.slaveid=int(slaveid)
self.functioncode=int(functioncode)
self.dataType=dataType
self.reference=int(reference)
self.size=int(size)
self.next_due=time.clock_gettime(0)+self.rate*random.uniform(0,1)
self.last = None
self.readableReferences=[]
self.device=None
self.disabled=False
self.failcounter=0
for myDev in deviceList:
if myDev.name == self.topic:
self.device=myDev
break
if self.device == None:
device = Device(self.topic,slaveid)
deviceList.append(device)
self.device=device
if verbosity>=2:
print("Added new Poller "+str(self.topic)+","+str(self.functioncode)+","+str(self.dataType)+","+str(self.reference)+","+str(self.size)+",")
def failCount(self,failed):
if not failed:
self.failcounter=0
else:
if self.failcounter==3:
self.disabled=True
print("Poller "+self.topic+" with Slave-ID "+str(self.slaveid)+ " and functioncode "+str(self.functioncode)+" disabled due to the above error.")
else:
self.failcounter=self.failcounter+1
def poll(self):
result = None
if master.is_socket_open()==True:
failed = False
try:
if self.functioncode == 3:
result = master.read_holding_registers(self.reference, self.size, unit=self.slaveid)
if result.function_code < 0x80:
data = result.registers
else:
failed = True
if self.functioncode == 1:
result = master.read_coils(self.reference, self.size, unit=self.slaveid)
if result.function_code < 0x80:
data = result.bits
else:
failed = True
if self.functioncode == 2:
result = master.read_discrete_inputs(self.reference, self.size, unit=self.slaveid)
if result.function_code < 0x80:
data = result.bits
else:
failed = True
if self.functioncode == 4:
result = master.read_input_registers(self.reference, self.size, unit=self.slaveid)
if result.function_code < 0x80:
data = result.registers
else:
failed = True
if not failed:
if verbosity>=4:
print("Read MODBUS, FC:"+str(self.functioncode)+", DataType:"+str(self.dataType)+", ref:"+str(self.reference)+", Qty:"+str(self.size)+", SI:"+str(self.slaveid))
print("Read MODBUS, DATA:"+str(data))
for ref in self.readableReferences:
ref.checkPublish(data,self.topic)
else:
if verbosity>=1:
print("Slave device "+str(self.slaveid)+" responded with error code: "+str(result.function_code))
except:
failed = True
if verbosity>=1:
print("Error talking to slave device:"+str(self.slaveid)+", trying again...")
if args.autoremove:
self.failCount(failed)
else:
if master.connect():
if verbosity >= 1:
print("MODBUS connected successfully")
else:
if verbosity >= 1:
print("MODBUS connection error, trying again...")
def checkPoll(self):
if time.clock_gettime(0) >= self.next_due and not self.disabled:
self.poll()
self.next_due=time.clock_gettime(0)+self.rate
def addReference(self,myRef):
#check reference configuration and maybe add to this poller or to the list of writable things
if myRef.topic not in self.device.occupiedTopics:
self.device.occupiedTopics.append(myRef.topic)
if "r" in myRef.rw or "w" in myRef.rw:
myRef.device=self.device
if "r" in myRef.rw:
if myRef.checkSanity(self.reference,self.size):
self.readableReferences.append(myRef)
if "w" not in myRef.rw:
referenceList.append(myRef)
if verbosity >= 2:
print('Added new reference \"' + myRef.topic + '\"')
else:
print("Reference \""+str(myRef.reference)+"\" with topic "+myRef.topic+" is not in range ("+str(self.reference)+" to "+str(int(self.reference+self.size-1))+") of poller \""+self.topic+"\", therefore ignoring it for polling.")
if "w" in myRef.rw:
if self.functioncode == 3: #holding registers
myRef.writefunctioncode=6 #preset single register
if self.functioncode == 1: #coils
myRef.writefunctioncode=5 #force single coil
if self.functioncode == 2: #read input status, not writable
print("Reference \""+str(myRef.reference)+"\" with topic "+myRef.topic+" in poller \""+self.topic+"\" is not writable (discrete input)")
if self.functioncode == 4: #read input register, not writable
print("Reference \""+str(myRef.reference)+"\" with topic "+myRef.topic+" in poller \""+self.topic+"\" is not writable (input register)")
if myRef.writefunctioncode is not None:
self.device.writableReferences.append(myRef)
referenceList.append(myRef)
else:
print("Reference \""+str(myRef.reference)+"\" with topic "+myRef.topic+" in poller \""+self.topic+"\" is neither read nor writable, therefore ignoring it.")
else:
print("Reference topic ("+str(myRef.topic)+") is already occupied for poller \""+self.topic+"\", therefore ignoring it.")
class Reference:
def __init__(self,topic,reference,format,rw,poller):
self.topic=topic
self.reference=int(reference)
self.format=format.split(":",2)
self.lastval=None
self.rw=rw
self.relativeReference=None
self.writefunctioncode=None
self.device=None
self.poller=poller
def checkSanity(self,reference,size):
if self.reference in range(reference,size+reference):
self.relativeReference=self.reference-reference
return True
def checkPublish(self,result,topic):
# Only publish messages after the initial connection has been made. If it became disconnected then the offline buffer will store messages,
            # but only after the initial connection was made.
if mqc.initial_connection_made == True:
if self.poller.dataType == "int32":
val = result[self.relativeReference]*256 + result[self.relativeReference+1]
else:
val = result[self.relativeReference]
if self.lastval != val:
self.lastval= val
try:
publish_result = mqc.publish(globaltopic+self.device.name+"/state/"+self.topic,self.lastval,qos=1,retain=True)
if verbosity>=4:
print("published MQTT topic: " + str(self.device.name+"/state/"+self.topic)+"value: " + str(self.lastval)+" RC:"+str(publish_result.rc))
except:
if verbosity>=1:
print("Error publishing MQTT topic: " + str(self.device.name+"/state/"+self.topic)+"value: " + str(self.lastval))
pollers=[]
# type, topic, slaveid, ref, size, functioncode, rate
# type, topic, reference, rw, , ,
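# Example configuration rows (illustrative; the topics and values are
# hypothetical, the column names match the keys used below):
#
#   type,topic,col2,col3,col4,col5,col6
#   poll,inverter,1,0,4,holding_register,2.0
#   ref,power,0,r,,,
#   ref,setpoint,2,rw,,,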
# Now let's read the config file
with open(args.config,"r") as csvfile:
csvfile.seek(0)
reader=csv.DictReader(csvfile)
currentPoller=None
for row in reader:
if row["type"]=="poller" or row["type"]=="poll":
if row["col5"] == "holding_register":
functioncode = 3
dataType="int16"
if row["col5"] == "coil":
functioncode = 1
dataType="bool"
if row["col5"] == "input_register":
functioncode = 4
dataType="int16"
if row["col5"] == "input_status":
functioncode = 2
dataType="bool"
if row["col5"] == "input_register_32BE":
functioncode = 4
dataType="int32"
rate = float(row["col6"])
slaveid = int(row["col2"])
reference = int(row["col3"])
size = int(row["col4"])
currentPoller = Poller(row["topic"],rate,slaveid,functioncode,reference,size,dataType)
pollers.append(currentPoller)
continue
elif row["type"]=="reference" or row["type"]=="ref":
reference = int(row["col2"])
currentPoller.addReference(Reference(row["topic"],reference,"",row["col3"],currentPoller))
def messagehandler(mqc,userdata,msg):
if True:
(prefix,device,function,reference) = msg.topic.split("/")
if function != 'set':
return
myRef = None
myDevice = None
for iterDevice in deviceList:
if iterDevice.name == device:
myDevice = iterDevice
if myDevice == None: # no such device
return
for iterRef in myDevice.writableReferences:
if iterRef.topic == reference:
myRef=iterRef
if myRef == None: # no such reference
return
payload = str(msg.payload.decode("utf-8"))
if myRef.writefunctioncode == 5:
value = None
if payload == 'True' or payload == 'true' or payload == '1' or payload == 'TRUE':
value = True
if payload == 'False' or payload == 'false' or payload == '0' or payload == 'FALSE':
value = False
if value != None:
result = master.write_coil(int(myRef.reference),value,unit=int(myRef.device.slaveid))
try:
if result.function_code < 0x80:
if verbosity>=3:
print("Writing to device "+str(myDevice.name)+", Slave-ID="+str(myDevice.slaveid)+" at Reference="+str(myRef.reference)+" using function code "+str(myRef.writefunctioncode)+" successful.")
else:
if verbosity>=1:
print("Writing to device "+str(myDevice.name)+", Slave-ID="+str(myDevice.slaveid)+" at Reference="+str(myRef.reference)+" using function code "+str(myRef.writefunctioncode)+" FAILED! (Devices responded with errorcode. Maybe bad configuration?)")
except NameError:
if verbosity>=1:
print("Error writing to slave device "+str(myDevice.slaveid)+" (maybe CRC error or timeout)")
else:
if verbosity >= 1:
print("Writing to device "+str(myDevice.name)+", Slave-ID="+str(myDevice.slaveid)+" at Reference="+str(myRef.reference)+" using function code "+str(myRef.writefunctioncode)+" not possible. Given value is not \"True\" or \"False\".")
if myRef.writefunctioncode == 6:
try:
value=int(payload)
if value > 65535 or value < 0:
value = None
except:
value=None
if value is not None:
result = master.write_registers(int(myRef.reference),value,unit=myRef.device.slaveid)
try:
if result.function_code < 0x80:
if verbosity>=3:
print("Writing to device "+str(myDevice.name)+", Slave-ID="+str(myDevice.slaveid)+" at Reference="+str(myRef.reference)+" using function code "+str(myRef.writefunctioncode)+" successful.")
else:
if verbosity>=1:
print("Writing to device "+str(myDevice.name)+", Slave-ID="+str(myDevice.slaveid)+" at Reference="+str(myRef.reference)+" using function code "+str(myRef.writefunctioncode)+" FAILED! (Devices responded with errorcode. Maybe bad configuration?)")
except NameError:
if verbosity >= 1:
print("Error writing to | |
############################################################################
# Copyright 2019 <NAME> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
#adapted from https://www.depends-on-the-definition.com/named-entity-recognition-with-bert/
import pandas as pd
import numpy as np
from tqdm import tqdm, trange
import pickle as pk
#add pname at the beginning of the sentence like 'Product name is xxx'
def add_pname(data, cve_cpe_pnames):
def add(pname):
if len(pname)==0:
return [], []
add_sent = ['Product','name','is']
add_label = ['O','O','O']
#only consider pname without '_'
pname = [i for i in pname if '_' not in i]
for i in range(len(pname)):
spl_name = pname[i].split()
add_sent.extend(spl_name)
add_label.extend(['pn']*len(spl_name))
if i!=len(pname)-1:
add_sent.append(',')
else:
add_sent.append('.')
add_label.append('O')
return add_sent, add_label
def agg_func(s):
add_sent, add_label = add(cve_cpe_pnames[s.name[0]])
new_sent = add_sent + s["token"].values.tolist()
new_tag = add_label + s["label"].values.tolist()
return [(w, t) for w, t in zip(new_sent, new_tag)], len(add_label)
grouped = data.groupby(['sent_ind','cve_sent_ind']).apply(agg_func)
words = [w[0] for w in grouped]
add_len = [w[1] for w in grouped]
return words, add_len
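# Illustrative example (hypothetical product names): for a CVE whose CPE
# product names are ['openssl', 'debian linux'], the tokens prepended to the
# sentence are
#   ['Product', 'name', 'is', 'openssl', ',', 'debian', 'linux', '.']
# with the corresponding labels
#   ['O', 'O', 'O', 'pn', 'O', 'pn', 'pn', 'O']
# and add_len = 8 for that sentence.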
def read_data(config, path):
data = pd.read_csv(path, encoding="latin1").fillna(method="ffill")
count_label = data.groupby('label')['sent_ind'].count()
with open('data/cpe.pkl','rb') as f:
cve_cpe_pnames,cve_cpe_vendors = pk.load(f)
words, add_len = add_pname(data, cve_cpe_pnames)
sentences = [" ".join([s[0] for s in sent]) for sent in words]
labels = [[s[1] for s in sent] for sent in words]
substitue = config['substitue']
tags_vals = list(set(data["label"].values)) + [substitue]
tag2idx = {t: i for i, t in enumerate(tags_vals)}
return words, sentences, labels, tags_vals, tag2idx, add_len
from pytorch_pretrained_bert import BertTokenizer, BertConfig, BertModel
from keras.preprocessing.sequence import pad_sequences
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
def vectorization(config, sentences, labels, tags_vals, tag2idx, add_len):
#use bert tokenization and substitute label
#vectorize and pad dataset
tokenizer = BertTokenizer.from_pretrained(config['name'], do_lower_case=config['do_lower_case'])
mytexts = []
mylabels = []
for sent, tags in zip(sentences,labels):
BERT_texts = []
BERT_labels = np.array([])
for word, tag in zip(sent.split(),tags):
sub_words = tokenizer.tokenize(word)
n_underscore = sub_words.count('_')
for i in range(n_underscore):
sub_words.remove('_')
tags = np.array([tag for x in sub_words])
tags[1:] = config['substitue']
BERT_texts += sub_words
BERT_labels = np.append(BERT_labels,tags)
mytexts.append(BERT_texts)
mylabels.append(BERT_labels)
l = 0
for w in mytexts:
if len(w)>l:
l = len(w)
print('The longest sentence has {} tokens.'.format(l))
MAX_LEN = config['MAX_LEN']
#padding data
input_ids = pad_sequences([tokenizer.convert_tokens_to_ids(txt) for txt in mytexts],
maxlen=MAX_LEN, dtype="long", truncating="post", padding="post")
tags = pad_sequences([[tag2idx.get(l) for l in lab] for lab in mylabels],
maxlen=MAX_LEN, value=tag2idx["O"], padding="post",
dtype="long", truncating="post")
attention_masks = np.array([[float(i>0) for i in ii] for ii in input_ids])
add_masks = np.ones((tags.shape[0], MAX_LEN))
for i in range(tags.shape[0]):
add_masks[i, :add_len[i]] = 0
data_fold = (input_ids, tags, attention_masks, add_masks)
return data_fold
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
def myDataLoader(config, data_fold, train_index=None, test_index=None):
bs = config['bs']
test_size = config['test_size']
if_cross_val = config['if_cross_val']
input_ids, tags, attention_masks, add_masks = data_fold
if if_cross_val:
tr_inputs, val_inputs = input_ids[train_index], input_ids[test_index]
tr_tags, val_tags = tags[train_index], tags[test_index]
tr_masks, val_masks = attention_masks[train_index], attention_masks[test_index]
tr_add_masks, val_add_masks = add_masks[train_index], add_masks[test_index]
else:
tr_inputs, val_inputs, tr_tags, val_tags = train_test_split(input_ids, tags,
random_state=1, test_size=test_size)
tr_masks, val_masks, _, _ = train_test_split(attention_masks, input_ids,
random_state=1, test_size=test_size)
tr_add_masks, val_add_masks, _, _ = train_test_split(add_masks, input_ids,
random_state=1, test_size=test_size)
tr_inputs = torch.tensor(tr_inputs)
val_inputs = torch.tensor(val_inputs)
tr_tags = torch.tensor(tr_tags)
val_tags = torch.tensor(val_tags)
tr_masks = torch.tensor(tr_masks)
val_masks = torch.tensor(val_masks)
tr_add_masks = torch.tensor(tr_add_masks)
val_add_masks = torch.tensor(val_add_masks)
train_data = TensorDataset(tr_inputs, tr_masks, tr_tags, tr_add_masks)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=bs, drop_last=False)
valid_data = TensorDataset(val_inputs, val_masks, val_tags, val_add_masks)
valid_sampler = SequentialSampler(valid_data)
valid_dataloader = DataLoader(valid_data, sampler=valid_sampler, batch_size=bs, drop_last=False)
dataloader = (train_dataloader, valid_dataloader)
count = np.unique(tr_tags, return_counts=True)[1]
return dataloader, count
from pytorch_pretrained_bert import BertForTokenClassification, BertAdam
def BuildModel(config, weight=None):
# change the forward method: do not consider 'X' when computing loss
def new_forward(self, input_ids, token_type_ids=None, attention_mask=None, add_masks=None, labels=None, weight=weight):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
if labels is not None:
if weight is not None:
weight = weight.to(torch.float).to(config['device'])
loss_fct = nn.CrossEntropyLoss(weight=weight, ignore_index=self.num_labels-1)
# Only keep active parts of the loss
if attention_mask is not None or add_masks is not None:
if add_masks is None:
add_masks = 1
if attention_mask is None:
attention_mask = 1
active_loss = (attention_mask.view(-1) == 1) * (add_masks.view(-1) == 1)
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
else:
return logits
BertForTokenClassification.forward = new_forward
model = BertForTokenClassification.from_pretrained(config['name'], num_labels=config['num_labels'])
model.to(config['device'])
return model
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score,classification_report
from sklearn.metrics import fbeta_score, precision_recall_fscore_support
def test(config, model, dataloader, validation = False, tags_vals = None):
#dataloader is only validation data or test data
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
predictions , true_labels = [], []
for batch in dataloader:
batch = tuple(t.to(config['device']) for t in batch)
b_input_ids, b_input_mask, b_labels, b_add_masks = batch
with torch.no_grad():
tmp_eval_loss = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, add_masks=b_add_masks, labels=b_labels)
logits = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, add_masks=b_add_masks)
active = ((b_input_mask.view(-1) == 1) * (b_labels.view(-1) != config['num_labels']-1)) * (b_add_masks.view(-1) == 1)
active_logits = logits.view(-1, config['num_labels'])[active].cpu().numpy()
active_labels = b_labels.view(-1)[active].cpu().numpy()
pred_labels = np.argmax(active_logits, axis=1)
predictions.append(pred_labels)
true_labels.append(active_labels)
eval_loss += tmp_eval_loss.mean().item()
nb_eval_examples += b_input_ids.size(0)
nb_eval_steps += 1
eval_loss = eval_loss/nb_eval_steps
predictions = np.concatenate(predictions)
true_labels = np.concatenate(true_labels)
eval_accuracy = accuracy_score(true_labels, predictions, normalize=True, sample_weight=None)
fbeta = fbeta_score(true_labels, predictions, average='macro', beta=config['beta'])
precision = precision_score(true_labels, predictions, average='macro')
recall = recall_score(true_labels, predictions, average='macro')
if validation==True:
return eval_loss, eval_accuracy, fbeta, precision, recall
else:
print("Test loss: {}".format(eval_loss))
print("Test Accuracy: {}".format(eval_accuracy))
print("macro Fbeta-Score: {}".format(fbeta))
print("Precision: {}".format(precision))
print("Recall: {}".format(recall))
pred_tags = [tags_vals[p] for p in predictions]
valid_tags = [tags_vals[l] for l in true_labels]
counts = [valid_tags.count(tag) for tag in tags_vals]
cfs_mat = confusion_matrix(valid_tags, pred_tags,tags_vals)
cfs_with_index = pd.DataFrame(cfs_mat, index = tags_vals,
columns = tags_vals)
cfs_mat_norm = cfs_mat/cfs_mat.sum(axis=1, keepdims = True)
cfs_with_index_norm = pd.DataFrame(cfs_mat_norm, index = tags_vals,
columns = tags_vals)
print('')
print('test counts:')
print(pd.DataFrame(tags_vals,counts))
print('')
print(classification_report(valid_tags, pred_tags))
print('')
print('Confusion matrix:')
print(cfs_with_index)
sn.heatmap(cfs_with_index_norm)
print('')
return predictions, true_labels, eval_loss, eval_accuracy, fbeta, precision, recall
from torch.optim import Adam
import matplotlib.pyplot as plt
import seaborn as sn
from copy import deepcopy
def train(config, model, dataloader, if_plot=True, fold_id=None):
#the dataloader is the combination of training data and validation data
epochs = config['epochs']
max_grad_norm = config['max_grad_norm']
period = config['period']
FULL_FINETUNING = config['FULL_FINETUNING']
if FULL_FINETUNING:
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay_rate': config['decay']},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.0}
]
else:
param_optimizer = list(model.classifier.named_parameters())
optimizer_grouped_parameters = [{"params": [p for n, p in param_optimizer]}]
optimizer = Adam(optimizer_grouped_parameters, lr=config['lr'])
tr_loss_list = []
eval_loss_list = []
eval_acc_list = []
fbeta_list = []
precision_list = []
recall_list = []
max_acc = 0
max_fbeta = 0
    max_precision = 0
max_recall = 0
train_dataloader, valid_dataloader = dataloader
if not config['if_cross_val'] and config['test_size']:
eval_loss, eval_accuracy, fbeta, precision, recall = test(config, model, dataloader=valid_dataloader, validation=True)
# print train loss per epoch
print('Epoch: {}'.format(0))
# VALIDATION on validation set
print("Validation loss: {}".format(eval_loss))
print("Validation Accuracy: {}".format(eval_accuracy))
print("Macro Fbeta-Score: {}".format(fbeta))
print('')
for epoch in range(1, epochs+1):
# TRAIN loop
model.train()
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(train_dataloader):
# add batch to gpu
batch = tuple(t.to(config['device']) for t in batch)
b_input_ids, b_input_mask, b_labels, b_add_masks = batch
# forward pass
loss = model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask, add_masks=b_add_masks, labels=b_labels)
# backward pass
loss.backward()
# track train loss
tr_loss += loss.item()
nb_tr_examples += b_input_ids.size(0)
nb_tr_steps += 1
# gradient clipping
torch.nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=max_grad_norm)
# update parameters
optimizer.step()
model.zero_grad()
if config['if_cross_val'] or config['test_size']:
eval_loss, eval_accuracy, fbeta, precision, recall = test(config, model, valid_dataloader, validation = True)
if recall>max_recall:
max_acc = eval_accuracy
max_fbeta = fbeta
max_precision = precision
max_recall = recall
best_model = deepcopy(model)
if if_plot:
tr_loss_list.append(tr_loss)
eval_loss_list.append(eval_loss)
eval_acc_list.append(eval_accuracy)
fbeta_list.append(fbeta)
precision_list.append(precision)
recall_list.append(recall)
if epoch % period == 0:
# print train loss per epoch
# VALIDATION on validation set
print("Validation loss: {}".format(eval_loss))
print("Validation Accuracy: {}".format(eval_accuracy))
print("Macro Fbeta-Score: {}".format(fbeta))
print("Macro Precision: | |
# ./econfirmation.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:ea6fc53702d74154a12297ea3787ddddc34f5684
# Generated 2015-02-11 21:35:49.977166 by PyXB version 1.2.4 using Python 2.6.9.final.0
# Namespace urn:eSENS:xsd:ElectronicConfirmationRequest
from __future__ import unicode_literals
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
import pyxb.utils.six as _six
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:2b2e2fd1-b225-11e4-b26c-14109fe53921')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.4'
# Generated bindings are not compatible across PyXB versions
if pyxb.__version__ != _PyXBVersion:
raise pyxb.PyXBVersionError(_PyXBVersion)
# Import bindings for namespaces imported into schema
import _cva as _ImportedBinding__cva
import _ext as _ImportedBinding__ext
import _cac as _ImportedBinding__cac
import pyxb.binding.datatypes
import _cvc as _ImportedBinding__cvc
import _cbc as _ImportedBinding__cbc
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.NamespaceForURI('urn:eSENS:xsd:ElectronicConfirmationRequest', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
_Namespace_cva = _ImportedBinding__cva.Namespace
_Namespace_cva.configureCategories(['typeBinding', 'elementBinding'])
_Namespace_cvc = _ImportedBinding__cvc.Namespace
_Namespace_cvc.configureCategories(['typeBinding', 'elementBinding'])
_Namespace_cbc = _ImportedBinding__cbc.Namespace
_Namespace_cbc.configureCategories(['typeBinding', 'elementBinding'])
_Namespace_ext = _ImportedBinding__ext.Namespace
_Namespace_ext.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
"""Parse the given XML and use the document element to create a
Python instance.
@param xml_text An XML document. This should be data (Python 2
str or Python 3 bytes), or a text (Python 2 unicode or Python 3
str) in the L{pyxb._InputEncoding} encoding.
@keyword default_namespace The L{pyxb.Namespace} instance to use as the
default namespace where there is no default namespace in scope.
If unspecified or C{None}, the namespace of the module containing
this function will be used.
@keyword location_base: An object to be recorded as the base of all
L{pyxb.utils.utility.Location} instances associated with events and
objects handled by the parser. You might pass the URI from which
the document was obtained.
"""
if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
dom = pyxb.utils.domutils.StringToDOM(xml_text)
return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
handler = saxer.getContentHandler()
xmld = xml_text
if isinstance(xmld, _six.text_type):
xmld = xmld.encode(pyxb._InputEncoding)
saxer.parse(io.BytesIO(xmld))
instance = handler.rootObject()
return instance
def CreateFromDOM (node, default_namespace=None):
"""Create a Python instance from the given DOM node.
The node tag must correspond to an element declaration in this module.
@deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)
# Complex type {urn:eSENS:xsd:ElectronicConfirmationRequest}ElectronicConfirmationRequestType with content type ELEMENT_ONLY
class ElectronicConfirmationRequestType (pyxb.binding.basis.complexTypeDefinition):
"""
"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'ElectronicConfirmationRequestType')
_XSDLocation = pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/ElectronicConfirmationRequest.xsd', 35, 3)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element {http://www.w3.org/ns/corevocabulary/AggregateComponents}CompanyActivity uses Python identifier CompanyActivity
__CompanyActivity = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cva, 'CompanyActivity'), 'CompanyActivity', '__urneSENSxsdElectronicConfirmationRequest_ElectronicConfirmationRequestType_httpwww_w3_orgnscorevocabularyAggregateComponentsCompanyActivity', True, pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 29, 3), )
CompanyActivity = property(__CompanyActivity.value, __CompanyActivity.set, None, None)
# Element {http://www.w3.org/ns/corevocabulary/AggregateComponents}HealthCareProviderCvaddress uses Python identifier HealthCareProviderCvaddress
__HealthCareProviderCvaddress = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cva, 'HealthCareProviderCvaddress'), 'HealthCareProviderCvaddress', '__urneSENSxsdElectronicConfirmationRequest_ElectronicConfirmationRequestType_httpwww_w3_orgnscorevocabularyAggregateComponentsHealthCareProviderCvaddress', False, pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 35, 3), )
HealthCareProviderCvaddress = property(__HealthCareProviderCvaddress.value, __HealthCareProviderCvaddress.set, None, None)
# Element {http://www.w3.org/ns/corevocabulary/AggregateComponents}HealthInsuranceOrganizationCvaddress uses Python identifier HealthInsuranceOrganizationCvaddress
__HealthInsuranceOrganizationCvaddress = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cva, 'HealthInsuranceOrganizationCvaddress'), 'HealthInsuranceOrganizationCvaddress', '__urneSENSxsdElectronicConfirmationRequest_ElectronicConfirmationRequestType_httpwww_w3_orgnscorevocabularyAggregateComponentsHealthInsuranceOrganizationCvaddress', False, pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 36, 3), )
HealthInsuranceOrganizationCvaddress = property(__HealthInsuranceOrganizationCvaddress.value, __HealthInsuranceOrganizationCvaddress.set, None, None)
# Element {http://www.w3.org/ns/corevocabulary/AggregateComponents}RequestingParty uses Python identifier RequestingParty
__RequestingParty = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cva, 'RequestingParty'), 'RequestingParty', '__urneSENSxsdElectronicConfirmationRequest_ElectronicConfirmationRequestType_httpwww_w3_orgnscorevocabularyAggregateComponentsRequestingParty', False, pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 38, 3), )
RequestingParty = property(__RequestingParty.value, __RequestingParty.set, None, None)
# Element {http://www.w3.org/ns/corevocabulary/BasicComponents}HealthCareProviderID uses Python identifier HealthCareProviderID
__HealthCareProviderID = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cvc, 'HealthCareProviderID'), 'HealthCareProviderID', '__urneSENSxsdElectronicConfirmationRequest_ElectronicConfirmationRequestType_httpwww_w3_orgnscorevocabularyBasicComponentsHealthCareProviderID', False, pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 33, 3), )
HealthCareProviderID = property(__HealthCareProviderID.value, __HealthCareProviderID.set, None, None)
# Element {http://www.w3.org/ns/corevocabulary/BasicComponents}HealthInsuranceOrganizationID uses Python identifier HealthInsuranceOrganizationID
__HealthInsuranceOrganizationID = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cvc, 'HealthInsuranceOrganizationID'), 'HealthInsuranceOrganizationID', '__urneSENSxsdElectronicConfirmationRequest_ElectronicConfirmationRequestType_httpwww_w3_orgnscorevocabularyBasicComponentsHealthInsuranceOrganizationID', False, pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 34, 3), )
HealthInsuranceOrganizationID = property(__HealthInsuranceOrganizationID.value, __HealthInsuranceOrganizationID.set, None, None)
# Element {http://www.w3.org/ns/corevocabulary/BasicComponents}RequestDate uses Python identifier RequestDate
__RequestDate = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cvc, 'RequestDate'), 'RequestDate', '__urneSENSxsdElectronicConfirmationRequest_ElectronicConfirmationRequestType_httpwww_w3_orgnscorevocabularyBasicComponentsRequestDate', False, pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 39, 3), )
RequestDate = property(__RequestDate.value, __RequestDate.set, None, None)
# Element {http://www.w3.org/ns/corevocabulary/BasicComponents}RequestID uses Python identifier RequestID
__RequestID = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cvc, 'RequestID'), 'RequestID', '__urneSENSxsdElectronicConfirmationRequest_ElectronicConfirmationRequestType_httpwww_w3_orgnscorevocabularyBasicComponentsRequestID', False, pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 40, 3), )
RequestID = property(__RequestID.value, __RequestID.set, None, None)
# Element {urn:oasis:names:specification:ubl:schema:xsd:CommonBasicComponents-2}CustomizationID uses Python identifier CustomizationID
__CustomizationID = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cbc, 'CustomizationID'), 'CustomizationID', '__urneSENSxsdElectronicConfirmationRequest_ElectronicConfirmationRequestType_urnoasisnamesspecificationublschemaxsdCommonBasicComponents_2CustomizationID', False, pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/common/UBL-CommonBasicComponents-2.1.xsd', 206, 3), )
CustomizationID = property(__CustomizationID.value, __CustomizationID.set, None, None)
# Element {urn:oasis:names:specification:ubl:schema:xsd:CommonBasicComponents-2}ProfileExecutionID uses Python identifier ProfileExecutionID
__ProfileExecutionID = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cbc, 'ProfileExecutionID'), 'ProfileExecutionID', '__urneSENSxsdElectronicConfirmationRequest_ElectronicConfirmationRequestType_urnoasisnamesspecificationublschemaxsdCommonBasicComponents_2ProfileExecutionID', False, pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/common/UBL-CommonBasicComponents-2.1.xsd', 604, 3), )
ProfileExecutionID = property(__ProfileExecutionID.value, __ProfileExecutionID.set, None, None)
# Element {urn:oasis:names:specification:ubl:schema:xsd:CommonBasicComponents-2}ProfileID uses Python identifier ProfileID
__ProfileID = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cbc, 'ProfileID'), 'ProfileID', '__urneSENSxsdElectronicConfirmationRequest_ElectronicConfirmationRequestType_urnoasisnamesspecificationublschemaxsdCommonBasicComponents_2ProfileID', False, pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/common/UBL-CommonBasicComponents-2.1.xsd', 605, 3), )
ProfileID = property(__ProfileID.value, __ProfileID.set, None, None)
# Element {urn:oasis:names:specification:ubl:schema:xsd:CommonBasicComponents-2}UBLVersionID uses Python identifier UBLVersionID
__UBLVersionID = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cbc, 'UBLVersionID'), 'UBLVersionID', '__urneSENSxsdElectronicConfirmationRequest_ElectronicConfirmationRequestType_urnoasisnamesspecificationublschemaxsdCommonBasicComponents_2UBLVersionID', False, pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/common/UBL-CommonBasicComponents-2.1.xsd', 862, 3), )
UBLVersionID = property(__UBLVersionID.value, __UBLVersionID.set, None, None)
# Element {urn:oasis:names:specification:ubl:schema:xsd:CommonExtensionComponents-2}UBLExtensions uses Python identifier UBLExtensions
__UBLExtensions = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_ext, 'UBLExtensions'), 'UBLExtensions', '__urneSENSxsdElectronicConfirmationRequest_ElectronicConfirmationRequestType_urnoasisnamesspecificationublschemaxsdCommonExtensionComponents_2UBLExtensions', False, pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/common/UBL-CommonExtensionComponents-2.1.xsd', 21, 2), )
UBLExtensions = property(__UBLExtensions.value, __UBLExtensions.set, None, '\n A container for all extensions present in the document.\n ')
_ElementMap.update({
__CompanyActivity.name() : __CompanyActivity,
__HealthCareProviderCvaddress.name() : __HealthCareProviderCvaddress,
__HealthInsuranceOrganizationCvaddress.name() : __HealthInsuranceOrganizationCvaddress,
__RequestingParty.name() : __RequestingParty,
__HealthCareProviderID.name() : __HealthCareProviderID,
__HealthInsuranceOrganizationID.name() : __HealthInsuranceOrganizationID,
__RequestDate.name() : __RequestDate,
__RequestID.name() : __RequestID,
__CustomizationID.name() : __CustomizationID,
__ProfileExecutionID.name() : __ProfileExecutionID,
__ProfileID.name() : __ProfileID,
__UBLVersionID.name() : __UBLVersionID,
__UBLExtensions.name() : __UBLExtensions
})
_AttributeMap.update({
})
Namespace.addCategoryObject('typeBinding', 'ElectronicConfirmationRequestType', ElectronicConfirmationRequestType)
ElectronicConfirmationRequest = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ElectronicConfirmationRequest'), ElectronicConfirmationRequestType, location=pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/ElectronicConfirmationRequest.xsd', 31, 3))
Namespace.addCategoryObject('elementBinding', ElectronicConfirmationRequest.name().localName(), ElectronicConfirmationRequest)
ElectronicConfirmationRequestType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cva, 'CompanyActivity'), _ImportedBinding__cva.CompanyActivityType, scope=ElectronicConfirmationRequestType, location=pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 29, 3)))
ElectronicConfirmationRequestType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cva, 'HealthCareProviderCvaddress'), _ImportedBinding__cva.CvaddressType, scope=ElectronicConfirmationRequestType, location=pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 35, 3)))
ElectronicConfirmationRequestType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cva, 'HealthInsuranceOrganizationCvaddress'), _ImportedBinding__cva.CvaddressType, scope=ElectronicConfirmationRequestType, location=pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 36, 3)))
ElectronicConfirmationRequestType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cva, 'RequestingParty'), _ImportedBinding__cac.PartyType, scope=ElectronicConfirmationRequestType, location=pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 38, 3)))
ElectronicConfirmationRequestType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cvc, 'HealthCareProviderID'), _ImportedBinding__cvc.HealthCareProviderIDType, scope=ElectronicConfirmationRequestType, location=pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 33, 3)))
ElectronicConfirmationRequestType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cvc, 'HealthInsuranceOrganizationID'), _ImportedBinding__cvc.HealthInsuranceOrganizationIDType, scope=ElectronicConfirmationRequestType, location=pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 34, 3)))
ElectronicConfirmationRequestType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cvc, 'RequestDate'), _ImportedBinding__cvc.RequestDateType, scope=ElectronicConfirmationRequestType, location=pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 39, 3)))
ElectronicConfirmationRequestType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cvc, 'RequestID'), _ImportedBinding__cvc.RequestIDType, scope=ElectronicConfirmationRequestType, location=pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 40, 3)))
ElectronicConfirmationRequestType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cbc, 'CustomizationID'), _ImportedBinding__cbc.CustomizationIDType, scope=ElectronicConfirmationRequestType, location=pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/common/UBL-CommonBasicComponents-2.1.xsd', 206, 3)))
ElectronicConfirmationRequestType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cbc, 'ProfileExecutionID'), _ImportedBinding__cbc.ProfileExecutionIDType, scope=ElectronicConfirmationRequestType, location=pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/common/UBL-CommonBasicComponents-2.1.xsd', 604, 3)))
ElectronicConfirmationRequestType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cbc, 'ProfileID'), _ImportedBinding__cbc.ProfileIDType, scope=ElectronicConfirmationRequestType, location=pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/common/UBL-CommonBasicComponents-2.1.xsd', 605, 3)))
ElectronicConfirmationRequestType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cbc, 'UBLVersionID'), _ImportedBinding__cbc.UBLVersionIDType, scope=ElectronicConfirmationRequestType, location=pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/common/UBL-CommonBasicComponents-2.1.xsd', 862, 3)))
ElectronicConfirmationRequestType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_ext, 'UBLExtensions'), _ImportedBinding__ext.UBLExtensionsType, scope=ElectronicConfirmationRequestType, documentation='\n A container for all extensions present in the document.\n ', location=pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/common/UBL-CommonExtensionComponents-2.1.xsd', 21, 2)))
def _BuildAutomaton ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton
del _BuildAutomaton
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/ElectronicConfirmationRequest.xsd', 47, 11))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/ElectronicConfirmationRequest.xsd', 52, 10))
counters.add(cc_1)
cc_2 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/ElectronicConfirmationRequest.xsd', 69, 9))
counters.add(cc_2)
cc_3 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/ElectronicConfirmationRequest.xsd', 85, 9))
counters.add(cc_3)
cc_4 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/ElectronicConfirmationRequest.xsd', 101, 9))
counters.add(cc_4)
states = []
final_update = None
symbol = pyxb.binding.content.ElementUse(ElectronicConfirmationRequestType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_ext, 'UBLExtensions')), pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/ElectronicConfirmationRequest.xsd', 47, 11))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = None
symbol = pyxb.binding.content.ElementUse(ElectronicConfirmationRequestType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cbc, 'UBLVersionID')), pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/ElectronicConfirmationRequest.xsd', 52, 10))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = None
symbol = pyxb.binding.content.ElementUse(ElectronicConfirmationRequestType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cbc, 'CustomizationID')), pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/ElectronicConfirmationRequest.xsd', 69, 9))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = None
symbol = pyxb.binding.content.ElementUse(ElectronicConfirmationRequestType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cbc, 'ProfileID')), pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/ElectronicConfirmationRequest.xsd', 85, 9))
st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
final_update = None
symbol = pyxb.binding.content.ElementUse(ElectronicConfirmationRequestType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cbc, 'ProfileExecutionID')), pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/ElectronicConfirmationRequest.xsd', 101, 9))
st_4 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
final_update = None
symbol = pyxb.binding.content.ElementUse(ElectronicConfirmationRequestType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cvc, 'RequestDate')), pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/ElectronicConfirmationRequest.xsd', 117, 9))
st_5 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_5)
final_update = None
symbol = pyxb.binding.content.ElementUse(ElectronicConfirmationRequestType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cvc, 'RequestID')), pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/ElectronicConfirmationRequest.xsd', 133, 9))
st_6 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_6)
final_update = None
symbol = pyxb.binding.content.ElementUse(ElectronicConfirmationRequestType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cvc, 'HealthCareProviderID')), pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/ElectronicConfirmationRequest.xsd', 149, 9))
st_7 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_7)
final_update = None
symbol = pyxb.binding.content.ElementUse(ElectronicConfirmationRequestType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cvc, 'HealthInsuranceOrganizationID')), pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/ElectronicConfirmationRequest.xsd', 165, 9))
st_8 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_8)
final_update = None
symbol = pyxb.binding.content.ElementUse(ElectronicConfirmationRequestType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cva, 'HealthCareProviderCvaddress')), pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/ElectronicConfirmationRequest.xsd', 181, 9))
st_9 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_9)
final_update = None
symbol = pyxb.binding.content.ElementUse(ElectronicConfirmationRequestType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cva, 'HealthInsuranceOrganizationCvaddress')), pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/ElectronicConfirmationRequest.xsd', 198, 9))
st_10 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_10)
final_update = None
symbol = pyxb.binding.content.ElementUse(ElectronicConfirmationRequestType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cva, 'CompanyActivity')), pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/ElectronicConfirmationRequest.xsd', 215, 9))
st_11 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_11)
final_update = set()
symbol = pyxb.binding.content.ElementUse(ElectronicConfirmationRequestType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cva, 'RequestingParty')), pyxb.utils.utility.Location('/home/esens/edocument/profiles/e_confirmation/xsd/request/ElectronicConfirmationRequest.xsd', 231, 9))
st_12 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_12)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_1, False) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_2, False) ]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_3, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_3, False) ]))
st_3._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_4, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_4, False) ]))
st_4._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_6, [
]))
st_5._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_7, [
]))
st_6._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_8, [
]))
st_7._set_transitionSet(transitions)
print("Unexpected error getting All TV Shows:", sys.exc_info()[0])
raise
cur.close() # Close the cursor
conn.close() # Close the connection to the db
return None
#####################################################
# Get all movies
#####################################################
def get_allmovies():
"""
Get all the Movies in your media server
"""
conn = database_connect()
if(conn is None):
return None
cur = conn.cursor()
try:
# Try executing the SQL and get from the database
sql = """select
m.movie_id, m.movie_title, m.release_year, count(mimd.md_id) as count
from
mediaserver.movie m left outer join mediaserver.mediaitemmetadata mimd on (m.movie_id = mimd.media_id)
group by m.movie_id, m.movie_title, m.release_year
order by movie_id;"""
r = dictfetchall(cur,sql)
print("return val is:")
print(r)
cur.close() # Close the cursor
conn.close() # Close the connection to the db
return r
except:
# If there were any errors, return a NULL row printing an error to the debug
print("Unexpected error getting All Movies:", sys.exc_info()[0])
raise
cur.close() # Close the cursor
conn.close() # Close the connection to the db
return None
#####################################################
# Get one artist
#####################################################
def get_artist(artist_id):
"""
Get an artist by their ID in your media server
"""
conn = database_connect()
if(conn is None):
return None
cur = conn.cursor()
try:
# Try executing the SQL and get from the database
sql = """select *
from mediaserver.artist a left outer join
(mediaserver.artistmetadata natural join mediaserver.metadata natural join mediaserver.MetaDataType) amd
on (a.artist_id=amd.artist_id)
where a.artist_id=%s;"""
r = dictfetchall(cur,sql,(artist_id,))
print("return val is:")
print(r)
cur.close() # Close the cursor
conn.close() # Close the connection to the db
return r
except:
# If there were any errors, return a NULL row printing an error to the debug
print("Unexpected error getting Artist with ID: '"+artist_id+"'", sys.exc_info()[0])
raise
cur.close() # Close the cursor
conn.close() # Close the connection to the db
return None
#####################################################
# Additional functions: artist/song descriptions and album artwork
#####################################################
def get_artist_description(artist_id):
"""
Get an artist's description
"""
conn = database_connect()
if(conn is None):
return None
cur = conn.cursor()
try:
# Try executing the SQL and get from the database
sql = """select * from mediaserver.artist A natural join mediaserver.artistmetadata natural join mediaserver.metadata natural join mediaserver.metadatatype mdt
where mdt.md_type_name = 'description' and A.artist_id=%s;"""
r = dictfetchall(cur,sql,(artist_id,))
print("return val is:")
print(r)
cur.close() # Close the cursor
conn.close() # Close the connection to the db
return r
except:
# If there were any errors, return a NULL row printing an error to the debug
print("Unexpected error getting Artist with ID: '"+artist_id+"'", sys.exc_info()[0])
raise
cur.close() # Close the cursor
conn.close() # Close the connection to the db
return None
def get_song_descripton(song_id):
"""
Get a song's description by their ID in your media server
"""
conn = database_connect()
if(conn is None):
return None
cur = conn.cursor()
try:
sql = """SELECT *
FROM mediaserver.Album Album LEFT OUTER JOIN
(mediaserver.AlbumMetaData NATURAL JOIN mediaserver.MetaData NATURAL JOIN mediaserver.MetaDataType) Album_to_MetaData
ON (Album.album_id = Album_to_MetaData.album_id)
LEFT OUTER JOIN mediaserver.album_songs albS on (Album.album_id=albS.album_id)
LEFT OUTER JOIN mediaserver.song S on (albS.song_id=S.song_id)
where md_type_name = 'description' and S.song_id = %s;"""
r = dictfetchall(cur,sql,(song_id,))
print("return val is:")
print(r)
cur.close() # Close the cursor
conn.close() # Close the connection to the db
return r
except:
# If there were any errors, return a NULL row printing an error to the debug
print("Unexpected error getting Get a song's album artwork :", sys.exc_info()[0])
raise
cur.close() # Close the cursor
conn.close() # Close the connection to the db
return None
def get_song_artwork(song_id):
"""
Get a song's album artwork by their ID in your media server
"""
conn = database_connect()
if(conn is None):
return None
cur = conn.cursor()
try:
sql = """SELECT Album.album_title, md_type_name, md_value
FROM mediaserver.Album Album LEFT OUTER JOIN
(mediaserver.AlbumMetaData NATURAL JOIN mediaserver.MetaData NATURAL JOIN mediaserver.MetaDataType) Album_to_MetaData
ON (Album.album_id = Album_to_MetaData.album_id)
LEFT OUTER JOIN mediaserver.album_songs albS on (Album.album_id=albS.album_id)
LEFT OUTER JOIN mediaserver.song S on (albS.song_id=S.song_id)
WHERE S.song_id = %s;"""
r = dictfetchall(cur,sql,(song_id,))
print("return val is:")
print(r)
cur.close() # Close the cursor
conn.close() # Close the connection to the db
return r
except:
# If there were any errors, return a NULL row printing an error to the debug
print("Unexpected error getting Get a song's album artwork :", sys.exc_info()[0])
raise
cur.close() # Close the cursor
conn.close() # Close the connection to the db
return None
#####################################################
# Query (2 a,b,c)
# Get one song
#####################################################
def get_song(song_id):
"""
Get a song by their ID in your media server
"""
conn = database_connect()
if(conn is None):
return None
cur = conn.cursor()
try:
#############################################################################
# Fill in the SQL below with a query to get all information about a song #
# and the artists that performed it #
#############################################################################
sql = """SELECT S.song_id, S.song_title, S.length, string_agg(song_To_Artist.artist_name,',') as performed_artists
FROM mediaserver.song S LEFT OUTER JOIN
(mediaserver.Song_Artists SA join mediaserver.Artist A on (SA.performing_artist_id = A.artist_id)
) as song_To_Artist on (S.song_id = song_To_Artist.song_id)
WHERE S.song_id = %s
GROUP BY S.song_id, S.song_title, S.length
ORDER BY S.song_id;"""
r = dictfetchall(cur,sql,(song_id,))
print("return val is:")
print(r)
cur.close() # Close the cursor
conn.close() # Close the connection to the db
return r
except:
# If there were any errors, return a NULL row printing an error to the debug
print("Unexpected error getting All Songs:", sys.exc_info()[0])
raise
cur.close() # Close the cursor
conn.close() # Close the connection to the db
return None
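# Hypothetical usage sketch: dictfetchall is assumed to return a list of dicts keyed by
# the selected column names, so a caller might do something like:
#
#   song = get_song(1)
#   if song:
#       print(song[0]['song_title'], song[0]['performed_artists'])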
#####################################################
# Query (2 d)
# Get metadata for one song
#####################################################
def get_song_metadata(song_id):
"""
Get the metadata for a song by their ID in your media server
"""
conn = database_connect()
if(conn is None):
return None
cur = conn.cursor()
try:
#############################################################################
# Fill in the SQL below with a query to get all metadata about a song #
#############################################################################
sql = """
select md_type_id, md_id, md_value, md_type_name
from mediaserver.song S LEFT OUTER JOIN
(mediaserver.mediaitemmetadata NATURAL JOIN mediaserver.metadata NATURAL JOIN mediaserver.MetaDataType) s_to_md
on (S.song_id = s_to_md.media_id)
WHERE S.song_id=%s;
"""
r = dictfetchall(cur,sql,(song_id,))
print("return val is:")
print(r)
cur.close() # Close the cursor
conn.close() # Close the connection to the db
return r
except:
# If there were any errors, return a NULL row printing an error to the debug
print("Unexpected error getting song metadata for ID: "+song_id, sys.exc_info()[0])
raise
cur.close() # Close the cursor
conn.close() # Close the connection to the db
return None
#####################################################
# Query (6 a,b,c,d,e)
# Get one podcast and return all metadata associated with it
#####################################################
def get_podcast(podcast_id):
"""
Get a podcast by their ID in your media server
"""
conn = database_connect()
if(conn is None):
return None
cur = conn.cursor()
try:
#############################################################################
# Fill in the SQL below with a query to get all information about a podcast #
# including all metadata associated with it #
#############################################################################
sql = """
SELECT *
FROM mediaserver.podcast pod LEFT OUTER JOIN
(mediaserver.podcastmetadata NATURAL JOIN mediaserver.metadata NATURAL JOIN mediaserver.metadatatype) p_to_md
ON (pod.podcast_id = p_to_md.podcast_id)
WHERE pod.podcast_id = %s;
"""
r = dictfetchall(cur,sql,(podcast_id,))
print("return val is:")
print(r)
cur.close() # Close the cursor
conn.close() # Close the connection to the db
return r
except:
# If there were any errors, return a NULL row printing an error to the debug
print("Unexpected error getting Podcast with ID: "+podcast_id, sys.exc_info()[0])
raise
cur.close() # Close the cursor
conn.close() # Close the connection to the db
return None
#####################################################
# Query (6 f)
# Get all podcast eps for one podcast
#####################################################
def get_all_podcasteps_for_podcast(podcast_id):
"""
Get all podcast eps for one podcast by their podcast ID in your media server
"""
conn = database_connect()
if(conn is None):
return None
cur = conn.cursor()
try:
#############################################################################
# Fill in the SQL below with a query to get all information about all #
# podcast episodes in a podcast #
#############################################################################
sql = """
SELECT *
FROM mediaserver.podcastepisode
WHERE podcast_id = %s
ORDER BY podcast_episode_published_date DESC;
"""
r = dictfetchall(cur,sql,(podcast_id,))
print("return val is:")
print(r)
cur.close() # Close the cursor
conn.close() # Close the connection to the db
return r
except:
# If there were any errors, return a NULL row printing an error to the debug
print("Unexpected error getting All Podcast Episodes for Podcast with ID: "+podcast_id, sys.exc_info()[0])
raise
cur.close() # Close the cursor
self.dt(v)
v_t0 = self.fwd_dt(v[:,:,0:3])
v_tn = self.bwd_dt(v[:,:,-3:])
v_t = torch.cat((v_t0, v_t, v_tn), dim=2)
v_t = v_t.reshape(len_d, len_h, len_w, len_b, len_c, len_t)
v_t = v_t.permute(5,3,4,0,1,2) # [t,b,c,d,h,w]
############### corresponding u & v ###################
u = output[:, :, 0:1, 2:-2, 2:-2, 2:-2] # [step, b, c, depth(Z), height(Y), width(X)]
v = output[:, :, 1:2, 2:-2, 2:-2, 2:-2] # [step, b, c, depth(Z), height(Y), width(X)]
# make sure the dimensions consistent
assert laplace_u.shape == u_t.shape
assert u_t.shape == v_t.shape
assert laplace_u.shape == u.shape
assert laplace_v.shape == v.shape
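# Gray-Scott reaction-diffusion model written in residual form below:
#   u_t = DA * laplace(u) - u * v^2 + f * (1 - u)
#   v_t = DB * laplace(v) + u * v^2 - (f + k) * v
# f_u and f_v are the PDE residuals and should vanish for an exact solution.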
# governing equations
DA = 0.2
DB = 0.1
f = 0.025
k = 0.055
f_u = DA * laplace_u - u*(v**2) + f*(1-u) - u_t
f_v = DB * laplace_v + u*(v**2) - (f+k)*v - v_t
return f_u, f_v
def GetModelLoss(self, model):
''' Get the L2-norm of the model '''
l2_reg = torch.tensor(0.).cuda()
for param in model.parameters():
l2_reg += torch.norm(param)
return l2_reg
def LossGen(output, truth, beta, loss_func):
L1_loss = nn.L1Loss()
MSE_loss = nn.MSELoss()
# data loss
data_loss = L1_loss(output, truth)
# phy loss: apply periodic (circular) padding along the x, y and z dimensions before evaluating the PDE residuals; output shape: [t,b,c,d,h,w]
output = torch.cat((output[:, :, :, :, :, -2:], output, output[:, :, :, :, :, 0:3]), dim=5)
output = torch.cat((output[:, :, :, :, -2:, :], output, output[:, :, :, :, 0:3, :]), dim=4)
output = torch.cat((output[:, :, :, -2:, :, :], output, output[:, :, :, 0:3, :, :]), dim=3)
f_u, f_v = loss_func.GetPhyLoss(output)
phy_loss = MSE_loss(f_u, torch.zeros_like(f_u).cuda()) + MSE_loss(
f_v, torch.zeros_like(f_v).cuda())
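# Composite objective: loss = data_loss + beta * phy_loss, trading off data fidelity
# against satisfying the governing equations.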
loss = data_loss + beta * phy_loss
return loss, data_loss, phy_loss
def train(model, train_loader, val_loader, init_state, n_iters, lr, print_every, dt, dx,
beta, save_path, pretrain_flag=False):
# train_loader: low resolution tensor
# beta: weight applied to the physics-informed loss term
best_error = 1e2
print_loss_total = 0
train_loss_list, val_loss_list, val_error_list = [], [], []
pretrain_save_path = save_path + 'pretrain.pt'
model_save_path = save_path + 'checkpoint.pt'
if pretrain_flag == True:
model, _, _ = load_checkpoint(model, optimizer=None, scheduler=None,
save_dir=pretrain_save_path)
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=1e-5)
scheduler = lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.998)
loss_function = LossGenerator(dt, dx)
for epoch in range(n_iters):
for idx, (lres, hres) in enumerate(train_loader):
optimizer.zero_grad()
lres, hres = lres.cuda(), hres.cuda()
lres, hres = lres.transpose(0,1), hres.transpose(0,1) # (b,t,c,d,h,w) -> (t,b,c,d,h,w)
outputs = model(lres, init_state)
# compute loss
loss, data_loss, phy_loss = LossGen(outputs, hres, beta, loss_function)
loss.backward(retain_graph=True)
print_loss_total += loss.item()
# gradient clipping
nn.utils.clip_grad_value_(model.parameters(), clip_value=1.0)
optimizer.step()
scheduler.step()
if (epoch+1) % print_every == 0:
# calculate the average training loss
print_loss_mean = print_loss_total / (print_every*len(train_loader))
train_loss_list.append(print_loss_mean)
print_loss_total = 0
# print the training loss
print('Train loss (%d/%d %d%%): %.8f' % (epoch+1, n_iters,
(epoch+1)/n_iters*100, print_loss_mean))
# for print training loss (details)
print('Epoch %d: data loss(%.8f), phy loss(%.8f)' %(
epoch+1, data_loss.item(), phy_loss.item()))
# calculate the validation loss
val_loss, val_error = validate(model, val_loader, init_state, loss_function, beta)
# for print validation loss
print('Epoch (%d/%d %d%%): val loss %.8f, val error %.8f' % (epoch+1, n_iters,
(epoch+1)/n_iters*100, val_loss, val_error))
print('')
val_loss_list.append(val_loss)
val_error_list.append(val_error)
# save model
if val_error < best_error:
save_checkpoint(model, optimizer, scheduler, model_save_path)
best_error = val_error
return train_loss_list, val_loss_list, val_error_list
def validate(model, val_loader, init_state, loss_function, beta):
''' evaluate the model performance '''
val_loss = 0
val_error = 0
MSE_function = nn.MSELoss()
for idx, (lres, hres) in enumerate(val_loader):
lres, hres = lres.cuda(), hres.cuda()
lres, hres = lres.transpose(0,1), hres.transpose(0,1) # (b,t,c,d,h,w) -> (t,b,c,d,h,w)
outputs = model(lres, init_state)
# calculate the loss
loss,_,_ = LossGen(outputs, hres, beta, loss_function)
val_loss += loss.item()
# calculate the error
error = torch.sqrt(MSE_function(hres, outputs.detach()) / MSE_function(
hres, torch.zeros_like(hres).cuda()))
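# error is the relative L2 norm ||pred - truth||_2 / ||truth||_2 over the full batch tensor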
val_error += error.item()
val_error = val_error / len(val_loader)
val_loss = val_loss / len(val_loader)
return val_loss, val_error
def test(model, test_loader, init_state, save_path, fig_save_path):
# load the well-trained model
model_save_path = save_path + 'checkpoint.pt'
model, _, _ = load_checkpoint(model, optimizer=None, scheduler=None,
save_dir=model_save_path)
MSE_function = nn.MSELoss()
pred_error = 0
for idx, (lres, hres) in enumerate(test_loader):
lres, hres = lres.cuda(), hres.cuda()
lres, hres = lres.transpose(0,1), hres.transpose(0,1) # (b,t,c,d,h,w) -> (t,b,c,d,h,w)
outputs = model(lres, init_state)
# calculate the error
error = torch.sqrt(MSE_function(hres, outputs.detach()) / MSE_function(
hres, torch.zeros_like(hres).cuda()))
pred_error += error.item()
torch.save({"pred": outputs.detach().cpu(), "lres": lres.cpu(),
"hres": hres.cpu()}, save_path + 'output_'+str(idx)+'.pt')
# comparison plot
t = np.arange(hres.shape[0])
for b in range(hres.shape[1]):
u_pred = outputs[:, b, 0, :, :, :].detach().cpu().numpy()
u_true = hres[:, b, 0, :, :, :].cpu().numpy()
plt.figure()
plt.plot(t, u_pred[:, 24, 24, 24], label = 'u-wdsr')
plt.plot(t, u_true[:, 24, 24, 24], label = 'u-Ref.')
plt.xlabel('t')
plt.ylabel('u')
plt.legend()
plt.savefig(fig_save_path + 'u_comp_[i=%d][b=%d].png' %(idx, b))
pred_error = pred_error/len(test_loader)
return pred_error
def save_checkpoint(model, optimizer, scheduler, save_dir):
'''save model and optimizer'''
torch.save({
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'scheduler_state_dict': scheduler.state_dict()
}, save_dir)
def load_checkpoint(model, optimizer, scheduler, save_dir):
'''load model and optimizer'''
checkpoint = torch.load(save_dir)
model.load_state_dict(checkpoint['model_state_dict'])
if (not optimizer is None):
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
print('Pretrained model loaded!')
return model, optimizer, scheduler
class GSDataset(Dataset):
def __init__(self, data_dir, data_fname, ICs, n_slices):
'''
Args:
-----
data_dir: str,
folder path to the data
data_fname: str
the name of the dataset file
ICs: list
the list of random noise parameters
'''
self.data_dir = data_dir
self.data_fname = data_fname
self.ICs = ICs
self.n_slices = n_slices
self.samples = []
for i in range(len(self.ICs)):
# define the data filename
lres_filename = self.data_fname + str(ICs[i]) + '_2x501x12x12x12.mat'
hres_filename = self.data_fname + str(ICs[i]) + '_2x1501x48x48x48.mat'
# load the lres and hres tensor
lres = scio.loadmat(os.path.join(data_dir, lres_filename))
hres = scio.loadmat(os.path.join(data_dir, hres_filename))
lres = lres['uv'] # [501,2,12,12,12]
hres = hres['uv'][500:,...] # [1001,2,48,48,48]
lres_dt, hres_dt = int(lres.shape[0]/n_slices), int(hres.shape[0]/n_slices)
for j in range(n_slices):
lres_tensor = torch.tensor(lres[j*lres_dt:(j+1)*lres_dt,...], dtype=torch.float32)
hres_tensor = torch.tensor(hres[j*hres_dt:(j+1)*hres_dt,...], dtype=torch.float32)
self.samples.append((lres_tensor, hres_tensor))
def __len__(self):
return int(len(self.ICs)*self.n_slices)
def __getitem__(self, idx):
return self.samples[idx]
def get_init_state(batch_size, hidden_channels, output_size, mode='coord'):
'''initial hidden states for all convlstm layers'''
# (b, c, h, w)
num_layers = len(hidden_channels)
initial_state = []
if mode == 'coord':
for i in range(num_layers):
resolution = output_size[i][0]
x, y = [np.linspace(-64, 64, resolution+1)] * 2
x, y = np.meshgrid(x[:-1], y[:-1]) # [32, 32]
xy = np.concatenate((x[None, :], y[None, :]), 0) # [2, 32, 32]
xy = np.repeat(xy, int(hidden_channels[i]/2), axis=0) # [c,h,w]
xy = np.repeat(xy[None, :], batch_size[i], 0) # [b,c,h,w]
xy = torch.tensor(xy, dtype=torch.float32)
initial_state.append((xy, xy))
elif mode == 'zero':
for i in range(num_layers):
(h0, c0) = (torch.zeros(batch_size[i], hidden_channels[i], output_size[i][0],
output_size[i][1]), torch.zeros(batch_size[i], hidden_channels[i], output_size[i][0],
output_size[i][1]))
initial_state.append((h0,c0))
elif mode == 'random':
for i in range(num_layers):
(h0, c0) = (torch.randn(batch_size[i], hidden_channels[i], output_size[i][0],
output_size[i][1], output_size[i][2]), torch.randn(batch_size[i], hidden_channels[i], output_size[i][0],
output_size[i][1], output_size[i][2]))
initial_state.append((h0,c0))
else:
raise NotImplementedError
return initial_state
if __name__ == '__main__':
print('Super-Resolution for 3D GS equation...')
# define the data file path
data_dir = './data/3DGS/'
data_fname = '3DGS_IC'
# define the initial conditions
ICs = np.arange(1,3)
n_slices = 10
n_datasets = len(ICs) * n_slices
data_loader = GSDataset(data_dir, data_fname, ICs, n_slices)
# get mean and std
total_hres = torch.zeros(n_datasets, 100, 2, 48, 48, 48)
total_lres = torch.zeros(n_datasets, 50, 2, 12, 12, 12) # [b,t,c,d,h,w]
for i in range(len(data_loader)):
total_hres[i,...] = data_loader[i][1]
total_lres[i,...] = data_loader[i][0]
mean_hres = torch.mean(total_hres, axis = (0,1,3,4,5))
std_hres = torch.std(total_hres, axis = (0,1,3,4,5))
# split data
split_ratio = [int(n_datasets*0.7), int(n_datasets*0.2), int(n_datasets*0.1)]
train_data, val_data, test_data = torch.utils.data.random_split(data_loader, split_ratio)
# change to pytorch data
# data in train_loader is [b, t, c, d, h, w] -> e.g. [2, 50, 2, 12, 12, 12] for the low-res input
train_loader = torch.utils.data.DataLoader(train_data, batch_size = 2,
shuffle=True, num_workers=0)
val_loader = torch.utils.data.DataLoader(val_data, batch_size = 2,
shuffle=False, num_workers=0)
test_loader = torch.utils.data.DataLoader(test_data, batch_size = 2,
shuffle=False, num_workers=0)
######################### build model #############################
# training parameters
n_iters = 2000
print_every = 2
learning_rate = 1e-3
dt = 1.0
dx = 100.0 / 48.0
steps = 100
effective_step = list(range(0, steps))
beta = 0.025 # for physics loss
save_path = './model/3DGS/'
fig_save_path = './figures/3DGS/'
model = PhySR(
n_feats = 32,
n_layers = [1, 2], # [n_convlstm, n_resblock]
upscale_factor = [2, 4], # [t_up, s_up]
shift_mean_paras = [mean_hres, std_hres],
step = steps,
effective_step = effective_step).cuda()
# define the initial states and initial output for model
init_state = get_init_state(
batch_size = [2],
hidden_channels = [32],
output_size = | |
1.)
def _compute_unreduced_loss_impl(self, labels, logits, mask=None):
"""See `_RankingLoss`."""
if mask is None:
mask = utils.is_label_valid(labels)
def circle_loss_pairwise_op(score_i, score_j):
alpha_i = tf.stop_gradient(
tf.nn.relu(1 - score_i + self._margin), name='circle_loss_alpha_pos')
alpha_j = tf.stop_gradient(
tf.nn.relu(score_j + self._margin), name='circle_loss_alpha_neg')
return alpha_i * (1 - score_i - self._margin) + alpha_j * (
score_j - self._margin)
pairwise_labels, pairwise_logits = _pairwise_comparison(
labels, logits, mask, pairwise_logits_op=circle_loss_pairwise_op)
pairwise_weights = tf.stop_gradient(
pairwise_labels, name='weights_stop_gradient')
# TODO: try lambda_weights for circle loss.
# Pairwise losses and weights will be of shape
# [batch_size, list_size, list_size].
losses = tf.exp(self._gamma * pairwise_logits)
# This computes the per-list losses and weights for circle loss.
per_list_losses = tf.math.log1p(
tf.reduce_sum(tf.math.multiply(losses, pairwise_weights), axis=[1, 2]))
per_list_weights = tf.reduce_sum(
pairwise_weights, axis=[1, 2]) / tf.reduce_sum(
tf.cast(pairwise_weights > 0, tf.float32), axis=[1, 2])
# Return per-list losses and weights with shape [batch_size, 1].
return tf.expand_dims(per_list_losses,
1), tf.expand_dims(per_list_weights, 1)
class SoftmaxLoss(_ListwiseLoss):
"""Implements softmax loss."""
def precompute(self, labels, logits, weights, mask=None):
"""Precomputes Tensors for softmax cross entropy inputs."""
if mask is None:
mask = utils.is_label_valid(labels)
ranks = _compute_ranks(logits, mask)
# Reset the masked labels to 0 and reset the masked logits to a logit with
# ~= 0 contribution in softmax.
labels = tf.compat.v1.where(mask, labels, tf.zeros_like(labels))
logits = tf.compat.v1.where(mask, logits,
tf.math.log(_EPSILON) * tf.ones_like(logits))
if self._lambda_weight is not None and isinstance(self._lambda_weight,
DCGLambdaWeight):
labels = self._lambda_weight.individual_weights(labels, ranks)
if weights is not None:
labels *= weights
return labels, logits
def _compute_unreduced_loss_impl(self, labels, logits, mask=None):
"""See `_RankingLoss`."""
if mask is None:
mask = utils.is_label_valid(labels)
label_sum = tf.reduce_sum(input_tensor=labels, axis=1, keepdims=True)
# Padding for rows with label_sum = 0.
nonzero_mask = tf.greater(tf.reshape(label_sum, [-1]), 0.0)
padded_labels = tf.compat.v1.where(nonzero_mask, labels,
_EPSILON * tf.ones_like(labels))
padded_labels = tf.compat.v1.where(mask, padded_labels,
tf.zeros_like(padded_labels))
padded_label_sum = tf.reduce_sum(
input_tensor=padded_labels, axis=1, keepdims=True)
labels_for_softmax = tf.math.divide_no_nan(padded_labels, padded_label_sum)
logits_for_softmax = logits
# Padded labels have 0 weights in label_sum.
weights_for_softmax = tf.reshape(label_sum, [-1])
losses = tf.compat.v1.nn.softmax_cross_entropy_with_logits_v2(
labels_for_softmax, logits_for_softmax)
return losses, weights_for_softmax
def compute(self, labels, logits, weights, reduction, mask=None):
"""See `_RankingLoss`."""
logits = self.get_logits(logits)
labels, logits = self.precompute(labels, logits, weights, mask)
losses, weights = self._compute_unreduced_loss_impl(labels, logits, mask)
return tf.compat.v1.losses.compute_weighted_loss(
losses, weights, reduction=reduction)
def eval_metric(self, labels, logits, weights, mask=None):
"""See `_RankingLoss`."""
logits = self.get_logits(logits)
labels, logits = self.precompute(labels, logits, weights, mask)
losses, weights = self._compute_unreduced_loss_impl(labels, logits, mask)
return tf.compat.v1.metrics.mean(losses, weights)
def compute_per_list(self, labels, logits, weights, mask=None):
"""See `_RankingLoss`."""
# Prepare input params.
labels, logits, weights, mask = self._prepare_and_validate_params(
labels, logits, weights, mask)
# As opposed to the other listwise losses, SoftmaxLoss returns already
# squeezed losses, which can be returned directly.
logits = self.get_logits(logits)
labels, logits = self.precompute(labels, logits, weights, mask)
return self._compute_unreduced_loss_impl(labels, logits, mask)
def compute_unreduced_loss(self, labels, logits, mask=None):
"""See `_RankingLoss`."""
labels, logits, _, mask = self._prepare_and_validate_params(
labels, logits, None, mask)
logits = self.get_logits(logits)
labels, logits = self.precompute(labels, logits, weights=None, mask=mask)
return self._compute_unreduced_loss_impl(labels, logits, mask)
class UniqueSoftmaxLoss(_ListwiseLoss):
"""Implements unique rating softmax loss."""
def _compute_unreduced_loss_impl(self, labels, logits, mask=None):
"""See `_RankingLoss`."""
if mask is None:
mask = utils.is_label_valid(labels)
labels = tf.compat.v1.where(mask, labels, tf.zeros_like(labels))
logits = tf.compat.v1.where(mask, logits,
tf.math.log(_EPSILON) * tf.ones_like(logits))
pairwise_labels, _ = _pairwise_comparison(labels, logits, mask)
# Used in denominator to compute unique softmax probability for each doc.
denominator_logits = tf.expand_dims(logits, axis=1) * pairwise_labels
denominator_logits = tf.concat(
[denominator_logits, tf.expand_dims(logits, axis=2)], axis=2)
denominator_mask = tf.concat(
[pairwise_labels,
tf.expand_dims(tf.ones_like(logits), axis=2)], axis=2)
denominator_logits = tf.where(
tf.greater(denominator_mask, 0.0), denominator_logits, -1e-3 +
tf.reduce_min(denominator_logits) * tf.ones_like(denominator_logits))
logits_max = tf.reduce_max(denominator_logits, axis=-1, keepdims=True)
# Subtract the max so that exp(denominator_logits) is numerically valid.
denominator_logits -= logits_max
logits -= tf.squeeze(logits_max, axis=-1)
# Set gains for loss weights.
gains = tf.pow(2.0, labels) - 1
# Compute the softmax loss for each doc.
per_doc_softmax = -logits + tf.math.log(
tf.reduce_sum(tf.exp(denominator_logits) * denominator_mask, axis=-1))
losses = tf.reduce_sum(per_doc_softmax * gains, axis=1, keepdims=True)
return losses, tf.ones_like(losses)
class _PointwiseLoss(_RankingLoss):
"""Interface for pointwise loss."""
def _normalize_weights_impl(self, labels, weights):
"""See _RankingLoss."""
if weights is None:
weights = 1.
return tf.compat.v1.where(
utils.is_label_valid(labels),
tf.ones_like(labels) * weights, tf.zeros_like(labels))
def compute_per_list(self, labels, logits, weights, mask=None):
"""See `_RankingLoss`."""
# Prepare input params.
labels, logits, weights, mask = self._prepare_and_validate_params(
labels, logits, weights, mask)
# Pointwise losses and weights will be of shape [batch_size, list_size].
losses, loss_weights = self._compute_unreduced_loss_impl(labels, logits,
mask)
weights = tf.multiply(self._normalize_weights_impl(labels, weights),
loss_weights)
# Compute the weighted per-item loss.
weighted_per_item_loss = tf.math.multiply(losses, weights)
# Sum the inner dimensions to obtain per-list weights. For pointwise losses
# this typically indicates the (weighted) number of items per list.
per_list_weights = tf.reduce_sum(weights, axis=1)
# This computes the per-list losses by summing all weighted per-item losses.
per_list_losses = tf.reduce_sum(weighted_per_item_loss, axis=1)
# Normalize the per-list losses so that lists with different numbers of
# items have comparable losses. The different numbers of items is reflected
# in the per-list weights.
per_list_losses = tf.math.divide_no_nan(per_list_losses, per_list_weights)
return per_list_losses, per_list_weights
class ClickEMLoss(_PointwiseLoss):
"""Implements the click EM loss with examination and relevance.
The implementation is based on the paper by Wang et al: "Position bias
estimation for unbiased learning to rank in personal search." It assumes that
a click is generated by a factorized model P(examination) * P(relevance),
which are latent variables determined by `exam_logits` and `rel_logits`
respectively. An EM algorithm is used for estimation and this function
implements the expectation step to estimate the P(latent | observed), i.e.,
P(examination | click) and P(relevance | click).
"""
def __init__(self,
name,
temperature=1.0,
exam_loss_weight=1.0,
rel_loss_weight=1.0,
ragged=False):
super().__init__(name, None, temperature, ragged)
self._exam_loss_weight = exam_loss_weight
self._rel_loss_weight = rel_loss_weight
def _compute_latent_prob(self, clicks, exam_logits, rel_logits):
"""Computes the probability of latent variables in EM.
The original computation is as follows and can be unstable:
exam_prob = sigmoid(exam_logits)
rel_prob = sigmoid(rel_logits)
exam_prob_posterior = exam_prob * (1 - rel_prob) / (1 - exam_prob *
rel_prob)
rel_prob_posterior = rel_prob * (1 - exam_prob) / (1 - exam_prob *
rel_prob).
To increase the numeric stability, we compute the posterior logits first.
Using the exam_logits_posterior as an example, we have:
exam_logit_posterior = logit(exam_prob_posterior)
= log(exam_prob_posterior / (1 - exam_prob_posterior))
It can be reduced to exam_logits and rel_logits:
exam_logit_posterior = exam_logits - log(1 + exp(rel_logits))
= exam_logits - softplus(rel_logits)
We can do a similar reduction for rel_logit_posterior. Then we compute the
posterior probability by applying sigmoid on the logits.
Args:
clicks: A 2-D `Tensor` for clicks as observed data. A value >= 1.0 is
treated as clicked.
exam_logits: A 2-D `Tensor` to compute P(examination) and has the same
shape as `clicks`.
rel_logits: A 2-D `Tensor` to compute P(relevance) and has the same shape
as `clicks`.
Returns:
A tuple of (exam_given_clicks, rel_given_clicks) representing
P(examination | click) and P(relevance | click).
"""
with tf.compat.v1.name_scope(name='compute_latent_prob'):
is_clicked = tf.greater_equal(tf.cast(clicks, tf.float32), 1.0)
exam_logits_posterior = exam_logits - tf.math.softplus(rel_logits)
rel_logits_posterior = rel_logits - tf.math.softplus(exam_logits)
exam_prob_posterior = tf.compat.v1.where(
is_clicked, tf.ones_like(exam_logits_posterior),
tf.sigmoid(exam_logits_posterior))
rel_prob_posterior = tf.compat.v1.where(
is_clicked, tf.ones_like(rel_logits_posterior),
tf.sigmoid(rel_logits_posterior))
return tf.stop_gradient(exam_prob_posterior), tf.stop_gradient(
rel_prob_posterior)
def _compute_unreduced_loss_impl(self, labels, logits, mask=None):
"""Computes the loss for each element.
Args:
labels: A `Tensor` with shape [batch_size, list_size] representing clicks.
logits: A `Tensor` with shape [batch_size, list_size, 2], where the first
value in the 3rd-dim is the logits for examination and the second value
is the logits for relevance.
mask: A `Tensor` of the same shape as labels indicating which entries are
valid for computing the loss.
Returns:
A tuple(losses, loss_weights).
"""
if mask is None:
mask = utils.is_label_valid(labels)
labels = tf.compat.v1.where(mask, labels, tf.zeros_like(labels))
exam_logits, rel_logits = tf.unstack(logits, axis=2)
exam_logits = tf.compat.v1.where(mask, exam_logits,
tf.zeros_like(exam_logits))
rel_logits = tf.compat.v1.where(mask, rel_logits,
tf.zeros_like(rel_logits))
# The distribution in the E step.
exam_latent_prob, rel_latent_prob = self._compute_latent_prob(
labels, exam_logits, rel_logits)
# The loss in the M step.
losses = tf.compat.v1.nn.sigmoid_cross_entropy_with_logits(
labels=exam_latent_prob, logits=exam_logits) * self._exam_loss_weight
losses += tf.compat.v1.nn.sigmoid_cross_entropy_with_logits(
labels=rel_latent_prob, logits=rel_logits) * self._rel_loss_weight
return losses, tf.cast(mask, dtype=tf.float32)
class SigmoidCrossEntropyLoss(_PointwiseLoss):
"""Implements sigmoid cross entropy loss."""
def __init__(self, name, temperature=1.0, ragged=False):
"""Overwrite the constructor.
Args:
name: A string used as the name for this loss.
temperature: A float number to modify the logits=logits/temperature.
ragged: A boolean indicating whether the input tensors are ragged.
"""
super().__init__(name, None, temperature, ragged)
def _compute_unreduced_loss_impl(self, labels, logits, mask=None):
"""See `_RankingLoss`."""
if mask is None:
mask = utils.is_label_valid(labels)
labels = tf.compat.v1.where(mask, labels, tf.zeros_like(labels))
logits = tf.compat.v1.where(mask, logits, tf.zeros_like(logits))
losses = tf.compat.v1.nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=logits)
return losses, tf.cast(mask, dtype=tf.float32)
class MeanSquaredLoss(_PointwiseLoss):
"""Implements the | |
# Utility functions for manipulating compound (molecular) structures
import os
import re
import pdb
import numpy as np
import molvs
from rdkit import Chem
from rdkit.Chem import AllChem, Draw, Descriptors
stdizer = molvs.standardize.Standardizer(prefer_organic=True)
uncharger = molvs.charge.Uncharger()
def get_rdkit_smiles(orig_smiles, useIsomericSmiles=True):
"""
Given a SMILES string, regenerate a "canonical" SMILES string for the same molecule
using the implementation in RDKit. If useIsomericSmiles is false, stereochemistry information
will be removed in the generated string.
"""
mol = Chem.MolFromSmiles(orig_smiles)
if mol is None:
return ""
else:
return Chem.MolToSmiles(mol, isomericSmiles=useIsomericSmiles)
def rdkit_smiles_from_smiles(orig_smiles, useIsomericSmiles=True, workers=1):
"""
    Parallel version of get_rdkit_smiles. If orig_smiles is a list and workers > 1, spawn 'workers'
    worker processes to convert the input SMILES strings to standardized RDKit format. If
    useIsomericSmiles is false, stereochemistry information will be removed from the generated strings.
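
    Example (illustrative; the SMILES below are arbitrary and the outputs are what
    recent RDKit releases typically return):

    >>> rdkit_smiles_from_smiles('C1=CC=CC=C1')
    'c1ccccc1'
    >>> rdkit_smiles_from_smiles(['C1=CC=CC=C1', 'OCC'])
    ['c1ccccc1', 'CCO']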
"""
if isinstance(orig_smiles,list):
from functools import partial
func = partial(rdkit_smiles_from_smiles,useIsomericSmiles=useIsomericSmiles)
if workers > 1:
from multiprocessing import pool
batchsize = 200
batches = [orig_smiles[i:i+batchsize] for i in range(0, len(orig_smiles), batchsize)]
with pool.Pool(workers) as p:
rdkit_smiles = p.map(func,batches)
rdkit_smiles = [y for x in rdkit_smiles for y in x] #Flatten results
else:
rdkit_smiles = [func(smi) for smi in orig_smiles]
else:
# Actual standardization code, everything above here is for multiprocessing and list parsing
std_mol = Chem.MolFromSmiles(orig_smiles)
if std_mol is None:
rdkit_smiles = ""
else:
rdkit_smiles = Chem.MolToSmiles(std_mol, isomericSmiles=useIsomericSmiles)
return rdkit_smiles
def mols_from_smiles(orig_smiles, workers=1):
"""
    Parallel function to create RDKit Mol objects for a list of SMILES strings. If orig_smiles is a list
    and workers > 1, spawn 'workers' worker processes to convert the input SMILES strings to Mol objects.
"""
if isinstance(orig_smiles,list):
from functools import partial
func = partial(mols_from_smiles)
if workers > 1:
from multiprocessing import pool
batchsize = 200
batches = [orig_smiles[i:i+batchsize] for i in range(0, len(orig_smiles), batchsize)]
with pool.Pool(workers) as p:
mols = p.map(func, batches)
mols = [y for x in mols for y in x] #Flatten results
else:
mols = [func(smi) for smi in orig_smiles]
else:
# Actual standardization code, everything above here is for multiprocessing and list parsing
mols = Chem.MolFromSmiles(orig_smiles)
return mols
def base_smiles_from_smiles(orig_smiles, useIsomericSmiles=True, removeCharges=False, workers=1):
"""
Generate a standardized SMILES string for the largest fragment of the molecule specified by
orig_smiles. Replace any rare isotopes with the most common ones for each element.
If removeCharges is True, add hydrogens as needed to eliminate charges.
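
    Example (an illustrative sketch; the sodium-salt SMILES is arbitrary and the expected
    output is an assumption about the MolVS/RDKit standardization, hence the skip directive):

    >>> base_smiles_from_smiles('CC(=O)[O-].[Na+]', removeCharges=True)  # doctest: +SKIP
    'CC(=O)O'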
"""
if isinstance(orig_smiles,list):
from functools import partial
func = partial(base_smiles_from_smiles,useIsomericSmiles=useIsomericSmiles,removeCharges=removeCharges)
if workers > 1:
from multiprocessing import pool
batchsize = 200
batches = [orig_smiles[i:i+batchsize] for i in range(0, len(orig_smiles), batchsize)]
with pool.Pool(workers) as p:
base_smiles = p.map(func,batches)
base_smiles = [y for x in base_smiles for y in x] #Flatten results
else:
base_smiles = [func(smi) for smi in orig_smiles]
else:
# Actual standardization code, everything above here is for multiprocessing and list parsing
std_mol = base_mol_from_smiles(orig_smiles, useIsomericSmiles, removeCharges)
if std_mol is None:
base_smiles = ""
else:
base_smiles = Chem.MolToSmiles(std_mol, isomericSmiles=useIsomericSmiles)
return base_smiles
def kekulize_smiles(orig_smiles, useIsomericSmiles=True, workers=1):
"""
Generate a Kekulized SMILES string for the molecule specified by
orig_smiles.
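
    Example (illustrative; the output is what recent RDKit releases typically return):

    >>> kekulize_smiles('c1ccccc1')
    'C1=CC=CC=C1'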
"""
if isinstance(orig_smiles,list):
from functools import partial
func = partial(kekulize_smiles,useIsomericSmiles=useIsomericSmiles)
if workers > 1:
from multiprocessing import pool
batchsize = 200
batches = [orig_smiles[i:i+batchsize] for i in range(0, len(orig_smiles), batchsize)]
with pool.Pool(workers) as p:
kekulized_smiles = p.map(func,batches)
kekulized_smiles = [y for x in kekulized_smiles for y in x] #Flatten results
else:
kekulized_smiles = [func(smi) for smi in orig_smiles]
else:
std_mol = Chem.MolFromSmiles(orig_smiles)
if std_mol is None:
kekulized_smiles = ""
else:
Chem.Kekulize(std_mol)
kekulized_smiles = Chem.MolToSmiles(std_mol, kekuleSmiles=True, isomericSmiles=useIsomericSmiles)
return kekulized_smiles
def base_mol_from_smiles(orig_smiles, useIsomericSmiles=True, removeCharges=False):
"""
Generate a standardized RDKit Mol object for the largest fragment of the molecule specified by
orig_smiles. Replace any rare isotopes with the most common ones for each element.
If removeCharges is True, add hydrogens as needed to eliminate charges.
"""
if type(orig_smiles) != str:
return None
if len(orig_smiles) == 0:
return None
cmpd_mol = Chem.MolFromSmiles(orig_smiles)
if cmpd_mol is None:
return None
std_mol = stdizer.isotope_parent(stdizer.fragment_parent(cmpd_mol), skip_standardize=True)
if removeCharges:
std_mol = uncharger(std_mol)
return std_mol
def base_smiles_from_inchi(inchi_str, useIsomericSmiles=True, removeCharges=False, workers=1):
"""
Generate a standardized SMILES string for the largest fragment of the molecule specified by
    InChI string inchi_str. Replace any rare isotopes with the most common ones for each element.
If removeCharges is True, add hydrogens as needed to eliminate charges. If useIsomericSmiles
is True (the default), retain stereochemistry info in the generated SMILES string.
Note that inchi_str may be a list, in which case a list of SMILES strings is generated.
    If workers > 1 and inchi_str is a list, the calculations are parallelized over the given number
    of worker processes.
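
    Example (an illustrative sketch; the InChI shown is ethanol and the expected output is an
    assumption about the standardization, hence the skip directive):

    >>> base_smiles_from_inchi('InChI=1S/C2H6O/c1-2-3/h3H,1-2H3')  # doctest: +SKIP
    'CCO'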
"""
if isinstance(inchi_str,list):
from functools import partial
func = partial(base_smiles_from_inchi, useIsomericSmiles=useIsomericSmiles, removeCharges=removeCharges)
if workers > 1:
from multiprocessing import pool
batchsize = 200
batches = [inchi_str[i:i+batchsize] for i in range(0, len(inchi_str), batchsize)]
with pool.Pool(workers) as p:
base_smiles = p.map(func,batches)
base_smiles = [y for x in base_smiles for y in x] #Flatten results
else:
base_smiles = [func(inchi) for inchi in inchi_str]
else:
# Actual standardization code, everything above here is for multiprocessing and list parsing
std_mol = base_mol_from_inchi(inchi_str, useIsomericSmiles, removeCharges)
if std_mol is None:
base_smiles = ""
else:
base_smiles = Chem.MolToSmiles(std_mol, isomericSmiles=useIsomericSmiles)
return base_smiles
def base_mol_from_inchi(inchi_str, useIsomericSmiles=True, removeCharges=False):
"""
Generate a standardized RDKit Mol object for the largest fragment of the molecule specified by
    InChI string inchi_str. Replace any rare isotopes with the most common ones for each element.
If removeCharges is True, add hydrogens as needed to eliminate charges.
"""
if type(inchi_str) != str:
return None
if len(inchi_str) == 0:
return None
cmpd_mol = Chem.inchi.MolFromInchi(inchi_str)
if cmpd_mol is None:
return None
std_mol = stdizer.isotope_parent(stdizer.fragment_parent(cmpd_mol), skip_standardize=True)
if removeCharges:
std_mol = uncharger(std_mol)
return std_mol
def draw_structure(smiles_str, image_path, image_size=500):
"""
Draw structure for the compound with the given SMILES string, in a PNG file
with the given path.
"""
mol = Chem.MolFromSmiles(smiles_str)
if mol is None:
        print("Unable to read original SMILES string %s" % smiles_str)
else:
_discard = AllChem.Compute2DCoords(mol)
Draw.MolToFile(mol, image_path, size=(image_size,image_size), fitImage=False)
def standardize_chemistry(df,standard='rdkit',smiles_col='rdkit_smiles',workers=1):
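    """
    Add a standardized compound identifier column to `df`, computed from `smiles_col`.
    `standard` may be 'rdkit' (canonical base SMILES) or 'inchi' (InChI strings).
    Returns the modified dataframe and the name of the column that was added.
    """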
smiles = list(df[smiles_col])
out = []
if standard.lower() == 'rdkit':
col = 'rdkit_smiles'
out = base_smiles_from_smiles(smiles,workers=workers)
elif standard.lower() == 'inchi':
col = 'InCHI'
for smi in smiles:
try:
mol = Chem.MolFromSmiles(smi)
out.append(Chem.inchi.MolToInchi(mol))
except:
out.append('Invalid SMILES: %s' % (smi))
    elif standard.lower() == 'name':
print('Name technique currently not implemented')
else:
raise Exception('Unrecognized standardization type: %s' % (standard))
df[col] = out
return df, col
def merge_values(values,strategy='list'):
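    """
    Combine a list of values into a single value (or list) according to `strategy`.

    Supported strategies: 'list', 'uniquelist', 'mean', 'geomean', 'median', 'mode',
    'max' and 'min'. Empty strings are removed first; if values is None, NaN is
    returned; a single-element result is unwrapped to a scalar.
    """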
    try:
        values.remove('')
    except (ValueError, AttributeError):
        pass
if values is None:
val = float('NaN')
elif strategy == 'list':
val = values
elif strategy == 'uniquelist':
val = list(set(values))
elif strategy == 'mean':
val = np.mean(values)
    elif strategy == 'geomean':
        # np.geomean does not exist; compute the geometric mean directly
        val = np.exp(np.mean(np.log(values)))
    elif strategy == 'median':
        val = np.median(values)
    elif strategy == 'mode':
        # np.mode does not exist; use the most frequent value
        values = list(values)
        val = max(set(values), key=values.count)
elif strategy == 'max':
val = max(values)
elif strategy == 'min':
val = min(values)
else:
        raise Exception('Unknown column merge strategy: %s' % strategy)
if type(val) is list and len(val) == 1:
val = val[0]
return val
def merge_dataframes_by_smiles(dataframes,smiles_col='rdkit_smiles',id_col='compound_id',how='outer',comparetype='rdkit',columnmerge=None,workers=1):
"""
    Merge a list of dataframes labeled by SMILES strings, using an RDKit or InChI canonicalization to identify shared compounds
"""
left_df, joincol = standardize_chemistry(dataframes[0], standard=comparetype, workers=workers)
for idx,df in enumerate(dataframes[1:]):
df, joincol = standardize_chemistry(df, standard=comparetype, smiles_col='rdkit_smiles', workers=workers)
new_df = left_df.merge(df, how=how, on=[joincol])
new_df = new_df.fillna('')
if columnmerge is not None:
shared_cols = list(set(left_df.columns.values) & set(df.columns.values))
shared_cols.remove(joincol)
for col in shared_cols:
lCol = col + '_x'
rCol = col + '_y'
vals = list(zip(new_df[lCol],new_df[rCol]))
vals = [list(i) for i in vals]
if col == id_col:
vals = [merge_values(i,strategy='min') for i in vals]
else:
vals = [merge_values(i,strategy=columnmerge) for i in vals]
new_df[col] = vals
new_df = new_df.drop([lCol,rCol],axis=1)
left_df = new_df
return new_df
def smiles_to_inchi_key(smiles):
"""InChI key from SMILES string. SMILES > RDKit molecule object >
InChI string > InChI key.
Returns None if cannot convert SMILES string to RDKit molecule.
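
    Example (illustrative; the key shown is the expected InChI key for ethanol and is
    stated as an assumption, hence the skip directive):

    >>> smiles_to_inchi_key('CCO')  # doctest: +SKIP
    'LFQSCWFLJHTTHZ-UHFFFAOYSA-N'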
"""
    m = Chem.MolFromSmiles(smiles)
    if m:
        inchi = Chem.MolToInchi(m)
        inchi_key = Chem.InchiToInchiKey(inchi)
else:
inchi_key = None
return inchi_key
def fix_moe_smiles(smiles):
"""
    Correct the SMILES strings generated by MOE to standardize them.
    """
import matplotlib.pyplot as plt
import os
import pandas
import numpy as np
from spinedb import SpineDB
import pandas as pd
def get_participating_technologies_in_capacity_market(db_emlab_powerplantdispatchplans, years_to_generate, years_emlab,
db_emlab_powerplants):
"""
    This function returns the capacity that receives revenue from the Capacity Market, aggregated
    per technology/fuel combination and per simulation year.
    :param db_emlab_powerplantdispatchplans: PPDPs as queried from SpineDB EMLab
    :param years_to_generate: Simulation years used as column labels in the returned DataFrame
    :param years_emlab: Corresponding EMLab year indices (matched against the rows' 'alternative' field)
    :param db_emlab_powerplants: Power plant rows as queried from SpineDB EMLab
    :return: DataFrame with one row per technology/fuel combination and one column per year
"""
capacity_market_aggregated_per_tech = pd.DataFrame()
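    # The queries below assume each row of db_emlab_powerplantdispatchplans (and of
    # db_emlab_powerplants) is a dict with at least the keys 'object_name',
    # 'parameter_name', 'parameter_value' and 'alternative' -- an assumption based on
    # how the rows are indexed here, not on the SpineDB documentation.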
for year in years_emlab:
capacity_market_ppdps = [row['object_name'] for row in db_emlab_powerplantdispatchplans if
row['parameter_name'] == 'Market' and
row['parameter_value'] == 'DutchCapacityMarket' and
row['alternative'] == str(year)]
capacity_market_accepted_ppdps = [row['object_name'] for row in db_emlab_powerplantdispatchplans if
row['object_name'] in capacity_market_ppdps and
row['parameter_name'] == 'AcceptedAmount' and
row['parameter_value'] > 0]
list_of_tech_fuel_cominations = []
capacity_market_participating_capacities = []
for ppdp in capacity_market_accepted_ppdps:
plant_name = next(row['parameter_value'] for row in db_emlab_powerplantdispatchplans if
row['object_name'] == ppdp and
row['parameter_name'] == 'Plant')
plant_accepted_amount = next(row['parameter_value'] for row in db_emlab_powerplantdispatchplans if
row['object_name'] == ppdp and
row['parameter_name'] == 'AcceptedAmount')
plant_technology = next(row['parameter_value'] for row in db_emlab_powerplants if
row['object_name'] == plant_name and
row['parameter_name'] == 'TECHTYPENL')
plant_fuel = next(row['parameter_value'] for row in db_emlab_powerplants if
row['object_name'] == plant_name and
row['parameter_name'] == 'FUELNL')
list_of_tech_fuel_cominations.append(plant_technology + ', ' + plant_fuel)
capacity_market_participating_capacities.append(plant_accepted_amount)
df_year = pd.DataFrame({'technology_fuel': list_of_tech_fuel_cominations,
'capacity': capacity_market_participating_capacities})
capacity_market_aggregated_per_tech[year] = df_year.groupby('technology_fuel').sum()
years_dictionary = dict(zip(years_emlab, years_to_generate))
capacity_market_aggregated_per_tech.rename(columns=years_dictionary,
inplace=True)
return capacity_market_aggregated_per_tech.fillna(0)
def plot_mcps_with_filter(db_mcps, market, years_to_generate, path_to_plots, title, file_name, yl, ylim):
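    """
    Plot the market clearing prices of `market` (as found in db_mcps) against the simulation
    years and save the figure as `file_name` in `path_to_plots`. `yl` and `ylim` set the
    y-axis label and limits, and `title` sets the plot title.
    """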
# MCP Plots
filtered_mcps = [i['object_name'] for i in db_mcps if
i['parameter_name'] == 'Market' and i['parameter_value'] == market]
mcp_x = []
mcp_y = []
print('Creating ' + str(market) + ' MCP plot')
for row in [i for i in db_mcps if i['object_name'] in filtered_mcps]:
if row['parameter_name'] == 'Price':
mcp_x.append(int(row['alternative']) + years_to_generate[0])
mcp_y.append(row['parameter_value'])
fig7 = plt.figure()
axs7 = plt.axes()
plt.grid(b=True)
axs7.plot(mcp_x, mcp_y, 'o')
axs7.set_axisbelow(True)
plt.xlabel('Years')
plt.ylabel(yl)
plt.ylim(ylim)
axs7.set_title(title)
fig7.savefig(path_to_plots + '/' + file_name, bbox_inches='tight', dpi=300)
def plot_annual_balances(annual_balance, years_to_generate, path_to_plots):
# Annual Balance
print('Create Annual Balance plot')
plt.figure()
annual_balance_df = pd.DataFrame(annual_balance, index=years_to_generate)
axs125 = annual_balance_df.plot.bar(stacked=True, rot=0, colormap='tab20', grid=True, legend=False)
plt.xlabel('Years', fontsize='medium')
plt.ylabel('Supply or Demand (MWh)', fontsize='medium')
axs125.set_title('NL Annual Balance per Technology')
axs125.set_axisbelow(True)
plt.ylim([-0.6e8, 1.75e8])
fig125 = axs125.get_figure()
fig125.savefig(path_to_plots + '/' + 'NL Annual Balance.png', bbox_inches='tight', dpi=300)
def plot_vre_nl_installed_capacity(vre_investment_sums, years_to_generate, path_to_plots):
# VRE Investments plot
print('Create VRE Investments plot')
plt.figure()
vre_investments_df = pd.DataFrame(vre_investment_sums, index=years_to_generate)
axs5 = vre_investments_df.plot.bar(stacked=True, rot=0, colormap='tab20', legend=False)
axs5.set_axisbelow(True)
plt.xlabel('Years', fontsize='medium')
plt.ylabel('Capacity (MW)', fontsize='medium')
axs5.set_title('NL VRE Installed Capacity')
fig5 = axs5.get_figure()
fig5.savefig(path_to_plots + '/' + 'NL Installed Capacity.png', bbox_inches='tight', dpi=300)
def plot_investments(investment_sums, years_to_generate, path_to_plots, look_ahead):
# Investments plot
print('Create Investments plot')
plt.figure()
investments_df = pd.DataFrame(investment_sums,
index=list(range(years_to_generate[0], years_to_generate[-1] + look_ahead + 1)))
axs6 = investments_df.plot.bar(stacked=True, rot=0, colormap='tab20', grid=True, legend=False)
axs6.set_axisbelow(True)
plt.xlabel('Years', fontsize='medium')
plt.ylabel('Capacity (MW)', fontsize='medium')
plt.ylim([-4.3e5, 5.5e5])
# leg = plt.legend(fontsize='medium', loc='upper left', bbox_to_anchor=(1, 1.1))
axs6.set_title('EU Capacity Investments per Technology')
fig6 = axs6.get_figure()
fig6.savefig(path_to_plots + '/' + 'EU Investments.png', bbox_inches='tight', dpi=300)
def plot_nl_investments(investment_sums, years_to_generate, path_to_plots, look_ahead):
# NL Investments plot
print('Create NL Investments plot')
plt.figure()
investments_df = pd.DataFrame(investment_sums,
index=list(range(years_to_generate[0], years_to_generate[-1] + look_ahead + 1)))
axs6 = investments_df.plot.bar(stacked=True, rot=0, colormap='tab20', grid=True, legend=False)
plt.xlabel('Years', fontsize='medium')
plt.ylabel('Capacity (MW)', fontsize='medium')
# plt.legend(fontsize='medium', loc='upper left', bbox_to_anchor=(1, 1.1))
axs6.set_title('NL Capacity Investments per Technology')
axs6.set_axisbelow(True)
plt.ylim([-20e3, 33e3])
fig6 = axs6.get_figure()
fig6.savefig(path_to_plots + '/' + 'NL Investments.png', bbox_inches='tight', dpi=300)
def plot_co2_emissions(co2_emission_sums, years_to_generate, path_to_plots):
# CO2 emissions plot
print('Create annual CO2 Emission per tech plot')
plt.figure()
co2_df = pd.DataFrame(co2_emission_sums, index=years_to_generate)
axs4 = co2_df.plot.bar(stacked=True, rot=0, colormap='tab20', grid=True, legend=False)
plt.xlabel('Years', fontsize='medium')
plt.ylabel('Emissions (ton CO2)', fontsize='medium')
axs4.set_title('NL CO2 Emissions per Technology')
axs4.set_axisbelow(True)
plt.ylim([0, 3.5e7])
# plt.legend(fontsize='medium', loc='upper left', bbox_to_anchor=(1, 1.1))
fig4 = axs4.get_figure()
fig4.savefig(path_to_plots + '/' + 'NL CO2 Emissions.png', bbox_inches='tight', dpi=300)
def plot_nl_unit_generation(path_and_filename_dispatch, year, path_to_plots):
print('Plot NL Unit Generation')
# Plot 3 NL Unit Generation curve
nl_unit_generation_df = pandas.read_excel(path_and_filename_dispatch, 'NL Unit Generation', skiprows=1, index_col=0,
header=0).transpose()
plt.figure()
axs3 = nl_unit_generation_df.plot()
axs3.set_axisbelow(True)
plt.xlabel('Hours', fontsize='medium')
plt.ylabel('Generation (MWh)', fontsize='medium')
plt.legend(fontsize='medium', loc='upper left', bbox_to_anchor=(1, 1.1))
axs3.set_title('NL Unit Generation ' + str(year))
fig3 = axs3.get_figure()
fig3.savefig(path_to_plots + '/' + 'NL Unit Generation ' + str(year) + '.png', bbox_inches='tight', dpi=300)
def plot_and_prepare_hourly_nodal_price_duration_curve(hourly_nodal_prices_df, year, path_to_plots,
price_duration_curves):
# Plot 2.5 Hourly Market Price Duration Curve
print('Create Hourly Nodal Price duration curve')
plt.figure()
axs25 = hourly_nodal_prices_df['NED'].sort_values(ascending=False).plot(use_index=False, grid=True, legend=False)
plt.xlabel('Hours')
plt.ylabel('Price (Euro / MWh)')
axs25.set_title('NL Hourly Electricity Spot Market Price Duration Curve ' + str(year))
axs25.set_axisbelow(True)
plt.ylim([0, min(hourly_nodal_prices_df['NED'].max() + 50, 250)])
fig25 = axs25.get_figure()
fig25.savefig(path_to_plots + '/' + 'NL Nodal Prices Duration Curve ' + str(year) + '.png', bbox_inches='tight', dpi=300)
price_duration_curves[year] = hourly_nodal_prices_df['NED'].sort_values(ascending=False).values
return price_duration_curves
def plot_hourly_nodal_prices(path_and_filename_dispatch, year, path_to_plots):
# Plot 2 Hourly Nodal Prices
print('Read and create hourly nodal prices plot')
hourly_nodal_prices_df = pandas.read_excel(path_and_filename_dispatch, 'Hourly Nodal Prices', skiprows=1,
index_col=0)
# hourly_nodal_prices_df[hourly_nodal_prices_df > 250] = 250
plt.figure()
axs2 = hourly_nodal_prices_df['NED'].plot(grid=True)
axs2.set_axisbelow(True)
plt.xlabel('Hours')
plt.ylabel('Price (Euro / MWh)')
plt.xlim([0, 8760])
plt.ylim([0, min(hourly_nodal_prices_df['NED'].max() + 50, 250)])
axs2.set_title('NL Hourly Electricity Spot Market Prices ' + str(year))
fig2 = axs2.get_figure()
fig2.savefig(path_to_plots + '/' + 'NL Nodal Prices ' + str(year) + '.png', bbox_inches='tight', dpi=300)
return hourly_nodal_prices_df
def plot_and_prepare_residual_load_duration_curve(hourly_nl_balance_demand, hourly_nl_balance_df, year, path_to_plots,
residual_load_curves):
# Plot 1.75: Residual Load Curve
print('Create Res Load duration curve')
plt.figure()
hourly_nl_balance_residual_load = hourly_nl_balance_demand.subtract(hourly_nl_balance_df['Wind Onshore']) \
.subtract(hourly_nl_balance_df['Wind Offshore']) \
.subtract(hourly_nl_balance_df['Sun']) \
.subtract(hourly_nl_balance_df['Hydro Conv.'])
axs175 = hourly_nl_balance_residual_load.sort_values(ascending=False).plot(use_index=False, grid=True, legend=False)
axs175.set_title('NL Residual Load Duration Curve ' + str(year))
axs175.set_axisbelow(True)
plt.xlabel('Hours')
plt.ylabel('Residual Load (MWh)')
plt.xlim([0, 8760])
fig175 = axs175.get_figure()
fig175.savefig(path_to_plots + '/' + 'NL Residual Load Duration Curve ' + str(year) + '.png', bbox_inches='tight', dpi=300)
residual_load_curves[year] = hourly_nl_balance_residual_load.sort_values(ascending=False).values
return residual_load_curves
def plot_and_prepare_load_duration_curve(hourly_nl_balance_demand, year, path_to_plots, load_duration_curves):
# Plot 1.5: Load duration curve
print('Create Load duration curve plot')
plt.figure()
axs15 = hourly_nl_balance_demand.sort_values(ascending=False).plot(use_index=False, grid=True, legend=False)
axs15.set_title('NL Load Duration Curve ' + str(year))
axs15.set_axisbelow(True)
plt.xlabel('Hours')
plt.ylabel('Load (MWh)')
plt.xlim([0, 8760])
fig15 = axs15.get_figure()
fig15.savefig(path_to_plots + '/' + 'NL Load Duration Curve ' + str(year) + '.png', bbox_inches='tight', dpi=300)
load_duration_curves[year] = hourly_nl_balance_demand.sort_values(ascending=False).values
return load_duration_curves
def prepare_annual_nl_balance(hourly_nl_balance_df, annual_balance, years_to_generate, year):
print('Prepare Annual NL Balance plot data')
hourly_nl_annual = hourly_nl_balance_df.sum()
for index, col in hourly_nl_annual.iteritems():
if index in annual_balance.keys():
annual_balance[index].append(col)
else:
annual_balance[index] = [0] * years_to_generate.index(year) + [col]
return annual_balance
def plot_hourly_nl_balance(path_and_filename_dispatch, path_to_plots, year):
# Plot 1 Hourly NL Balance (per year)
print('Read and Create Hourly NL Balance plot')
hourly_nl_balance_df = pandas.read_excel(path_and_filename_dispatch, 'Hourly NL Balance', skiprows=1, index_col=0,
skipfooter=2, usecols='A:W').replace(np.nan, 0)
hourly_nl_balance_demand = hourly_nl_balance_df['Demand']
hourly_nl_balance_df = hourly_nl_balance_df.drop(['Demand'], axis=1)
hourly_nl_balance_df['Exports'] = -1 * hourly_nl_balance_df['Exports']
hourly_nl_balance_df['H2'] = -1 * hourly_nl_balance_df['H2']
hourly_nl_balance_df['Heat'] = -1 * hourly_nl_balance_df['Heat']
hourly_nl_balance_df['HP'] = -1 * hourly_nl_balance_df['HP']
hourly_nl_balance_df['EVs'] = -1 * hourly_nl_balance_df['EVs']
hourly_nl_balance_df['Storage cons.'] = -1 * hourly_nl_balance_df['Storage cons.']
hourly_nl_balance_df_resampled = hourly_nl_balance_df.copy()
hourly_nl_balance_df_resampled['T'] = hourly_nl_balance_df_resampled.sum(axis=1)
hourly_nl_balance_df_resampled.index = pandas.to_timedelta(hourly_nl_balance_df_resampled.index, unit='H')
hourly_nl_balance_df_resampled = hourly_nl_balance_df_resampled.resample('50H').mean()
hourly_nl_balance_df_resampled = hourly_nl_balance_df_resampled.drop(['T'], axis=1)
hourly_nl_balance_df_resampled = hourly_nl_balance_df_resampled.interpolate(method='cubic')
hourly_nl_balance_df_resampled.index = [i * 50 for i in range(0, len(hourly_nl_balance_df_resampled))]
axs = hourly_nl_balance_df_resampled.plot.area(colormap='tab20', linewidth=0, legend=False)
axs.set_title('Hourly NL Balance - All Technologies ' + str(year))
axs.set_axisbelow(True)
plt.xlabel('Hours', fontsize='medium')
plt.ylabel('Supply or Demand (MWh)', fontsize='medium')
plt.xlim([0, 8760])
# plt.legend(fontsize='medium', loc='best', bbox_to_anchor=(1, 1.1))
fig = axs.get_figure()
fig.savefig(path_to_plots + '/' + 'NL Hourly Balance ' + str(year) + '.png', bbox_inches='tight', dpi=300)
return hourly_nl_balance_df, hourly_nl_balance_demand
def prepare_co2_emission_data(path_and_filename_dispatch, co2_emission_sums, years_to_generate, year):
# Preparing values for CO2 Emissions plot, plot after years iterations
print('Prepare CO2 Emission plot data')
co2_emissions = pandas.read_excel(path_and_filename_dispatch, 'CO2 Emissions tech', skiprows=1, index_col=0)
co2_emissions.columns = [i[0] + ',' + i[1] for i in zip(co2_emissions.columns.values, co2_emissions.iloc[0].values)]
for index, value in co2_emissions.loc['NED'].iteritems():
if index in co2_emission_sums.keys():
co2_emission_sums[index].append(value)
else:
co2_emission_sums[index] = [0] * years_to_generate.index(year) + [value]
# Add 0 to values if not in COMPETES results
for key in co2_emission_sums.keys():
if key not in co2_emissions.columns.values:
co2_emission_sums[key].append(0)
return co2_emission_sums
def prepare_vre_investment_data(path_and_filename_investments, vre_investment_sums, years_to_generate, year):
# Preparing values for VRE Investments plot, plot after years iterations
print('Preparing VRE Investment data')
vre_investments = pandas.read_excel(path_and_filename_investments, 'VRE investment', skiprows=2)
for index, row in vre_investments[vre_investments['Bus'] == 'NED'].iterrows():
if row['WindOn'] in vre_investment_sums.keys():
vre_investment_sums[row['WindOn']].append(row['Initial'])
else:
vre_investment_sums[row['WindOn']] = [0] * years_to_generate.index(year) + [row['Initial']]
# Add 0 to values if not in COMPETES results
for key in vre_investment_sums.keys():
if key not in vre_investments[vre_investments['Bus'] == 'NED']['WindOn'].values:
vre_investment_sums[key].append(0)
return vre_investment_sums
def prepare_investment_and_decom_data(path_and_filename_investments, investment_sums, years_to_generate, year,
emlab_spine_powerplants_tech_dict, emlab_spine_powerplants_fuel_dict,
emlab_spine_technologies, look_ahead, nl_investment_sums):
print('Loading investment and decom data')
decommissioning = pandas.read_excel(path_and_filename_investments, 'Decommissioning', skiprows=2, usecols='A:C')
decommissioning = decommissioning.dropna()
nl_decommissioning = decommissioning[decommissioning['node'] == 'NED'].copy()
investments = pandas.read_excel(path_and_filename_investments, 'New Generation Capacity', skiprows=2, usecols="A:D")
investments = investments.dropna()
nl_investments = investments[investments['Node'] == 'NED'].copy()
investment_sums, investments = prepare_investment_data(investments, investment_sums, years_to_generate, year,
emlab_spine_technologies, look_ahead)
nl_investment_sums, nl_investments = prepare_investment_data(nl_investments, nl_investment_sums, years_to_generate,
year, emlab_spine_technologies, look_ahead)
investment_sums = prepare_decom_data(decommissioning, emlab_spine_powerplants_tech_dict, investment_sums,
                                                          years_to_generate, year,
by `ptype` and can either be uniform, or always X, Y, or Z errors.
The dictionary returned is in the appropriate format for the
`circuit_simulator_for_tensored_independent_pauli_errors()` circuit simulator function.
Parameters
----------
pspec : QubitProcessorSpec
The QubitProcessorSpec that defines the device.
one_qubit_gate_errorrate : float
The 1-qubit gates error rate (the probability of a Pauli error on the target qubit) not including
idle gates.
two_qubit_gate_errorrate : float
The 2-qubit gates error rate (the total probability of a Pauli error on either qubit the gate acts
on -- each qubit has independent errors with equal probabilities).
idle_errorrate : float
The idle gates error rate.
    measurement_errorrate : float
The measurement error rate for all of the qubits. This is the probability that a qubits measurement
result is bit-flipped.
ptype : str, optional
Can be 'uniform', 'X', 'Y' or 'Z'. If 'uniform' then 3 Pauli errors are equally likely, if 'X', 'Y' or
'Z' then the errors are always Pauli X, Y or Z errors, respectively.
TODO: docstring idle1q_placeholder
Returns
-------
dict
        A dict that encodes the error model described above in the format required for the simulator
`circuit_simulator_for_tensored_independent_pauli_errors()`.
"""
if ptype == 'uniform':
def error_row(er): return _np.array([1 - er, er / 3, er / 3, er / 3])
elif ptype == 'X':
def error_row(er): return _np.array([1 - er, er, 0., 0.])
elif ptype == 'Y':
def error_row(er): return _np.array([1 - er, 0., er, 0.])
elif ptype == 'Z':
def error_row(er): return _np.array([1 - er, 0., 0., er])
else:
raise ValueError("Error model type not understood! Set `ptype` to a valid option.")
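    # The total 2-qubit gate error rate is split into independent, equal per-qubit error
    # rates p_q satisfying (1 - p_q)**2 = 1 - two_qubit_gate_errorrate (both qubits must be
    # error-free for the gate to be error-free), hence p_q = 1 - sqrt(1 - p_2Q) below.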
perQ_twoQ_errorrate = 1 - (1 - two_qubit_gate_errorrate)**(1 / 2)
n = pspec.num_qubits
errormodel = {}
if idle1q_placeholder is not None:
#Added by EGN: special behavior needed when Model has
        # a gate name used to designate a perfect 1-qubit idle op (used as a placeholder).
# This translates to a set of "<gatename>:X" operation labels all w/idle_errorrate
idleLbl = idle1q_placeholder
for q in pspec.qubit_labels:
gl = _Lbl(idleLbl, q)
errormodel[gl] = _np.zeros((n, 4), float)
errormodel[gl][:, 0] = _np.ones(n, float)
errormodel[gl][pspec.qubit_labels.index(q), :] = error_row(idle_errorrate)
for gate in pspec.models['clifford'].primitive_op_labels:
errormodel[gate] = _np.zeros((n, 4), float)
errormodel[gate][:, 0] = _np.ones(n, float)
# If not a CNOT, it is a 1-qubit gate / idle.
if gate.num_qubits == 2:
q1 = gate.qubits[0]
q2 = gate.qubits[1]
er = perQ_twoQ_errorrate
errormodel[gate][pspec.qubit_labels.index(q1), :] = error_row(er)
errormodel[gate][pspec.qubit_labels.index(q2), :] = error_row(er)
elif gate.num_qubits == 1:
q = gate.qubits[0]
er = one_qubit_gate_errorrate
errormodel[gate][pspec.qubit_labels.index(q), :] = error_row(er)
else:
raise ValueError("The QubitProcessorSpec must only contain 1- and 2- qubit gates!")
errormodel['measure'] = _np.array([measurement_errorrate for q in range(n)])
return errormodel
def create_locally_gate_independent_pauli_error_model(pspec, gate_errorrate_dict, measurement_errorrate_dict={},
ptype='uniform', idle1q_placeholder='I'):
"""
Returns a dictionary encoding a Pauli-stochastic error model whereby the errors are independent of the gates,
with a qubit subject to an error after a circuit layer with the probabilities specified by the dict
`gate_errorrate_dict`. The probability of the 3 different Pauli errors on each qubit is specified by
`ptype` and can either be uniform, or always X, Y, or Z errors.
The dictionary returned is in the appropriate format for the
`circuit_simulator_for_tensored_independent_pauli_errors()` circuit simulator function.
Parameters
----------
pspec : QubitProcessorSpec
The QubitProcessorSpec that defines the device.
gate_errorrate_dict : dict
A dict where the keys are elements of pspec.qubit_labels and the values are floats in [0,1].
The element for qubit with label `q` is the error probability for that qubit.
measurement_errorrate_dict : dict
A dict where the keys are elements of pspec.qubit_labels and the values are floats in [0,1].
The element for qubit with label `q` is the measurement bit-flip error probability for that qubit.
All qubits that do not have a measurement error rate specified are assumed to have perfect measurements.
ptype : str, optional
Can be 'uniform', 'X', 'Y' or 'Z'. If 'uniform' then 3 Pauli errors are equally likely, if 'X', 'Y' or
'Z' then the errors are always Pauli X, Y or Z errors, respectively.
TODO: docstring: idle1q_placeholder
Returns
-------
dict
        A dict that encodes the error model described above in the format required for the simulator
`circuit_simulator_for_tensored_independent_pauli_errors()`.
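
    Illustrative shape of the returned dict (a sketch, assuming a hypothetical 2-qubit
    device with qubit labels 'Q0' and 'Q1')::

        errormodel[<gate label>]  -> (2, 4) array; row i holds
            [P(no error), P(X), P(Y), P(Z)] for qubit i after a layer containing that gate.
        errormodel['measure']     -> length-2 array of per-qubit measurement bit-flip probabilities.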
"""
if ptype == 'uniform':
def error_row(er): return _np.array([1 - er, er / 3, er / 3, er / 3])
elif ptype == 'X':
def error_row(er): return _np.array([1 - er, er, 0., 0.])
elif ptype == 'Y':
def error_row(er): return _np.array([1 - er, 0., er, 0.])
elif ptype == 'Z':
def error_row(er): return _np.array([1 - er, 0., 0., er])
else:
raise ValueError("Error model type not understood! Set `ptype` to a valid option.")
n = pspec.num_qubits
errormodel = {}
if idle1q_placeholder is not None:
#Added by EGN: special behavior needed when Model has
        # a gate name used to designate a perfect 1-qubit idle op (used as a placeholder).
# This translates to a set of "<gatename>:X" operation labels all w/appropriate errorrate
idleLbl = idle1q_placeholder
for q in pspec.qubit_labels:
gl = _Lbl(idleLbl, q)
er = gate_errorrate_dict[q]
errormodel[gl] = _np.zeros((n, 4), float)
errormodel[gl][:, 0] = _np.ones(n, float)
errormodel[gl][pspec.qubit_labels.index(q), :] = error_row(er)
for gate in pspec.models['clifford'].primitive_op_labels:
errormodel[gate] = _np.zeros((n, 4), float)
errormodel[gate][:, 0] = _np.ones(n, float)
for q in gate.qubits:
er = gate_errorrate_dict[q]
errormodel[gate][pspec.qubit_labels.index(q)] = error_row(er)
errormodel['measure'] = _np.array([measurement_errorrate_dict.get(q, 0.) for q in pspec.qubit_labels])
return errormodel
#
# TODO : DOES THIS NEED AN IDLE PLACEHOLDER?
#
def create_local_pauli_error_model(pspec, one_qubit_gate_errorrate_dict, two_qubit_gate_errorrate_dict,
measurement_errorrate_dict={}, ptype='uniform'):
"""
Returns a dictionary encoding a Pauli-stochastic error model whereby the errors caused by a gate act
only on the "target" qubits of the gate, all the 1-qubit gates on a qubit have the same error rate,
and all the 2-qubit gates on a qubit have the same error rate. The probability of the 3 different Pauli
errors on each qubit is specified by `ptype` and can either be uniform, or always X, Y, or Z errors.
The dictionary returned is in the appropriate format for the
`circuit_simulator_for_tensored_independent_pauli_errors()` circuit simulator function.
Parameters
----------
pspec : QubitProcessorSpec
The QubitProcessorSpec that defines the device.
one_qubit_gate_errorrate_dict : dict
A dict where the keys are elements of pspec.qubit_labels and the values are floats in [0,1].
The element for qubit with label `q` is the error probability for all 1-qubit gates on that qubit
two_qubit_gate_errorrate_dict : dict
A dict where the keys are 2-qubit gates in pspec and the values are floats in [0,1]. This is the
error probability for the 2-qubit gate, split evenly into independent Pauli errors on each of the
qubits the gate is intended to act on.
measurement_errorrate_dict : dict
A dict where the keys are elements of pspec.qubit_labels and the values are floats in [0,1].
The element for qubit with label `q` is the measurement bit-flip error probability for that qubit.
All qubits that do not have a measurement error rate specified are assumed to have perfect measurements.
ptype : str, optional
Can be 'uniform', 'X', 'Y' or 'Z'. If 'uniform' then 3 Pauli errors are equally likely, if 'X', 'Y' or
'Z' then the errors are always Pauli X, Y or Z errors, respectively.
Returns
-------
dict
        A dict that encodes the error model described above in the format required for the simulator
`circuit_simulator_for_tensored_independent_pauli_errors()`.
"""
if ptype == 'uniform':
def error_row(er): return _np.array([1 - er, er / 3, er / 3, er / 3])
elif ptype == 'X':
def error_row(er): return _np.array([1 - er, er, 0., 0.])
elif ptype == 'Y':
def error_row(er): return _np.array([1 - er, 0., er, 0.])
elif ptype == 'Z':
def error_row(er): return _np.array([1 - er, 0., 0., er])
else:
raise ValueError("Error model type not understood! Set `ptype` to a valid option.")
n = pspec.num_qubits
errormodel = {}
for gate in list(pspec.models['clifford'].primitive_op_labels):
errormodel[gate] = _np.zeros((n, 4), float)
errormodel[gate][:, 0] = _np.ones(n, float)
if gate.num_qubits == 1:
er = one_qubit_gate_errorrate_dict[gate.qubits[0]]
elif gate.num_qubits == 2:
er = 1 - (1 - two_qubit_gate_errorrate_dict[gate])**(0.5)
else: raise ValueError("Only 1- and 2-qubit gates supported!")
for q in gate.qubits:
errormodel[gate][pspec.qubit_labels.index(q)] = error_row(er)
errormodel['measure'] = _np.array([measurement_errorrate_dict.get(q, 0.) for q in pspec.qubit_labels])
    return errormodel
cor updates automatically in pipeline
self.setCenterButton.hide()
plabel = QtGui.QLabel('Overlay Projection No:')
plabel.setAlignment(QtCore.Qt.AlignRight)
spinBox = QtGui.QSpinBox(parent=self.cor_widget)
spinBox.setRange(0, data.shape[0]-1)
slider = QtGui.QSlider(orientation=QtCore.Qt.Horizontal, parent=self.cor_widget)
slider.setRange(0, data.shape[0]-1)
spinBox.setValue(data.shape[0]-1)
slider.setValue(data.shape[0]-1)
flipCheckBox = QtGui.QCheckBox('Flip Overlay', parent=self.cor_widget)
flipCheckBox.setChecked(True)
constrainYCheckBox = QtGui.QCheckBox('Constrain Y', parent=self.cor_widget)
constrainYCheckBox.setChecked(True)
constrainXCheckBox = QtGui.QCheckBox('Constrain X', parent=self.cor_widget)
constrainXCheckBox.setChecked(False)
# rotateCheckBox = QtGui.QCheckBox('Enable Rotation', parent=self.cor_widget)
# rotateCheckBox.setChecked(False)
self.normCheckBox = QtGui.QCheckBox('Normalize', parent=self.cor_widget)
h2 = QtGui.QHBoxLayout()
h2.setAlignment(QtCore.Qt.AlignLeft)
h2.setContentsMargins(0, 0, 0, 0)
h2.addWidget(plabel)
h2.addWidget(spinBox)
h2.addWidget(flipCheckBox)
h2.addWidget(constrainXCheckBox)
h2.addWidget(constrainYCheckBox)
# h2.addWidget(rotateCheckBox) # This needs to be implemented correctly
h2.addWidget(self.normCheckBox)
h2.addStretch(1)
spinBox.setFixedWidth(spinBox.width())
v2 = QtGui.QVBoxLayout(self.cor_widget)
v2.addLayout(h1)
v2.addLayout(h2)
v2.addWidget(slider)
l = QtGui.QGridLayout(self)
l.setContentsMargins(0, 0, 0, 0)
cor_holder = QtGui.QSplitter()
cor_holder.setOrientation(QtCore.Qt.Vertical)
cor_holder.addWidget(self.cor_box)
cor_holder.addWidget(self.stackViewer)
l.addWidget(self.cor_button_holder)
l.addWidget(cor_holder)
l.addWidget(self.mbir_viewer)
self.hideMBIR()
# self.mbir_viewer.hide()
slider.valueChanged.connect(spinBox.setValue)
slider.valueChanged.connect(self.stackViewer.resetImage)
spinBox.valueChanged.connect(self.changeOverlayProj)
flipCheckBox.stateChanged.connect(self.flipOverlayProj)
constrainYCheckBox.stateChanged.connect(lambda v: self.imgoverlay_roi.constrainY(v))
constrainXCheckBox.stateChanged.connect(lambda v: self.imgoverlay_roi.constrainX(v))
# rotateCheckBox.stateChanged.connect(self.addRotateHandle)
self.normCheckBox.stateChanged.connect(self.normalize)
self.stackViewer.sigTimeChanged.connect(lambda: self.normalize(False))
self.imgoverlay_roi.sigTranslated.connect(self.setCenter)
self.imgoverlay_roi.sigTranslated.connect(lambda x, y: originBox.setText('x={} y={}'.format(x, y)))
self.hideCenterDetection()
self.bounds = None
# self.normalize(True)
def updateCORChoice(self, boolean):
"""
Slot to receive signal emitted when user chooses to use either automatic or manual COR detection in
function pipeline
"""
if self.toolbar and self.toolbar.actionCenter.isChecked():
if boolean:
self.cor_box.setCurrentWidget(self.auto_cor_widget)
self.stackViewer.hide()
self.auto_cor_button.setChecked(True)
else:
self.cor_box.setCurrentWidget(self.cor_widget)
self.stackViewer.show()
self.manual_cor_button.setChecked(True)
def writeCOR(self):
"""
Writes COR value acquired from user to metadata of input file
"""
cor = QtGui.QInputDialog.getDouble(self.cor_box, 'Write COR value to file',
'Write COR value to file',self.data.shape[1]/2)
if cor[1]:
self.data.fabimage.change_dataset_attribute('center', cor[0])
def manualCOR(self):
"""
Slot to receive signal when manual COR detection button is clicked in CORSelectionWidget
"""
self.cor_box.setCurrentWidget(self.cor_widget)
self.stackViewer.show()
self.sigCORChanged.emit(False)
def autoCOR(self):
"""
Slot to receive signal when auto COR detection button is clicked in CORSelectionWidget
"""
self.cor_box.setCurrentWidget(self.auto_cor_widget)
self.stackViewer.hide()
self.sigCORChanged.emit(True)
def changeOverlayProj(self, idx):
"""
Changes the image in the overlay. This is connected to the slider in the cor_widget
"""
self.normCheckBox.setChecked(False)
self.imgoverlay_roi.setCurrentImage(idx)
self.imgoverlay_roi.updateImage()
def setCenter(self, x, y):
"""
Sets the center in the centerBox based on the position of the imageoverlay
Parameters
----------
x : float
x-coordinate of overlay image in the background images coordinates
y : float
            y-coordinate of the overlay image in the background image's coordinates
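
        Worked example (illustrative): for a projection 2048 pixels wide with the overlay
        translated to x == 0, the computed center is (2048 + 0 - 1) / 2.0 == 1023.5, i.e.
        the detector midline in pixel-centre coordinates.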
"""
center = (self.data.shape[1] + x - 1)/2.0 # subtract half a pixel out of 'some' convention?
self.centerBox.setValue(center)
self.sigCenterChanged.emit(center)
def hideCenterDetection(self):
"""
Hides the center detection widget and corresponding histogram
"""
self.normalize(False)
self.cor_box.hide()
self.cor_button_holder.hide()
self.roi_histogram.hide()
self.setButton.hide()
self.imgoverlay_roi.setVisible(False)
self.stackViewer.show()
def showCenterDetection(self):
"""
Shows the center detection widget and corresponding histogram
"""
# self.normalize(True)
self.cor_box.show()
self.cor_button_holder.show()
self.roi_histogram.show()
self.setButton.show()
self.imgoverlay_roi.setVisible(True)
if self.auto_cor_button.isChecked():
self.stackViewer.hide()
else:
self.stackViewer.show()
def showMBIR(self):
"""
Slot to receive signal and show MBIR menu when it is requested
"""
self.mbir_viewer.show()
self.cor_button_holder.hide()
# self.hideCenterDetection()
self.stackViewer.hide()
def hideMBIR(self):
"""
Slot to receive signal and show MBIR menu when it is requested
"""
self.mbir_viewer.hide()
self.stackViewer.show()
def updateROIFromCenter(self, center):
"""
Updates the position of the ROIImageOverlay based on the given center
Parameters
----------
center : float
Location of center of rotation
"""
s = self.imgoverlay_roi.pos()[0]
self.imgoverlay_roi.translate(pg.Point((2 * center + 1 - self.data.shape[1] - s, 0))) # 1 again due to the so-called COR
# conventions...
def flipOverlayProj(self, val):
"""
Flips the image show in the ROIImageOverlay
"""
self.imgoverlay_roi.flipCurrentImage()
self.imgoverlay_roi.updateImage()
def toggleRotateHandle(self, val):
"""
Adds/ removes a handle on the ROIImageOverlay to be able to rotate the image (Rotation is not implemented
correctly yet)
Parameters
----------
val : bool
Boolean specifying to add or remove the handle
"""
if val:
self.toggleRotateHandle.handle = self.imgoverlay_roi.addRotateHandle([0, 1], [0.2, 0.2])
else:
self.imgoverlay_roi.removeHandle(self.toggleRotateHandle.handle)
def addROIselection(self):
"""
Adds/ removes a rectangular ROI to select a region of interest for reconstruction. Not implemented yet
"""
if self.selection_roi:
self.stackViewer.view.removeItem(self.selection_roi)
self.selection_roi = pg.ROI([0, 0], [100, 100])
self.stackViewer.view.addItem(self.selection_roi)
self.selection_roi.addScaleHandle([1, 1], [0, 0])
self.selection_roi.addScaleHandle([0, 0], [1, 1])
self.sigROIWidgetChanged.emit(self.selection_roi)
def normalize(self, val):
"""
Toggles the normalization of the ROIImageOverlay.
Parameters
----------
val : bool
Boolean specifying to normalize image
"""
# self.roi_histogram.setLevels(0,1)
if val and not self.normalized:
if not hasattr(self.data, 'flats') or not hasattr(self.data, 'darks'):
msg.showMessage('Must load flats and darks to normalize dataset', timeout=10)
return
            flats = np.array(list(self.data.flats.values()))
            darks = np.array(list(self.data.darks.values()))
self.flat = np.median(flats, axis=0).transpose()
self.dark = np.median(darks, axis=0).transpose()
proj = (self.imageItem.image - self.dark)/(self.flat - self.dark)
overlay = self.imgoverlay_roi.currentImage
if self.imgoverlay_roi.flipped:
overlay = np.flipud(overlay)
overlay = (overlay - self.dark)/(self.flat - self.dark)
if self.imgoverlay_roi.flipped:
overlay = np.flipud(overlay)
self.imgoverlay_roi.currentImage = overlay
self.imgoverlay_roi.updateImage(autolevels=True)
self.stackViewer.setImage(proj, autoRange=False, autoLevels=True)
self.stackViewer.updateImage()
self.normalized = True
self.roi_histogram.setLevels(-1, 1) # lazy solution, could be improved with some sampling methods
self.roi_histogram.vb.setRange(yRange=(-1.5, 1.5))
self.normCheckBox.setChecked(True)
elif not val and self.normalized:
self.stackViewer.resetImage()
self.imgoverlay_roi.resetImage()
min, max = self.stackViewer.quickMinMax(self.imgoverlay_roi.imageItem.image)
self.roi_histogram.setLevels(min, max)
self.normalized = False
self.normCheckBox.setChecked(False)
def keyPressEvent(self, ev):
"""
Override QWidgets key pressed event to send the event to the ROIImageOverlay when it is pressed
"""
super(ProjectionViewer, self).keyPressEvent(ev)
if self.imgoverlay_roi.isVisible():
self.imgoverlay_roi.keyPressEvent(ev)
else:
super(StackViewer, self.stackViewer).keyPressEvent(ev)
ev.accept()
class PreviewViewer(QtGui.QSplitter):
"""
Viewer class to show reconstruction previews in a PG ImageView, along with the function pipeline settings for the
corresponding preview
Attributes
----------
previews : ArrayDeque
ArrayDeque to hold slice reconstruction previews
data : deque of dicts
deque holding preview dicts corresponding to the reconstruction in previews
datatrees : deque of DataTree widgets
deque holding DataTree widgets to show the data in data deque
slice_numbers : deque
deque with sinogram index that was reconstructed for that preview
imageview : widgets.ImageView
ImageView to display preview reconstructions
Signals
-------
sigSetDefaults(dict)
Emits dictionary of current preview. Used to set the workflow pipeline according to the emitted dict
Parameters
----------
dim : int
Dimensions of arrays in preview array deque. This is no longer used because array deque can hold arrays of
different size.
maxpreviews : int
        Maximum number of preview arrays that can be held
args
kwargs
"""
sigSetDefaults = QtCore.Signal(dict)
def __init__(self, dim, maxpreviews=None, *args, **kwargs):
super(PreviewViewer, self).__init__(*args, **kwargs)
self.maxpreviews = maxpreviews if maxpreviews is not None else 40
self.dim = dim
self.previews = ArrayDeque(arrayshape=(dim, dim), maxlen=self.maxpreviews)
self.datatrees = deque(maxlen=self.maxpreviews)
self.data = deque(maxlen=self.maxpreviews)
self.slice_numbers = deque(maxlen=self.maxpreviews)
self.setOrientation(QtCore.Qt.Horizontal)
self.functionform = QtGui.QStackedWidget()
self.deleteButton = QtGui.QToolButton(self)
self.deleteButton.setToolTip('Delete this preview')
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("xicam/gui/icons_36.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.deleteButton.setIcon(icon)
self.setPipelineButton = QtGui.QToolButton(self)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("xicam/gui/icons_45.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.setPipelineButton.setIcon(icon)
self.setPipelineButton.setToolTip('Set as pipeline')
ly = QtGui.QVBoxLayout()
ly.setContentsMargins(0, 0, 0, 0)
ly.setSpacing(0)
ly.addWidget(self.functionform)
h = QtGui.QHBoxLayout()
h.setContentsMargins(0, 0, 0, 0)
h.addWidget(self.setPipelineButton)
h.addWidget(self.deleteButton)
ly.addLayout(h)
panel = QtGui.QWidget(self)
panel.resize(375,480)
panel.setLayout(ly)
self.setPipelineButton.hide()
self.deleteButton.hide()
self.imageview = ImageView(self)
self.imageview.ui.roiBtn.setParent(None)
self.imageview.ui.menuBtn.setParent(None)
self.view_label = QtGui.QLabel(self)
self.view_label.setText('No: ')
self.view_number = QtGui.QSpinBox(self)
self.view_number.setReadOnly(True)
self.setButton = histDialogButton('Set', parent=self)
self.setButton.connectToHistWidget(self.imageview.getHistogramWidget())
self.view_number.setMaximum(5000) # Large enough number
self.imageview.ui.gridLayout.addWidget(self.setButton, 1, 1, 1, 2)
self.imageview.ui.gridLayout.addWidget(self.view_label, 2, 1, 1, 1)
self.imageview.ui.gridLayout.addWidget(self.view_number, 2, 2, 1, 1)
self.setCurrentIndex = self.imageview.setCurrentIndex
self.addWidget(panel)
self.addWidget(self.imageview)
self.imageview.sigDeletePressed.connect(self.removePreview)
self.setPipelineButton.clicked.connect(self.defaultsButtonClicked)
self.deleteButton.clicked.connect(self.removePreview)
self.imageview.sigTimeChanged.connect(self.indexChanged)
@ QtCore.Slot(object, object)
def indexChanged(self, index, time):
"""Slot connected to the ImageViews sigChanged"""
try:
self.functionform.setCurrentWidget(self.datatrees[index])
self.view_number.setValue(self.slice_numbers[index])
except IndexError as e:
print('index {} does not exist'.format(index))
# Could be leaking memory if I don't explicitly delete the datatrees that are being removed
# from the previewdata deque but are still in the functionform widget? Hopefully python gc is taking good care of me
def addPreview(self, image, funcdata, slice_number):
"""
Adds a preview
Parameters
----------
image : ndarray
Reconstructed image
funcdata : dict
Dictionary summarizing pipeline used for reconstruction
slice_number : int
Index of sinogram reconstructed
"""
self.deleteButton.show()
self.setPipelineButton.show()
self.previews.appendleft(np.flipud(image))
functree = DataTreeWidget()
functree.setHeaderHidden(False)
functree.setHeaderLabels(['Function', 'Params'])
functree.setData(funcdata, hideRoot=True)
functree.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
functree.setSelectionBehavior(QtGui.QAbstractItemView.SelectItems)
functree.setColumnWidth(0, 140)
functree.setColumnWidth(1, 235)
self.data.appendleft(funcdata)
self.datatrees.appendleft(functree)
self.slice_numbers.appendleft(slice_number)
self.view_number.setValue(slice_number)
self.functionform.addWidget(functree)
levels = False if len(self.data) > 1 else True
self.imageview.setImage(self.previews, autoRange=False, autoLevels=levels, autoHistogramRange=False)
# disable autoscaling for histogram
hist = self.imageview.getHistogramWidget()
hist.vb.enableAutoRange(hist.vb.YAxis, False)
self.functionform.setCurrentWidget(functree)
def removePreview(self):
"""
Removes the current preview
"""
if len(self.previews) > 0:
idx = self.imageview.currentIndex
self.functionform.removeWidget(self.datatrees[idx])
del self.previews[idx]
del self.datatrees[idx]
del self.data[idx]
del self.slice_numbers[idx]
if len(self.previews) == 0:
self.imageview.clear()
self.deleteButton.hide()
self.setPipelineButton.hide()
else:
self.imageview.setImage(self.previews)
def defaultsButtonClicked(self):
"""
Emits the dict of current preview
"""
current_data = self.data[self.imageview.currentIndex]
self.sigSetDefaults.emit(current_data)
class Preview3DViewer(QtGui.QSplitter):
"""
Viewer class to show 3D reconstruction previews, along with the function pipeline settings for the
corresponding preview
Attributes
----------
    volumviewer : widgets.volumeviewers.VolumeViewer
VolumeViewer widget to render 3D preview reconstruction volume
fdata : dict
dict corresponding to the reconstruction functions
pipelinetree : DataTree widget
Datatree for displaying data dict
data : ndarray
Array of reconstructed volume
Signals
-------
sigSetDefaults(dict)
Emits dictionary of preview. Used to set the workflow pipeline according to the emitted dict
"""
sigSetDefaults = QtCore.Signal(dict)
def __init__(self, *args, **kwargs):
| |
# Copyright (C) 2022 <NAME> and <NAME>.
# Schutweg 15a
# 5145 NP Waalwijk
# The Netherlands
# http://www.hbm.com
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Channel API functional test."""
import os
import sys
import time
import unittest
import HtmlTestRunner
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from src.ghsapi import ghsapi
IP_ADDRESS = "localhost"
PORT_NO = 8006
class TestChannel(unittest.TestCase):
"""Channel API functional test."""
gen = ghsapi.GHS()
@classmethod
def setUpClass(cls):
# run connect api at start of test file
cls.gen.ghs_connect(IP_ADDRESS, PORT_NO)
@classmethod
def tearDownClass(cls):
# run disconnect api at end of test file
cls.gen.ghs_disconnect()
def setUp(self):
# runs before each test
pass
def tearDown(self):
# runs after each test
self.gen.ghs_stop_recording()
time.sleep(2)
self.gen.ghs_set_recorder_enabled("A", "Enable")
# Functions
    def test_get_channel_type(self):
        """Test get channel type and its return value"""
return_var, channel_type = self.gen.ghs_get_channel_type("A", 1)
self.assertEqual(
channel_type in ghsapi.GHSChannelType,
True,
"Failed on get channel type.",
)
self.assertEqual(
return_var,
"OK",
"Failed on get channel type.",
)
def test_get_channel_type_recorder_disabled(self):
"""Test get channel type when recorder is disabled"""
return_var, channel_type = self.gen.ghs_get_channel_type("A", 1)
self.assertEqual(
return_var,
"OK",
"Failed on get channel type.",
)
return_var = self.gen.ghs_set_recorder_enabled("A", "Disable")
self.assertEqual(
return_var,
"OK",
"Failed to disable recorder.",
)
return_var, channel_type_dis = self.gen.ghs_get_channel_type("A", 1)
self.assertEqual(
return_var,
"OK",
"Failed on get channel type.",
)
self.assertEqual(
channel_type == channel_type_dis,
True,
"Failed on get channel type when recorder disabled.",
)
def test_get_invalid_channel_type(self):
"""Test get type of invalid channel"""
return_var = self.gen.ghs_get_channel_type("Z", 100)
self.assertEqual(
return_var[0],
"InvalidSlotID",
"Failed on get type of invalid channel.",
)
def test_set_get_channel_name(self):
"""Test set and get channel name"""
return_var = self.gen.ghs_set_channel_name("A", 1, "TestName")
self.assertEqual(
return_var,
"OK",
"Failed on set channel name.",
)
return_var, channel_name = self.gen.ghs_get_channel_name("A", 1)
self.assertEqual(
return_var,
"OK",
"Failed on get channel name.",
)
self.assertEqual(
channel_name,
"TestName",
"Failed to set correct channel name.",
)
def test_set_invalid_channel_name(self):
"""Test set invalid channel name"""
return_var = self.gen.ghs_set_channel_name("A", 1, 123)
self.assertEqual(
return_var,
"InvalidDataType",
"Failed on set invalid channel name.",
)
def test_set_to_invalid_channel_name(self):
"""Test set name to invalid channel"""
return_var = self.gen.ghs_set_channel_name("Z", 100, "TestName")
self.assertEqual(
return_var,
"InvalidSlotID",
"Failed on set name to invalid channel.",
)
def test_set_channel_name_disabled_recorder(self):
"""Test set channel name of disabled recorder"""
return_var = self.gen.ghs_set_recorder_enabled("A", "Disable")
self.assertEqual(
return_var,
"OK",
"Failed to disable recorder.",
)
return_var = self.gen.ghs_set_channel_name("A", 1, "TestName2")
self.assertEqual(
return_var,
"OK",
"Failed on set channel name of disabled recorder.",
)
return_var, channel_name = self.gen.ghs_get_channel_name("A", 1)
self.assertEqual(
return_var,
"OK",
"Failed on get channel name of disabled recorder.",
)
self.assertEqual(
channel_name,
"TestName2",
"Failed to set correct channel name of disabled recorder.",
)
    def test_set_duplicate_channel_name(self):
        """Test set duplicate channel name on two channels"""
return_var = self.gen.ghs_set_channel_name("A", 1, "TestName")
self.assertEqual(
return_var,
"OK",
"Failed on set name to channel 1.",
)
return_var = self.gen.ghs_set_channel_name("A", 2, "TestName")
self.assertEqual(
return_var,
"DuplicateChannelName",
"Failed on set duplicate name to channel 2.",
)
def test_get_invalid_channel_name(self):
"""Test get invalid channel's name"""
return_var = self.gen.ghs_get_channel_name("Z", 100)
self.assertEqual(
return_var[0],
"InvalidSlotID",
"Failed on get name of invalid channel.",
)
def test_get_channel_name_disabled_recorder(self):
"""Test get channel name of disabled recorder"""
return_var = self.gen.ghs_set_recorder_enabled("A", "Disable")
self.assertEqual(
return_var,
"OK",
"Failed to disable recorder.",
)
return_var = self.gen.ghs_get_channel_name("A", 1)
self.assertEqual(
return_var[0],
"OK",
"Failed on get channel name of disabled recorder.",
)
def test_get_channel_name_type(self):
"""Test get channel name type"""
return_var, channel_name = self.gen.ghs_get_channel_name("A", 1)
self.assertEqual(
return_var,
"OK",
"Failed on get channel name.",
)
self.assertEqual(
isinstance(channel_name, str),
True,
"Failed on get channel name type.",
)
def test_set_get_channel_storage(self):
"""Test set and get channel storage"""
return_var = self.gen.ghs_set_channel_storage_enabled("A", 1, "Enable")
self.assertEqual(
return_var,
"OK",
"Failed on set channel storage.",
)
return_var, channel_storage = self.gen.ghs_get_channel_storage_enabled(
"A", 1
)
self.assertEqual(
return_var,
"OK",
"Failed on get channel storage.",
)
self.assertEqual(
channel_storage,
"Enable",
"Failed to set correct channel storage.",
)
def test_set_invalid_channel_storage(self):
"""Test set storage of invalid channel"""
return_var = self.gen.ghs_set_channel_storage_enabled(
"Z", 100, "Enable"
)
self.assertEqual(
return_var,
"InvalidSlotID",
"Failed on set storage of invalid channel.",
)
def test_incorrect_set_channel_storage(self):
"""Test incorrectly set storage of channel"""
return_var = self.gen.ghs_set_channel_storage_enabled("A", 1, "On")
self.assertEqual(
return_var,
"InvalidDataType",
"Failed on incorrect setting storage of channel.",
)
def test_set_channel_storage_not_idle(self):
"""Test set storage of channel system not idle"""
return_var = self.gen.ghs_set_storage_location("Local1")
self.assertEqual(
return_var,
"OK",
"Failed on set mainframe as storage location.",
)
return_var = self.gen.ghs_start_recording()
self.assertEqual(
return_var,
"OK",
"Failed to start a recording.",
)
return_var = self.gen.ghs_set_channel_storage_enabled("A", 1, "Enable")
self.assertEqual(
return_var,
"SystemNotIdle",
"Failed on set storage of channel when system not idle.",
)
return_var = self.gen.ghs_stop_recording()
self.assertEqual(
return_var,
"OK",
"Failed to stop recording.",
)
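        # Brief pause, presumably to give the mainframe time to return to idle
        # before the next test runs.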
time.sleep(2)
def test_set_channel_storage_disabled_recorder(self):
"""Test set channel storage of disabled recorder"""
return_var = self.gen.ghs_set_recorder_enabled("A", "Disable")
self.assertEqual(
return_var,
"OK",
"Failed to disable recorder.",
)
return_var = self.gen.ghs_set_channel_storage_enabled("A", 1, "Enable")
self.assertEqual(
return_var,
"OK",
"Failed on set channel storage of disabled recorder.",
)
return_var, channel_storage = self.gen.ghs_get_channel_storage_enabled(
"A", 1
)
self.assertEqual(
return_var,
"OK",
"Failed on get channel storage of disabled recorder.",
)
self.assertEqual(
channel_storage,
"Enable",
"Failed to set correct channel storage of disabled recorder.",
)
def test_get_invalid_channel_storage(self):
"""Test get storage of invalid channel"""
return_var = self.gen.ghs_get_channel_storage_enabled("Z", 100)
self.assertEqual(
return_var[0],
"InvalidSlotID",
"Failed on get storage of invalid channel.",
)
def test_get_channel_storage_disabled_recorder(self):
"""Test get channel storage of disabled recorder"""
return_var = self.gen.ghs_set_recorder_enabled("A", "Disable")
self.assertEqual(
return_var,
"OK",
"Failed to disable recorder.",
)
return_var = self.gen.ghs_get_channel_storage_enabled("A", 1)
self.assertEqual(
return_var[0],
"OK",
"Failed on get channel storage of disabled recorder.",
)
def test_get_channel_storage_valid(self):
"""Test get channel storage and check return value"""
return_var, enabled = self.gen.ghs_get_channel_storage_enabled("A", 1)
self.assertEqual(
return_var,
"OK",
"Failed on get channel storage.",
)
self.assertEqual(
enabled in ghsapi.GHSEnableDisable,
True,
"Failed on check correct channel storage value.",
)
def test_cmd_zeroing(self):
"""Test set zeroing of channel"""
return_var = self.gen.ghs_cmd_zeroing("A", 1, "Enable")
self.assertEqual(
return_var,
"OK",
"Failed on set channel zeroing.",
)
def test_cmd_zeroing_invalid_channel(self):
"""Test set zeroing of invalid channel"""
return_var = self.gen.ghs_cmd_zeroing("Z", 100, "Enable")
self.assertEqual(
return_var,
"InvalidSlotID",
"Failed on set invalid channel zeroing.",
)
def test_cmd_incorrect_zeroing(self):
"""Test set incorrect zeroing of channel"""
return_var = self.gen.ghs_cmd_zeroing("A", 1, "On")
self.assertEqual(
return_var,
"InvalidDataType",
"Failed on set incorrect channel zeroing.",
)
def test_cmd_zeroing_not_idle(self):
"""Test set zeroing of channel system not idle"""
return_var = self.gen.ghs_set_storage_location("Local1")
self.assertEqual(
return_var,
"OK",
"Failed on set mainframe as storage location.",
)
return_var = self.gen.ghs_start_recording()
self.assertEqual(
return_var,
"OK",
"Failed to start a recording.",
)
return_var = self.gen.ghs_cmd_zeroing("A", 1, "Enable")
self.assertEqual(
return_var,
"OK",
"Failed on set zeroing of channel when system not idle.",
)
return_var = self.gen.ghs_stop_recording()
self.assertEqual(
return_var,
"OK",
"Failed to stop recording.",
)
time.sleep(2)
# Analog module
def test_set_get_trigger_settings(self):
"""Test set and get trigger settings"""
return_var = self.gen.ghs_set_trigger_settings(
"A", 1, "Dual", 10.0, 20.0, 30.0, "RisingEdge"
)
self.assertEqual(
return_var,
"OK",
"Failed on set trigger settings.",
)
(
return_var,
trigger_mode,
primary_level,
secondary_level,
hysteresis,
direction,
) = self.gen.ghs_get_trigger_settings("A", 1)
self.assertEqual(
return_var,
"OK",
"Failed on get trigger settings.",
)
self.assertEqual(
trigger_mode,
"Dual",
"Failed to set trigger settings - trigger mode.",
)
self.assertEqual(
primary_level,
10.0,
"Failed to set trigger settings - primary level.",
)
self.assertEqual(
secondary_level,
20.0,
"Failed to set trigger settings - secondary level.",
)
self.assertEqual(
hysteresis,
30.0,
"Failed to set trigger settings - hysteresis.",
)
self.assertEqual(
direction,
"RisingEdge",
"Failed to set trigger settings - direction.",
)
def test_set_trigger_settings_invalid_channel(self):
"""Test set trigger settings on invalid channel"""
return_var = self.gen.ghs_set_trigger_settings(
"Z", 100, "Dual", 10.0, 20.0, 30.0, "RisingEdge"
)
self.assertEqual(
return_var,
"InvalidSlotID",
"Failed on set trigger settings on invalid channel.",
)
def test_set_trigger_settings_non_analog_channel(self):
"""Test set trigger | |
                xw = np.append(np.append(self.path['TEV'][i,0,:itev+1], self.path['LEV'][i,0,:ilev+1]), self.path['FREE'][i,0,:])
zw = np.append(np.append(self.path['TEV'][i,1,:itev+1], self.path['LEV'][i,1,:ilev+1]), self.path['FREE'][i,1,:])
W = self.airfoil_downwash(circulation, xw, zw, i)
# Compute A0, A1 coefficients
A0 = - 1/pi * np.trapz(W/Uinf , theta_panel)
A1 = 2/pi * np.trapz(W/Uinf*np.cos(theta_panel), theta_panel)
# Get f for Newton-Raphson
circulation_bound = Uinf*chord*pi*(A0 + A1/2)
f = circulation_bound + np.sum(self.circulation['TEV'][:itev+1]) + np.sum(self.circulation['LEV'][:ilev+1]) + \
np.sum(self.circulation['FREE']) - self.circulation['IC']
                # Now set gamma_TEV = gamma_TEV + epsilon (perturbation for the finite-difference derivative)
self.circulation['TEV'][itev] = shed_vortex_gamma + epsilon
# Get f + delta for Newton-Raphson: we need to compute again W, A0, A1
circulation = np.append(np.append(self.circulation['TEV'][:itev+1], self.circulation['LEV'][:ilev+1]), self.circulation['FREE'])
xw = np.append(np.append(self.path['TEV'][i,0,:itev+1], self.path['LEV'][i,0,:ilev+1]), self.path['FREE'][i,0,:])
zw = np.append(np.append(self.path['TEV'][i,1,:itev+1], self.path['LEV'][i,1,:ilev+1]), self.path['FREE'][i,1,:])
W = self.airfoil_downwash(circulation, xw, zw, i)
A0 = - 1/pi * np.trapz(W/Uinf , theta_panel)
A1 = 2/pi * np.trapz(W/Uinf*np.cos(theta_panel), theta_panel)
circulation_bound = Uinf*chord*pi*(A0 + A1/2)
fdelta = circulation_bound + np.sum(self.circulation['TEV'][:itev+1]) + np.sum(self.circulation['LEV'][:ilev+1]) + \
np.sum(self.circulation['FREE']) - self.circulation['IC']
# Newton-Raphson:
fprime = (fdelta - f) / epsilon # numerical df/dGamma
shed_vortex_gamma = shed_vortex_gamma - f / fprime # update solution with Newton
self.circulation['TEV'][itev] = shed_vortex_gamma # Restoring TEV circulation
if niter >= self.maxiter:
print('The solution did not converge during the Newton-Raphson iteration')
# break
niter = niter + 1
            # Solution after convergence:
circulation = np.append(np.append(self.circulation['TEV'][:itev+1], self.circulation['LEV'][:ilev+1]), self.circulation['FREE'])
xw = np.append(np.append(self.path['TEV'][i,0,:itev+1], self.path['LEV'][i,0,:ilev+1]), self.path['FREE'][i,0,:])
zw = np.append(np.append(self.path['TEV'][i,1,:itev+1], self.path['LEV'][i,1,:ilev+1]), self.path['FREE'][i,1,:])
W = self.airfoil_downwash(circulation, xw, zw, i)
A0 = - 1/pi * np.trapz(W/Uinf , theta_panel)
A1 = 2/pi * np.trapz(W/Uinf*np.cos(theta_panel), theta_panel)
self.fourier[i,0,:2] = A0, A1
self.circulation['bound'][itev] = Uinf*chord*pi*(A0 + A1/2)
# Now we compute the rest of fourier coefficients (from A2 to An)
for n in range(2,self.Ncoeffs):
self.fourier[i,0,n] = 2/pi * np.trapz(W/Uinf*np.cos(n*theta_panel), theta_panel)
for n in range(self.Ncoeffs): # and their derivatives
self.fourier[i,1,n] = (self.fourier[i,0,n] - self.fourier[i-1,0,n])/dt
elif self.method == 'Faure': # without iterating
# Contribution of existing vortices
circulation = np.append(np.append(self.circulation['TEV'][:itev], self.circulation['LEV'][:ilev]), self.circulation['FREE'])
xw = np.append(np.append(self.path['TEV'][i,0,:itev], self.path['LEV'][i,0,:ilev]), self.path['FREE'][i,0,:])
zw = np.append(np.append(self.path['TEV'][i,1,:itev], self.path['LEV'][i,1,:ilev]), self.path['FREE'][i,1,:])
T1 = self.airfoil_downwash(circulation, xw, zw, i)
# We compute the intensity of the shed TEV
xa, za = self.path['airfoil_gamma_points'][i,0,:], self.path['airfoil_gamma_points'][i,1,:]
xtevi, ztevi = self.path['TEV'][i,0,itev], self.path['TEV'][i,1,itev]
utevi, wtevi = self.induced_velocity(np.array([1]), np.array([xtevi]), np.array([ztevi]), xa, za)
ut = utevi*np.cos(self.alpha[i]) - wtevi*np.sin(self.alpha[i]) # tangential to chord
un = utevi*np.sin(self.alpha[i]) + wtevi*np.cos(self.alpha[i]) # normal to chord
T2 = self.airfoil['detadx_panel']*ut - un
I1 = np.trapz(T1*(np.cos(theta_panel)-1), theta_panel)
I2 = np.trapz(T2*(np.cos(theta_panel)-1), theta_panel)
self.circulation['TEV'][itev] = - (I1 + np.sum(self.circulation['TEV'][:itev]) \
+ np.sum(self.circulation['LEV'][:ilev]) + np.sum(self.circulation['FREE']) \
- self.circulation['IC'])/(1+I2)
self.circulation['bound'][itev] = I1 + self.circulation['TEV'][itev]*I2
J1 = - 1/np.pi*np.trapz(T1, theta_panel)
J2 = - 1/np.pi*np.trapz(T2, theta_panel)
W = T1 + self.circulation['TEV'][itev]*T2
# self.fourier[i,0,0] = J1 + self.circulation['TEV'][itev]*J2
self.fourier[i,0,0] = - 1/pi * np.trapz(W/Uinf, theta_panel)
for n in range(1,self.Ncoeffs):
self.fourier[i,0,n] = 2/pi * np.trapz(W/Uinf*np.cos(n*theta_panel), theta_panel)
for n in range(self.Ncoeffs): # and their derivatives
self.fourier[i,1,n] = (self.fourier[i,0,n] - self.fourier[i-1,0,n])/dt
self.LESP_prev[itev] = self.fourier[i,0,0] # LESP before being modulated (if it is the case)
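        # Note: the zeroth Fourier coefficient A0 acts as the leading-edge
        # suction parameter (LESP); below it is compared against LESPcrit to
        # decide whether a leading-edge vortex must be shed this time step.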
'''--------------------------------------------------------------'''
'''-------------------- TEV, LEV computation --------------------'''
'''--------------------------------------------------------------'''
if abs(self.fourier[i,0,0]) >= abs(LESPcrit): # if A0 exceeds the LESPcrit: shedding occurs
LEV_shed_gamma = self.circulation['TEV'][itev] # initial guess for Newton
TEV_shed_gamma = self.circulation['TEV'][itev] # initial guess for Newton
LEV_shed[i] = ilev # indicator for knowing when shedding occurs
# LEV_shed will be 'ilev' when shedding occurs and '-1' otherwise
# Compute the position of the shed LEV
if LEV_shed[i] == 0: # First LEV
                # Shedding of the Leading Edge Vortex (LEV)
self.path['LEV'][i,:,ilev] = self.path['airfoil'][i,:,0]
elif LEV_shed[i] > 0:
if LEV_shed[i-1] != -1: # if a lev has been shed previously
                    # Shedding of the Leading Edge Vortex (LEV)
# (X,Z)_lev_i = (X,Z)_LE + 1/3[(X,Z)_lev_i-1 - (X,Z)_LE]
# At 1/3 of the distance between the shedding edge and the
# previously shed vortex (in this dt).
self.path['LEV'][i,:,ilev] = self.path['airfoil'][i,:,0] + \
1/3*(self.path['LEV'][i,:,ilev-1] - self.path['airfoil'][i,:,0])
else: # not shed previously -> place it on the LE
self.path['LEV'][i,:,ilev] = self.path['airfoil'][i,:,0]
if self.fourier[i,0,0] < 0: # if A0 < 0:
LESPcrit = -abs(LESPcrit)
else:
LESPcrit = abs(LESPcrit)
if self.method == 'Ramesh':
f1, f2 = 0.1, 0.1 #initializing for the while
niter = 1
# Newton method for nonlinear systems
while (abs(f1) > self.maxerror or abs(f2) > self.maxerror) and \
niter < self.maxiter:
self.circulation['TEV'][itev] = TEV_shed_gamma #initial guess
self.circulation['LEV'][ilev] = LEV_shed_gamma #initial guess
circulation = np.append(np.append(self.circulation['TEV'][:itev+1], self.circulation['LEV'][:ilev+1]), self.circulation['FREE'])
xw = np.append(np.append(self.path['TEV'][i,0,:itev+1], self.path['LEV'][i,0,:ilev+1]), self.path['FREE'][i,0,:])
zw = np.append(np.append(self.path['TEV'][i,1,:itev+1], self.path['LEV'][i,1,:ilev+1]), self.path['FREE'][i,1,:])
W = self.airfoil_downwash(circulation, xw, zw, i)
# Compute A0, A1 coefficients
A0 = - 1/pi * np.trapz(W/Uinf , theta_panel)
A1 = 2/pi * np.trapz(W/Uinf*np.cos(theta_panel), theta_panel)
# Get f1 for Newton method
cbound = Uinf*chord*pi*(A0 + A1/2)
f1 = cbound + np.sum(self.circulation['TEV'][:itev+1]) + np.sum(self.circulation['LEV'][:ilev+1]) + \
np.sum(self.circulation['FREE']) - self.circulation['IC']
# Get f2 for Newton method
f2 = LESPcrit - A0
# Now we need to compute f1delta and f2delta
self.circulation['TEV'][itev] = TEV_shed_gamma + epsilon #initial guess
self.circulation['LEV'][ilev] = LEV_shed_gamma #initial guess
circulation = np.append(np.append(self.circulation['TEV'][:itev+1], self.circulation['LEV'][:ilev+1]), self.circulation['FREE'])
xw = np.append(np.append(self.path['TEV'][i,0,:itev+1], self.path['LEV'][i,0,:ilev+1]), self.path['FREE'][i,0,:])
zw = np.append(np.append(self.path['TEV'][i,1,:itev+1], self.path['LEV'][i,1,:ilev+1]), self.path['FREE'][i,1,:])
W = self.airfoil_downwash(circulation, xw, zw, i)
# Compute A0, A1 coefficients
A0 = - 1/pi * np.trapz(W/Uinf , theta_panel)
A1 = 2/pi * np.trapz(W/Uinf*np.cos(theta_panel), theta_panel)
# Get f1 for Newton method
circulation_bound = Uinf*chord*pi*(A0 + A1/2)
f1_delta_TEV = circulation_bound + np.sum(self.circulation['TEV'][:itev+1]) + np.sum(self.circulation['LEV'][:ilev+1]) + \
np.sum(self.circulation['FREE']) - self.circulation['IC']
f2_delta_TEV = LESPcrit - A0
self.circulation['TEV'][itev] = TEV_shed_gamma #initial guess
self.circulation['LEV'][ilev] = LEV_shed_gamma + epsilon #initial guess
circulation = np.append(np.append(self.circulation['TEV'][:itev+1], self.circulation['LEV'][:ilev+1]), self.circulation['FREE'])
xw = np.append(np.append(self.path['TEV'][i,0,:itev+1], self.path['LEV'][i,0,:ilev+1]), self.path['FREE'][i,0,:])
zw = np.append(np.append(self.path['TEV'][i,1,:itev+1], self.path['LEV'][i,1,:ilev+1]), self.path['FREE'][i,1,:])
W = self.airfoil_downwash(circulation, xw, zw, i)
# Compute A0, A1 coefficients
A0 = - 1/pi * np.trapz(W/Uinf , theta_panel)
A1 = 2/pi * np.trapz(W/Uinf*np.cos(theta_panel), theta_panel)
# Get f1 for Newton method
circulation_bound = Uinf*chord*pi*(A0 + A1/2)
f1_delta_LEV = circulation_bound + np.sum(self.circulation['TEV'][:itev+1]) + np.sum(self.circulation['LEV'][:ilev+1]) + \
np.sum(self.circulation['FREE']) - self.circulation['IC']
f2_delta_LEV = LESPcrit - A0
# Build the Jacobian
# J = [J11 J12] = [df1/dGamma_LEV df1/dGamma_TEV]
# [J21 J22] [df2/dGamma_LEV df2/dGamma_TEV]
# J11 = df1/dGamma_LEV = (f1(Gamma_LEV+eps) - f1(Gamma_LEV))/(Gamma_LEV+eps - Gamma_LEV)
# J12 = df1/dGamma_TEV = (f1(Gamma_TEV+eps) - f1(Gamma_TEV))/(Gamma_TEV+eps - Gamma_TEV)
# J21 = df2/dGamma_LEV = (f2(Gamma_LEV+eps) - f2(Gamma_LEV))/(Gamma_LEV+eps - Gamma_LEV)
# J22 = df2/dGamma_TEV = (f2(Gamma_TEV+eps) - f2(Gamma_TEV))/(Gamma_TEV+eps - Gamma_TEV)
# Where all the denominators are equal to epsilon -> Gamma+eps-Gamma
# Newton for nonlinear systems:
# J*p_k = -f -> p_k = - J^-1 *f (solve a linear system at each iteration)
# p_k is the direction of search in the Newton method for nonlinear systems
# [Gamma_LEV, Gamma_TEV]_k+1 = [Gamma_LEV, Gamma_TEV]_k + pk
J11 = (f1_delta_LEV - f1) / epsilon
J12 = (f1_delta_TEV - f1) / epsilon
J21 = (f2_delta_LEV - f2) / epsilon
J22 = (f2_delta_TEV - f2) / epsilon
J = np.array([[J11, J12], [J21, J22]])
pk = - np.linalg.solve(J, np.array([f1,f2])) #direction of search
shed_gamma = np.array([LEV_shed_gamma, TEV_shed_gamma]) + pk
LEV_shed_gamma = shed_gamma[0]
TEV_shed_gamma = shed_gamma[1]
self.circulation['TEV'][itev] = TEV_shed_gamma
self.circulation['LEV'][ilev] = LEV_shed_gamma
self.circulation['bound'][itev] = cbound
if niter >= self.maxiter:
print('The solution did not converge when solving the LEV-TEV nonlinear system')
# break
niter = niter + 1
# Solution after convergence:
circulation = np.append(np.append(self.circulation['TEV'][:itev+1], self.circulation['LEV'][:ilev+1]), self.circulation['FREE'])
xw = np.append(np.append(self.path['TEV'][i,0,:itev+1], self.path['LEV'][i,0,:ilev+1]), self.path['FREE'][i,0,:])
zw = np.append(np.append(self.path['TEV'][i,1,:itev+1], self.path['LEV'][i,1,:ilev+1]), self.path['FREE'][i,1,:])
W = self.airfoil_downwash(circulation, xw, zw, i)
A0 = - 1/pi * np.trapz(W/Uinf , theta_panel)
A1 = 2/pi * np.trapz(W/Uinf*np.cos(theta_panel), theta_panel)
self.fourier[i,0,:2] = A0, A1
self.circulation['bound'][itev] = Uinf*chord*pi*(A0 + A1/2)
# Now we compute the rest of fourier coefficients (from A2 to An)
for n in range(2,self.Ncoeffs):
self.fourier[i,0,n] = 2/pi * np.trapz(W/Uinf*np.cos(n*theta_panel), theta_panel)
# Not updating the derivatives since A0(t) is no longer differentiable
# Use the derivatives of the coefficients before the TEV
# for n in range(self.Ncoeffs): # and their derivatives
# self.fourier[i,1,n] = (self.fourier[i,0,n] - self.fourier[i-1,0,n])/dt
elif self.method == 'Faure': # without iterating
# Contribution of existing vortices
circulation = np.append(np.append(self.circulation['TEV'][:itev], self.circulation['LEV'][:ilev]), self.circulation['FREE'])
xw = np.append(np.append(self.path['TEV'][i,0,:itev], self.path['LEV'][i,0,:ilev]), self.path['FREE'][i,0,:])
zw = np.append(np.append(self.path['TEV'][i,1,:itev], self.path['LEV'][i,1,:ilev]), self.path['FREE'][i,1,:])
T1 = self.airfoil_downwash(circulation, xw, zw, i)
# We compute the intensity of the shed TEV and LEV
xa, za = self.path['airfoil_gamma_points'][i,0,:], self.path['airfoil_gamma_points'][i,1,:]
xtevi, ztevi = self.path['TEV'][i,0,itev], self.path['TEV'][i,1,itev]
utevi, wtevi = self.induced_velocity(np.array([1]), np.array([xtevi]), np.array([ztevi]), xa, za)
ut_tevi = utevi*np.cos(self.alpha[i]) - wtevi*np.sin(self.alpha[i]) # tangential to chord
                un_tevi = utevi*np.sin(self.alpha[i]) + wtevi*np.cos(self.alpha[i]) # normal to chord
from keras.layers import Conv2D
from keras import backend as K
from keras.engine.topology import Layer
from keras.activations import softmax
import numpy as np
class Weighted_feture_map(Layer):
def __init__(self, filters=1, kernel_size=(1,1), type_flag='patch', **kwargs):
self.filters = filters
self.kernel_size = kernel_size
self.strides = (1, 1)
self.padding = 'valid'
self.data_format = 'channels_last'
self.dilation_rate = (1, 1)
self.activation = None
self.use_bias = False
self.kernel_initializer = 'he_normal'
#self.kernel_initializer = 'ones'
self.bias_initializer = 'zeros'
self.kernel_regularizer = None
self.bias_regularizer = None
self.activity_regularizer = None
self.kernel_constraint = None
self.bias_constraint = None
self.type_flag = type_flag
super(Weighted_feture_map, self).__init__(**kwargs)
def build(self, input_shape):
# Create a trainable weight variable for this layer.
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
self.size_of_inputs = len(input_shape)
self.input_shape_ = input_shape[0]
self.output_filter = 1
# print (input_shape)
self.kernels = []
self.bias = []
input_dim = input_shape[-1][-1]
kernel_shape = self.kernel_size + (input_dim, self.filters)
for input_idx in range(self.size_of_inputs):
kernel = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel'+str(input_idx),
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.kernels.append(kernel)
if self.use_bias == True:
bias = self.add_weight(shape=(self.filters,),
initializer=self.bias_initializer,
name='bias'+str(input_idx),
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
self.bias.append(bias)
super(Weighted_feture_map, self).build(input_shape) # Be sure to call this somewhere!
def call(self, inputs):
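        # What this layer computes (see the code below): every input feature
        # map gets its own convolution (kernel_size, default 1x1), the result
        # is averaged (globally for type_flag='patch', over channels for
        # 'pixel') to give one score per branch, a softmax across branches
        # turns the scores into weights, and the output is the weighted sum
        # of the input feature maps.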
# print inputs
add_output_list = None
for input_idx in range(self.size_of_inputs):
if input_idx==0:
outputs = K.conv2d(
inputs[input_idx],
self.kernels[input_idx],
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if self.use_bias:
outputs = K.bias_add(
outputs,
self.bias[input_idx],
data_format=self.data_format)
# print K.print_tensor(outputs, message='output_conv = ')
if self.type_flag == 'patch':
# print 'patch case'
add_output_list = K.mean(outputs,axis=[1,2])
#print K.print_tensor(add_output_list, message='add_output = ')
else:
# print 'pixel case'
add_output_list = K.mean(outputs,axis=-1)
add_output_list = K.reshape(add_output_list, (-1,self.input_shape_[1],self.input_shape_[2],1))
#print K.print_tensor(add_output_list, message='add_output = ')
else:
outputs = K.conv2d(
inputs[input_idx],
self.kernels[input_idx],
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if self.use_bias:
outputs = K.bias_add(
outputs,
self.bias[input_idx],
data_format=self.data_format)
#print K.print_tensor(outputs, message='output_conv = ')
if self.type_flag == 'patch':
add_output = K.mean(outputs,axis=[1,2])
add_output_list = K.concatenate([add_output_list,add_output],axis=-1)
#print K.print_tensor(add_output_list, message='add_output = ')
else:
add_output = K.mean(outputs,axis=-1)
#print 'add_output shape print', K.print_tensor(add_output, message='add_output = ')
add_output = K.reshape(add_output,(-1,self.input_shape_[1],self.input_shape_[2],1))
add_output_list = K.concatenate([add_output_list,add_output],axis=-1)
#print K.print_tensor(add_output_list, message='add_output = ')
softmax_output = softmax(add_output_list, axis=-1)
#print 'sotmax', K.print_tensor(softmax_output, message='softmax_output = ')
if self.type_flag == 'patch':
softmax_col_shape = (-1,)
for input_idx in range(self.size_of_inputs):
if input_idx == 0:
softmax_col = softmax_output[:,input_idx]
softmax_col = K.reshape(softmax_col, softmax_col_shape)
output = inputs[input_idx] * softmax_col
#print 'get output 1', K.print_tensor(output, message='softmax_output = ')
else:
softmax_col = softmax_output[:,input_idx]
softmax_col = K.reshape(softmax_col, softmax_col_shape)
output += inputs[input_idx] * softmax_col
#print 'get output 2', K.print_tensor(output, message='softmax_output = ')
#print 'output shape'
#print K.print_tensor(output, message='output = ')
else:
#softmax_col_shape = (-1,)
for input_idx in range(self.size_of_inputs):
if input_idx == 0:
softmax_col = softmax_output[:,:,:,input_idx]
softmax_col = K.reshape(softmax_col, (-1,self.input_shape_[1],self.input_shape_[2],1))
output = inputs[input_idx] * softmax_col
#print 'get output 1', K.print_tensor(output, message='softmax_output = ')
else:
softmax_col = softmax_output[:,:,:,input_idx]
softmax_col = K.reshape(softmax_col, (-1,self.input_shape_[1],self.input_shape_[2],1))
output += inputs[input_idx] * softmax_col
#print 'get output 2', K.print_tensor(output, message='softmax_output = ')
return output
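# A minimal usage sketch for the layer above (hypothetical tensor names; the
# layer expects a Python list of equally shaped feature maps):
#
#   fused = Weighted_feture_map(filters=1, kernel_size=(1, 1),
#                               type_flag='patch')([feat_a, feat_b, feat_c])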
class NAIDE_Conv2D_A(Conv2D):
"""
Mask Shape :
filter_shape = (3,3)
1 1 1
1 0 1
1 1 1
For various filter size
"""
def __init__(self, filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv2D, self).__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
kernel_shape = self.kernel_size + (input_dim, self.filters)
##################################################################
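        # The mask zeroes the centre tap of every kernel, so each output pixel
        # is computed only from its neighbours and never from the input value
        # at the same location (a blind-spot receptive field).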
shape = kernel_shape
self.mask = np.ones(shape)
k = shape[0]
        self.mask[k//2,k//2,:] = 0
self.mask = K.variable(self.mask)
##################################################################
self.kernel = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.filters,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def call(self, inputs):
if self.rank == 2:
outputs = K.conv2d(
inputs,
self.kernel*self.mask, ### add mask multiplication
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if self.use_bias:
outputs = K.bias_add(
outputs,
self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
def get_config(self):
config = super(Conv2D, self).get_config()
config.pop('rank')
return config
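# A minimal usage sketch (hypothetical Keras functional-API names; assumes a
# single-channel image input and `Input` imported from keras.layers):
#
#   inp = Input(shape=(None, None, 1))
#   x = NAIDE_Conv2D_A(filters=64, kernel_size=(3, 3), padding='same',
#                      activation='relu')(inp)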
class NAIDE_Conv2D_B(Conv2D):
"""
Mask Shape :
filter_shape = (5,5)
1 1 1 1 1
1 0 0 0 1
1 0 1 0 1
1 0 0 0 1
1 1 1 1 1
For various filter size
"""
def __init__(self, filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv2D, self).__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
kernel_shape = self.kernel_size + (input_dim, self.filters)
##################################################################
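        # Keep the outer ring of the kernel plus the centre tap and zero the
        # interior ring, matching the mask sketched in the class docstring.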
shape = kernel_shape
self.mask = np.ones(shape)
k = shape[0]
self.mask[1:k-1,1:k-1,:] = 0
self.mask[k//2,k//2,:] = 1
self.mask = K.variable(self.mask)
##################################################################
self.kernel = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.filters,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def call(self, inputs):
if self.rank == 2:
outputs = K.conv2d(
inputs,
self.kernel*self.mask, ### add mask multiplication
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if self.use_bias:
outputs = K.bias_add(
outputs,
self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
def get_config(self):
config = super(Conv2D, self).get_config()
config.pop('rank')
return config
class NAIDE_Conv2D_DDR(Conv2D):
"""
Mask Shape :
filter_shape = (3,3)
0 1 0
1 0 1
0 1 0
Only for (3,3) filter shape
"""
def __init__(self, filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv2D, self).__init__(
rank=2,
filters=filters,
kernel_size=(3,3), # only for (3,3)
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
kernel_shape = self.kernel_size + (input_dim, self.filters)
##################################################################
shape = kernel_shape
self.mask = np.zeros((shape))
k = shape[0]
for i in range(k):
for j in range(k):
if (i+j) % 2 == 1:
self.mask[i,j] = 1
self.mask = K.variable(self.mask)
##################################################################
self.kernel = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.filters,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def call(self, inputs):
if self.rank == 2:
outputs = K.conv2d(
inputs,
self.kernel*self.mask, ### add mask multiplication
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if self.use_bias:
outputs = K.bias_add(
outputs,
self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
def get_config(self):
config = super(Conv2D, self).get_config()
config.pop('rank')
return config
class NAIDE_Conv2D_PUMP(Conv2D):
"""
Mask Shape :
filter_shape = (3,3)
1 0 1
        0 1 0
1 0 1
Only for (3,3) filter shape
"""
def __init__(self, filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv2D, self).__init__(
rank=2,
filters=filters,
kernel_size=(3,3), # only for (3,3)
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
kernel_shape = self.kernel_size + (input_dim, self.filters)
##################################################################
shape = kernel_shape
self.mask = np.zeros((shape))
k = shape[0]
for i in range(k):
for j in range(k):
if (i+j) % 2 == 0:
self.mask[i,j] = 1
        self.mask = K.variable(self.mask)
# typings/bl_ui/properties_physics_dynamicpaint.py
import sys
import typing
import bpy_types
class PHYSICS_UL_dynapaint_surfaces(bpy_types.UIList, bpy_types._GenericUI):
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw_item(self, _context, layout, _data, item, icon, _active_data,
_active_propname, _index):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PhysicButtonsPanel:
bl_context = None
''' '''
bl_region_type = None
''' '''
bl_space_type = None
''' '''
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
class PHYSICS_PT_dp_brush_source(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_brush_source_color_ramp(
PhysicButtonsPanel, bpy_types.Panel, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_brush_velocity(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_brush_velocity_color_ramp(
PhysicButtonsPanel, bpy_types.Panel, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_brush_velocity_smudge(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
    bl_options = None
@type top: int
@type right: int
@type bottom: int
"""
return self._append(emr._ELLIPSE(((left, top), (right, bottom))))
def Rectangle(self, left, top, right, bottom):
"""
Draw a rectangle using the current pen.
        @param left: x position of left side of the rectangle.
        @param top: y position of top side of the rectangle.
        @param right: x position of right edge of the rectangle.
        @param bottom: y position of bottom edge of the rectangle.
@return: true if rectangle was successfully rendered.
@rtype: int
@type left: int
@type top: int
@type right: int
@type bottom: int
"""
return self._append(emr._RECTANGLE(((left, top), (right, bottom))))
def RoundRect(self, left, top, right, bottom, cornerwidth, cornerheight):
"""
Draw a rectangle with rounded corners using the current pen.
        @param left: x position of left side of the rounded rectangle.
        @param top: y position of top side of the rounded rectangle.
        @param right: x position of right edge of the rounded rectangle.
        @param bottom: y position of bottom edge of the rounded rectangle.
@param cornerwidth: width of the ellipse that defines the roundness of the corner.
        @param cornerheight: height of the ellipse that defines the roundness of the corner.
@return: true if rectangle was successfully rendered.
@rtype: int
@type left: int
@type top: int
@type right: int
@type bottom: int
@type cornerwidth: int
@type cornerheight: int
"""
return self._append(
emr._ROUNDRECT((((left, top), (right, bottom)), cornerwidth, cornerheight))
)
def Arc(self, left, top, right, bottom, xstart, ystart, xend, yend):
"""
Draw an arc of an ellipse. The ellipse is specified by its bounding
        rectangle and two lines from its center to indicate the start and end
angles. left, top, right, bottom describe the bounding rectangle of
        the ellipse. The start point given by xstart,ystart defines a ray
from the center of the ellipse through the point and out to infinity.
The point at which this ray intersects the ellipse is the starting
point of the arc. Similarly, the infinite radial ray from the center
        through the end point defines the end point of the arc. The arc
is drawn in a counterclockwise direction, and if the start and end
rays are coincident, a complete ellipse is drawn.
@param left: x position of left edge of arc box.
@param top: y position of top edge of arc box.
@param right: x position of right edge of arc box.
@param bottom: y position bottom edge of arc box.
@param xstart: x position of arc start.
@param ystart: y position of arc start.
@param xend: x position of arc end.
@param yend: y position of arc end.
@return: true if arc was successfully rendered.
@rtype: int
@type left: int
@type top: int
@type right: int
@type bottom: int
@type xstart: int
@type ystart: int
@type xend: int
@type yend: int
"""
return self._append(
emr._ARC(((left, top), (right, bottom)), xstart, ystart, xend, yend)
)
def Chord(self, left, top, right, bottom, xstart, ystart, xend, yend):
"""
Draw a chord of an ellipse. A chord is a closed region bounded by an
arc and the [straight] line between the two points that define the arc
start and end. The arc start and end points are defined as in L{Arc}.
@param left: x position of left edge of arc box.
@param top: y position of top edge of arc box.
@param right: x position of right edge of arc box.
@param bottom: y position bottom edge of arc box.
@param xstart: x position of arc start.
@param ystart: y position of arc start.
@param xend: x position of arc end.
@param yend: y position of arc end.
@return: true if arc was successfully rendered.
@rtype: int
@type left: int
@type top: int
@type right: int
@type bottom: int
@type xstart: int
@type ystart: int
@type xend: int
@type yend: int
"""
return self._append(
emr._CHORD(((left, top), (right, bottom)), xstart, ystart, xend, yend)
)
def Pie(self, left, top, right, bottom, xstart, ystart, xend, yend):
"""
Draw a pie slice of an ellipse. The ellipse is specified as in
L{Arc}, and it is filled with the current brush.
@param left: x position of left edge of arc box.
@param top: y position of top edge of arc box.
@param right: x position of right edge of arc box.
@param bottom: y position bottom edge of arc box.
@param xstart: x position of arc start.
@param ystart: y position of arc start.
@param xend: x position of arc end.
@param yend: y position of arc end.
@return: true if arc was successfully rendered.
@rtype: int
@type left: int
@type top: int
@type right: int
@type bottom: int
@type xstart: int
@type ystart: int
@type xend: int
@type yend: int
"""
if xstart == xend and ystart == yend:
# Fix for OpenOffice: doesn't render a full ellipse when
# the start and end angles are the same
e = emr._ELLIPSE(((left, top), (right, bottom)))
else:
e = emr._PIE(((left, top), (right, bottom)), xstart, ystart, xend, yend)
return self._append(e)
def PolyBezier(self, points):
"""
Draw cubic Bezier curves using the list of points as both endpoints
and control points. The first point is used as the starting point,
        the second and third points are control points, and the fourth point
is the end point of the first curve. Subsequent curves need three
points each: two control points and an end point, as the ending point
of the previous curve is used as the starting point for the next
curve.
@param points: list of x,y tuples that are either end points or control points
@return: true if bezier curve was successfully rendered.
@rtype: int
@type points: tuple
"""
return self._appendOptimize16(points, emr._POLYBEZIER16, emr._POLYBEZIER)
def BeginPath(self):
"""
Begin defining a path. Any previous unclosed paths are discarded.
@return: true if successful.
@rtype: int
"""
# record next record number as first item in path
self.pathstart = len(self.records)
return self._append(emr._BEGINPATH())
def EndPath(self):
"""
End the path definition.
@return: true if successful.
@rtype: int
"""
return self._append(emr._ENDPATH())
def MoveTo(self, x, y):
"""
Move the current point to the given position and implicitly begin a
new figure or path.
@param x: new x position.
@param y: new y position.
@return: true if position successfully changed (can this fail?)
@rtype: int
@type x: int
@type y: int
"""
return self._append(emr._MOVETOEX(x, y))
def LineTo(self, x, y):
"""
Draw a straight line using the current pen from the current point to
the given position.
@param x: x position of line end.
@param y: y position of line end.
@return: true if line is drawn (can this fail?)
@rtype: int
@type x: int
@type y: int
"""
return self._append(emr._LINETO(x, y))
def PolylineTo(self, points):
"""
Draw a sequence of connected lines starting from the current
position and update the position to the final point in the list.
@param points: list of x,y tuples
@return: true if polyline is successfully rendered.
@rtype: int
@type points: tuple
"""
return self._appendOptimize16(points, emr._POLYLINETO16, emr._POLYLINETO)
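    # A minimal path-construction sketch (illustrative coordinates; `dc` is
    # assumed to be an instance of this device-context class):
    #
    #   dc.BeginPath()
    #   dc.MoveTo(10, 10)
    #   dc.PolylineTo([(90, 10), (90, 90), (10, 90)])
    #   dc.EndPath()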
def ArcTo(self, left, top, right, bottom, xstart, ystart, xend, yend):
"""
Draw an arc and update the current position. The arc is drawn as
described in L{Arc}, but in addition the start of the arc will be
connected to the previous position and the current position is updated
to the end of the arc so subsequent path operations such as L{LineTo},
L{PolylineTo}, etc. will connect to the end.
B{Note:} Currently appears unsupported in OpenOffice.
@param left: x position of left edge of arc box.
@param top: y position of top edge of arc box.
@param right: x position of right edge of arc box.
@param bottom: y position bottom edge of arc box.
@param xstart: x position of arc start.
@param ystart: y position of arc start.
@param xend: x position of arc end.
@param yend: y position of arc end.
@return: true if arc was successfully rendered.
@rtype: int
@type left: int
@type top: int
        @type right: int
+ m.x117 == 0.2)
m.c21 = Constraint(expr= m.x19 + m.x22 + m.x25 + m.x28 - m.x117 + m.x118 == 0.8)
m.c22 = Constraint(expr= - m.x3 + m.x30 + m.x33 + m.x36 + m.x39 + m.x42 - m.x45 - m.x60 - m.x75 - m.x119 + m.x120 == 0)
m.c23 = Constraint(expr= - m.x4 + m.x31 + m.x34 + m.x37 + m.x40 + m.x43 - m.x46 - m.x61 - m.x76 - m.x120 + m.x121 == 0)
m.c24 = Constraint(expr= - m.x6 - m.x18 - m.x30 + m.x45 + m.x48 + m.x51 + m.x54 + m.x57 - m.x63 - m.x78 - m.x122
+ m.x123 == 0)
m.c25 = Constraint(expr= - m.x7 - m.x19 - m.x31 + m.x46 + m.x49 + m.x52 + m.x55 + m.x58 - m.x64 - m.x79 - m.x123
+ m.x124 == 0)
m.c26 = Constraint(expr= - m.x9 - m.x21 - m.x33 - m.x48 + m.x60 + m.x63 + m.x66 + m.x69 + m.x72 - m.x81 - m.x125
+ m.x126 == 0)
m.c27 = Constraint(expr= - m.x10 - m.x22 - m.x34 - m.x49 + m.x61 + m.x64 + m.x67 + m.x70 + m.x73 - m.x82 - m.x126
+ m.x127 == 0)
m.c28 = Constraint(expr= - m.x12 - m.x24 - m.x36 - m.x51 - m.x66 + m.x75 + m.x78 + m.x81 + m.x84 + m.x87 - m.x128
+ m.x129 == 0)
m.c29 = Constraint(expr= - m.x13 - m.x25 - m.x37 - m.x52 - m.x67 + m.x76 + m.x79 + m.x82 + m.x85 + m.x88 - m.x129
+ m.x130 == 0)
m.c30 = Constraint(expr= - m.x39 - m.x54 - m.x69 - m.x84 - m.x131 + m.x132 == -0.17)
m.c31 = Constraint(expr= - m.x40 - m.x55 - m.x70 - m.x85 - m.x132 + m.x133 == -0.73)
m.c32 = Constraint(expr= - m.x15 - m.x27 - m.x42 - m.x57 - m.x72 - m.x87 - m.x134 + m.x135 == -0.65)
m.c33 = Constraint(expr= - m.x16 - m.x28 - m.x43 - m.x58 - m.x73 - m.x88 - m.x135 + m.x136 == -0.65)
m.c34 = Constraint(expr=m.x90*m.x120 - (m.x89*m.x119 + m.x92*m.x45 + m.x95*m.x60 + m.x98*m.x75 - (m.x89*m.x30 + m.x89*
m.x33 + m.x89*m.x36 + m.x89*m.x39 + m.x89*m.x42)) - 0.4*m.x3 == 0)
m.c35 = Constraint(expr=m.x91*m.x121 - (m.x90*m.x120 + m.x93*m.x46 + m.x96*m.x61 + m.x99*m.x76 - (m.x90*m.x31 + m.x90*
m.x34 + m.x90*m.x37 + m.x90*m.x40 + m.x90*m.x43)) - 0.4*m.x4 == 0)
m.c36 = Constraint(expr=m.x93*m.x123 - (m.x92*m.x122 + m.x89*m.x30 + m.x95*m.x63 + m.x98*m.x78 - (m.x92*m.x45 + m.x92*
m.x48 + m.x92*m.x51 + m.x92*m.x54 + m.x92*m.x57)) - 0.4*m.x6 - 0.1*m.x18 == 0)
m.c37 = Constraint(expr=m.x94*m.x124 - (m.x93*m.x123 + m.x90*m.x31 + m.x96*m.x64 + m.x99*m.x79 - (m.x93*m.x46 + m.x93*
m.x49 + m.x93*m.x52 + m.x93*m.x55 + m.x93*m.x58)) - 0.4*m.x7 - 0.1*m.x19 == 0)
m.c38 = Constraint(expr=m.x96*m.x126 - (m.x95*m.x125 + m.x89*m.x33 + m.x92*m.x48 + m.x98*m.x81 - (m.x95*m.x60 + m.x95*
m.x63 + m.x95*m.x66 + m.x95*m.x69 + m.x95*m.x72)) - 0.4*m.x9 - 0.1*m.x21 == 0)
m.c39 = Constraint(expr=m.x97*m.x127 - (m.x96*m.x126 + m.x90*m.x34 + m.x93*m.x49 + m.x99*m.x82 - (m.x96*m.x61 + m.x96*
m.x64 + m.x96*m.x67 + m.x96*m.x70 + m.x96*m.x73)) - 0.4*m.x10 - 0.1*m.x22 == 0)
m.c40 = Constraint(expr=m.x99*m.x129 - (m.x98*m.x128 + m.x89*m.x36 + m.x92*m.x51 + m.x95*m.x66 - (m.x98*m.x75 + m.x98*
m.x78 + m.x98*m.x81 + m.x98*m.x84 + m.x98*m.x87)) - 0.4*m.x12 - 0.1*m.x24 == 0)
m.c41 = Constraint(expr=m.x100*m.x130 - (m.x99*m.x129 + m.x90*m.x37 + m.x93*m.x52 + m.x96*m.x67 - (m.x99*m.x76 + m.x99*
m.x79 + m.x99*m.x82 + m.x99*m.x85 + m.x99*m.x88)) - 0.4*m.x13 - 0.1*m.x25 == 0)
m.c42 = Constraint(expr=m.x102*m.x120 - (m.x101*m.x119 + m.x104*m.x45 + m.x107*m.x60 + m.x110*m.x75 - (m.x101*m.x30 +
m.x101*m.x33 + m.x101*m.x36 + m.x101*m.x39 + m.x101*m.x42)) - 0.1*m.x3 == 0)
m.c43 = Constraint(expr=m.x103*m.x121 - (m.x102*m.x120 + m.x105*m.x46 + m.x108*m.x61 + m.x111*m.x76 - (m.x102*m.x31 +
m.x102*m.x34 + m.x102*m.x37 + m.x102*m.x40 + m.x102*m.x43)) - 0.1*m.x4 == 0)
m.c44 = Constraint(expr=m.x105*m.x123 - (m.x104*m.x122 + m.x101*m.x30 + m.x107*m.x63 + m.x110*m.x78 - (m.x104*m.x45 +
m.x104*m.x48 + m.x104*m.x51 + m.x104*m.x54 + m.x104*m.x57)) - 0.1*m.x6 - 0.9*m.x18 == 0)
m.c45 = Constraint(expr=m.x106*m.x124 - (m.x105*m.x123 + m.x102*m.x31 + m.x108*m.x64 + m.x111*m.x79 - (m.x105*m.x46 +
m.x105*m.x49 + m.x105*m.x52 + m.x105*m.x55 + m.x105*m.x58)) - 0.1*m.x7 - 0.9*m.x19 == 0)
m.c46 = Constraint(expr=m.x108*m.x126 - (m.x107*m.x125 + m.x101*m.x33 + m.x104*m.x48 + m.x110*m.x81 - (m.x107*m.x60 +
m.x107*m.x63 + m.x107*m.x66 + m.x107*m.x69 + m.x107*m.x72)) - 0.1*m.x9 - 0.9*m.x21 == 0)
m.c47 = Constraint(expr=m.x109*m.x127 - (m.x108*m.x126 + m.x102*m.x34 + m.x105*m.x49 + m.x111*m.x82 - (m.x108*m.x61 +
m.x108*m.x64 + m.x108*m.x67 + m.x108*m.x70 + m.x108*m.x73)) - 0.1*m.x10 - 0.9*m.x22 == 0)
m.c48 = Constraint(expr=m.x111*m.x129 - (m.x110*m.x128 + m.x101*m.x36 + m.x104*m.x51 + m.x107*m.x66 - (m.x110*m.x75 +
m.x110*m.x78 + m.x110*m.x81 + m.x110*m.x84 + m.x110*m.x87)) - 0.1*m.x12 - 0.9*m.x24 == 0)
m.c49 = Constraint(expr=m.x112*m.x130 - (m.x111*m.x129 + m.x102*m.x37 + m.x105*m.x52 + m.x108*m.x67 - (m.x111*m.x76 +
m.x111*m.x79 + m.x111*m.x82 + m.x111*m.x85 + m.x111*m.x88)) - 0.1*m.x13 - 0.9*m.x25 == 0)
m.c50 = Constraint(expr= m.x2 - m.b137 <= 0)
m.c51 = Constraint(expr= m.x3 - m.b138 <= 0)
m.c52 = Constraint(expr= m.x4 - m.b139 <= 0)
m.c53 = Constraint(expr= m.x5 - m.b140 <= 0)
m.c54 = Constraint(expr= m.x6 - m.b141 <= 0)
m.c55 = Constraint(expr= m.x7 - m.b142 <= 0)
m.c56 = Constraint(expr= m.x8 - m.b143 <= 0)
m.c57 = Constraint(expr= m.x9 - m.b144 <= 0)
m.c58 = Constraint(expr= m.x10 - m.b145 <= 0)
m.c59 = Constraint(expr= m.x11 - m.b146 <= 0)
m.c60 = Constraint(expr= m.x12 - m.b147 <= 0)
m.c61 = Constraint(expr= m.x13 - m.b148 <= 0)
m.c62 = Constraint(expr= m.x14 - m.b149 <= 0)
m.c63 = Constraint(expr= m.x15 - m.b150 <= 0)
m.c64 = Constraint(expr= m.x16 - m.b151 <= 0)
m.c65 = Constraint(expr= m.x17 - m.b152 <= 0)
m.c66 = Constraint(expr= m.x18 - m.b153 <= 0)
m.c67 = Constraint(expr= m.x19 - m.b154 <= 0)
m.c68 = Constraint(expr= m.x20 - m.b155 <= 0)
m.c69 = Constraint(expr= m.x21 - m.b156 <= 0)
m.c70 = Constraint(expr= m.x22 - m.b157 <= 0)
m.c71 = Constraint(expr= m.x23 - m.b158 <= 0)
m.c72 = Constraint(expr= m.x24 - m.b159 <= 0)
m.c73 = Constraint(expr= m.x25 - m.b160 <= 0)
m.c74 = Constraint(expr= m.x26 - m.b161 <= 0)
m.c75 = Constraint(expr= m.x27 - m.b162 <= 0)
m.c76 = Constraint(expr= m.x28 - m.b163 <= 0)
m.c77 = Constraint(expr= m.x29 - m.b164 <= 0)
m.c78 = Constraint(expr= m.x30 - m.b165 <= 0)
m.c79 = Constraint(expr= m.x31 - m.b166 <= 0)
m.c80 = Constraint(expr= m.x32 - m.b167 <= 0)
m.c81 = Constraint(expr= m.x33 - m.b168 <= 0)
m.c82 = Constraint(expr= m.x34 - m.b169 <= 0)
m.c83 = Constraint(expr= m.x35 - m.b170 <= 0)
m.c84 = Constraint(expr= m.x36 - m.b171 <= 0)
m.c85 = Constraint(expr= m.x37 - m.b172 <= 0)
m.c86 = Constraint(expr= m.x38 - m.b173 <= 0)
m.c87 = Constraint(expr= m.x39 - m.b174 <= 0)
m.c88 = Constraint(expr= m.x40 - m.b175 <= 0)
m.c89 = Constraint(expr= m.x41 - m.b176 <= 0)
m.c90 = Constraint(expr= m.x42 - m.b177 <= 0)
m.c91 = Constraint(expr= m.x43 - m.b178 <= 0)
m.c92 = Constraint(expr= m.x44 - m.b179 <= 0)
m.c93 = Constraint(expr= m.x45 - m.b180 <= 0)
m.c94 = Constraint(expr= m.x46 - m.b181 <= 0)
m.c95 = Constraint(expr= m.x47 - m.b182 <= 0)
m.c96 = Constraint(expr= m.x48 - m.b183 <= 0)
m.c97 = Constraint(expr= m.x49 - m.b184 <= 0)
m.c98 = Constraint(expr= m.x50 - m.b185 <= 0)
m.c99 = Constraint(expr= m.x51 - m.b186 <= 0)
m.c100 = Constraint(expr= m.x52 - m.b187 <= 0)
m.c101 = Constraint(expr= m.x53 - m.b188 <= 0)
m.c102 = Constraint(expr= m.x54 - m.b189 <= 0)
m.c103 = Constraint(expr= m.x55 - m.b190 <= 0)
m.c104 = Constraint(expr= m.x56 - m.b191 <= 0)
m.c105 = Constraint(expr= m.x57 - m.b192 <= 0)
m.c106 = Constraint(expr= m.x58 - m.b193 <= 0)
m.c107 = Constraint(expr= m.x59 - m.b194 <= 0)
m.c108 = Constraint(expr= m.x60 - m.b195 <= 0)
m.c109 = Constraint(expr= m.x61 - m.b196 <= 0)
m.c110 = Constraint(expr= m.x62 - m.b197 <= 0)
m.c111 = Constraint(expr= m.x63 - m.b198 <= 0)
m.c112 = Constraint(expr= m.x64 - m.b199 <= 0)
m.c113 = Constraint(expr= m.x65 - m.b200 <= 0)
m.c114 = Constraint(expr= m.x66 - m.b201 <= 0)
m.c115 = Constraint(expr= m.x67 - m.b202 <= 0)
m.c116 = Constraint(expr= m.x68 - m.b203 <= 0)
m.c117 = Constraint(expr= m.x69 - m.b204 <= 0)
m.c118 = Constraint(expr= m.x70 - m.b205 <= 0)
m.c119 = Constraint(expr= m.x71 - m.b206 <= 0)
m.c120 = Constraint(expr= m.x72 - m.b207 <= 0)
m.c121 = Constraint(expr= m.x73 - m.b208 <= 0)
m.c122 = Constraint(expr= m.x74 - m.b209 <= 0)
m.c123 = Constraint(expr= m.x75 - m.b210 <= 0)
m.c124 = Constraint(expr= m.x76 - m.b211 <= 0)
m.c125 = Constraint(expr= m.x77 - m.b212 <= 0)
m.c126 = Constraint(expr= m.x78 - m.b213 <= 0)
m.c127 = Constraint(expr= m.x79 - m.b214 <= 0)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from functools import partial
import builtins
import gc
from enum import Enum
from itertools import islice
import operator
import copy
import inspect
import itertools
from types import MethodType
from collections import abc
import numbers
import os
import random
import sys
import threading
import uuid
import weakref
from collections import defaultdict, namedtuple
from distutils.version import Version, LooseVersion
from typing import Union, Tuple, Any, Callable, Iterator, Set, Optional, overload, TypeVar, Mapping, Dict,List,Iterable
import typing
import numpy as np
import tensorflow as tf
from tensorflow.python import enable_eager_execution
from tensorflow.python.eager import context
from tensorflow.python.framework import func_graph, ops
from tensorflow.python.module import module
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures, tracking
from tensorflow.python.training.tracking import layer_utils as trackable_layer_utils
from tensorflow.python.util import object_identity
from trident.backend import iteration_tools
from trident.backend.common import camel2snake, to_list, unpack_singleton, enforce_singleton, OrderedDict, get_session, set_session, Signature, PrintException, TensorShape, \
get_args_spec
from trident.backend.tensorflow_ops import *
from trident.backend import tensorflow_ops as tops
from trident.backend import dtype
_FUN_NAMES = [
('float', tops.float),
('long', tops.long),
('equal', tops.equal),
('int', tops.int),
('to', tops.to)]
for target_fun_name, source_fun in _FUN_NAMES:
setattr(Tensor, target_fun_name, source_fun)
from trident.backend.tensorspec import *
from trident.data.utils import pickle_it
from trident.backend import tensorflow_serialization as serialization
from trident import context
__all__ = ['set_device', 'DTYPE_MAPPING','Layer', 'get_device', 'Parameter', 'Sequential', 'ModuleList', 'ModuleDict', 'summary', 'normalize_padding', 'load', 'save', 'try_map_args_and_call',
'fix_layer','fix_keras_module']
ctx = context._context()
DTYPE_MAPPING = {
tf.bool: dtype.bool,
tf.int8: dtype.int8,
tf.int16: dtype.int16,
tf.int32: dtype.int32,
tf.int64: dtype.int64,
tf.uint8: dtype.uint8,
tf.float16: dtype.float16,
tf.float32: dtype.float32,
tf.float64: dtype.float64,
tf.complex64: dtype.complex64,
tf.complex128: dtype.complex128,
}
def get_device():
"""get current device
    Returns: device string ('cpu', 'cuda')
"""
if ctx.device is None or ctx.device == 'cuda':
set_device('/gpu:0' if len(tf.config.list_physical_devices('GPU')) > 0 else "/cpu:0")
return ctx.device
def set_device(device='/cpu:0'):
if device.lower() == 'cuda' or device.lower() == 'gpu':
ctx.device = '/gpu:0'
if device.lower() == 'cpu':
ctx.device = '/cpu:0'
if 'gpu' in device and len(tf.config.list_physical_devices('GPU')) == 0:
raise ValueError('Gpu is not available...')
try:
if 'cpu' in ctx.device:
if len(tf.config.list_physical_devices('GPU')) > 0:
os.environ["CUDA_VISIBLE_DEVICES"] = '999'
if tf.test.gpu_device_name():
print('GPU found')
else:
print("No GPU found")
elif 'gpu' in device or 'cuda' in device:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
gcitems = gc.get_objects()
for i in range(len(gcitems)):
obj = gcitems[i]
try:
if is_tensor(obj):
with tf.device(ctx.device):
                        obj = tf.identity(obj)  # re-materialize the existing tensor on the newly selected device
elif isinstance(obj, Layer):
obj.to(device)
except Exception as e:
print(e)
except Exception as e:
print(e)
version = tf.version
sys.stdout.write('Tensorflow version:{0}.\n'.format(version.VERSION))
tf_version = LooseVersion(vstring=version.VERSION)
base_version = LooseVersion(vstring='2.2.0-rc0')
if tf_version.version < base_version.version:
raise ValueError('trident only support Tensorflow 2.2.0-rc0 or newer.\n')
sys.stdout.write('use device:{0}.\n'.format(get_device()))
try:
enable_eager_execution()
tf.executing_eagerly()
sys.stdout.write('executing_eagerly\n')
except Exception as e:
sys.stdout.write('executing_eagerly fail. {0}\n'.format(e))
def load(path):
"""load model from *.pth or *.pth.tar
Args:
path (str):
Returns:
"""
with tf.device(get_device()):
if '.tar' in path:
return serialization.load_pthtar(path)
else:
return serialization.load(path)
def save(obj, path, is_compressed=False):
serialization.save(obj, path, is_compressed=is_compressed)
return True
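# Illustrative sketch (not part of the original module): a save/load round trip with the
# two helpers above. The variable names and the file name "model.pth.tar" are assumptions
# made only for this example.
def _demo_save_and_load(model, path='model.pth.tar'):
    save(model, path, is_compressed=False)   # serialize the object to disk
    restored = load(path)                    # *.tar paths go through load_pthtar(), others through plain load
    return restored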
class RemovableHandle(object):
"""A handle which provides the capability to remove a hook."""
next_id = 0
def __init__(self, hooks_dict):
self.hooks_dict_ref = weakref.ref(hooks_dict)
self.id = RemovableHandle.next_id
RemovableHandle.next_id += 1
def remove(self):
hooks_dict = self.hooks_dict_ref()
if hooks_dict is not None and self.id in hooks_dict:
del hooks_dict[self.id]
def __getstate__(self):
return self.hooks_dict_ref(), self.id
def __setstate__(self, state):
if state[0] is None:
# create a dead reference
self.hooks_dict_ref = weakref.ref(OrderedDict())
else:
self.hooks_dict_ref = weakref.ref(state[0])
self.id = state[1]
RemovableHandle.next_id = max(RemovableHandle.next_id, self.id + 1)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.remove()
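# Illustrative sketch (not part of the original module): RemovableHandle is normally paired
# with an OrderedDict of hooks; the lambda registered here is a stand-in for a real hook.
def _demo_removable_handle():
    hooks = OrderedDict()
    handle = RemovableHandle(hooks)          # the handle remembers its slot id
    hooks[handle.id] = lambda *args: None    # register a dummy hook under that id
    handle.remove()                          # removing deletes the entry from the dict again
    return len(hooks) == 0                   # True: the hook is gone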
#
# class BackwardHook(object):
# """
# A wrapper class to implement nn.Module backward hooks.
# It handles:
# - Ignoring non-Tensor inputs and replacing them by None before calling the user hook
# - Generating the proper Node to capture a set of Tensor's gradients
# - Linking the gradients captures for the outputs with the gradients captured for the input
# - Calling the user hook once both output and input gradients are available
# """
#
# def __init__(self, module, user_hooks):
# self.user_hooks = user_hooks
# self.module = module
#
# self.grad_outputs = None
# self.n_outputs = -1
# self.output_tensors_index = None
# self.n_inputs = -1
# self.input_tensors_index = None
#
# def _pack_with_none(self, indices, values, size):
# res = [None] * size
# for idx, val in zip(indices, values):
# res[idx] = val
#
# return tuple(res)
#
# def _unpack_none(self, indices, values):
# res = []
# for idx in indices:
# res.append(values[idx])
#
# return tuple(res)
#
# def _set_user_hook(self, grad_fn, user_hook):
# @functools.wraps(user_hook)
# def hook(grad_input, _):
# if self.grad_outputs is None:
# raise RuntimeError("Module backward hook for grad_input is called before "
# "the grad_output one. This happens because the gradient "
# "in your nn.Module flows to the Module's input without "
# "passing through the Module's output. Make sure that the "
# "output depends on the input and that the loss is computed "
# "based on the output.")
#
# grad_input = self._pack_with_none(self.input_tensors_index, grad_input, self.n_inputs)
# res = user_hook(self.module, grad_input, self.grad_outputs)
# if res is None:
# return res
#
# if len(res) != len(grad_input):
# raise RuntimeError("Backward hook returned an invalid number of grad_input, "
# "got {}, but expected {}".format(len(res), len(grad_input)))
# return self._unpack_none(self.input_tensors_index, res)
# grad_fn.register_hook(hook)
#
# def _apply_on_tensors(self, fn, args):
# # Can be used to apply the given function to the tensors contained in the
# # args. Will return updated args and the tensors indices
# tensors_idx = []
# tensors = []
#
# requires_grad = False
# for i, arg in enumerate(args):
# if is_tensor(arg):
# tensors_idx.append(i)
# tensors.append(arg)
# requires_grad |= arg.requires_grad
#
# if not requires_grad:
# return args, None
#
# new_tensors = torch.nn.modules._functions.BackwardHookFunction.apply(*tensors)
# if len(new_tensors) == 0:
# raise RuntimeError("Cannot set Module backward hook for a Module with no input Tensors.")
# grad_fn = new_tensors[0].grad_fn
# if not grad_fn.name() == "BackwardHookFunctionBackward":
# raise RuntimeError("Error while setting up backward hooks. Please open "
# "an issue with a code sample to reproduce this.")
#
# fn(grad_fn)
#
# arg_list = list(args)
# for idx, val in zip(tensors_idx, new_tensors):
# arg_list[idx] = val
#
# return tuple(arg_list), tensors_idx
#
# def setup_input_hook(self, args):
# def fn(grad_fn):
# for hook in self.user_hooks:
# self._set_user_hook(grad_fn, hook)
#
# res, input_idx = self._apply_on_tensors(fn, args)
# self.n_inputs = len(args)
# self.input_tensors_index = input_idx
# return res
#
# def setup_output_hook(self, args):
# def fn(grad_fn):
# def hook(_, grad_output):
# self.grad_outputs = self._pack_with_none(self.output_tensors_index,
# grad_output,
# self.n_outputs)
# grad_fn.register_hook(hook)
#
# is_tuple = True
# if not isinstance(args, tuple):
# args = (args,)
# is_tuple = False
#
# res, output_idx = self._apply_on_tensors(fn, args)
# self.n_outputs = len(args)
# self.output_tensors_index = output_idx
#
# if not is_tuple:
# res = res[0]
# return res
def _is_not_trainable_variable(obj):
return module._is_variable(obj) and not getattr(obj, "trainable", False)
# The internal graph maintained by Keras and used by the symbolic Keras APIs
# while executing eagerly (such as the functional API for model-building).
# This is thread-local to allow building separate models in different threads
# concurrently, but comes at the cost of not being able to build one model
# across threads.
_GRAPH = threading.local()
def get_graph():
if context.executing_eagerly():
global _GRAPH
if not getattr(_GRAPH, 'graph', None):
_GRAPH.graph = func_graph.FuncGraph('trident_graph')
return _GRAPH.graph
else:
return ops.get_default_graph()
# A global dictionary mapping graph objects to an index of counters used
# for various layer/optimizer names in each graph.
# Allows to give unique autogenerated names to layers, in a graph-specific way.
PER_GRAPH_OBJECT_NAME_UIDS = weakref.WeakKeyDictionary()
def reset_name(module: tf.Module, prefix_dict=None):
def get_uid(prefix, seq):
if prefix not in module._uid_prefixs or seq < module._uid_prefixs[prefix]:
module._uid_prefixs[prefix] = seq
return module._uid_prefixs[prefix]
if not hasattr(module, '_uid_prefixs') or prefix_dict is not None:
module._uid_prefixs = prefix_dict
if not hasattr(module, 'default_name'):
module.default_name = camel2snake(module.__class__.__name__) + '_' + str(get_global_uid(camel2snake(module.__class__.__name__)))
prefix, seq = module.default_name.rsplit('_', 1) # if '_' in module.default_name else
seq = int(seq)
module.default_name = prefix + '_' + str(seq - get_uid(prefix, seq) + 1)
if module._name is None:
module._name = module.default_name
module.__name__ = module._name
module.update_name_scope(module._name)
_UID_PREFIX = defaultdict(int)
def get_global_uid(prefix=''):
_UID_PREFIX[prefix] += 1
return _UID_PREFIX[prefix]
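# Illustrative sketch (not part of the original module): get_global_uid() hands out a
# per-prefix counter, which reset_name() turns into names such as "conv2d_1", "conv2d_2".
def _demo_global_uid():
    first = get_global_uid('conv2d')    # 1 for a fresh prefix
    second = get_global_uid('conv2d')   # 2 on the next call
    return first, second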
class _IncompatibleKeys(namedtuple('IncompatibleKeys', ['missing_keys', 'unexpected_keys'])):
def __repr__(self):
if not self.missing_keys and not self.unexpected_keys:
return '<All keys matched successfully>'
return super(_IncompatibleKeys, self).__repr__()
__str__ = __repr__
def _addindent(s_, numSpaces):
s = s_.split('\n')
# don't do anything for single-line stuff
if len(s) == 1:
return s_
first = s.pop(0)
s = [(numSpaces * ' ') + line for line in s]
s = '\n'.join(s)
s = first + '\n' + s
return s
r"""This tracks hooks common to all modules that are executed before/after
calling forward and backward. This is global state used for debugging/profiling
purposes"""
_global_backward_hooks = OrderedDict()
_global_forward_pre_hooks = OrderedDict()
_global_forward_hooks = OrderedDict()
_grad_t = Union[Tuple[Tensor, ...], Tensor]
# See https://mypy.readthedocs.io/en/latest/generics.html#generic-methods-and-generic-self for the use
# of `T` to annotate `self`. Many methods of `Module` return `self` and we want those return values to be
# the type of the subclass, not the looser type of `Module`.
T = TypeVar('T', bound='Layer')
def register_module_forward_pre_hook(hook: Callable[..., None]) -> RemovableHandle:
r"""Registers a forward pre-hook common to all modules.
.. warning ::
This adds global state to the `nn.module` module
and it is only intended for debugging/profiling purposes.
The hook will be called every time before :func:`forward` is invoked.
    It should have the following signature: hook(module, input) -> None or modified input.
:
O00oo00o000o = IIiO0Ooo . rloc
if ( rloc . is_exact_match ( O00oo00o000o ) ) : return ( IIiO0Ooo )
if 50 - 50: IiII - II111iiii
return ( None )
if 10 - 10: OoooooooOO % Ii1I * OOooOOo + IiII * oO0o
if 13 - 13: II111iiii
def get_rloc_by_interface ( self , interface ) :
for IIiO0Ooo in self . rloc_set :
if ( IIiO0Ooo . interface == interface ) : return ( IIiO0Ooo )
if 14 - 14: i11iIiiIii . IiII
return ( None )
if 70 - 70: Oo0Ooo * OOooOOo + I1Ii111 % OoOoOO00 / O0
if 23 - 23: O0 * oO0o / I1IiiI + i1IIi * O0 % oO0o
def add_db ( self ) :
if ( self . group . is_null ( ) ) :
lisp_db_for_lookups . add_cache ( self . eid , self )
else :
I11i111 = lisp_db_for_lookups . lookup_cache ( self . group , True )
if ( I11i111 == None ) :
I11i111 = lisp_mapping ( self . group , self . group , [ ] )
lisp_db_for_lookups . add_cache ( self . group , I11i111 )
if 11 - 11: I1Ii111 . OoooooooOO * iIii1I11I1II1 / I1ii11iIi11i - ooOoO0o . iII111i
I11i111 . add_source_entry ( self )
if 71 - 71: i11iIiiIii + I11i / i11iIiiIii % Oo0Ooo / iIii1I11I1II1 * OoO0O00
if 49 - 49: iII111i + OoOoOO00
if 33 - 33: ooOoO0o
def add_cache ( self , do_ipc = True ) :
if ( self . group . is_null ( ) ) :
lisp_map_cache . add_cache ( self . eid , self )
if ( lisp_program_hardware ) : lisp_program_vxlan_hardware ( self )
else :
Iii1 = lisp_map_cache . lookup_cache ( self . group , True )
if ( Iii1 == None ) :
Iii1 = lisp_mapping ( self . group , self . group , [ ] )
Iii1 . eid . copy_address ( self . group )
Iii1 . group . copy_address ( self . group )
lisp_map_cache . add_cache ( self . group , Iii1 )
if 19 - 19: I1Ii111 % IiII
if ( self . eid . is_null ( ) ) : self . eid . make_default_route ( Iii1 . group )
Iii1 . add_source_entry ( self )
if 94 - 94: I1Ii111 * I1ii11iIi11i * I1ii11iIi11i - o0oOOo0O0Ooo . i11iIiiIii
if ( do_ipc ) : lisp_write_ipc_map_cache ( True , self )
if 16 - 16: i1IIi
if 88 - 88: OOooOOo
def delete_cache ( self ) :
self . delete_rlocs_from_rloc_probe_list ( )
lisp_write_ipc_map_cache ( False , self )
if 79 - 79: oO0o
if ( self . group . is_null ( ) ) :
lisp_map_cache . delete_cache ( self . eid )
if ( lisp_program_hardware ) :
OOoOOoo = self . eid . print_prefix_no_iid ( )
os . system ( "ip route delete {}" . format ( OOoOOoo ) )
if 13 - 13: oO0o % ooOoO0o % I1IiiI - o0oOOo0O0Ooo
else :
Iii1 = lisp_map_cache . lookup_cache ( self . group , True )
if ( Iii1 == None ) : return
if 50 - 50: I1Ii111 . I1Ii111 . OoO0O00 + I11i * o0oOOo0O0Ooo
i111i111I111 = Iii1 . lookup_source_cache ( self . eid , True )
if ( i111i111I111 == None ) : return
if 16 - 16: i11iIiiIii
Iii1 . source_cache . delete_cache ( self . eid )
if ( Iii1 . source_cache . cache_size ( ) == 0 ) :
lisp_map_cache . delete_cache ( self . group )
if 83 - 83: Oo0Ooo / Oo0Ooo . I11i + oO0o % Ii1I
if 22 - 22: ooOoO0o
if 83 - 83: OOooOOo - i11iIiiIii - i1IIi / oO0o
if 33 - 33: OoO0O00 + OOooOOo
def add_source_entry ( self , source_mc ) :
if ( self . source_cache == None ) : self . source_cache = lisp_cache ( )
self . source_cache . add_cache ( source_mc . eid , source_mc )
if 36 - 36: o0oOOo0O0Ooo . o0oOOo0O0Ooo / oO0o * ooOoO0o * Ii1I * IiII
if 39 - 39: i1IIi
def lookup_source_cache ( self , source , exact ) :
if ( self . source_cache == None ) : return ( None )
return ( self . source_cache . lookup_cache ( source , exact ) )
if 79 - 79: ooOoO0o - II111iiii - oO0o
if 55 - 55: iII111i % iIii1I11I1II1 + Ii1I + oO0o . i11iIiiIii - OOooOOo
def dynamic_eid_configured ( self ) :
return ( self . dynamic_eids != None )
if 14 - 14: oO0o - i11iIiiIii / OoOoOO00 % o0oOOo0O0Ooo / IiII * I1IiiI
if 2 - 2: i1IIi / I1Ii111 + I1IiiI + I1ii11iIi11i - o0oOOo0O0Ooo + iIii1I11I1II1
def star_secondary_iid ( self , prefix ) :
if ( self . secondary_iid == None ) : return ( prefix )
o0OOoOO = "," + str ( self . secondary_iid )
return ( prefix . replace ( o0OOoOO , o0OOoOO + "*" ) )
if 78 - 78: I1ii11iIi11i % i1IIi . I1Ii111 + Oo0Ooo . o0oOOo0O0Ooo % II111iiii
if 65 - 65: Ii1I . OoOoOO00 + O0 / iIii1I11I1II1 % Ii1I % I1Ii111
def increment_decap_stats ( self , packet ) :
o00o = packet . udp_dport
if ( o00o == LISP_DATA_PORT ) :
oOOoo0O00 = self . get_rloc ( packet . outer_dest )
else :
if 31 - 31: o0oOOo0O0Ooo - Oo0Ooo
if 15 - 15: O0 + OOooOOo
if 8 - 8: i11iIiiIii . IiII . I1ii11iIi11i + i1IIi % I1Ii111
if 64 - 64: I1IiiI . Oo0Ooo * OoO0O00
for oOOoo0O00 in self . rloc_set :
if ( oOOoo0O00 . translated_port != 0 ) : break
if 87 - 87: i1IIi / OoooooooOO
if 68 - 68: I1Ii111 / iIii1I11I1II1
if ( oOOoo0O00 != None ) : oOOoo0O00 . stats . increment ( len ( packet . packet ) )
self . stats . increment ( len ( packet . packet ) )
if 8 - 8: ooOoO0o * IiII * OOooOOo / I1IiiI
if 40 - 40: i11iIiiIii + OoooooooOO
def rtrs_in_rloc_set ( self ) :
for oOOoo0O00 in self . rloc_set :
if ( oOOoo0O00 . is_rtr ( ) ) : return ( True )
if 2 - 2: o0oOOo0O0Ooo * OoO0O00
return ( False )
if 88 - 88: Oo0Ooo + oO0o + iII111i
if 51 - 51: i1IIi + i11iIiiIii * I11i / iII111i + OoooooooOO
if 89 - 89: i11iIiiIii - I1Ii111 - O0 % iIii1I11I1II1 / IiII - O0
class lisp_dynamic_eid ( ) :
def __init__ ( self ) :
self . dynamic_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . uptime = lisp_get_timestamp ( )
self . interface = None
self . last_packet = None
self . timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
if 63 - 63: OOooOOo
if 23 - 23: Oo0Ooo / i1IIi - OOooOOo / Oo0Ooo
def get_timeout ( self , interface ) :
try :
IIiiiiII = lisp_myinterfaces [ interface ]
self . timeout = IIiiiiII . dynamic_eid_timeout
except :
self . timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
if 86 - 86: OOooOOo * OoOoOO00 % i1IIi * IiII . I1ii11iIi11i
if 72 - 72: i1IIi - I1Ii111 . O0 * OoO0O00
if 62 - 62: Oo0Ooo . iII111i
if 15 - 15: i11iIiiIii * I11i + oO0o
class lisp_group_mapping ( ) :
def __init__ ( self , group_name , ms_name , group_prefix , sources , rle_addr ) :
self . group_name = group_name
self . group_prefix = group_prefix
self . use_ms_name = ms_name
self . sources = sources
self . rle_address = rle_addr
if 67 - 67: IiII . OoO0O00
if 59 - 59: oO0o * o0oOOo0O0Ooo
def add_group ( self ) :
  lisp_group_mapping_list [ self . group_name ] = self
<reponame>FYPYTHON/method
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/12/31 14:06
# @Author : <EMAIL>
# @File : erasecover.py
# @Software: PyCharm
from kazoo import client
class Solution:
def eraseOverlapIntervals(self, intervals):
if not intervals:
return 0
intervals.sort(key=lambda x: x[1])
count = 0
start_point = intervals[0][1]
for i in range(0, len(intervals) - 1):
if start_point > intervals[i + 1][0]:
count += 1
else:
start_point = intervals[i + 1][1]
return count
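# Illustrative sketch (not part of the original file): the greedy solver above keeps the
# interval that ends earliest and counts every later interval overlapping it as removable.
def _demo_erase_overlap_intervals():
    s = Solution()
    # after sorting by end point, [1, 3] overlaps [1, 2], so one interval must go
    return s.eraseOverlapIntervals([[1, 2], [2, 4], [1, 3]])  # expected: 1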
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
def print(self):
print(self.val)
# if self.next is None:
# return ("{} {}".format(self.val, None))
# else:
# return ("{} {}".format(self.val, self.next))
class Solution1:
"""
# s = Solution()
# # 24 + 17
# print("24 + 17")
# l4 = ListNode(2, None)
# l2 = ListNode(4, l4)
# l5 = ListNode(5, l2)
#
# l1 = ListNode(1, None)
# l7 = ListNode(7, l1)
# l6 = ListNode(6, l7)
# s1 = Solution1()
# s1.addTwoNumbers(l5, l1)
"""
def printListNode(self, node: ListNode):
strv = ""
while node is not None:
strv = str(node.val) + strv
node = node.next
print("node v:", strv)
def addTwoNumbers(self, l1: ListNode, l2: ListNode):
self.printListNode(l1)
self.printListNode(l2)
# pass
#
# str1 = ""
# str2 = ""
#
# while True:
# print("l1", l1.val)
# str1 = str(l1.val) + str1
# l1 = l1.next
# if l1 is None:
# break
# print("str1:", str1)
#
# while True:
# print("l2", l2.val)
# str2 = str(l2.val) + str2
# l2 = l2.next
# if l2 is None:
# break
# print("str2:", str2)
# v3 = str(int(str1) + int(str2))
# print(v3)
# return v3
sp = 0
cend = True
node = None
tnode = None
while cend:
            if l1 is None and l2 is None and sp == 0:
                # stop once both lists and any remaining carry are consumed
                cend = False
                break
# print(l1.val, l2.val)
v1 = v2 = 0
if l1 is not None:
v1 = l1.val
l1 = l1.next
if l2 is not None:
v2 = l2.val
l2 = l2.next
            # digit-wise addition: split the running sum into a digit and a carry
            total = v1 + v2 + sp
            sum = total % 10
            sp = total // 10
print(sum)
if node is None:
node = ListNode(sum, None)
else:
if tnode is None:
tnode = ListNode(sum, None)
node.next = tnode
else:
tnode.next = ListNode(sum, None)
tnode = tnode.next
self.printListNode(node)
def findMedianSortedArrays(self, nums1, nums2) -> float:
cc = sorted(nums1 + nums2)
length = len(cc)
# print(cc)
if length % 2 == 1:
return cc[length // 2]
else:
return (cc[length // 2] + cc[length // 2 - 1]) * 1.0 / 2
def lengthOfLongestSubstring(self, s: str) -> int:
"""
s1 = "abcabc"
s2 = 'dvdf'
s3 = 'bbbbb'
s4 = 'pwwkew'
sb = lengthOfLongestSubstring("", s4)
print(sb)
"""
pos = 0
maxlen = 0
curlen = 0
for i in range(len(s)):
"""
a b c a b c
pos
i
s[pos:i] = a b c
curlen = 3 - 0 - 0
pos = 0 + 0 + 1
pos i
s[pos:i] = b c a
curlen = 4 - 1 - 0
pos = 1 + 0 + 1
"""
# print(s[i], s[pos:i], pos, i)
if s[i] in s[pos:i] and curlen > 0:
if curlen > maxlen:
maxlen = curlen
curlen = i - pos - s[pos:i].index(s[i])
pos = pos + s[pos:i].index(s[i]) + 1
else:
curlen += 1
# print(i, pos, curlen, maxlen)
if curlen > maxlen:
return curlen
else:
return maxlen
#
def convert(self, s: str, numRows: int) -> str:
"""
z1 = 'LEETCODEISHIRING'
z2 = "PAYPALISHIRING"
z3 = "AB"
zz = convert("", z3, 2)
print(zz)
"""
# if numRows <= 1: return s
# step = numRows * 2 - 2
# slen = len(s)
# znum = slen // step if slen % step == 0 else slen // step + 1
# news = ""
# for row in range(numRows):
# if row == 0:
# for i in range(znum):
# news += s[i*step]
# elif 0 < row < numRows - 1:
# for i in range(znum):
# if row + i*step < slen:
# news += s[row + i*step]
# if row + i*step + (numRows - row - 1) * 2 < slen:
# news += s[row + i*step + (numRows - row - 1) * 2]
# else:
# for i in range(znum):
# if numRows-1 + i*step < slen:
# news += s[numRows-1 + i*step]
# return news
"""
s = 'LEETCODEISHIRING' numRows = 3
L C I R
E T O E S I I G
E D H N
"""
if numRows == 1 or numRows >= len(s):
return s
index = 0
step = 1
L = ['' for _ in range(numRows)]
for x in s:
L[index] += x
if index == 0:
step = 1
elif index == numRows - 1:
step = -1
index += step
print(L)
return ''.join(L)
# str reverse [::-1]
def reverse(self, x: int) -> int:
if x < 0:
xs = int("-" + str(-x)[::-1])
return xs if xs > -2 ** 31 - 1 else 0
else:
xs = int(str(x)[::-1])
return 0 if xs > 2 ** 31 - 1 else xs
def threeSumClosest(self, nums, target: int) -> int:
n = len(nums)
nums.sort() # 排序
ans = float('inf')
for first in range(n - 2): # 枚举第一个元素
if first > 0 and nums[first] == nums[first - 1]: continue # 保证first不会有重复
second, third = first + 1, n - 1
max_sum = nums[first] + nums[-2] + nums[-1]
min_sum = nums[first] + nums[first + 1] + nums[first + 2]
if max_sum <= target: # 最大的数
if abs(max_sum - target) < abs(ans - target):
ans = max_sum
continue
elif min_sum >= target: # 最小的数
if abs(min_sum - target) < abs(ans - target):
ans = min_sum
break
while second < third:
two_sum_target = target - nums[first]
s = nums[second] + nums[third]
if abs(s + nums[first] - target) < abs(ans - target):
ans = s + nums[first]
if s > two_sum_target: # 当前数值太大 右指针左移
third -= 1
while third > second and nums[third] == nums[third + 1]:
third -= 1
elif s < two_sum_target: # 当前数值太小 左指针右移
second += 1
while third > second and nums[second] == nums[second - 1]:
second += 1
else: # 刚好等于 直接返回target即可
return target
return ans
def fourSum(self, nums, target: int):
pass
from typing import List
def findCircleNum(self, M: List[List[int]]) -> int:
N = len(M)
count = 0
visited = set()
def dfs(i):
for j in range(N):
if M[i][j] and j not in visited:
visited.add(j)
dfs(j)
for i in range(N):
if i not in visited:
count += 1
visited.add(i)
dfs(i)
return count
class UnionFind(object):
"""并查集类"""
def findCircleNum(self, isConnected: List[List[int]]) -> int:
self.uf = [-1 for i in range(len(isConnected) + 1)] # 列表0位置空出
self.sets_count = len(isConnected) # 判断并查集里共有几个集合, 初始化默认互相独立
for i in range(len(isConnected)):
for j in range(i, len(isConnected[i])):
if i == j or isConnected[i][j] == 0:
continue
else:
# print("union:", i, j)
self.union(i, j)
# print(self.sets_count)
# print(self.uf)
return self.sets_count
def find(self, p):
"""尾递归"""
if self.uf[p] < 0:
return p
self.uf[p] = self.find(self.uf[p])
return self.uf[p]
def union(self, p, q):
"""连通p,q 让q指向p"""
proot = self.find(p)
qroot = self.find(q)
if proot == qroot:
return
elif self.uf[proot] > self.uf[qroot]: # 负数比较, 左边规模更小
self.uf[qroot] += self.uf[proot]
self.uf[proot] = qroot
else:
self.uf[proot] += self.uf[qroot] # 规模相加
self.uf[qroot] = proot
self.sets_count -= 1 # 连通后集合总数减一
def is_connected(self, p, q):
"""判断pq是否已经连通"""
return self.find(p) == self.find(q) # 即判断两个结点是否是属于同一个祖先
def rotate(self, nums: List[int], k: int) -> None:
n = len(nums)
k = k % n
if k == 0: return nums
for i in range(k):
last = nums[-1]
for i in range(n -1, 0, -1):
nums[i] = nums[i-1]
nums[0] = last
def rotate2(self, nums: List[int], k: int) -> None:
n = len(nums)
k = k % n
| |
beta=0)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
cl1 = clahe.apply(cvuint8)
ret1, th1 = cv2.threshold(cl1, bglb, 255, cv2.THRESH_TOZERO)
ret1, th1 = cv2.threshold(th1, bgub, 255, cv2.THRESH_TOZERO_INV)
th1 = th1.astype('uint8')
#cv2.imwrite(saveName+"_clahe_thres128BG.png", th1)
gb = cv2.GaussianBlur(th1, (1, 1), 0)
#cv2.imwrite(saveName+"_clahe_thres128BG_gaussianBlur.png", gb)
ret2, th2 = cv2.threshold(gb, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
if saveTofile:
cv2.imwrite(saveName+"_clahe.png", cl1)
cv2.imwrite(saveName+"_clahe_thres128BG_gaussianBlur_binaOTSU.png", th2)
img_gray = cv2.cvtColor(th2, cv2.COLOR_GRAY2BGR)
cl1_gray = cv2.cvtColor(cl1, cv2.COLOR_GRAY2BGR)
return img,cl1,img_gray,cl1_gray,th2
###Line detection from connected components
def detectLineFromCCs(image,labels, font, fontScale,
fontColor, lineType,
ratioUB=10, ratioLB=3,
areaUB=2000, areaLB=80,
angleM=None,angleStd=20,
pixelLB=75,pixelUB=150,
lineWidthUB=3,printText=False):
cvuint8 = cv2.convertScaleAbs(image)
unfilted = cv2.convertScaleAbs(image)
areas = list()
lines = list()
filteredLines = list()
lines_detailed = list()
filteredLines_detailed = list()
### Iterate from all connected components
for labelindex in range(1, labels.max()+1):
# for labelindex in range(15,16):
cnt = np.nonzero(labels == labelindex)
        # keep only components whose pixel count falls inside [pixelLB, pixelUB]
        n_pixels = np.sum(labels == labelindex)
        if n_pixels < pixelLB or n_pixels > pixelUB:
continue
pnts = np.zeros((len(cnt[0]), 2), dtype='float32')
for j in range(len(cnt[0])):
pnts[j, 0] = cnt[1][j]
pnts[j, 1] = cnt[0][j]
rect = cv2.minAreaRect(pnts)
w = rect[1][0]
h = rect[1][1]
angle = rect[2]
if w < h:
angle = int(90 - angle)
else:
angle = -int(angle)
###filter smaller lines
#if w * h < 10: # and w*h>0:
# continue
# if w*h >2000:
# continue
areas.append(w * h)
# print(w,h,w*h)
length_rect = max([w, h])
box = cv2.cv2.boxPoints(rect) # cv2.boxPoints(rect) for OpenCV 3.x
box = np.int0(box)
if w > h:
startpoint = ((box[0] + box[1]) / 2)
endpoint = ((box[2] + box[3]) / 2)
else:
startpoint = ((box[0] + box[3]) / 2)
endpoint = ((box[1] + box[2]) / 2)
###change row and cols
###save lines (startpoint, endpoint, line length and line angle)
lines.append(
[startpoint, endpoint, length_rect, angle])
bwidth=min(w,h)
bheight=max(w,h)
if bwidth>0:
ratio=bheight/bwidth
else:
ratio=0
lines_detailed.append([startpoint[0],startpoint[1], endpoint[0],endpoint[1], length_rect, angle,bwidth,bheight, labelindex])
cv2.line(unfilted, (int(startpoint[0]), int(startpoint[1])), (int(endpoint[0]), int(endpoint[1])),
COLOR_GREEN, lineType)
if printText:
cv2.putText(unfilted, str(int(length_rect))+',' +str(int(bwidth)) + "," + str(int(angle)) + "," + '%.2f' % ratio,
(box[0][0], box[0][1]), font, fontScale,
fontColor, lineType)
cv2.drawContours(unfilted, [box], 0, COLOR_CYAN, 1)
if bheight < ratioLB * bwidth:
continue
if bheight > ratioUB * bwidth:
continue
if w * h < areaLB: # and w*h>0:
continue
if w * h > areaUB:
continue
if bwidth > lineWidthUB:
continue
if angleM is not None:
if (angle> angleM+angleStd) or (angle< angleM-angleStd) :
continue
###draw filtered and unmerged lines on individual frame
filteredLines.append(
[startpoint, endpoint,int(length_rect), int(angle)])
filteredLines_detailed.append(
[startpoint[0], startpoint[1], endpoint[0], endpoint[1], length_rect, angle,bwidth,bheight, labelindex])
cv2.line(cvuint8, (int(startpoint[0]), int(startpoint[1])), (int(endpoint[0]), int(endpoint[1])),
COLOR_GREEN, lineType)
if printText:
cv2.putText(cvuint8, str(int(length_rect))+',' +str(int(bwidth))+ "," + str(int(angle))+"," + '%.2f' % ratio , (box[0][0], box[0][1]), font, fontScale,
fontColor, lineType)
#linelength = cv2.norm(startpoint - endpoint)
# cv2.putText(cvuint8, str(int(length_rect)) + "," + str(int(angle)), (box[0][0], box[0][1]), font, fontScale, fontColor, lineType)
# [intX, intY, intW, intH]=cv2.boundingRect((labels == i).astype('uint8'))
# if intW*intH >100:
# cv2.rectangle(cvuint8, (intX, intY), (intX+intW, intY+intH), (0, 255, 0), 1)
#lines=np.array(lines)
return cvuint8,lines,filteredLines,unfilted, np.array(lines_detailed), np.array(filteredLines_detailed)
## input
## lines : raw lines detected
## image : original image
## outline: outline of the ROI
## dist_thres (in pixels): merge lines only within this distance threshold
##  pixel_density_thres (0 to 255): to connect the two lines, determine valid pixels between them
##  line_density_thres (0 to 1): to connect the two lines, determine the valid pixel ratio over the total distance between lines
## output
##
def mergeLines(lines,image,outline,dist_thres=100,pixel_density_thres=60,line_density_thres=0.6):
#print(lines)
merged = list()
mergedLines = list()
lineInd = dict()
curInd = 0
cvuint8 = cv2.convertScaleAbs(image)
for l1 in range(len(lines)):
for l2 in range(l1+1,len(lines)):
if l1 != l2:
p11 = lines[l1][0]
p12 = lines[l1][1]
p21 = lines[l2][0]
p22 = lines[l2][1]
dist = min(cv2.norm(p11 - p21), cv2.norm(p11 - p22), cv2.norm(p12 - p21), cv2.norm(p12 - p22))
nodepair = np.argmin(
(cv2.norm(p11 - p21), cv2.norm(p11 - p22), cv2.norm(p12 - p21), cv2.norm(p12 - p22)))
if dist > dist_thres:
continue
if nodepair == 0:
p1 = p11
p2 = p21
p3 = p12
p4 = p22
if nodepair == 1:
p1 = p11
p2 = p22
p3 = p12
p4 = p21
if nodepair == 2:
p1 = p12
p2 = p21
p3 = p11
p4 = p22
if nodepair == 3:
p1 = p12
p2 = p22
p3 = p11
p4 = p21
lineIter = SubFun.createLineIterator(p1.astype("int"), p2.astype("int"), cvuint8[:, :, 0])
###check if the two lines are loosely connected in the original image
if (sum(lineIter[:, 2] > pixel_density_thres) / (lineIter.shape[0] + 1)) > line_density_thres:
merged.append([l1, l2])
if l1 in lineInd:
lineInd[l2] = lineInd[l1]
elif l2 in lineInd:
lineInd[l1] = lineInd[l2]
else:
# if l1 not in lineInd and l2 not in lineInd:
lineInd[l1] = curInd
lineInd[l2] = curInd
curInd += 1
linelength = cv2.norm(p3, p4)
density = int(255 / 200 * linelength)
###print merged lines on individual frame
cv2.line(cvuint8, (int(p3[0]), int(p3[1])),
(int(p4[0]), int(p4[1])), (0, 255,), 1)
###print merged lines on all frames
#cv2.line(outline, (int(p3[0]), int(p3[1])),
# (int(p4[0]), int(p4[1])), (0, density,), 1)
rect = cv2.minAreaRect(np.matrix((p3,p4),dtype='float32'))
w = rect[1][0]
h = rect[1][1]
angle = rect[2]
if w < h:
angle = int(90 - angle)
else:
angle = -int(angle)
mergedLines.append([p3,p4,linelength,angle])
#merged = np.array(merged)
#mergedLines = np.array(mergedLines)
return merged,mergedLines, cvuint8
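# Illustrative sketch (not part of the original file): merging two nearly collinear segments.
# The arrays below are stand-ins for the binarized frame and the ROI outline that real callers
# pass in, and the call relies on this module's own numpy/OpenCV/SubFun imports.
def _demo_merge_lines():
    image = np.zeros((100, 100, 3), dtype=np.uint8)
    outline = np.zeros((100, 100, 3), dtype=np.uint8)
    cv2.line(image, (10, 10), (60, 10), (255, 255, 255), 1)   # bright pixels bridging the gap
    p1, p2 = np.array([10.0, 10.0]), np.array([30.0, 10.0])
    p3, p4 = np.array([35.0, 10.0]), np.array([60.0, 10.0])
    lines = [[p1, p2, 20, 0], [p3, p4, 25, 0]]                # [startpoint, endpoint, length, angle]
    merged_idx, merged_lines, vis = mergeLines(lines, image, outline,
                                               dist_thres=100,
                                               pixel_density_thres=60,
                                               line_density_thres=0.6)
    return merged_lines, vis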
def minDist(startpoint,endpoint,point):
dnorm = np.linalg.norm(np.cross(endpoint - startpoint, startpoint - point)) / np.linalg.norm(
endpoint - startpoint)
dstart = np.linalg.norm(startpoint - point)
dend = np.linalg.norm(endpoint - point)
    # scalar projection of the point onto the segment direction, in the same units as the segment length
    projection = np.dot(endpoint - startpoint, point - startpoint) / np.linalg.norm(endpoint - startpoint)
#print(projection,np.linalg.norm(endpoint-startpoint),dstart,dend,dnorm)
if projection < 0 or projection > np.linalg.norm(endpoint-startpoint):
return np.min([dstart,dend])
else:
return dnorm
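# Illustrative sketch (not part of the original file): minDist() returns the perpendicular
# distance when the point projects onto the segment, otherwise the distance to the nearer endpoint.
def _demo_min_dist():
    start, end = np.array([0.0, 0.0]), np.array([10.0, 0.0])
    on_segment = minDist(start, end, np.array([5.0, 3.0]))    # projects inside the segment -> 3.0
    off_segment = minDist(start, end, np.array([-4.0, 3.0]))  # falls before the start -> 5.0
    return on_segment, off_segment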
def findConvexHull(outline):
ret, outline = cv2.threshold(outline, 50, 255, cv2.THRESH_BINARY)
ol = outline[:, :, 0].copy()
im2, contours, hierarchy = cv2.findContours(ol, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
hull = []
# calculate points for each contour
for i in range(len(contours)):
# creating convex hull object for each contour
hull.append(cv2.convexHull(contours[i], True))
for p in contours[i]:
outline[p[0][1], p[0][0], 1] = 255
drawing = np.zeros((outline.shape[0], outline.shape[1], 3), np.uint8)
# draw contours and hull points
for i in range(len(contours)):
color_contours = (0, 255, 0) # green - color for contours
color = (255, 0, 0) # blue - color for convex hull
# draw ith contour
cv2.drawContours(drawing, contours, i, color_contours, 1, 8, hierarchy)
# draw ith convex hull object
cv2.drawContours(drawing, hull, i, color, 1, 8)
def createLineIterator(P1, P2, img):
imageH = img.shape[0]
imageW = img.shape[1]
P1X = P1[0]
P1Y = P1[1]
P2X = P2[0]
P2Y = P2[1]
# print(imageH,imageW,P1X,P1Y,P2X,P2Y)
# difference and absolute difference between points
# used to calculate slope and relative location between points
dX = P2X - P1X
dY = P2Y - P1Y
dXa = np.abs(dX)
dYa = np.abs(dY)
# predefine np array for output based on distance between points
itbuffer = np.empty(shape=(np.maximum(dYa, dXa), 3), dtype=np.float32)
itbuffer.fill(np.nan)
# Obtain coordinates along the line using a form of Bresenham's algorithm
negY = P1Y > P2Y
negX = P1X > P2X
if P1X == P2X: # vertical line segment
itbuffer[:, 0] = P1X
if negY:
itbuffer[:, 1] = np.arange(P1Y - 1, P1Y - dYa - 1, -1)
else:
itbuffer[:, 1] = np.arange(P1Y + 1, P1Y + dYa + 1)
elif P1Y == P2Y: # horizontal line segment
itbuffer[:, 1] = P1Y
if negX:
itbuffer[:, 0] = np.arange(P1X - 1, P1X - dXa - 1, -1)
else:
itbuffer[:, 0] = np.arange(P1X + 1, P1X + dXa + 1)
else: # diagonal line segment
steepSlope = dYa > dXa
if steepSlope:
slope = dX.astype(np.float32) / dY.astype(np.float32)
if negY:
itbuffer[:, 1] = np.arange(P1Y - 1, P1Y - dYa - 1, -1)
else:
itbuffer[:, 1] = np.arange(P1Y + 1, P1Y + dYa + 1)
itbuffer[:, 0] = (slope * (itbuffer[:, 1] - P1Y)).astype(np.int) + P1X
else:
slope = dY.astype(np.float32) / dX.astype(np.float32)
if negX:
itbuffer[:, 0] = np.arange(P1X - 1, P1X - dXa - 1, -1)
else:
itbuffer[:, 0] = np.arange(P1X + 1, P1X + dXa + 1)
itbuffer[:, 1] = (slope * (itbuffer[:, 0] - P1X)).astype(np.int) + P1Y
# Remove points outside of image
colX = itbuffer[:, 0]
colY = itbuffer[:, 1]
itbuffer = itbuffer[(colX >= 0) & (colY >= 0) & (colX < imageW) & (colY < imageH)]
# Get intensities from img ndarray
itbuffer[:, 2] = img[itbuffer[:, 1].astype(np.uint), itbuffer[:, 0].astype(np.uint)]
return itbuffer
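# Illustrative sketch (not part of the original file): sampling the pixel intensities that lie
# between two points of a grayscale image with the iterator above.
def _demo_line_iterator():
    img = np.zeros((50, 50), dtype=np.uint8)
    cv2.line(img, (5, 5), (40, 30), 255, 1)
    samples_on_line = createLineIterator(np.array([5, 5]), np.array([40, 30]), img)
    # each row is (x, y, intensity); most intensities are 255 because they sit on the drawn line
    return samples_on_line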
def collectLinesOnBoundary(lines,edgepoint,distThres=300):
collectLinesOnBoundary | |
    # aratio1 = c/a this gives c = aratio1*a
# aratio2 = b/a this gives b = aratio2*a
# volume = 4/3 pi a*b*c for an ellipsoid
vol = 1*aratio1*aratio2
rad_cor = pow(vol,-1./3.)
v[:,2] *= aratio1*rad_cor # make oblate, adjusts z coords
v[:,1] *= aratio2*rad_cor # make elongated in xy plane , adjusts y coords
v[:,0] *= rad_cor # adjusts x coords
# volume should now stay the same
sub_com(v) # subtract center of mass from vertex positions
sbody = pymesh.form_mesh(v, f)
sbody.add_attribute("face_area")
sbody.add_attribute("face_normal")
sbody.add_attribute("face_centroid")
return sbody
# substract the center of mass from a list of vertices
def sub_com(v):
nv = len(v)
xsum = np.sum(v[:,0])
ysum = np.sum(v[:,1])
zsum = np.sum(v[:,2])
xmean = xsum/nv
ymean = ysum/nv
zmean = zsum/nv
v[:,0]-= xmean
v[:,1]-= ymean
v[:,2]-= zmean
# compute surface area by summing area of all facets
# divide by 4pi which is the surface area of a sphere with radius 1
def surface_area(mesh):
#f = mesh.faces
S_i = mesh.get_face_attribute('face_area')
area =np.sum(S_i)
return area/(4*np.pi)
# print number of faces
def nf_mesh(mesh):
f = mesh.faces
print('number of faces ',len(f))
# In[4]:
# meshplot with a bounding box
def plt_mesh(vertices,faces,xmax):
m = np.array([-xmax,-xmax,-xmax])
ma = np.abs(m)
# Corners of the bounding box
v_box = np.array([[m[0], m[1], m[2]], [ma[0], m[1], m[2]], [ma[0], ma[1], m[2]], [m[0], ma[1], m[2]],
[m[0], m[1], ma[2]], [ma[0], m[1], ma[2]], [ma[0], ma[1], ma[2]], [m[0], ma[1], ma[2]]])
# Edges of the bounding box
f_box = np.array([[0, 1], [1, 2], [2, 3], [3, 0], [4, 5], [5, 6], [6, 7],
[7, 4], [0, 4], [1, 5], [2, 6], [7, 3]], dtype=np.int)
p = meshplot.plot(vertices, faces, return_plot=True) # plot body
p.add_edges(v_box, f_box, shading={"line_color": "red"});
#p.add_points(v_box, shading={"point_color": "green"})
return p
# meshplot with a bounding square
def plt_mesh_square(vertices,faces,xmax):
m = np.array([-xmax,-xmax,-xmax])
ma = np.abs(m)
# Corners of the bounding box
v_box = np.array([[-xmax, -xmax, 0], [-xmax, xmax, 0], [xmax, xmax,0] , [xmax, -xmax, 0]])
# Edges of the bounding box
f_box = np.array([[0, 1], [1, 2], [2, 3], [3, 0]], dtype=np.int)
p = meshplot.plot(vertices, faces, return_plot=True) # plot body
p.add_edges(v_box, f_box, shading={"line_color": "red"});
#p.add_points(v_box, shading={"point_color": "green"})
return p
# perform a rotation on a vertex list and return a new set of rotated vertices
# rotate about axis and via angle in radians
def rotate_vertices(vertices,axis,angle):
qs = pymesh.Quaternion.fromAxisAngle(axis, angle)
v = np.copy(vertices)
nv = len(v)
# loop over all vertices and do two rotations
for i in range(nv):
v[i] = qs.rotate(v[i]) # perform rotation
return v
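# Illustrative sketch (not part of the original notebook): rotating a small vertex set by
# 90 degrees about the z axis with the helper above.
def _demo_rotate_vertices():
    verts = np.array([[1.0, 0.0, 0.0],
                      [0.0, 1.0, 0.0]])
    zaxis = np.array([0.0, 0.0, 1.0])
    rotated = rotate_vertices(verts, zaxis, np.pi/2)
    return rotated   # approximately [[0, 1, 0], [-1, 0, 0]]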
# In[5]:
# compute the volume of the tetrahedron formed from face with index iface
# and the origin
def vol_i(mesh,iface):
f = mesh.faces
v = mesh.vertices
iv1 = f[iface,0] # indexes of the 3 vertices
iv2 = f[iface,1]
iv3 = f[iface,2]
#print(iv1,iv2,iv3)
v1 = v[iv1] # the 3 vertices
v2 = v[iv2]
v3 = v[iv3]
#print(v1,v2,v3)
mat = np.array([v1,v2,v3])
# the volume is equal to 1/6 determinant of the matrix formed with the three vertices
# https://en.wikipedia.org/wiki/Tetrahedron
#print(mat)
vol = np.linalg.det(mat)/6.0 # compute determinant
return vol
# compute the volume of the mesh by looping over all tetrahedrons formed from the faces
# we assume that the body is convex
def volume_mesh(mesh):
f = mesh.faces
nf = len(f)
vol = 0.0
for iface in range(nf):
vol += vol_i(mesh,iface)
return vol
# if vol equ radius is 1 the volume should be equal to 4*np.pi/3 which is 4.1888
# tests
#vi = vol_i(squannit,1)
#print(vi)
#vtot = volume_mesh(squannit)
#print(vtot)
# correct all the radii so that the volume becomes that of a sphere with radius 1
# return a new mesh
def cor_volume(mesh):
vol = volume_mesh(mesh)
print('Volume {:.4f}'.format(vol))
rad = pow(vol*3/(4*np.pi),1.0/3.0)
print('radius of vol equ sphere {:.4f}'.format(rad))
f = mesh.faces
v = np.copy(mesh.vertices)
v /= rad
newmesh = pymesh.form_mesh(v, f)
newmesh.add_attribute("face_area")
newmesh.add_attribute("face_normal")
newmesh.add_attribute("face_centroid")
vol = volume_mesh(newmesh)
print('new Volume {:.3f}'.format(vol))
return newmesh
# In[6]:
# compute the radiation force instantaneously on a triangular mesh for each facit
# arguments:
# mesh, the body (a triangular surface mesh)
# s_hat is a 3 length np.array (a unit vector) pointing to the Sun
# return the vector F_i for each facet
# returns: F_i_x is the x component of F_i and is a vector that has the length of the number of faces
# Force is zero if facets are not on the day side
def F_i(mesh,s_hat):
s_len = np.sqrt(s_hat[0]**2 + s_hat[1]**2 + s_hat[2]**2) # in case s_hat was not normalized
#nf = len(mesh.faces)
S_i = mesh.get_face_attribute('face_area') # vector of facet areas
f_normal = mesh.get_face_attribute('face_normal') # vector of vector of facet normals
# normal components
nx = np.squeeze(f_normal[:,0]) # a vector, of length number of facets
ny = np.squeeze(f_normal[:,1])
nz = np.squeeze(f_normal[:,2])
# dot product of n_i and s_hat
n_dot_s = (nx*s_hat[0] + ny*s_hat[1] + nz*s_hat[2])/s_len # a vector
F_i_x = -S_i*n_dot_s*nx # a vector, length number of facets
F_i_y = -S_i*n_dot_s*ny
F_i_z = -S_i*n_dot_s*nz
ii = (n_dot_s <0) # the night sides
F_i_x[ii] = 0 # get rid of night sides
F_i_y[ii] = 0
F_i_z[ii] = 0
return F_i_x,F_i_y,F_i_z # these are each vectors for each face
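# Illustrative sketch (not part of the original notebook): summing the per-facet radiation
# forces for a Sun direction along +x. "body" is assumed to be a pymesh mesh that already has
# the face_area/face_normal/face_centroid attributes added, as in the helpers above.
def _demo_net_radiation_force(body):
    s_hat = np.array([1.0, 0.0, 0.0])          # unit vector pointing at the Sun
    fx, fy, fz = F_i(body, s_hat)              # per-facet components, night side already zeroed
    return np.sum(fx), np.sum(fy), np.sum(fz)  # net radiation force on the body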
# compute radiation forces F_i for each face, but averaging over all positions of the Sun
# a circular orbit for the asteroid is assumed
# arguments:
# nphi_Sun is the number of solar angles, evenly spaced in 2pi so we are assuming circular orbit
# incl is solar orbit inclination in radians
# returns: F_i_x average and other 2 components of forces for each facet
def F_i_sun_ave(mesh,nphi_Sun,incl):
dphi = 2*np.pi/nphi_Sun
# compute the first set of forces so we have vectors the right length
phi = 0.0
s_hat = np.array([np.cos(phi)*np.cos(incl),np.sin(phi)*np.cos(incl),np.sin(incl)])
# compute the radiation force instantaneously on the triangular mesh for sun at s_hat
F_i_x_sum,F_i_y_sum,F_i_z_sum = F_i(mesh,s_hat)
# now compute the forces for the rest of the solar angles
for i in range(1,nphi_Sun): # do the rest of the angles
phi = i*dphi
s_hat = np.array([np.cos(phi)*np.cos(incl),np.sin(phi)*np.cos(incl),np.sin(incl)])
# compute the radiation force instantaneously on the triangular mesh for sun at s_hat
F_i_x,F_i_y,F_i_z = F_i(mesh,s_hat) # These are vectors of length number of facets
F_i_x_sum += F_i_x # sum up forces
F_i_y_sum += F_i_y
F_i_z_sum += F_i_z
F_i_x_ave = F_i_x_sum/nphi_Sun # average
F_i_y_ave = F_i_y_sum/nphi_Sun
F_i_z_ave = F_i_z_sum/nphi_Sun
return F_i_x_ave,F_i_y_ave,F_i_z_ave # these are vectors for each face
# compute cross product C=AxB using components
def cross_prod_xyz(Ax,Ay,Az,Bx,By,Bz):
Cx = Ay*Bz - Az*By
Cy = Az*Bx - Ax*Bz
Cz = Ax*By - Ay*Bx
return Cx,Cy,Cz
# compute total Yorp torque averaging over nphi_Sun solar positions
# this is at a single body orientation
# a circular orbit is assumed
# arguments:
# mesh: the body
# nphi_Sun is the number of solar angles
# incl is solar orbit inclination in radians
# returns: torque components
def tau_Ys(mesh,nphi_Sun,incl):
# compute F_i for each face, but averaging over all positions of the Sun
F_i_x_ave, F_i_y_ave,F_i_z_ave = F_i_sun_ave(mesh,nphi_Sun,incl)
r_i = mesh.get_face_attribute("face_centroid") # radii to each facet
rx = np.squeeze(r_i[:,0]) # radius of centroid from center of mass
ry = np.squeeze(r_i[:,1]) # these are vectors, length number of faces
rz = np.squeeze(r_i[:,2])
# cross product works on vectors
tau_i_x,tau_i_y,tau_i_z = cross_prod_xyz(rx,ry,rz,F_i_x_ave,F_i_y_ave,F_i_z_ave)
#This is the torque from each day lit facet
tau_x = np.sum(tau_i_x) # sum up forces from all faces
tau_y = np.sum(tau_i_y)
tau_z = np.sum(tau_i_z)
return tau_x,tau_y,tau_z # these are numbers for torque components
# compute total BYORP averaging over nphi_Sun solar positions
# for a single binary vector a_bin and body position described with mesh
# arguments:
# incl is solar orbit inclination in radians
# nphi_Sun is the number of solar angles
# returns: torque components
def tau_Bs(mesh,nphi_Sun,incl,a_bin):
# compute F_i for each face, but averaging over all positions of the Sun
F_i_x_ave, F_i_y_ave,F_i_z_ave = F_i_sun_ave(mesh,nphi_Sun,incl) # these are vectors length number of faces
# forces from day lit faces
F_x = np.sum(F_i_x_ave) #sum up the force
F_y = np.sum(F_i_y_ave)
F_z = np.sum(F_i_z_ave)
a_x = a_bin[0] # binary direction
a_y = a_bin[1]
a_z = a_bin[2]
tau_x,tau_y,tau_z = cross_prod_xyz(a_x,a_y,a_z,F_x,F_y,F_z) # cross product
return tau_x,tau_y,tau_z # these are numbers that give the torque components
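# Illustrative sketch (not part of the original notebook): evaluating the Sun-averaged YORP and
# BYORP torques for one body orientation. "body" is again an assumed mesh with face attributes;
# a_bin here points along +x toward the binary companion.
def _demo_torques(body, nphi_Sun=36, incl=0.0):
    yorp = tau_Ys(body, nphi_Sun, incl)                            # (tau_x, tau_y, tau_z)
    byorp = tau_Bs(body, nphi_Sun, incl, np.array([1.0, 0.0, 0.0]))
    return yorp, byorp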
# In[7]:
# first rotate vertices in the mesh about the z axis | |
#Copyright (c) Microchip. All rights reserved.
#Import needed python packages
import sys
import time #For delays and getting current time
import smbus
from array import array #For handling samples
import statistics #For signal filtration
import MAX30100_definitions as MAX30100 #MAX30100 registers definitions
#Python SMBUS for I2C connection to click board
SMBUS_ID = 2 #Bus ID (1 or 2)
bus = smbus.SMBus(SMBUS_ID) #Bus on which click board is connected
#Define needed variables
ALPHA = 0.95 #For DC filter
dcw = 0
old_dcw = 0
bw_filter_val0 = 0
bw_filter_val1 = 0
index = 0
SAMPLES_BUFFER_SIZE = 500
samples = []
timestamps = []
bpm = []
peaks = 0
peak_det = 0
peak0_timestamp = 0
peak1_timestamp = 0
BPM_MAGIC_THRESH = 40 #Magic threshold for counting a pulse as a peak
RET_ERROR = -1
RET_SUCCESS = 0
#Each sample is 4 bytes; 0,1 for IR LED and 2,3 for red LED
class SAMPLE:
def __init__(self):
self.ir = -1
self.red = -1
class HEART_RATE:
def __init__(self):
self.beats = []
self.bpm = 0
#Reporting error handler
#For INTERNAL USE by the module and shouldn't be exported.
def report_error(err):
print ("ERR: " + err)
#Read status register
#For INTERNAL USE by the module and shouldn't be exported.
def get_status():
status = bus.read_byte_data(MAX30100.I2C_ADR, MAX30100.INT_STATUS)
return status
#Read MAX30100 FW version
#For INTERNAL USE by the module and shouldn't be exported.
def get_revision_ID():
rev_id = bus.read_byte_data(MAX30100.I2C_ADR, MAX30100.REVISION_ID)
return rev_id
#Read MAX30100 part ID
#For INTERNAL USE by the module and shouldn't be exported.
def get_part_ID():
part_id = bus.read_byte_data(MAX30100.I2C_ADR, MAX30100.PART_ID)
return part_id
#Reset
#For INTERNAL USE by the module and shouldn't be exported.
def reset():
timeout = 10
mode_config = bus.read_byte_data(MAX30100.I2C_ADR, MAX30100.MODE_CONFIG)
mode_config = mode_config | (1 << 6)
bus.write_byte_data(MAX30100.I2C_ADR, MAX30100.MODE_CONFIG, mode_config)
while timeout:
mode_config = bus.read_byte_data(MAX30100.I2C_ADR, MAX30100.MODE_CONFIG)
if (mode_config & 0x40) == 0:
break
else:
time.sleep(0.01)
timeout -= 1
if timeout == 0:
report_error("board reset")
return RET_ERROR
time.sleep(0.05)
return RET_SUCCESS
#initialize
#For INTERNAL USE by the module and shouldn't be exported.
def initialize():
timeout = 20
config = bus.read_byte_data(MAX30100.I2C_ADR, MAX30100.MODE_CONFIG)
config = (config & ~0x07) | MAX30100.TEMP_EN
config = (config & ~0x07) | MAX30100.SPO2_EN
bus.write_byte_data(MAX30100.I2C_ADR, MAX30100.MODE_CONFIG, config)
config = bus.read_byte_data(MAX30100.I2C_ADR, MAX30100.SPO2_CONFIG)
#config |= MAX30100.SPO2_HI_RES_EN
config |= MAX30100.SAMPLES_100
config |= MAX30100.PULSE_WIDTH_1600
bus.write_byte_data(MAX30100.I2C_ADR, MAX30100.SPO2_CONFIG, config)
config = bus.read_byte_data(MAX30100.I2C_ADR, MAX30100.LED_CONFIG)
config |= MAX30100.IR_CURRENT_500
config |= MAX30100.RED_CURRENT_500
bus.write_byte_data(MAX30100.I2C_ADR, MAX30100.LED_CONFIG, config)
config = bus.read_byte_data(MAX30100.I2C_ADR, MAX30100.INT_ENABLE)
config |= MAX30100.ENA_A_FULL
config |= MAX30100.ENA_HR
#config |= MAX30100.ENA_SO2
config |= MAX30100.ENA_TEMP
bus.write_byte_data(MAX30100.I2C_ADR, MAX30100.INT_ENABLE, config)
while timeout:
status = get_status()
if (status & MAX30100.A_FULL) == MAX30100.A_FULL:
break
else:
time.sleep(0.01)
timeout -= 1
if timeout == 0:
report_error("board init")
return RET_ERROR
bus.write_byte_data(MAX30100.I2C_ADR, MAX30100.FIFO_WRITE_PTR, 0)
bus.write_byte_data(MAX30100.I2C_ADR, MAX30100.OVER_FLOW_CNT, 0)
bus.write_byte_data(MAX30100.I2C_ADR, MAX30100.FIFO_READ_PTR, 0)
return RET_SUCCESS
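#Illustrative sketch (not part of the original driver): the usual bring-up order is reset()
#followed by initialize(); both return RET_SUCCESS (0) on success or RET_ERROR (-1) on timeout.
def demo_bringup():
    if reset() != RET_SUCCESS:
        return RET_ERROR
    return initialize()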
#Remove DC component from a sample
#For INTERNAL USE by the module and shouldn't be exported.
def dc_remove(sample) :
global dcw
global old_dcw
dcw = sample + ALPHA*old_dcw
sample_dc_filtered = dcw - old_dcw
old_dcw = dcw
return sample_dc_filtered
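#Illustrative sketch (not part of the original driver): dc_remove() is the one-pole DC blocker
#w[n] = x[n] + ALPHA*w[n-1], y[n] = w[n] - w[n-1], so a constant input decays toward zero.
#Note that calling it updates the module-level filter state (dcw/old_dcw).
def demo_dc_remove():
    steady = [dc_remove(1000) for _ in range(200)]
    return steady[-1]    #close to 0 once the DC level has been learned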
#Low pass Butterworth filter
#For INTERNAL USE by the module and shouldn't be exported.
def low_pass_butterworth_filter(sample) :
global bw_filter_val0
global bw_filter_val1
bw_filter_val0 = bw_filter_val1
#Fs = 100Hz and Fc = 10Hz
bw_filter_val1 = (2.452372752527856026e-1 * sample) + (0.50952544949442879485 * bw_filter_val0)
sample_bw_filtered = bw_filter_val0 + bw_filter_val1
return sample_bw_filtered
#Locate the peaks
#For INTERNAL USE by the module and shouldn't be exported.
def peak_detect(samples_arr) :
peaks = 0
peak_det = 0
peaks_idxs = []
i = 1
j = 0
while i < len(samples_arr):
curr = samples_arr[i]
prev = samples_arr[i-1]
#print (str(curr))
if curr > BPM_MAGIC_THRESH:
if curr < prev:
if peak_det == 0:
peak_det = 1
peaks += 1
peaks_idxs.append(i)
elif curr < 0:
peak_det = 0
i += 1
return peaks_idxs
def process_peaks() :
global samples
global timestamps
global peaks
global peak_det
global peak0_timestamp
global peak1_timestamp
curr_bpm = 0
i = 1
while i < len(samples):
if samples[i] > BPM_MAGIC_THRESH:
if samples[i] < samples[i-1]:
if peak_det == 0:
peak_det = 1
if peaks == 0:
peak0_timestamp = timestamps[i-1]
elif peaks == 1:
peak1_timestamp = timestamps[i-1]
peaks += 1
elif samples[i] < 0:
peak_det = 0
if peaks == 2:
diff = peak1_timestamp - peak0_timestamp
if diff != 0:
curr_bpm = 60000/diff
print (">>>> " + str(curr_bpm) + " .. " + str(len(bpm)))
peaks = 1
peak0_timestamp = peak1_timestamp
i += 1
if len(bpm) == 10:
bpm.pop(0)
bpm.append(curr_bpm)
#Set Red LED current
#Input:
# Value 0 -> 15
# Size: 4 bits
# Mask [RED_CURRENT_MASK]: 0Xxxxx0000
#For INTERNAL USE by the module and shouldn't be exported.
def set_red_led_current(level):
''' - Set red LED current and hence intensity level
- "level" value should vary between 0 (lowest) and 15 (highest)
- Red led current affects measurement of SPO2 in SPO2 mode'''
if level < (MAX30100.RED_CURRENT_0 >> 4) or level > (MAX30100.RED_CURRENT_500 >> 4):
report_error("Red LED level set")
return RET_ERROR
else:
led_config = bus.read_byte_data(MAX30100.I2C_ADR, MAX30100.LED_CONFIG)
led_config &= 0x0F
led_config |= level << 4
bus.write_byte_data(MAX30100.I2C_ADR, MAX30100.LED_CONFIG, led_config)
return RET_SUCCESS
#Set IR LED current
#Input:
# Value 0 -> 15
# Size: 4 bits
# Mask [IR_CURRENT_MASK]: 0X0000xxxx
#For INTERNAL USE by the module and shouldn't be exported.
def set_ir_led_current(level):
if level < MAX30100.IR_CURRENT_0 or level > MAX30100.IR_CURRENT_500:
report_error("IR LED level set")
return RET_ERROR
else:
led_config = bus.read_byte_data(MAX30100.I2C_ADR, MAX30100.LED_CONFIG)
led_config &= 0xF0
led_config |= level
bus.write_byte_data(MAX30100.I2C_ADR, MAX30100.LED_CONFIG, led_config)
return RET_SUCCESS
#Set SPO2 sampling rate
#Input:
# Value: Sampling rate value 0 -> 7
# Size: 3 bits
# Mask [SAMPLES_MASK]: 0X000xxx00
#For INTERNAL USE by the module and shouldn't be exported.
def set_spo2_sr(sr):
if sr < (MAX30100.SAMPLES_50 >> 2) or sr > (MAX30100.SAMPLES_1000 >> 2):
report_error("SPO2 sampling rate set")
return RET_ERROR
else:
spo2_config = bus.read_byte_data(MAX30100.I2C_ADR, MAX30100.SPO2_CONFIG)
spo2_config |= sr << 2
bus.write_byte_data(MAX30100.I2C_ADR, MAX30100.SPO2_CONFIG, spo2_config)
return RET_SUCCESS
#Set LED (RED and IR) pulse width
#Input:
# Value: Pulse width value 0 -> 3
# Size: 2 bits
# Mask [PULSE_WIDTH_MASK]: 0X000000xx
#For INTERNAL USE by the module and shouldn't be exported.
def set_led_pw(pw):
if pw < MAX30100.PULSE_WIDTH_200 or pw > MAX30100.PULSE_WIDTH_1600:
report_error("LED pulse width set")
return RET_ERROR
else:
spo2_config = bus.read_byte_data(MAX30100.I2C_ADR, MAX30100.SPO2_CONFIG)
spo2_config |= pw
bus.write_byte_data(MAX30100.I2C_ADR, MAX30100.SPO2_CONFIG, spo2_config)
return RET_SUCCESS
#Function returns single IR reading for HR calculation.
#Returned data to be filtered first before using it.
#For INTERNAL USE by the module and shouldn't be exported.
def get_ir_readings():
timeout = 50
sample = SAMPLE()
while timeout:
status = get_status()
if (status & MAX30100.HR_RDY) == MAX30100.HR_RDY:
break
else:
time.sleep(0.01)
timeout -= 1
if timeout == 0:
report_error("HR read")
        return sample.ir    #return -1 on timeout so callers always receive an int
data = bus.read_i2c_block_data(MAX30100.I2C_ADR, MAX30100.FIFO_DATA_REG, 4)
sample.ir = data[0] << 8 | data[1]
return sample.ir
#Get beats readings for plotting purposes
#Function needs to wait at least one second to have enough samples for filtering purposes
def get_beats():
''' - Return an array of heart rate readings for plotting purposes
- You need to place your finger firmly on the sensor
- take care as sensor reading is very sensitive to finger movements
- You need to wait for 1 second for the function to collect enough samples
- You have to call the function continuously for more reliable signal '''
global samples
global timestamps
t0 = time.time()
t1 = t0
acquisition_time = 1 #1 second
#Clear samples and timestamps history
del samples[:]
del timestamps[:]
#Acquire samples for acquisition_time seconds
while t1 < (t0 + acquisition_time):
samples.append(get_ir_readings())
timestamps.append(int(round(time.time() * 1000)))
t1 = time.time()
print ("Number of samples: " + str(len(samples)))
#Pass signal through DC filter
i = 0
while i < len(samples):
samples[i] = dc_remove(samples[i])
i += 1
#Calculate signal medium
ir_median = statistics.median(samples)
#Pass signal through median diff filter and then butterworth filter
i = 0
while i < len(samples):
samples[i] = ir_median - samples[i]
samples[i] = low_pass_butterworth_filter(samples[i])
i += 1
#Return clean (filtered) signal
return samples
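#Illustrative sketch (not part of the original driver): polling get_beats() repeatedly and
#collecting each one-second window of filtered samples for plotting or peak detection.
def demo_stream_beats(windows=5):
    collected = []
    for _ in range(windows):
        collected.extend(get_beats())    #blocks for about one second per call
    return collected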
#Calculate BPM
#Function needs to wait at least 10 seconds to get more accurate BPM
def calculate_bpm() :
    ''' - Calculate heart rate as BPM (beats per minute)
- You need to place your finger firmly on the sensor
- take care as sensor reading is very sensitive to finger movements
- You need to wait for 15 seconds for the function to collect enough samples'''
#List of BPMs acting as | |
'Wonderboom Airport', 'South Africa'],
['PRZ', 'Prineville (OR)', 'Prineville', 'USA'],
['PSA', 'Florence', 'Galileo Galilei', 'Italy'],
['PSB', 'Philipsburg (PA)', 'Mid-State', 'USA'],
['PSC', 'Pasco (WA)', 'Tri-Cities', 'USA'],
['PSD', 'Port Said', 'Port Said', 'Egypt'],
['PSE', 'Ponce', 'Mercedita', 'Puerto Rico'],
['PSF', 'Pittsfield (MA)', 'Pittsfield', 'USA'],
['PSG', 'Petersburg (AK)', 'Municipal', 'USA'],
['PSH', 'St Peter', 'St Peter', 'Germany'],
['PSI', 'Pasni', 'Pasni', 'Pakistan'],
['PSJ', 'Poso', 'Poso', 'Indonesia'],
['PSK', 'Dublin (VA)', 'New River Valley', 'USA'],
['PSL', 'Perth', 'Perth', 'Scotland, UK'],
['PSM', 'Portsmouth (NH)', 'Pease AFB', 'USA'],
['PSN', 'Palestine (TX)', 'Palestine', 'USA'],
['PSO', 'Pasto', 'Cano', 'Colombia'],
['PSP', 'Palm Springs (CA)', 'Municipal', 'USA'],
['PSQ', 'Philadelphia (PA)', 'PB', 'USA'],
['PSR', 'Pescara', 'Liberi', 'Italy'],
['PSS', 'Posadas', 'Posadas', 'Argentina'],
['PST', 'Preston', 'Preston', 'Cuba'],
['PSU', 'Putussibau', 'Putussibau', 'Indonesia'],
['PSV', 'Papa Stour', 'Papa Stour', 'United Kingdom'],
['PSW', 'Passos', 'Passos', 'Brazil'],
['PSX', 'Palacios (TX)', 'Palacios', 'USA'],
['PSY', 'Port Stanley', 'Port Stanley', 'Falkland Islands'],
['PSZ', '<NAME>', 'Puerto Suarez', 'Bolivia'],
['PTA', 'Port Alsworth (AK)', 'Port Alsworth', 'USA'],
['PTB', 'Petersburg (VA)', 'Municipal', 'USA'],
['PTC', 'Port Alice (AK)', 'Port Alice', 'USA'],
['PTD', 'Port Alexander (AK)', 'Port Alexander', 'USA'],
['PTE', 'Port Stephens', 'Port Stephens', 'Australia'],
['PTF', 'Malololailai', 'Malololailai', 'Fiji'],
['PTG', 'Pietersburg', 'Pietersburg', 'South Africa'],
['PTH', 'Port Heiden (AK)', 'Port Heiden', 'USA'],
['PTI', 'Port Douglas', 'Port Douglas', 'Australia'],
['PTJ', 'Portland', 'Portland', 'Australia'],
['PTK', 'Pontiac (MI)', 'Pontiac', 'USA'],
['PTL', 'Port Armstrong (AK)', 'Port Armstrong', 'USA'],
['PTM', 'Palmarito', 'Palmarito', 'Venezuela'],
['PTN', 'Patterson (LA)', 'Williams Memorial', 'USA'],
['PTO', 'Pato Branco', 'Municipal', 'Brazil'],
['PTP', 'Pointe-a-Pitre', 'Le Raizet', 'Guadeloupe'],
['PTQ', 'Porto de Moz', 'Porto de Moz', 'Brazil'],
['PTR', 'Pleasant Harbour (AK)', 'Pleasant Harbour', 'USA'],
['PTS', 'Pittsburg (KS)', 'Municipal Airport', 'USA'],
['PTT', 'Pratt (KS)', 'Pratt', 'USA'],
['PTU', 'Platinum (AK)', 'Platinum', 'USA'],
['PTV', 'Porterville (CA)', 'Porterville', 'USA'],
['PTW', 'Pottstown (PA)', 'Pottstown/Limerick', 'USA'],
['PTX', 'Pitalito', 'Pitalito', 'Colombia'],
['PTY', 'Panama City', 'Tocumen Intl Airport', 'Panama'],
['PTZ', 'Pastaza', 'Pastaza', 'Ecuador'],
['PUA', 'Puas', 'Puas', 'Papua New Guinea'],
['PUB', 'Pueblo (CO)', 'Memorial', 'USA'],
['PUC', 'Price (UT)', 'Carbon County', 'USA'],
['PUD', 'Puerto Deseado', 'Puerto Deseado', 'Argentina'],
['PUE', 'Puerto Obaldia', 'Puerto Obaldia', 'Panama'],
['PUF', 'Pau', 'Uzein', 'France'],
['PUG', 'Port Augusta', 'Port Augusta', 'Australia'],
['PUH', 'Pochutla', 'Pochutla', 'Mexico'],
['PUI', 'Pureni', 'Pureni', 'Papua New Guinea'],
['PUJ', 'Punta Cana', 'Punta Cana', 'Dominican Republic'],
['PUK', 'Pukarua', 'Pukarua', 'French Polynesia'],
['PUL', 'Poulsbo (WA)', 'Poulsbo', 'USA'],
['PUM', 'Pomala', 'Pomala', 'Indonesia'],
['PUN', 'Punia', 'Punia', 'Congo (DRC)'],
['PUO', 'Prudhoe Bay (AK)', 'Prudhoe Bay', 'USA'],
['PUP', 'Po', 'Po', 'Burkina Faso'],
['PUQ', 'Punta Arenas', 'Pres Ibanez', 'Chile'],
['PUR', 'Puerto Rico', 'Puerto Rico', 'Bolivia'],
['PUS', 'Pusan', 'Kimhae', 'South Korea'],
['PUT', 'Puttaparthi', 'Puttaprathe', 'India'],
['PUU', 'Puerto Asis', 'Puerto Asis', 'Colombia'],
['PUV', 'Poum', 'Poum', 'New Caledonia'],
['PUW', 'Pullman (WA)', 'Moscow Regional', 'USA'],
['PUX', '<NAME>', 'Puerto Varas', 'Chile'],
['PUY', 'Pula', 'Pula', 'Croatia'],
['PUZ', '<NAME>', 'Puerto Cabezas', 'Nicaragua'],
['PVA', 'Providencia', 'Providencia', 'Colombia'],
['PVC', 'Provincetown (MA)', 'Provincetown', 'USA'],
['PVD', 'Providence (RI)', 'Theodore Francis', 'USA'],
['PVE', 'El Porvenir', 'El Porvenir', 'Panama'],
['PVF', 'Placerville (CA)', 'Placerville', 'USA'],
['PVG', 'Shanghai', 'Pu Dong', 'PR China'],
['PVH', 'Porto Velho', 'Porto Velho International', 'Brazil'],
['PVI', 'Paranavai', 'Paranavai', 'Brazil'],
['PVK', 'Preveza/Lefkas', 'Aktion', 'Greece'],
['PVN', 'Pleven', 'Pleven', 'Bulgaria'],
['PVO', 'Portoviejo', 'Portoviejo', 'Ecuador'],
['PVR', 'Puerto Vallarta', 'Ordaz', 'Mexico'],
['PVS', 'Provideniya', 'Provideniya', 'Russia'],
['PVU', 'Provo (UT)', 'Provo', 'USA'],
['PVW', 'Plainview (TX)', 'Hale County', 'USA'],
['PVX', 'Provedenia', 'Provedenia', 'Russia'],
['PVY', 'Pope Vanoy (AK)', 'Pope Vanoy', 'USA'],
['PVZ', 'Painesville (OH)', 'Casement', 'USA'],
['PWA', 'Oklahoma City (OK)', 'Wiley Post', 'USA'],
['PWD', 'Plentywood (MT)', 'Sherwood', 'USA'],
['PWE', 'Pevek', 'Pevek', 'Russia'],
['PWI', 'Pawi', 'Beles', 'Ethiopia'],
['PWK', 'Chicago (IL)', 'Pal-Waukee', 'USA'],
['PWL', 'Purwokerto', 'Purwokerto', 'Indonesia'],
['PWM', 'Portland (ME)', 'International Jetport', 'USA'],
['PWN', 'Pitts Town', 'Pitts Town', 'Bahamas'],
['PWO', 'Pweto', 'Pweto', 'Congo (DRC)'],
['PWQ', 'Pavlodar', 'Pavlodar', 'Kazakhstan'],
['PWR', 'Port Walter (AK)', 'Port Walter', 'USA'],
['PWT', 'Bremerton (WA)', 'Bremerton', 'USA'],
['PXL', 'Polacca (AZ)', 'Polacca', 'USA'],
['PXM', 'Puerto Escondido', 'Puerto Escondido', 'Mexico'],
['PXO', 'Porto Santo', 'Porto Santo', 'Portugal'],
['PXU', 'Pleiku', 'Pleiku', 'Vietnam'],
['PYA', 'Puerto Boyaca', 'Puerto Boyaca', 'Colombia'],
['PYB', 'Jeypore', 'Jeypore', 'India'],
['PYC', 'Playon Chico', 'Playon Chico', 'Panama'],
['PYE', 'Penrhyn Island', 'Penrhyn Island', 'Cook Islands'],
['PYH', 'Puerto Ayacucho', 'Puerto Ayacucho', 'Venezuela'],
['PYJ', 'Polyarnyj', 'Polyarnyj', 'Russia'],
['PYL', 'Perry Island (AK)', 'Perry SPB', 'USA'],
['PYM', 'Plymouth (MA)', 'Plymouth', 'USA'],
['PYN', 'Payan', 'Payan', 'Colombia'],
['PYO', 'Putumayo', 'Putumayo', 'Ecuador'],
['PYR', 'Pyrgos', 'Andravida', 'Greece'],
['PYV', 'Yaviza', 'Yaviza', 'Panama'],
['PYX', 'Pattaya', 'Pattaya', 'Thailand'],
['PZA', 'Paz De Ariporo', 'Casanare', 'Colombia'],
['PZB', 'Pietermaritzburg', 'Pietermaritzburg', 'South Africa'],
['PZE', 'Penzance', 'Penzance', 'United Kingdom'],
['PZH', 'Zhob', 'Zhob', 'Pakistan'],
['PZK', 'Puka Puka Island', 'Puka Puka Island/Attol', 'Cook Islands'],
['PZL', 'Phinda', 'Zulu Inyala', 'South Africa'],
['PZO', 'Puerto Ordaz', 'Puerto Ordaz', 'Venezuela'],
['PZU', 'Port Sudan', 'Port Sudan New International Airport', 'Sudan'],
['PZY', 'Piestany', 'Piestany', 'Slovakia'],
['QAJ', 'Ajman', 'Ajman International Airport (under construction)', 'United Arab Emirates'],
['QAM', 'Amiens', 'Glisy Aerodrome', 'France'],
['QAQ', "L'Aquila", "L'Aquila–Preturo Airport", 'Italy'],
['QBC', 'Bella Coola', 'Bella Coola', 'Canada'],
['QBQ', 'Besançon', 'Besançon-La Vèze Airport', 'France'],
['QCY', 'Coningsby', 'RAF Station', 'United Kingdom'],
['QDM', '<NAME>', 'Shek Mun Station', 'Hong Kong, PR China'],
['QGV', 'Frankfurt', 'Neu Isenburg', 'Germany'],
['QHU', 'Husum', 'Husum Schwesing Airport', 'Germany'],
['QLR', 'Monte Real', 'Monte Real AB', 'Portugal'],
['QLX', 'Lauterach', 'Lauterach', 'Austria'],
['QMM', '<NAME>', '<NAME>', 'Italy'],
['QMQ', 'Murzuq', 'Murzuq', 'Libya'],
['QMV', 'Montvale (NJ)', 'Montvale', 'USA'],
['QNY', 'New York (NY)', 'Marine Air Terminal', 'USA'],
['QOW', 'Owerri', 'Sam Mbakwe Airport', 'Nigeria'],
['QPG', 'Singapore', 'Paya Lebar AB', 'Singapore'],
['QRA', 'Johannesburg', 'Rand Germiston Airport', 'South Africa'],
['QRO', 'Queretaro', 'Querétaro Intercontinental Airport', 'Mexico'],
['QRW', 'Warri', 'Warri Airport', 'Nigeria'],
['QSC', 'São Carlos', 'São Carlos Airport', 'Brazil'],
['QSF', 'Setif', 'Ain Arnat Airport', 'Algeria'],
['QSG', 'Sonderborg', 'Sonderborg', 'Denmark'],
['QSR', 'Salerno', 'Salerno Airport', 'Italy'],
['QSZ', 'Yarkant County', 'Shache Airport', 'Xinjiang, China'],
['QUB', 'Ubari', 'Ubari', 'Libya'],
['QUF', 'Tallinn', 'Pirita Harbour', 'Estonia'],
['QUG', 'Chichester', 'Goodwood Aerodrome', 'United Kingdom'],
['QUY', 'Wyton', 'RAF Station', 'United Kingdom'],
['QWG', 'Charlotte (NC)', 'Wilgrove Air Park', 'USA'],
['QXB', 'Aix-en-Provence', 'Aix-en-Provence Aerodrome', 'France'],
['QYW', 'Cannes', 'Vieux Port', 'France'],
['RAA', 'Rakanda', 'Rakanda', 'Papua New Guinea'],
['RAB', 'Rabaul', 'Tokua Airport', 'Papua New Guinea'],
['RAC', 'Racine (WI)', 'Horlick', 'USA'],
['RAD', 'Tortola', 'Road Town', 'British Virgin Islands'],
['RAE', 'Arar', 'Arar', 'Saudi Arabia'],
['RAG', 'Raglan', 'Raglan', 'New Zealand'],
['RAH', 'Rafha', 'Rafha', 'Saudi Arabia'],
['RAI', 'Praia', 'Francisco Mendes Airport', 'Cape Verde'],
['RAJ', 'Rajkot', 'Rajkot Civil Airport', 'India'],
['RAK', 'Marrakesh', 'Menara Airport', 'Morocco'],
['RAL', 'Riverside (CA)', 'Riverside Municipal', 'USA'],
['RAM', 'Ramingining', 'Ramingining', 'Australia'],
['RAN', 'Ravenna', 'La Spreta Airport', 'Italy'],
['RAO', 'Ribeirao Preto', 'Leite Lopes', 'Brazil'],
['RAP', 'Rapid City (SD)', 'Regional Airport', 'USA'],
['RAQ', 'Raha', 'Sugimanuru', 'Indonesia'],
['RAR', 'Rarotonga', 'Rarotonga', 'Cook Islands'],
['RAS', 'Rasht', 'Rasht', 'Iran'],
['RAT', 'Raduzhnyi', 'Raduzhnyi', 'Russia'],
['RAU', 'Rangpur', 'Rangpur', 'Bangladesh'],
['RAV', 'Cravo Norte', 'Cravo Norte', 'Colombia'],
['RAW', 'Arawa', 'Arawa', 'Papua New Guinea'],
['RAX', 'Oram', 'Oram', 'Papua New Guinea'],
['RAY', 'Rothesay', 'Rothesay Heliport', 'United Kingdom'],
['RAZ', 'Rawala Kot', 'Rawala Kot', 'Pakistan'],
['RBA', 'Rabat', 'Sale Airport', 'Morocco'],
['RBB', 'Borba', 'Borba', 'Brazil'],
['RBC', 'Robinvale', 'Robinvale', 'Australia'],
['RBD', 'Dallas/Fort Worth (TX)', 'Redbird', 'USA'],
['RBE', 'Ratanakiri', 'Ratanakiri', 'Cambodia'],
['RBF', 'Big Bear (CA)', 'Big Bear City Airport', 'USA'],
['RBG', 'Roseburg (OR)', 'Municipal Airport', 'USA'],
['RBH', 'Brooks Lodge (AK)', 'Brooks Lodge', 'USA'],
['RBI', 'Rabi', 'Rabi', 'Fiji'],
['RBJ', 'Rebun', 'Rebun', 'Japan'],
['RBK', 'Rancho (CA)', 'French Valley', 'USA'],
['RBL', 'Red Bluff (CA)', 'Red Bluff Fss', 'USA'],
['RBM', 'Straubing', 'Wallmuhle', 'Germany'],
['RBN', 'Fort Jefferson (FL)', 'Fort Jefferson', 'USA'],
['RBO', 'Robore', 'Robore', 'Bolivia'],
['RBP', 'Rabaraba', 'Rabaraba', 'Papua New Guinea'],
['RBQ', 'Rurrenabaque', 'Rurrenabaque', 'Bolivia'],
['RBR', 'Rio Branco', 'Plácido de Castro International Airport', 'Brazil'],
['RBS', 'Orbost', 'Orbost', 'Australia'],
['RBT', 'Marsabit', 'Marsabit', 'Kenya'],
['RBU', 'Roebourne', 'Roebourne', 'Australia'],
['RBV', 'Ramata', 'Ramata', 'Solomon Islands'],
['RBW', 'Walterboro (SC)', 'Municipal Airport', 'USA'],
['RBY', 'Ruby (AK)', 'Ruby', 'USA'],
['RCA', 'Rapid City (SD)', 'Ellsworth AFB', 'USA'],
['RCB', 'Richards Bay', 'Richards Bay', 'South Africa'],
['RCE', 'Roche Harbor (WA)', 'Roche Harbor', 'USA'],
['RCH', 'Riohacha', 'Riohacha', 'Colombia'],
['RCK', 'Rockdale (TX)', 'Coffield', 'USA'],
['RCL', 'Redcliffe', 'Redcliffe', 'Vanuatu'],
['RCM', 'Richmond', 'Richmond', 'Australia'],
['RCN', 'American River', 'American River', 'Australia'],
['RCO', 'Rochefort', 'Saint Agnant', 'France'],
['RCQ', 'Reconquista', 'Reconquista', 'Argentina'],
['RCR', 'Rochester (IN)', 'Fulton County', 'USA'],
['RCS', 'Rochester', 'Rochester', 'United Kingdom'],
['RCT', 'Reed City (MI)', 'Miller Field', 'USA'],
['RCU', 'Rio Cuarto', 'Rio Cuarto', 'Argentina'],
['RCY', 'Rum Cay', 'Rum Cay', 'Bahamas'],
['RDA', 'Rockhampton Downs', 'Rockhampton Downs', 'Australia'],
['RDB', 'Red Dog (AK)', 'Red Dog', 'USA'],
['RDC', 'Redencao', 'Redencao', 'Brazil'],
['RDD', 'Redding (CA)', 'Redding', 'USA'],
['RDE', 'Merdey', 'Merdey', 'Indonesia'],
['RDG', 'Reading (PA)', 'Municipal/Spaatz Fld', 'USA'],
['RDM', 'Redmond (OR)', 'Roberts Field', 'USA'],
['RDR', 'Red River (ND)', 'Grand Forks AFB', 'USA'],
['RDS', 'Rincon de los Sauces', 'Rincon de Los Sauces', 'Argentina'],
['RDT', '<NAME>', '<NAME>', 'Senegal'],
['RDU', 'Raleigh/Durham (NC)', 'Durham/Raleigh Airport', 'USA'],
['RDV', 'Red Devil (AK)', 'Red Devil', 'USA'],
['RDZ', 'Rodez', 'Marcillac', 'France'],
['REA', 'Reao', 'Reao', 'French Polynesia'],
['REB', 'Rechlin', 'Rechlin', 'Germany'],
['REC', 'Recife', 'Guararapes-Gilberto Freyre International Airport', 'Brazil'],
['RED', 'Reedsville (PA)', 'Mifflin County', 'USA'],
['REE', 'Lubbock (TX)', 'Reese AFB', 'USA'],
['REG', '<NAME>', 'Tito Menniti', 'Italy'],
['REH', 'Rehoboth Beach (DE)', 'Rehoboth Beach', 'USA'],
['REI', 'Regina', 'Regina', 'French Guiana'],
['REK', 'Reykjavik', 'Metropolitan Area', 'Iceland'],
['REL', 'Trelew', 'Almirante Marcos A. Zar Airport', 'Argentina'],
['REN', 'Orenburg', 'Orenburg', 'Russia'],
['REO', 'Rome (OR)', 'State', 'USA'],
['REP', 'Siem Reap', 'Siem Reap-Angkor International Airport', 'Cambodia'],
['RER', 'Retalhuleu', 'Base Aerea Del Sur', 'Guatemala'],
['RES', 'Resistencia', 'Resistencia', 'Argentina'],
['RET', 'Rost', 'Stolport', 'Norway'],
['REU', 'Reus', 'Reus', 'Spain'],
['REW', 'Rewa', 'Rewa', 'India'],
['REX', 'Reynosa', 'Gen. Lucio Blanco', 'Mexico'],
['REY', 'Reyes', 'Reyes', 'Bolivia'],
['REZ', 'Resende', 'Resende', 'Brazil'],
['RFA', 'Rafai', 'Rafai', 'Central African Republic'],
['RFD', 'Rockford (IL)', 'Greater Rockford', 'USA'],
['RFG', 'Refugio (TX)', 'Rooke Field', 'USA'],
['RFK', 'Anguilla (MS)', 'Rollang Field', 'USA'],
['RFN', 'Raufarhofn', 'Raufarhofn', 'Iceland'],
['RFP', 'Raiatea', 'Raiatea', 'French Polynesia'],
['RFR', 'Rio Frio', 'Rio Frio', 'Costa Rica'],
['RFS', 'Rosita', 'Rosita', 'Nicaragua'],
['RGA', 'Rio Grande', 'Rio Grande', 'Argentina'],
['RGE', 'Porgera', 'Porgera', 'Papua New Guinea'],
['RGH', 'Balurghat', 'Balurghat', 'India'],
['RGI', 'Rangiroa', 'Rangiroa', 'French Polynesia'],
['RGL', 'Rio Gallegos', 'Internacional', 'Argentina'],
['RGN', 'Yangon', 'Mingaladon', 'Myanmar'],
['RGR', 'Ranger (TX)', 'Ranger Municipal', 'USA'],
['RGT', 'Rengat', 'Japura', 'Indonesia'],
['RHA', 'Reykholar', 'Reykholar', 'Iceland'],
['RHD', '<NAME>', 'Rio Hondo', 'Argentina'],
['RHE', 'Reims', 'Reims', 'France'],
['RHG', 'Ruhengeri', 'Ruhengeri', 'Rwanda'],
['RHI', 'Rhinelander (WI)', 'Oneida County', 'USA'],
['RHL', 'Roy Hill', 'Roy Hill', 'Australia'],
['RHO', 'Rhodes', 'Diagoras Airport', 'Greece'],
['RHP', 'Ramechhap', 'Ramechhap', 'Nepal'],
['RHV', 'San | |
<reponame>renefritze/mpi4py
# Author: <NAME>
# Contact: <EMAIL>
"""Pickle-based communication using protocol 5."""
import sys as _sys
import struct as _struct
from .. import MPI
from ..MPI import (
PROC_NULL,
ANY_SOURCE,
ANY_TAG,
Status,
)
from ..MPI import (
_typedict,
_comm_lock,
_commctx_inter,
memory as _memory,
Pickle as _Pickle,
)
if _sys.version_info >= (3, 8):
from pickle import (
dumps as _dumps,
loads as _loads,
HIGHEST_PROTOCOL as _PROTOCOL,
)
else: # pragma: no cover
try:
from pickle5 import (
dumps as _dumps,
loads as _loads,
HIGHEST_PROTOCOL as _PROTOCOL,
)
except ImportError:
_PROTOCOL = MPI.Pickle().PROTOCOL
def _dumps(obj, *_p, **_kw):
return MPI.pickle.dumps(obj)
def _loads(buf, *_p, **_kw):
return MPI.pickle.loads(buf)
def _buffer_handler(protocol, threshold):
bufs = []
if protocol is None or protocol < 0:
protocol = _PROTOCOL
if protocol < 5:
return bufs, None
buffer_len = len
buffer_raw = _memory
buffer_add = bufs.append
def buf_cb(buf):
buf = buffer_raw(buf)
if buffer_len(buf) >= threshold:
buffer_add(buf)
return False
return True
return bufs, buf_cb
class Pickle(_Pickle):
"""Pickle/unpickle Python objects using out-of-band buffers."""
THRESHOLD = 1024**2 // 4 # 0.25 MiB
def __init__(self, dumps=_dumps, loads=_loads, protocol=_PROTOCOL):
"""Initialize pickle context."""
# pylint: disable=useless-super-delegation
super().__init__(dumps, loads, protocol)
def dumps(self, obj):
"""Serialize object to data and out-of-band buffers."""
bufs, buf_cb = _buffer_handler(self.PROTOCOL, self.THRESHOLD)
data = super().dumps(obj, buf_cb)
return data, bufs
def loads(self, data, bufs):
"""Deserialize object from data and out-of-band buffers."""
# pylint: disable=useless-super-delegation
return super().loads(data, bufs)
pickle = Pickle()
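# Illustrative sketch (added, not part of the original module): with pickle
# protocol 5, buffers exposed through ``pickle.PickleBuffer`` that exceed
# ``Pickle.THRESHOLD`` are collected out-of-band rather than copied into the
# pickle stream. Assumes Python 3.8+ (or pickle5) and a working mpi4py build.
def _example_pickle_out_of_band():
    from pickle import PickleBuffer
    payload = bytearray(b"x" * (2 * 1024**2))         # 2 MiB > THRESHOLD
    data, bufs = pickle.dumps({"big": PickleBuffer(payload), "small": 123})
    assert len(bufs) == 1 and len(data) < 1024        # payload travels in bufs
    obj = pickle.loads(data, bufs)
    assert obj["small"] == 123
    assert bytes(obj["big"]) == bytes(payload)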
def _bigmpi_create_type(basetype, count, blocksize):
qsize, rsize = divmod(count, blocksize)
qtype = basetype.Create_vector(
qsize, blocksize, blocksize)
rtype = basetype.Create_contiguous(rsize)
rdisp = qtype.Get_extent()[1]
bigtype = MPI.Datatype.Create_struct(
(1, 1), (0, rdisp), (qtype, rtype))
qtype.Free()
rtype.Free()
return bigtype
class _BigMPI:
"""Support for large message counts."""
blocksize = 1024**3 # 1 GiB
def __init__(self):
self.cache = {}
def __enter__(self):
return self
def __exit__(self, *exc):
cache = self.cache
for dtype in cache.values():
dtype.Free()
cache.clear()
def __call__(self, buf):
buf = _memory(buf)
count = len(buf)
blocksize = self.blocksize
if count < blocksize:
return (buf, count, MPI.BYTE)
cache = self.cache
dtype = cache.get(count)
if dtype is not None:
return (buf, 1, dtype)
dtype = _bigmpi_create_type(MPI.BYTE, count, blocksize)
cache[count] = dtype.Commit()
return (buf, 1, dtype)
_bigmpi = _BigMPI()
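# Illustrative sketch (added, not part of the original module): the chunked
# datatype built by ``_bigmpi_create_type`` spans exactly ``count`` bytes,
# which is what lets ``_BigMPI`` describe a huge buffer with a count of 1.
# Tiny numbers are used purely for illustration; assumes a working MPI.
def _example_bigmpi_extent():
    count, blocksize = 10, 4                   # 10 bytes = 2 blocks of 4 + 2
    dtype = _bigmpi_create_type(MPI.BYTE, count, blocksize)
    try:
        assert dtype.Get_extent()[1] == count  # (lb, extent) -> extent
    finally:
        dtype.Free()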
def _info_typecode():
return 'q'
def _info_datatype():
code = _info_typecode()
return _typedict[code]
def _info_pack(info):
code = _info_typecode()
size = len(info)
sfmt = "{0}{1}".format(size, code)
return _struct.pack(sfmt, *info)
def _info_alloc(size):
code = _info_typecode()
itemsize = _struct.calcsize(code)
return bytearray(size * itemsize)
def _info_unpack(info):
code = _info_typecode()
itemsize = _struct.calcsize(code)
size = len(info) // itemsize
sfmt = "{0}{1}".format(size, code)
return _struct.unpack(sfmt, info)
def _new_buffer(size):
return MPI.memory.allocate(size)
def _send_raw(comm, send, data, bufs, dest, tag):
# pylint: disable=too-many-arguments
info = [len(data)]
info.extend(len(_memory(sbuf)) for sbuf in bufs)
infotype = _info_datatype()
info = _info_pack(info)
send(comm, (info, infotype), dest, tag)
with _bigmpi as bigmpi:
send(comm, bigmpi(data), dest, tag)
for sbuf in bufs:
send(comm, bigmpi(sbuf), dest, tag)
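# Wire layout used by _send_raw/_recv_raw (added note, derived from the code
# above): first a header message with the sizes [len(data), len(buf_0), ...]
# packed as 'q' integers, then the pickle stream ``data``, then each
# out-of-band buffer, with large payloads chunked through ``_bigmpi``.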
def _send(comm, send, obj, dest, tag):
if dest == PROC_NULL:
send(comm, (None, 0, MPI.BYTE), dest, tag)
return
data, bufs = pickle.dumps(obj)
with _comm_lock(comm, 'send'):
_send_raw(comm, send, data, bufs, dest, tag)
def _isend(comm, isend, obj, dest, tag):
sreqs = []
def send(comm, buf, dest, tag):
sreqs.append(isend(comm, buf, dest, tag))
_send(comm, send, obj, dest, tag)
request = Request(sreqs)
return request
def _recv_raw(comm, recv, buf, source, tag, status=None):
# pylint: disable=too-many-arguments
if status is None:
status = Status()
MPI.Comm.Probe(comm, source, tag, status)
source = status.Get_source()
tag = status.Get_tag()
infotype = _info_datatype()
infosize = status.Get_elements(infotype)
info = _info_alloc(infosize)
MPI.Comm.Recv(comm, (info, infotype), source, tag, status)
info = _info_unpack(info)
if buf is not None:
buf = _memory.frombuffer(buf)
if len(buf) > info[0]:
buf = buf[:info[0]]
if len(buf) < info[0]:
buf = None
data = _new_buffer(info[0]) if buf is None else buf
bufs = list(map(_new_buffer, info[1:]))
with _bigmpi as bigmpi:
recv(comm, bigmpi(data), source, tag)
for rbuf in bufs:
recv(comm, bigmpi(rbuf), source, tag)
status.Set_elements(MPI.BYTE, sum(info))
return data, bufs
def _recv(comm, recv, buf, source, tag, status):
# pylint: disable=too-many-arguments
if source == PROC_NULL:
recv(comm, (None, 0, MPI.BYTE), source, tag, status)
return None
with _comm_lock(comm, 'recv'):
data, bufs = _recv_raw(comm, recv, buf, source, tag, status)
return pickle.loads(data, bufs)
def _mprobe(comm, mprobe, source, tag, status):
if source == PROC_NULL:
rmsg = MPI.Comm.Mprobe(comm, source, tag, status)
return Message([rmsg])
if status is None:
status = Status()
with _comm_lock(comm, 'recv'):
message = []
numbytes = 0
rmsg = mprobe(comm, source, tag, status)
if rmsg is None:
return None
message.append(rmsg)
source = status.Get_source()
tag = status.Get_tag()
infotype = _info_datatype()
infosize = status.Get_elements(infotype)
for _ in range(infosize):
rmsg = MPI.Comm.Mprobe(comm, source, tag, status)
message.append(rmsg)
numbytes += status.Get_elements(MPI.BYTE)
status.Set_elements(MPI.BYTE, numbytes)
return Message(message)
def _mrecv_info(rmsg, size, status=None):
mrecv = MPI.Message.Recv
infotype = _info_datatype()
info = _info_alloc(size)
mrecv(rmsg, (info, infotype), status)
info = _info_unpack(info)
return info
def _mrecv_none(rmsg, mrecv, status):
_mrecv_info(rmsg, 0, status)
noproc = MPI.MESSAGE_NO_PROC
mrecv(noproc, (None, 0, MPI.BYTE))
data, bufs = pickle.dumps(None)
return (bytearray(data), bufs)
def _mrecv_data(message, mrecv, status=None):
if message[0] == MPI.MESSAGE_NO_PROC:
rmsg = message[0]
return _mrecv_none(rmsg, mrecv, status)
rmsg = iter(message)
icnt = len(message) - 1
info = _mrecv_info(next(rmsg), icnt, status)
data = _new_buffer(info[0])
bufs = list(map(_new_buffer, info[1:]))
with _bigmpi as bigmpi:
mrecv(next(rmsg), bigmpi(data))
for rbuf in bufs:
mrecv(next(rmsg), bigmpi(rbuf))
if status is not None:
status.Set_elements(MPI.BYTE, sum(info))
return (data, bufs)
def _mrecv(message, status):
def mrecv(rmsg, buf):
MPI.Message.Recv(rmsg, buf)
data, bufs = _mrecv_data(message, mrecv, status)
return pickle.loads(data, bufs)
def _imrecv(message):
rreqs = []
def mrecv(rmsg, buf):
rreqs.append(MPI.Message.Irecv(rmsg, buf))
data, bufs = _mrecv_data(message, mrecv)
request = Request(rreqs)
setattr(request, '_data_bufs', (data, bufs))
return request
def _req_load(request):
data_bufs = getattr(request, '_data_bufs', None)
if request == MPI.REQUEST_NULL and data_bufs is not None:
delattr(request, '_data_bufs')
if data_bufs is not None:
data, bufs = data_bufs
obj = pickle.loads(data, bufs)
return obj
return None
def _test(request, test, status):
statuses = None if status is None else [status]
flag = test(request, statuses)
if flag:
obj = _req_load(request)
return (flag, obj)
return (flag, None)
def _testall(requests, testall, statuses):
if isinstance(statuses, list):
for _ in range(len(requests) - len(statuses)):
statuses.append(Status())
reqarray = []
stsarray = None
for req in requests:
reqarray.extend(req)
if statuses is not None:
stsarray = []
for req, sts in zip(requests, statuses):
stsarray.extend([sts] * len(req))
flag = testall(reqarray, stsarray)
if flag:
objs = [_req_load(req) for req in requests]
return (flag, objs)
return (flag, None)
def _bcast_intra_raw(comm, bcast, data, bufs, root):
rank = comm.Get_rank()
if rank == root:
info = [len(data)]
info.extend(len(_memory(sbuf)) for sbuf in bufs)
infotype = _info_datatype()
infosize = _info_pack([len(info)])
bcast(comm, (infosize, infotype), root)
info = _info_pack(info)
bcast(comm, (info, infotype), root)
else:
infotype = _info_datatype()
infosize = _info_alloc(1)
bcast(comm, (infosize, infotype), root)
infosize = _info_unpack(infosize)[0]
info = _info_alloc(infosize)
bcast(comm, (info, infotype), root)
info = _info_unpack(info)
data = _new_buffer(info[0])
bufs = list(map(_new_buffer, info[1:]))
with _bigmpi as bigmpi:
bcast(comm, bigmpi(data), root)
for rbuf in bufs:
bcast(comm, bigmpi(rbuf), root)
return data, bufs
def _bcast_intra(comm, bcast, obj, root):
rank = comm.Get_rank()
if rank == root:
data, bufs = pickle.dumps(obj)
else:
data, bufs = pickle.dumps(None)
with _comm_lock(comm, 'bcast'):
data, bufs = _bcast_intra_raw(comm, bcast, data, bufs, root)
return pickle.loads(data, bufs)
def _bcast_inter(comm, bcast, obj, root):
rank = comm.Get_rank()
size = comm.Get_remote_size()
comm, tag, localcomm, _ = _commctx_inter(comm)
if root == MPI.PROC_NULL:
return None
elif root == MPI.ROOT:
send = MPI.Comm.Send
data, bufs = pickle.dumps(obj)
_send_raw(comm, send, data, bufs, 0, tag)
return None
elif 0 <= root < size:
if rank == 0:
recv = MPI.Comm.Recv
data, bufs = _recv_raw(comm, recv, None, root, tag)
else:
data, bufs = pickle.dumps(None)
with _comm_lock(localcomm, 'bcast'):
data, bufs = _bcast_intra_raw(localcomm, bcast, data, bufs, 0)
return pickle.loads(data, bufs)
comm.Call_errhandler(MPI.ERR_ROOT)
raise MPI.Exception(MPI.ERR_ROOT)
def _bcast(comm, bcast, obj, root):
if comm.Is_inter():
return _bcast_inter(comm, bcast, obj, root)
else:
return _bcast_intra(comm, bcast, obj, root)
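# Illustrative sketch (added, not part of the original module): the private
# helpers above can be exercised directly with the blocking MPI primitives;
# run under e.g. ``mpiexec -n 2``.
def _example_send_recv():
    comm = MPI.COMM_WORLD
    if comm.Get_rank() == 0 and comm.Get_size() > 1:
        _send(comm, MPI.Comm.Send, {"x": bytearray(1 << 20)}, 1, 0)
    elif comm.Get_rank() == 1:
        obj = _recv(comm, MPI.Comm.Recv, None, 0, 0, None)
        assert len(obj["x"]) == 1 << 20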
class Request(tuple):
"""Request."""
def __new__(cls, request=None):
"""Create and return a new object."""
if request is None:
request = (MPI.REQUEST_NULL,)
if isinstance(request, MPI.Request):
request = (request,)
return super().__new__(cls, request)
def __eq__(self, other):
"""Return ``self==other``."""
if isinstance(other, Request):
return tuple(self) == tuple(other)
if isinstance(other, MPI.Request):
return all(req == other for req in self)
return NotImplemented
def __ne__(self, other):
"""Return ``self!=other``."""
if isinstance(other, Request):
return tuple(self) != tuple(other)
if isinstance(other, MPI.Request):
return any(req != other for req in self)
return NotImplemented
def __bool__(self):
"""Return ``bool(self)``."""
return any(req for req in self)
def Free(self) -> None:
"""Free a communication request."""
# pylint: disable=invalid-name
for req in self:
req.Free()
def cancel(self):
"""Cancel a communication request."""
| |
<filename>tests/test_slippage.py
# Copyright 2018 QuantRocket LLC - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To run: python3 -m unittest discover -s tests/ -p test_*.py -t . -v
import os
import unittest
from unittest.mock import patch
import glob
import pandas as pd
from moonshot import Moonshot
from moonshot.slippage import FixedSlippage
from moonshot.cache import TMP_DIR
class MoonshotSlippageTestCase(unittest.TestCase):
"""
Test cases related to applying slippage in a backtest.
"""
def tearDown(self):
"""
Remove cached files.
"""
for file in glob.glob("{0}/moonshot*.pkl".format(TMP_DIR)):
os.remove(file)
def test_no_slippage(self):
"""
Tests that the resulting DataFrames are correct when no slippage is
applied.
"""
class BuyBelow10ShortAbove10(Moonshot):
"""
A basic test strategy that buys below 10 and shorts above 10.
"""
def prices_to_signals(self, prices):
long_signals = prices.loc["Close"] <= 10
short_signals = prices.loc["Close"] > 10
signals = long_signals.astype(int).where(long_signals, -short_signals.astype(int))
return signals
def mock_get_prices(*args, **kwargs):
dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"])
fields = ["Close","Volume"]
idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
prices = pd.DataFrame(
{
"FI12345": [
# Close
9,
11,
10.50,
9.99,
# Volume
5000,
16000,
8800,
9900
],
"FI23456": [
# Close
9.89,
11,
8.50,
10.50,
# Volume
15000,
14000,
28800,
17000
],
},
index=idx
)
return prices
def mock_download_master_file(f, *args, **kwargs):
master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
securities = pd.DataFrame(
{
"FI12345": [
"America/New_York",
"ABC",
"STK",
"USD",
None,
None
],
"FI23456": [
"America/New_York",
"DEF",
"STK",
"USD",
None,
None,
]
},
index=master_fields
)
securities.columns.name = "Sid"
securities.T.to_csv(f, index=True, header=True)
f.seek(0)
with patch("moonshot.strategies.base.get_prices", new=mock_get_prices):
with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
results = BuyBelow10ShortAbove10().backtest()
self.assertSetEqual(
set(results.index.get_level_values("Field")),
{'Commission',
'AbsExposure',
'Signal',
'Return',
'Slippage',
'NetExposure',
'TotalHoldings',
'Turnover',
'AbsWeight',
'Weight'}
)
results = results.round(7)
results = results.where(results.notnull(), "nan")
signals = results.loc["Signal"].reset_index()
signals.loc[:, "Date"] = signals.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
signals.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [1.0,
-1.0,
-1.0,
1.0],
"FI23456": [1.0,
-1.0,
1.0,
-1.0]}
)
weights = results.loc["Weight"].reset_index()
weights.loc[:, "Date"] = weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
weights.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.5,
-0.5,
-0.5,
0.5],
"FI23456": [0.5,
-0.5,
0.5,
-0.5]}
)
net_positions = results.loc["NetExposure"].reset_index()
net_positions.loc[:, "Date"] = net_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
net_positions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
-0.5,
-0.5],
"FI23456": ["nan",
0.5,
-0.5,
0.5]}
)
turnover = results.loc["Turnover"].reset_index()
turnover.loc[:, "Date"] = turnover.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
turnover.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
1.0,
0.0],
"FI23456": ["nan",
0.5,
1.0,
1.0]}
)
slippage = results.loc["Slippage"].reset_index()
slippage.loc[:, "Date"] = slippage.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
slippage.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
0.0,
0.0],
"FI23456": [0.0,
0.0,
0.0,
0.0]}
)
returns = results.loc["Return"]
returns = returns.reset_index()
returns.loc[:, "Date"] = returns.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
returns.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
-0.0227273, # (10.50 - 11)/11 * 0.5
0.0242857], # (9.99 - 10.50)/10.50 * -0.5
"FI23456": [0.0,
0.0,
-0.1136364, # (8.50 - 11)/11 * 0.5
-0.1176471] # (10.50 - 8.50)/8.50 * -0.5
}
)
def test_apply_slippage(self):
"""
Tests that the resulting DataFrames are correct when a single
slippage class is applied.
"""
class TestSlippage(FixedSlippage):
ONE_WAY_SLIPPAGE = 0.001 # 10 BPS
class BuyBelow10ShortAbove10(Moonshot):
"""
A basic test strategy that buys below 10 and shorts above 10.
"""
SLIPPAGE_CLASSES = TestSlippage
def prices_to_signals(self, prices):
long_signals = prices.loc["Close"] <= 10
short_signals = prices.loc["Close"] > 10
signals = long_signals.astype(int).where(long_signals, -short_signals.astype(int))
return signals
def mock_get_prices(*args, **kwargs):
dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"])
fields = ["Close","Volume"]
idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
prices = pd.DataFrame(
{
"FI12345": [
# Close
9,
11,
10.50,
9.99,
# Volume
5000,
16000,
8800,
9900
],
"FI23456": [
# Close
9.89,
11,
8.50,
10.50,
# Volume
15000,
14000,
28800,
17000
],
},
index=idx
)
return prices
def mock_download_master_file(f, *args, **kwargs):
master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
securities = pd.DataFrame(
{
"FI12345": [
"America/New_York",
"ABC",
"STK",
"USD",
None,
None
],
"FI23456": [
"America/New_York",
"DEF",
"STK",
"USD",
None,
None,
]
},
index=master_fields
)
securities.columns.name = "Sid"
securities.T.to_csv(f, index=True, header=True)
f.seek(0)
with patch("moonshot.strategies.base.get_prices", new=mock_get_prices):
with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
results = BuyBelow10ShortAbove10().backtest()
self.assertSetEqual(
set(results.index.get_level_values("Field")),
{'Commission',
'AbsExposure',
'Signal',
'Return',
'Slippage',
'NetExposure',
'TotalHoldings',
'Turnover',
'AbsWeight',
'Weight'}
)
results = results.round(7)
results = results.where(results.notnull(), "nan")
signals = results.loc["Signal"].reset_index()
signals.loc[:, "Date"] = signals.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
signals.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [1.0,
-1.0,
-1.0,
1.0],
"FI23456": [1.0,
-1.0,
1.0,
-1.0]}
)
weights = results.loc["Weight"].reset_index()
weights.loc[:, "Date"] = weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
weights.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.5,
-0.5,
-0.5,
0.5],
"FI23456": [0.5,
-0.5,
0.5,
-0.5]}
)
net_positions = results.loc["NetExposure"].reset_index()
net_positions.loc[:, "Date"] = net_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
net_positions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
-0.5,
-0.5],
"FI23456": ["nan",
0.5,
-0.5,
0.5]}
)
turnover = results.loc["Turnover"].reset_index()
turnover.loc[:, "Date"] = turnover.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
turnover.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
1.0,
0.0],
"FI23456": ["nan",
0.5,
1.0,
1.0]}
)
slippage = results.loc["Slippage"].reset_index()
slippage.loc[:, "Date"] = slippage.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
slippage.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0005,
0.001,
0.0],
"FI23456": [0.0,
0.0005,
0.001,
0.001]}
)
returns = results.loc["Return"]
returns = returns.reset_index()
returns.loc[:, "Date"] = returns.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
returns.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
-0.0005,
-0.0237273, # (10.50 - 11)/11 * 0.5 - 0.001
0.0242857], # (9.99 - 10.50)/10.50 * -0.5
"FI23456": [0.0,
-0.0005,
-0.1146364, # (8.50 - 11)/11 * 0.5 - 0.001
-0.1186471] # (10.50 - 8.50)/8.50 * -0.5 - 0.001
}
)
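    # Note added for clarity (not part of the original tests): with
    # FixedSlippage the per-period slippage equals turnover times
    # ONE_WAY_SLIPPAGE, e.g. on 2018-05-02 turnover is 0.5, so slippage is
    # 0.5 * 0.001 = 0.0005, which is then deducted from that period's return.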
def test_apply_slippage_continuous_intraday(self):
"""
Tests that the resulting DataFrames are correct when a single
slippage class is applied on a continuous intraday strategy.
"""
class TestSlippage(FixedSlippage):
ONE_WAY_SLIPPAGE = 0.001 # 10 BPS
class BuyBelow10ShortAbove10ContIntraday(Moonshot):
"""
A basic test strategy that buys below 10 and shorts above 10.
"""
SLIPPAGE_CLASSES = TestSlippage
def prices_to_signals(self, prices):
long_signals = prices.loc["Close"] <= 10
short_signals = prices.loc["Close"] > 10
signals = long_signals.astype(int).where(long_signals, -short_signals.astype(int))
return signals
def mock_get_prices(*args, **kwargs):
dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02"])
fields = ["Close"]
times = ["10:00:00", "11:00:00", "12:00:00"]
idx = pd.MultiIndex.from_product([fields, dt_idx, times], names=["Field", "Date", "Time"])
prices = pd.DataFrame(
{
"FI12345": [
# Close
9.6,
10.45,
10.12,
15.45,
8.67,
12.30,
],
"FI23456": [
# Close
10.56,
12.01,
10.50,
9.80,
13.40,
7.50,
],
},
index=idx
)
return prices
def mock_download_master_file(f, *args, **kwargs):
master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
securities = pd.DataFrame(
{
"FI12345": [
"America/New_York",
"ABC",
"STK",
"USD",
None,
None
],
"FI23456": [
"America/New_York",
"DEF",
"STK",
"USD",
None,
None,
]
},
index=master_fields
)
securities.columns.name = "Sid"
securities.T.to_csv(f, index=True, header=True)
f.seek(0)
with patch("moonshot.strategies.base.get_prices", new=mock_get_prices):
with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
results = BuyBelow10ShortAbove10ContIntraday().backtest()
self.assertSetEqual(
set(results.index.get_level_values("Field")),
{'Commission',
'AbsExposure',
'Signal',
'Return',
'Slippage',
'NetExposure',
'TotalHoldings',
'Turnover',
'AbsWeight',
'Weight'}
)
results = results.round(7)
results = results.where(results.notnull(), "nan")
signals = results.loc["Signal"].reset_index()
signals.loc[:, "Date"] = signals.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
signals.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-01T00:00:00',
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-02T00:00:00',
'2018-05-02T00:00:00'],
'Time': ['10:00:00',
'11:00:00',
'12:00:00',
'10:00:00',
'11:00:00',
'12:00:00'],
"FI12345": [1.0,
-1.0,
-1.0,
-1.0,
1.0,
-1.0],
"FI23456": [-1.0,
-1.0,
-1.0,
1.0,
-1.0,
1.0]}
)
weights = results.loc["Weight"].reset_index()
weights.loc[:, "Date"] = weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
weights.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-01T00:00:00',
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-02T00:00:00',
'2018-05-02T00:00:00'],
'Time': ['10:00:00',
'11:00:00',
'12:00:00',
'10:00:00',
'11:00:00',
'12:00:00'],
"FI12345": [0.5,
-0.5,
-0.5,
-0.5,
0.5,
-0.5],
"FI23456": [-0.5,
-0.5,
-0.5,
0.5,
-0.5,
0.5]}
)
net_positions = results.loc["NetExposure"].reset_index()
net_positions.loc[:, "Date"] = net_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
net_positions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-01T00:00:00',
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-02T00:00:00',
'2018-05-02T00:00:00'],
'Time': ['10:00:00',
'11:00:00',
'12:00:00',
'10:00:00',
'11:00:00',
'12:00:00'],
"FI12345": ['nan',
0.5,
-0.5,
-0.5,
-0.5,
0.5],
"FI23456": ['nan',
-0.5,
-0.5,
-0.5,
0.5,
-0.5]}
)
turnover = results.loc["Turnover"].reset_index()
turnover.loc[:, "Date"] = turnover.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
turnover.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-01T00:00:00',
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-02T00:00:00',
'2018-05-02T00:00:00'],
'Time': ['10:00:00',
'11:00:00',
'12:00:00',
'10:00:00',
'11:00:00',
'12:00:00'],
"FI12345": ['nan',
0.5,
1.0,
0.0,
0.0,
1.0],
"FI23456": ['nan',
0.5,
0.0,
0.0,
1.0,
1.0]}
)
slippage = results.loc["Slippage"].reset_index()
slippage.loc[:, "Date"] = slippage.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
slippage.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-01T00:00:00',
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-02T00:00:00',
'2018-05-02T00:00:00'],
'Time': ['10:00:00',
'11:00:00',
'12:00:00',
'10:00:00',
'11:00:00',
'12:00:00'],
"FI12345": [0.0,
0.0005,
0.001,
0.0,
0.0,
0.001],
"FI23456": [0.0,
0.0005,
0.0,
0.0,
0.001,
0.001]}
)
returns = results.loc["Return"].reset_index()
| |
import os
import re
import time
import glob
import hashlib
import requests
import cv2 as cv
from .forms import *
from .models import *
import urllib.request
from PIL import Image
from django.shortcuts import render
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
from django.views.decorators.cache import cache_page
from django.http import HttpResponseRedirect, JsonResponse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# Create your views here.
pagesize = 10
def index(request):
authorid = request.session.get("authorid", None)
username = request.session.get("username", None)
errmsg_sider = request.session.get('errmsg_sider', None)
errmsg_center = request.session.get('errmsg_center', None)
categorys = Category.objects.all().order_by('index')
threads = Thread.objects.all().order_by('-updatetime')
paginator = Paginator(threads, pagesize, )
threads = paginator.get_page(1)
if not authorid:
rend = render(request, 'index.html',
{'threads': threads, 'categorys': categorys, 'authorid': None, 'username': None,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': 'comprehensive'})
else:
rend = render(request, 'index.html',
{'threads': threads, 'categorys': categorys, 'username': username, 'authorid': authorid,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center,
'categoryactive': 'comprehensive'})
request.session['errmsg_sider'] = None
request.session['errmsg_center'] = None
return rend
def page(request, categorynick, pageindex):
authorid = request.session.get("authorid", None)
username = request.session.get("username", None)
errmsg_sider = request.session.get('errmsg_sider', None)
errmsg_center = request.session.get('errmsg_center', None)
if categorynick == 'comprehensive':
threads = Thread.objects.all().order_by('-updatetime')
else:
threads = Thread.objects.filter(category=Category.objects.get(nickname=categorynick)).order_by('-updatetime')
paginator = Paginator(threads, pagesize, )
try:
threads = paginator.get_page(pageindex)
except PageNotAnInteger:
threads = paginator.page(1)
except EmptyPage:
threads = paginator.page(paginator.num_pages)
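    # (added note) standard Django pagination fallback: a non-integer page
    # number falls back to page 1, an out-of-range page to the last page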
categorys = Category.objects.all().order_by('index')
if not authorid:
rend = render(request, 'index.html',
{'threads': threads, 'categorys': categorys, 'authorid': None, 'username': None,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': categorynick})
else:
rend = render(request, 'index.html',
{'threads': threads, 'categorys': categorys, 'username': username, 'authorid': authorid,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': categorynick})
request.session['errmsg_sider'] = None
request.session['errmsg_center'] = None
return rend
def detail(request, categorynick, threadid):
authorid = request.session.get("authorid", None)
username = request.session.get("username", None)
errmsg_sider = request.session.get('errmsg_sider', None)
errmsg_center = request.session.get('errmsg_center', None)
categorys = Category.objects.all().order_by('index')
nexttopicid = 0
thread = Thread.objects.get(id=threadid)
threads = Thread.objects.filter(category=Category.objects.get(nickname=categorynick)).all()
if threads:
for threadtemp in threads:
if threadtemp.id > threadid:
nexttopicid = threadtemp.id
break
comments = Comment.objects.filter(thread=thread).order_by('createtime')
paginator = Paginator(comments, pagesize, )
comments = paginator.page(1)
if not authorid:
rend = render(request, 'detail.html',
{'thread': thread, 'categorys': categorys, 'authorid': None, 'username': None,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': categorynick,
'comments': comments, 'nexttopicid': nexttopicid})
else:
rend = render(request, 'detail.html',
{'thread': thread, 'categorys': categorys, 'username': username, 'authorid': authorid,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': categorynick,
'comments': comments, 'nexttopicid': nexttopicid})
request.session['errmsg_sider'] = None
request.session['errmsg_center'] = None
return rend
def detailnext(request, categorynick, threadid):
authorid = request.session.get("authorid", None)
username = request.session.get("username", None)
errmsg_sider = request.session.get('errmsg_sider', None)
errmsg_center = request.session.get('errmsg_center', None)
categorys = Category.objects.all().order_by('index')
nexttopicid = 0
thread = Thread.objects.get(id=threadid)
threads = Thread.objects.filter(category=Category.objects.get(nickname=categorynick)).all()
if threads:
for threadtemp in threads:
if threadtemp.id > threadid:
nexttopicid = threadtemp.id
break
comments = Comment.objects.filter(thread=thread).order_by('createtime')
paginator = Paginator(comments, pagesize, )
comments = paginator.page(1)
if not authorid:
rend = render(request, 'detail.html',
{'thread': thread, 'categorys': categorys, 'authorid': None, 'username': None,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': categorynick,
'comments': comments, 'nexttopicid': nexttopicid})
else:
rend = render(request, 'detail.html',
{'thread': thread, 'categorys': categorys, 'username': username, 'authorid': authorid,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': categorynick,
'comments': comments, 'nexttopicid': nexttopicid})
request.session['errmsg_sider'] = None
request.session['errmsg_center'] = None
return rend
def detailpage(request, categorynick, threadid, pageindex):
authorid = request.session.get("authorid", None)
username = request.session.get("username", None)
errmsg_sider = request.session.get('errmsg_sider', None)
errmsg_center = request.session.get('errmsg_center', None)
categorys = Category.objects.all().order_by('index')
thread = Thread.objects.get(id=threadid)
comments = Comment.objects.filter(thread=thread).order_by('createtime')
paginator = Paginator(comments, pagesize, )
try:
comments = paginator.get_page(pageindex)
except PageNotAnInteger:
comments = paginator.page(1)
except EmptyPage:
comments = paginator.page(paginator.num_pages)
if not authorid:
rend = render(request, 'detail.html',
{'thread': thread, 'categorys': categorys, 'authorid': None, 'username': None,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': categorynick,
'comments': comments})
else:
rend = render(request, 'detail.html',
{'thread': thread, 'categorys': categorys, 'username': username, 'authorid': authorid,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': categorynick,
'comments': comments})
request.session['errmsg_sider'] = None
request.session['errmsg_center'] = None
return rend
def category(request, categorynick):
authorid = request.session.get("authorid", None)
username = request.session.get("username", None)
errmsg_sider = request.session.get('errmsg_sider', None)
errmsg_center = request.session.get('errmsg_center', None)
if categorynick == 'comprehensive':
threads = Thread.objects.all().order_by('-updatetime')
else:
threads = Thread.objects.filter(category=Category.objects.get(nickname=categorynick)).order_by('-updatetime')
paginator = Paginator(threads, pagesize, )
threads = paginator.page(1)
categorys = Category.objects.all().order_by('index')
if not authorid:
rend = render(request, 'index.html',
{'threads': threads, 'categorys': categorys, 'authorid': None, 'username': None,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': categorynick})
else:
rend = render(request, 'index.html',
{'threads': threads, 'categorys': categorys, 'username': username, 'authorid': authorid,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': categorynick})
request.session['errmsg_sider'] = None
request.session['errmsg_center'] = None
return rend
def login(request):
form = UserForm(request.POST)
if form.is_valid():
data = form.cleaned_data
username = data['username']
password = data['password']
ipaddr = request.META['REMOTE_ADDR']
flag = True
try:
Black.objects.get(ipaddr=ipaddr)
except Black.DoesNotExist:
flag = False
if flag:
return render(request, 'black.html')
if not re.search(u'^[_a-zA-Z0-9\u4e00-\u9fa5]+$', username):
request.session['errmsg_sider'] = '不可以包含非法字符!'
return HttpResponseRedirect('/')
try:
userobj = User.objects.get(username=str(username))
except User.DoesNotExist:
request.session['errmsg_sider'] = '用户名或密码错误!'
return HttpResponseRedirect('/')
if userobj.password == password:
request.session['authorid'] = userobj.id
request.session['username'] = userobj.username
else:
request.session['errmsg_sider'] = '用户名或密码错误!'
return HttpResponseRedirect('/')
def register(request):
form = UserForm(request.POST)
if form.is_valid():
data = form.cleaned_data
email = data['email'].strip()
username = data['username'].strip()
password = data['password'].strip()
ipaddr = request.META['REMOTE_ADDR']
flag = True
try:
Black.objects.get(ipaddr=ipaddr)
except Black.DoesNotExist:
flag = False
if flag:
return render(request, 'black.html')
        if len(username) < 4 or len(username) > 14:
            request.session['errmsg_sider'] = '用户名长度只能在4到14个字符之间!'
            return HttpResponseRedirect('/')
        if len(password) < 4 or len(password) > 14:
            request.session['errmsg_sider'] = '密码长度只能在4到14个字符之间!'
            return HttpResponseRedirect('/')
if not re.search(u'^[_a-zA-Z0-9\u4e00-\u9fa5]+$', username):
request.session['errmsg_sider'] = '不可以包含非法字符!'
return HttpResponseRedirect('/')
try:
validate_email(email)
except ValidationError:
request.session['errmsg_sider'] = '邮箱格式错误!'
return HttpResponseRedirect('/')
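        # (added note) Gravatar convention: the avatar URL embeds the MD5 hex
        # digest of the e-mail address; '?s=50' requests a 50x50 px image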
m = hashlib.md5()
m.update(email.encode("utf-8"))
avator = 'http://www.gravatar.com/avatar/' + m.hexdigest() + '?s=50'
flag = 0
try:
User.objects.get(username=str(username))
except User.DoesNotExist:
flag += 1
try:
User.objects.get(email=email)
except User.DoesNotExist:
flag += 1
if flag == 2:
userobj = User.objects.create(username=str(username), password=str(password), email=email, avator=avator,
ipaddr=ipaddr)
request.session['authorid'] = userobj.id
request.session['username'] = userobj.username
else:
request.session['errmsg_sider'] = '用户名或邮箱已存在!'
return HttpResponseRedirect('/')
request.session['errmsg_sider'] = '填写的数据有误!'
return HttpResponseRedirect('/')
def logout(request):
if not request.session.get('username', None):
request.session['errmsg_sider'] = '未登录!'
return HttpResponseRedirect('/')
request.session.flush()
return HttpResponseRedirect('/')
def search(request):
form = SearchForm(request.POST)
if form.is_valid():
data = form.cleaned_data
keyword = data['keyword']
if not re.search(u'^[_a-zA-Z0-9\u4e00-\u9fa5]+$', keyword):
request.session['errmsg_keyword'] = '不可以包含非法字符!'
return HttpResponseRedirect('/')
threads = Thread.objects.filter(title__icontains=keyword)
if len(threads) > 10:
threads = threads[:10]
authorid = request.session.get("authorid", None)
username = request.session.get("username", None)
categorys = Category.objects.all().order_by('index')
if not authorid:
rend = render(request, 'index.html',
{'threads': threads, 'categorys': categorys, 'authorid': None, 'username': None,
'categoryactive': 'comprehensive'})
else:
rend = render(request, 'index.html',
{'threads': threads, 'categorys': categorys, 'username': username, 'authorid': authorid,
'categoryactive': 'comprehensive'})
return rend
request.session['errmsg_keyword'] = '输入关键词错误!'
return HttpResponseRedirect('/')
def searchphoto(request):
form = SearchPhotoForm(request.POST, request.FILES)
if form.is_valid():
imgkey = form.cleaned_data['imgkey']
ext = os.path.splitext(imgkey.name)[1]
if ext != '.jpg' and ext != '.png':
return JsonResponse({'res': '图片格式不支持!'})
if imgkey.size > 6291456:
return JsonResponse({'res': '图片大小不能超过6兆!'})
flag = False
basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ext = os.path.splitext(imgkey.name)[1]
dir = '/static/chimg/'
filename = str(int(time.time()))
filepath = dir + filename + ext
f = open(basepath + filepath, 'wb')
for line in imgkey.chunks():
f.write(line)
f.close()
if imgkey.size > 1572864:
if ext == '.png':
png2jpg(basepath + filepath)
realpath = dir + filename + '.jpg'
for infile in glob.glob(basepath + realpath):
im = Image.open(infile)
size = im.size
im.thumbnail(size, Image.ANTIALIAS)
im.save(basepath + realpath, 'jpeg')
flag = True
if flag:
path = realpath
else:
path = filepath
filename, ext2 = os.path.splitext(path)
files = None
if ext2 == '.jpg':
files = {'file': (filename + ext2, open(basepath + path, 'rb'), 'image/jpeg', {})}
else:
files = {'file': (filename + ext2, open(basepath + path, 'rb'), 'image/png', {})}
res = requests.post(url='http://saucenao.com/search.php', files=files)
obj = re.search(r'"https://danbooru.donmai.us/(.*?)"', res.text)
if obj:
return JsonResponse({'res': obj.group(0).replace(r'"', '')})
else:
return JsonResponse({'res': '没有发现这张图片呢~'})
return JsonResponse({'res': '上传出现错误!'})
def publish(request):
username = request.session.get('username', None)
if not username:
request.session['errmsg_center'] = '未登录!'
return HttpResponseRedirect('/')
flag = True
try:
userobj = User.objects.get(username=str(username))
Black.objects.get(ipaddr=userobj.ipaddr)
except Black.DoesNotExist:
flag = False
if flag:
return render(request, 'black.html')
category = None
form = ThreadForm(request.POST, request.FILES)
if form.is_valid():
data = form.cleaned_data
body = data['body']
title = data['title']
authorid = data['authorid']
attachment = form.cleaned_data['attachment']
category = request.POST.get('category')
musicurl = data['musicurl']
if len(title) >= 50:
request.session['errmsg_center'] = '标题长度不能大于50个字符!'
return HttpResponseRedirect('/category/' + category)
if len(body) >= 10000:
request.session['errmsg_center'] = '内容长度不能大于10000个字符!'
return HttpResponseRedirect('/category/' + category)
if musicurl:
ext = os.path.splitext(musicurl)[1]
if ext != '.mp3':
request.session['errmsg_center'] = 'MP3链接格式错误!'
return HttpResponseRedirect('/category/' + category)
try:
with urllib.request.urlopen(musicurl) as file:
flag = False
except urllib.request.URLError:
flag = True
if flag:
request.session['errmsg_center'] = 'MP3链接可能失效了!'
return HttpResponseRedirect('/category/' + category)
if attachment:
ext = os.path.splitext(attachment.name)[1]
if ext != '.jpg' and ext != '.png':
request.session['errmsg_center'] = '图片格式不支持!'
return HttpResponseRedirect('/category/' + category)
if attachment.size > 6291456:
request.session['errmsg_center'] = '图片大小不能超过6兆!'
return HttpResponseRedirect('/category/' + category)
if not title:
title = '无标题'
path = None
if attachment:
basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ext = os.path.splitext(attachment.name)[1]
dir = '/static/img/'
| |
cutoff=atol,
cache=cache,
equalize_norms=equalize_norms,
**loop_simplify_opts)
elif meth == 'P':
tn.pair_simplify_(output_inds=ix_o, cutoff=atol,
cache=cache,
equalize_norms=equalize_norms,
**loop_simplify_opts)
else:
raise ValueError(f"'{meth}' is not a valid simplify type.")
old_nt, old_ni = nt, ni
nt, ni = tn.num_tensors, tn.num_indices
if equalize_norms:
if equalize_norms is True:
# this also redistributes the collected exponents
tn.equalize_norms_()
else:
tn.equalize_norms_(value=equalize_norms)
if progbar:
pbar.close()
return tn
full_simplify_ = functools.partialmethod(full_simplify, inplace=True)
def hyperinds_resolve(self, mode='dense', sorter=None, inplace=False):
"""Convert this into a regular tensor network, where all indices
appear at most twice, by inserting COPY tensor or tensor networks
for each hyper index.
Parameters
----------
mode : {'dense', 'mps', 'tree'}, optional
What type of COPY tensor(s) to insert.
sorter : None or callable, optional
If given, a function to sort the indices that a single hyperindex
            will be turned into. The function is called like
``tids.sort(key=sorter)``.
inplace : bool, optional
Whether to insert the COPY tensors inplace.
Returns
-------
TensorNetwork
"""
check_opt('mode', mode, ('dense', 'mps', 'tree'))
tn = self if inplace else self.copy()
copy_tensors = []
for ix, tids in tuple(tn.ind_map.items()):
if len(tids) > 2:
d = tn.ind_size(ix)
tids = list(tids)
if sorter is not None:
tids.sort(key=sorter)
# reindex tensors surrounding ind
copy_inds = []
for tid in tids:
new_ix = rand_uuid()
t = tn.tensor_map[tid]
t.reindex_({ix: new_ix})
copy_inds.append(new_ix)
# inject new tensor(s) to connect dangling inds
if mode == 'dense':
copy_tensors.append(
COPY_tensor(d=d, inds=copy_inds, dtype=t.dtype))
elif mode == 'mps':
copy_tensors.extend(
COPY_mps_tensors(d=d, inds=copy_inds, dtype=t.dtype))
elif mode == 'tree':
copy_tensors.extend(
COPY_tree_tensors(d=d, inds=copy_inds, dtype=t.dtype))
tn.add(copy_tensors)
return tn
hyperinds_resolve_ = functools.partialmethod(
hyperinds_resolve, inplace=True)
def max_bond(self):
"""Return the size of the largest bond in this network.
"""
return max(t.max_dim() for t in self)
@property
def shape(self):
"""Actual, i.e. exterior, shape of this TensorNetwork.
"""
return tuple(di[0] for di in self.outer_dims_inds())
@property
def dtype(self):
"""The dtype of this TensorNetwork, this is the minimal common type
of all the tensors data.
"""
return get_common_dtype(*self.arrays)
def iscomplex(self):
return iscomplex(self)
def astype(self, dtype, inplace=False):
"""Convert the type of all tensors in this network to ``dtype``.
"""
TN = self if inplace else self.copy()
for t in TN:
t.astype(dtype, inplace=True)
return TN
astype_ = functools.partialmethod(astype, inplace=True)
def __getstate__(self):
# This allows pickling, by removing all tensor owner weakrefs
d = self.__dict__.copy()
d['tensor_map'] = {
k: t.copy() for k, t in d['tensor_map'].items()
}
return d
def __setstate__(self, state):
        # This allows unpickling, by restoring the returned TN as owner
self.__dict__ = state.copy()
for tid, t in self.__dict__['tensor_map'].items():
t.add_owner(self, tid=tid)
def __str__(self):
return "{}([{}{}{}])".format(
self.__class__.__name__,
os.linesep,
"".join([" " + repr(t) + "," + os.linesep
for t in self.tensors[:-1]]),
" " + repr(self.tensors[-1]) + "," + os.linesep)
def __repr__(self):
rep = f"<{self.__class__.__name__}("
rep += f"tensors={self.num_tensors}"
rep += f", indices={self.num_indices}"
return rep + ")>"
draw = draw_tn
graph = draw_tn
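# Illustrative sketch (added, not part of the original module): resolving a
# hyper index shared by three tensors inserts an explicit COPY tensor so that
# every index appears at most twice. Assumes numpy is available.
def _example_hyperinds_resolve():
    import numpy as np
    ts = [Tensor(np.random.rand(2), inds=('h',)) for _ in range(3)]
    tn = TensorNetwork(ts)
    assert len(tn.ind_map['h']) == 3           # 'h' is a hyper index
    tn_r = tn.hyperinds_resolve(mode='dense')
    assert tn_r.num_tensors == 4               # three originals plus one COPY
    assert all(len(tids) <= 2 for tids in tn_r.ind_map.values())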
TNLO_HANDLED_FUNCTIONS = {}
class TNLinearOperator(spla.LinearOperator):
r"""Get a linear operator - something that replicates the matrix-vector
operation - for an arbitrary uncontracted TensorNetwork, e.g::
: --O--O--+ +-- : --+
: | | | : |
: --O--O--O-O-- : acting on --V
: | | : |
: --+ +---- : --+
left_inds^ ^right_inds
This can then be supplied to scipy's sparse linear algebra routines.
The ``left_inds`` / ``right_inds`` convention is that the linear operator
will have shape matching ``(*left_inds, *right_inds)``, so that the
``right_inds`` are those that will be contracted in a normal
matvec / matmat operation::
_matvec = --0--v , _rmatvec = v--0--
Parameters
----------
tns : sequence of Tensors or TensorNetwork
A representation of the hamiltonian
left_inds : sequence of str
The 'left' inds of the effective hamiltonian network.
right_inds : sequence of str
The 'right' inds of the effective hamiltonian network. These should be
ordered the same way as ``left_inds``.
ldims : tuple of int, or None
The dimensions corresponding to left_inds. Will figure out if None.
rdims : tuple of int, or None
The dimensions corresponding to right_inds. Will figure out if None.
optimize : str, optional
The path optimizer to use for the 'matrix-vector' contraction.
backend : str, optional
The array backend to use for the 'matrix-vector' contraction.
is_conj : bool, optional
Whether this object should represent the *adjoint* operator.
See Also
--------
TNLinearOperator1D
"""
def __init__(self, tns, left_inds, right_inds, ldims=None, rdims=None,
optimize=None, backend=None, is_conj=False):
if backend is None:
self.backend = get_tensor_linop_backend()
else:
self.backend = backend
self.optimize = optimize
if isinstance(tns, TensorNetwork):
self._tensors = tns.tensors
if ldims is None or rdims is None:
ix_sz = tns.ind_sizes()
ldims = tuple(ix_sz[i] for i in left_inds)
rdims = tuple(ix_sz[i] for i in right_inds)
else:
self._tensors = tuple(tns)
if ldims is None or rdims is None:
ix_sz = dict(concat((zip(t.inds, t.shape) for t in tns)))
ldims = tuple(ix_sz[i] for i in left_inds)
rdims = tuple(ix_sz[i] for i in right_inds)
self.left_inds, self.right_inds = left_inds, right_inds
self.ldims, ld = ldims, prod(ldims)
self.rdims, rd = rdims, prod(rdims)
self.tags = oset.union(*(t.tags for t in self._tensors))
self._kws = {'get': 'expression'}
        # if a recent opt_einsum is available, specify constant tensors
if hasattr(oe.backends, 'evaluate_constants'):
self._kws['constants'] = range(len(self._tensors))
self._ins = ()
else:
self._ins = tuple(t.data for t in self._tensors)
        # conjugate inputs/outputs rather than all tensors if necessary
self.is_conj = is_conj
self._conj_linop = None
self._adjoint_linop = None
self._transpose_linop = None
self._contractors = dict()
super().__init__(dtype=self._tensors[0].dtype, shape=(ld, rd))
def _matvec(self, vec):
in_data = reshape(vec, self.rdims)
if self.is_conj:
in_data = conj(in_data)
# cache the contractor
if 'matvec' not in self._contractors:
# generate an expression that acts directly on the data
iT = Tensor(in_data, inds=self.right_inds)
self._contractors['matvec'] = tensor_contract(
*self._tensors, iT, output_inds=self.left_inds,
optimize=self.optimize, **self._kws)
fn = self._contractors['matvec']
out_data = fn(*self._ins, in_data, backend=self.backend)
if self.is_conj:
out_data = conj(out_data)
return out_data.ravel()
def _matmat(self, mat):
d = mat.shape[-1]
in_data = reshape(mat, (*self.rdims, d))
if self.is_conj:
in_data = conj(in_data)
# for matmat need different contraction scheme for different d sizes
key = f"matmat_{d}"
# cache the contractor
if key not in self._contractors:
# generate an expression that acts directly on the data
iT = Tensor(in_data, inds=(*self.right_inds, '_mat_ix'))
o_ix = (*self.left_inds, '_mat_ix')
self._contractors[key] = tensor_contract(
*self._tensors, iT, output_inds=o_ix,
optimize=self.optimize, **self._kws)
fn = self._contractors[key]
out_data = fn(*self._ins, in_data, backend=self.backend)
if self.is_conj:
out_data = conj(out_data)
return reshape(out_data, (-1, d))
def trace(self):
if 'trace' not in self._contractors:
tn = TensorNetwork(self._tensors)
self._contractors['trace'] = tn.trace(
self.left_inds, self.right_inds, optimize=self.optimize)
return self._contractors['trace']
def copy(self, conj=False, transpose=False):
if transpose:
inds = self.right_inds, self.left_inds
dims = self.rdims, self.ldims
else:
inds = self.left_inds, self.right_inds
dims = self.ldims, self.rdims
if conj:
is_conj = not self.is_conj
else:
is_conj = self.is_conj
return TNLinearOperator(self._tensors, *inds, *dims, is_conj=is_conj,
optimize=self.optimize, backend=self.backend)
def conj(self):
if self._conj_linop is None:
self._conj_linop = self.copy(conj=True)
return self._conj_linop
def _transpose(self):
if self._transpose_linop is None:
self._transpose_linop = self.copy(transpose=True)
return self._transpose_linop
def _adjoint(self):
"""Hermitian conjugate of this TNLO.
"""
# cache the adjoint
if self._adjoint_linop is None:
self._adjoint_linop = self.copy(conj=True, transpose=True)
return self._adjoint_linop
def to_dense(self, *inds_seq, **contract_opts):
"""Convert this TNLinearOperator into a dense array, defaulting to
grouping the left and right indices respectively.
"""
contract_opts.setdefault('optimize', self.optimize)
if self.is_conj:
ts = (t.conj() for t in self._tensors)
else:
ts = self._tensors
if not inds_seq:
inds_seq = self.left_inds, self.right_inds
return tensor_contract(*ts, **contract_opts).to_dense(*inds_seq)
@functools.wraps(tensor_split)
def split(self, **split_opts):
return tensor_split(self, left_inds=self.left_inds,
right_inds=self.right_inds, **split_opts)
@property
def A(self):
return self.to_dense()
def astype(self, dtype):
"""Convert this ``TNLinearOperator`` to type ``dtype``.
"""
return TNLinearOperator(
(t.astype(dtype) for t in self._tensors),
left_inds=self.left_inds, right_inds=self.right_inds,
ldims=self.ldims, rdims=self.rdims,
optimize=self.optimize, backend=self.backend,
)
def __array_function__(self, func, types, args, kwargs):
if (
(func not in TNLO_HANDLED_FUNCTIONS) or
(not all(issubclass(t, self.__class__) for t in types))
):
return NotImplemented
return TNLO_HANDLED_FUNCTIONS[func](*args, **kwargs)
def tnlo_implements(np_function):
"""Register an __array_function__ implementation for TNLinearOperator
objects.
"""
def decorator(func):
TNLO_HANDLED_FUNCTIONS[np_function] = func
return func
return decorator
@tnlo_implements(np.trace)
def _tnlo_trace(x):
return x.trace()
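# --- Illustrative usage sketch (not part of the library) ---------------------
# Wrap two tensors as a TNLinearOperator and use it like any other scipy
# LinearOperator. The index names and shapes below are arbitrary assumptions
# made up for this sketch.
def _example_tnlo_matvec():
    a = Tensor(np.random.randn(2, 2, 3), inds=('k0', 'b0', 'x'))
    b = Tensor(np.random.randn(3, 2, 2), inds=('x', 'k1', 'b1'))
    A = TNLinearOperator((a, b), left_inds=('k0', 'k1'), right_inds=('b0', 'b1'))
    v = np.random.randn(A.shape[1])
    # matvec contracts the network against v reshaped over the right indices,
    # and agrees with the dense matrix returned by A.to_dense().
    out = A.matvec(v)
    # np.trace(A) should also work, dispatching to A.trace() via the
    # TNLO_HANDLED_FUNCTIONS registry above.
    return out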
class PTensor(Tensor):
"""A tensor whose data array is lazily generated from a set of parameters
and a function.
Parameters
----------
fn : callable
The function that generates the tensor data from ``params``.
params : sequence of numbers
The initial parameters supplied to the generating function like
``fn(params)``.
inds : optional
Should match the shape of ``fn(params)``,
see :class:`~quimb.tensor.tensor_core.Tensor`.
tags : optional
See :class:`~quimb.tensor.tensor_core.Tensor`.
left_inds : optional
See :class:`~quimb.tensor.tensor_core.Tensor`.
See Also
--------
PTensor
"""
def __init__(self, fn, | |
import numpy as np
'''global constants'''
# Maximum supported configurations
gtrack_NUM_POINTS_MAX = 1000
gtrack_NUM_TRACKS_MAX = 250
# Target ID definitions
gtrack_ID_POINT_TOO_WEAK = 253
gtrack_ID_POINT_BEHIND_THE_WALL = 254
gtrack_ID_POINT_NOT_ASSOCIATED = 255
# Boundary boxes
gtrack_MAX_BOUNDARY_BOXES = 2
gtrack_MAX_STATIC_BOXES = 2
MAXNUMBERMEASUREMENTS = 800
MAXNUMBERTRACKERS = 20
zero3x3 = np.array([0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=np.float32)
pinit6x6 = np.array([0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.,
0., 0., 0.5, 0., 0., 0.,
0., 0., 0., 0.5, 0., 0.,
0., 0., 0., 0., 1., 0.,
0., 0., 0., 0., 0., 1.], dtype=np.float32)
VERBOSE_ERROR_INFO = 0x00000001 # /*!< Report Errors */
VERBOSE_WARNING_INFO = 0x00000002 # /*!< Report Warnings */
VERBOSE_DEBUG_INFO = 0x00000004 # /*!< Report Debugging information */
VERBOSE_MATRIX_INFO = 0x00000008 # /*!< Report Matrix math computations */
VERBOSE_UNROLL_INFO = 0x00000010 # /*!< Report velocity unrolling data */
VERBOSE_STATE_INFO = 0x00000020 # /*!< Report state transitions */
VERBOSE_ASSOSIATION_INFO = 0x00000040 # /*!< Report association data */
VERBOSE_GATEXY_INFO = 0x00000080 # /*!< Report gating in XY space */
VERBOSE_GATERA_INFO = 0x00000100 # /*!< Report gating in range/angle space */
VERBOSE_GATEG1_INFO = 0x00000200 # /*!< Report unitary gating */
'''below is the gtrack alg configuration params'''
# GTRACK Box Structure
class gtrack_boundaryBox():
def __init__(self, left, right, bottom, top):
self.left = left
self.right = right
self.bottom = bottom
self.top = top
# GTRACK Scene Parameters
class gtrack_sceneryParams():
def __init__(self, numBoundaryBoxes=0, numStaticBoxes=0, bound_box=[(0, 0, 0, 0), (0, 0, 0, 0)],
static_box=[(0, 0, 0, 0), (0, 0, 0, 0)]):
self.numBoundaryBoxes = numBoundaryBoxes
self.boundaryBox = [gtrack_boundaryBox(*bound) for bound, _ in zip(bound_box, range(gtrack_MAX_BOUNDARY_BOXES))]
self.numStaticBoxes = numStaticBoxes
self.staticBox = [gtrack_boundaryBox(*bound) for bound, _ in zip(static_box, range(gtrack_MAX_STATIC_BOXES))]
# GTRACK Gate Limits
class gtrack_gateLimits():
def __init__(self, length, width, vel):
self.length = length
self.width = width
self.vel = vel
# GTRACK Gating Function Parameters
class gtrack_gatingParams():
def __init__(self, volume=2, params=[(3, 2, 0)]):
self.volume = volume
self.limits = [gtrack_gateLimits(i, j, k) for (i, j, k) in params]
# GTRACK Tracking Management Function Parameters
class gtrack_stateParams():
def __init__(self, det2actThre=3, det2freeThre=3, active2freeThre=5, static2freeThre=5, exit2freeThre=5):
self.det2actThre = det2actThre
self.det2freeThre = det2freeThre
self.active2freeThre = active2freeThre
self.static2freeThre = static2freeThre
self.exit2freeThre = exit2freeThre
# GTRACK Update Function Parameters
class gtrack_varParams():
def __init__(self, lengthStd=np.float32(1 / 3.46), widthStd=np.float32(1 / 3.46), dopplerStd=2.):
self.lengthStd = lengthStd
self.widthStd = widthStd
self.dopplerStd = dopplerStd
# GTRACK Allocation Function Parameters
class gtrack_allocationParams():
def __init__(self, snrThre=100., velocityThre=0.5, pointsThre=5, maxDistanceThre=1., maxVelThre=2.):
self.snrThre = snrThre
self.velocityThre = velocityThre
self.pointsThre = pointsThre
self.maxDistanceThre = maxDistanceThre
self.maxVelThre = maxVelThre
# GTRACK Unrolling Parameters
class gtrack_unrollingParams():
def __init__(self, alpha=0.5, confidence=0.1):
self.alpha = alpha
self.confidence = confidence
# GTRACK State Vector
class gtrack_STATE_VECTOR_TYPE():
def __init__(self):
self.gtrack_STATE_VECTORS_2D = 0
self.gtrack_STATE_VECTORS_2DA = 1
self.gtrack_STATE_VECTORS_3D = 2
self.gtrack_STATE_VECTORS_3DA = 3
# GTRACK Verbose Level
class gtrack_VERBOSE_TYPE():
def __init__(self):
self.gtrack_VERBOSE_NONE = 0
self.gtrack_VERBOSE_ERROR = 1
self.gtrack_VERBOSE_WARNING = 2
self.gtrack_VERBOSE_DEBUG = 3
self.gtrack_VERBOSE_MATRIX = 4
self.gtrack_VERBOSE_MAXIMUM = 5
# GTRACK Advanced Parameters
class gtrack_advancedParameters():
def __init__(self):
self.gatingParams = gtrack_gatingParams()
self.allocationParams = gtrack_allocationParams()
self.unrollingParams = gtrack_unrollingParams()
self.stateParams = gtrack_stateParams()
self.variationParams = gtrack_varParams()
self.sceneryParams = gtrack_sceneryParams()
# GTRACK Configuration
class gtrack_moduleConfig():
def __init__(self):
self.stateVectorType = gtrack_STATE_VECTOR_TYPE().gtrack_STATE_VECTORS_2DA
self.verbose = gtrack_VERBOSE_TYPE().gtrack_VERBOSE_NONE
self.maxNumPoints = MAXNUMBERMEASUREMENTS
self.maxNumTracks = MAXNUMBERTRACKERS
self.initialRadialVelocity = 0
self.maxRadialVelocity = 20
self.radialVelocityResolution = 0
self.maxAcceleration = 12
self.deltaT = 0.4
self.advParams = gtrack_advancedParameters()
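# --- Illustrative configuration sketch (not part of the module) ---------------
# Shows how the configuration classes above compose; every numeric value here is
# a made-up placeholder, and deltaT is assumed to be the frame period in seconds.
def _example_gtrack_config():
    config = gtrack_moduleConfig()
    config.deltaT = 0.05
    config.maxRadialVelocity = 10
    config.advParams.allocationParams = gtrack_allocationParams(
        snrThre=150., velocityThre=0.1, pointsThre=3, maxDistanceThre=1., maxVelThre=2.)
    config.advParams.sceneryParams = gtrack_sceneryParams(
        numBoundaryBoxes=1, bound_box=[(-4., 4., 0.5, 7.5), (0., 0., 0., 0.)])
    return config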
# GTRACK Measurement point
class gtrack_measurementPoint():
def __init__(self):
self.range = 0.
self.angle = 0.
self.doppler = 0.
self.snr = 0.
# GTRACK Measurement variances
class gtrack_measurementVariance():
def __init__(self):
self.rangeVar = 0
self.angleVar = 0
self.dopplerVar = 0
# GTRACK target descriptor
class gtrack_targetDesc():
def __init__(self):
self.uid = 0
self.tid = 0
self.S = np.zeros(shape=(6,), dtype=np.float32)
self.EC = np.zeros(shape=(9,), dtype=np.float32)
self.G = 0
# /**
# * @b Description
# * @n
# * This function is used to force matrix symmetry by averaging off-diagonal elements
# * Matrices are squared, real, single precision floating point.
# * Matrices are in row-major order
# *
# * @param[in] m (m=rows=cols)
# * Number of rows and cols
# * @param[in] A
# * Matrix A
# * @param[out] B
# * Matrix B
# *
# * \ingroup gtrack_ALG_MATH_FUNCTION
# *
# * @retval
# * None
# */
'''
def gtrack_matrixMakeSymmetrical(m, A, B):
A = A.reshape(m, m)
B = np.squeeze((1/2 * np.add(A, A.T)).reshape(1, -1))
A = np.squeeze(A.reshape(1, -1))
'''
def gtrack_matrixMakeSymmetrical(m, A):
A = A.reshape(m, m)
B = np.squeeze((1 / 2 * np.add(A, A.T)).reshape(1, -1))
return B
# i = j = 0
# B = np.zeros_like(A, dtype = np.float32)
# i = j = 0
# for i in range(0, m - 1):
# B[i*m + i] = A[i*m + i]
# for j in range(i+1, m):
# B[i*m+j] = B[j*m+i] = 0.5 * (A[i*m+j] + A[j*m+i])
# B[(i+1)*m+(i+1)] = A[(i+1)*m+(i+1)]
# return B
# /**
# * @b Description
# * @n
# * This function is used to multiply two matrices.
# * Matrices are all real, single precision floating point.
# * Matrices are in row-major order
# *
# * @param[in] rows
# * Outer dimension, number of rows
# * @param[in] m
# * Inner dimension
# * @param[in] cols
# * Outer dimension, number of cols
# * @param[in] A
# * Matrix A
# * @param[in] B
# * Matrix B
# * @param[out] C
# * Matrix C(rows,cols) = A(rows,m) X B(m,cols)
# *
# * \ingroup gtrack_ALG_MATH_FUNCTION
# *
# * @retval
# * None
# */
'''
def gtrack_matrixMultiply(rows, m, cols, A, B, C):
A = A.reshape(rows, m)
B = B.reshape(m, cols)
C = np.squeeze(np.dot(A, B).reshape(1, -1))
A = np.squeeze(A.reshape(1, -1))
B = np.squeeze(B.reshape(1, -1))
'''
def gtrack_matrixMultiply(rows, m, cols, A, B):
A = A.reshape(rows, m)
B = B.reshape(m, cols)
C = np.squeeze(np.dot(A, B).reshape(1, -1))
# A = np.squeeze(A.reshape(1, -1))
# B = np.squeeze(B.reshape(1, -1))
return np.float32(C)
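# --- Quick sanity sketch (not part of the module) -----------------------------
# Demonstrates the flattened row-major convention the matrix helpers expect:
# a 2x3 by 3x2 product is passed in and returned as 1-D float32 arrays.
def _example_matrix_multiply():
    A = np.arange(6, dtype=np.float32)        # 2x3 matrix in row-major order
    B = np.arange(6, dtype=np.float32)        # 3x2 matrix in row-major order
    C = gtrack_matrixMultiply(2, 3, 2, A, B)  # flattened 2x2 result
    return C.reshape(2, 2)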
# /**
# * @b Description
# * @n
# * This function is used to multiply two matrices. Second Matrix is getting transposed first
# * Matrices are all real, single precision floating point.
# * Matrices are in row-major order
# *
# * @param[in] rows
# * Outer dimension, number of rows
# * @param[in] m
# * Inner dimension
# * @param[in] cols
# * Outer dimension, number of cols
# * @param[in] A
# * Matrix A
# * @param[in] B
# * Matrix B
# * @param[out] C
# * Matrix C(rows,cols) = A(rows,m) X B(cols,m)T
# *
# * \ingroup gtrack_ALG_MATH_FUNCTION
# *
# * @retval
# * None
# */
'''
def gtrack_matrixTransposeMultiply(rows, m, cols, A, B, C):
A = A.reshape(rows, m)
B = B.reshape(cols, m)
C = np.squeeze(np.dot(A, B.T).reshape(1, -1))
A = np.squeeze(A.reshape(1, -1))
B = np.squeeze(B.reshape(1, -1))
'''
def gtrack_matrixTransposeMultiply(rows, m, cols, A, B):
A = A.reshape(rows, m)
B = B.reshape(cols, m)
C = np.squeeze(np.dot(A, B.T).reshape(1, -1))
# A = np.squeeze(A.reshape(1, -1))
# B = np.squeeze(B.reshape(1, -1))
return np.float32(C)
# /**
# * @b Description
# * @n
# * This function is used to multiply two matrices.
# * First matrix P is of size 6x6, the second one is of the size 3x6.
# * The second matrix is being transposed first.
# * Matrices are all real, single precision floating point.
# * Matrices are in row-major order
# *
# * @param[in] P
# * Matrix P
# * @param[in] J
# * Matrix J
# * @param[out] PJ
# * Matrix PJ = P(6,6) X J(3,6)T
# *
# * \ingroup gtrack_ALG_MATH_FUNCTION
# *
# * @retval
# * None
# */
'''
def gtrack_matrixComputePJT(P, J, PJ):
P = P.reshape(6, 6)
J = J.reshape(3, 6)
PJ = np.squeeze(np.dot(P, J.T).reshape(1, -1))
P = np.squeeze(P.reshape(1, -1))
J = np.squeeze(J.reshape(1, -1))
'''
def gtrack_matrixComputePJT(P, J):
P = P.reshape(6, 6)
J = J.reshape(3, 6)
PJ = np.squeeze(np.dot(P, J.T).reshape(1, -1))
# P = np.squeeze(P.reshape(1, -1))
# J = np.squeeze(J.reshape(1, -1))
return np.float32(PJ)
# /**
# * @b Description
# * @n
# *    This function is used to multiply a matrix by a scalar.
# * Matrices are all real, single precision floating point.
# * Matrices are in row-major order
# *
# * @param[in] rows
# * Number of rows
# * @param[in] cols
# * Number of cols
# * @param[in] A
# * Matrix A
# * @param[in] C
# *    Scalar C
# * @param[out] B
# * Matrix B(rows,cols) = A(rows,cols) X C
# *
# * \ingroup gtrack_ALG_MATH_FUNCTION
# *
# * @retval
# * None
# */
'''
def gtrack_matrixScalerMultiply(rows, cols, A, C, B):
A = A.reshape(rows, cols)
B = np.squeeze(np.dot(C, A).reshape(1, -1))
A = np.squeeze(A.reshape(1, -1))
'''
def gtrack_matrixScalerMultiply(rows, cols, A, C):
A = A.reshape(rows, cols)
B = np.squeeze(np.dot(C, A).reshape(1, -1))
# A = np.squeeze(A.reshape(1, -1))
return np.float32(B)
# /**
# * @b Description
# * @n
# * This function is used to add two matrices.
# * Matrices are all real, single precision floating point.
# * Matrices are in row-major order
# *
# * @param[in] rows
# * Number of rows
# | |
# tools/json_schema_compiler/cc_generator.py
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from code import Code
from model import PropertyType
import any_helper
import cpp_util
import model
import schema_util
import sys
import util_cc_helper
class CCGenerator(object):
"""A .cc generator for a namespace.
"""
def __init__(self, namespace, cpp_type_generator):
self._cpp_type_generator = cpp_type_generator
self._namespace = namespace
self._target_namespace = (
self._cpp_type_generator.GetCppNamespaceName(self._namespace))
self._util_cc_helper = (
util_cc_helper.UtilCCHelper(self._cpp_type_generator))
self._any_helper = any_helper.AnyHelper()
def Generate(self):
"""Generates a Code object with the .cc for a single namespace.
"""
c = Code()
(c.Append(cpp_util.CHROMIUM_LICENSE)
.Append()
.Append(cpp_util.GENERATED_FILE_MESSAGE % self._namespace.source_file)
.Append()
.Append(self._util_cc_helper.GetIncludePath())
.Append('#include "%s/%s.h"' %
(self._namespace.source_file_dir, self._namespace.unix_name))
)
includes = self._cpp_type_generator.GenerateIncludes()
if not includes.IsEmpty():
(c.Concat(includes)
.Append()
)
(c.Append()
.Append('using base::Value;')
.Append('using base::DictionaryValue;')
.Append('using base::ListValue;')
.Append('using base::BinaryValue;')
.Append('using %s;' % any_helper.ANY_CLASS)
.Append()
.Concat(self._cpp_type_generator.GetRootNamespaceStart())
.Concat(self._cpp_type_generator.GetNamespaceStart())
.Append()
)
if self._namespace.properties:
(c.Append('//')
.Append('// Properties')
.Append('//')
.Append()
)
for property in self._namespace.properties.values():
property_code = self._cpp_type_generator.GeneratePropertyValues(
property,
'const %(type)s %(name)s = %(value)s;',
nodoc=True)
if property_code:
c.Concat(property_code).Append()
if self._namespace.types:
(c.Append('//')
.Append('// Types')
.Append('//')
.Append()
)
for type_ in self._namespace.types.values():
(c.Concat(self._GenerateType(
schema_util.StripSchemaNamespace(type_.name), type_)).Append()
)
if self._namespace.functions:
(c.Append('//')
.Append('// Functions')
.Append('//')
.Append()
)
for function in self._namespace.functions.values():
(c.Concat(self._GenerateFunction(
cpp_util.Classname(function.name), function))
.Append()
)
(c.Concat(self._cpp_type_generator.GetNamespaceEnd())
.Concat(self._cpp_type_generator.GetRootNamespaceEnd())
.Append()
)
# TODO(calamity): Events
return c
def _GenerateType(self, cpp_namespace, type_):
"""Generates the function definitions for a type.
"""
classname = cpp_util.Classname(schema_util.StripSchemaNamespace(type_.name))
c = Code()
if type_.functions:
# Types with functions are not instantiable in C++ because they are
# handled in pure Javascript and hence have no properties or
# additionalProperties.
if type_.properties:
raise NotImplementedError('\n'.join(model.GetModelHierarchy(type_)) +
'\nCannot generate both functions and properties on a type')
for function in type_.functions.values():
(c.Concat(
self._GenerateFunction(
cpp_namespace + '::' + cpp_util.Classname(function.name),
function))
.Append()
)
elif type_.type_ == PropertyType.OBJECT:
(c.Concat(self._GeneratePropertyFunctions(
cpp_namespace, type_.properties.values()))
.Sblock('%(namespace)s::%(classname)s()')
.Concat(self._GenerateInitializersAndBody(type_))
.Eblock('%(namespace)s::~%(classname)s() {}')
.Append()
)
if type_.from_json:
(c.Concat(self._GenerateTypePopulate(cpp_namespace, type_))
.Append()
)
if type_.from_client:
(c.Concat(self._GenerateTypeToValue(cpp_namespace, type_))
.Append()
)
c.Substitute({'classname': classname, 'namespace': cpp_namespace})
return c
def _GenerateInitializersAndBody(self, type_):
items = []
for prop in type_.properties.values():
if prop.optional:
continue
t = prop.type_
if t == PropertyType.INTEGER:
items.append('%s(0)' % prop.unix_name)
elif t == PropertyType.DOUBLE:
items.append('%s(0.0)' % prop.unix_name)
elif t == PropertyType.BOOLEAN:
items.append('%s(false)' % prop.unix_name)
elif t == PropertyType.BINARY:
items.append('%s(NULL)' % prop.unix_name)
elif (t == PropertyType.ADDITIONAL_PROPERTIES or
t == PropertyType.ANY or
t == PropertyType.ARRAY or
t == PropertyType.CHOICES or
t == PropertyType.ENUM or
t == PropertyType.OBJECT or
t == PropertyType.REF or
t == PropertyType.STRING):
# TODO(miket): It would be nice to initialize CHOICES and ENUM, but we
# don't presently have the semantics to indicate which one of a set
# should be the default.
continue
else:
sys.exit("Unhandled PropertyType: %s" % t)
if items:
s = ': %s' % (', '.join(items))
else:
s = ''
s = s + ' {}'
return Code().Append(s)
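# For a type whose required properties were, say, {count: INTEGER,
# enabled: BOOLEAN}, the branches above would emit an initializer list along
# the lines of ": count(0), enabled(false) {}" (illustrative names only).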
def _GenerateTypePopulate(self, cpp_namespace, type_):
"""Generates the function for populating a type given a pointer to it.
E.g for type "Foo", generates Foo::Populate()
"""
classname = cpp_util.Classname(schema_util.StripSchemaNamespace(type_.name))
c = Code()
(c.Append('// static')
.Sblock('bool %(namespace)s::Populate'
'(const Value& value, %(name)s* out) {')
.Append('if (!value.IsType(Value::TYPE_DICTIONARY))')
.Append(' return false;')
)
if type_.properties:
(c.Append('const DictionaryValue* dict = '
'static_cast<const DictionaryValue*>(&value);')
.Append()
)
for prop in type_.properties.values():
c.Concat(self._InitializePropertyToDefault(prop, 'out'))
for prop in type_.properties.values():
if prop.type_ == PropertyType.ADDITIONAL_PROPERTIES:
c.Append('out->additional_properties.MergeDictionary(dict);')
# remove all keys that are actual properties
for cur_prop in type_.properties.values():
if prop != cur_prop:
c.Append('out->additional_properties'
'.RemoveWithoutPathExpansion("%s", NULL);' % cur_prop.name)
c.Append()
else:
c.Concat(self._GenerateTypePopulateProperty(prop, 'dict', 'out'))
(c.Append('return true;')
.Eblock('}')
)
c.Substitute({'namespace': cpp_namespace, 'name': classname})
return c
def _GenerateTypePopulateProperty(self, prop, src, dst):
"""Generate the code to populate a single property in a type.
src: DictionaryValue*
dst: Type*
"""
c = Code()
value_var = prop.unix_name + '_value'
c.Append('Value* %(value_var)s = NULL;')
if prop.optional:
(c.Sblock(
'if (%(src)s->GetWithoutPathExpansion("%(key)s", &%(value_var)s)) {'
)
.Concat(self._GeneratePopulatePropertyFromValue(
prop, value_var, dst, 'false'))
.Eblock('}')
)
else:
(c.Append(
'if (!%(src)s->GetWithoutPathExpansion("%(key)s", &%(value_var)s))')
.Append(' return false;')
.Concat(self._GeneratePopulatePropertyFromValue(
prop, value_var, dst, 'false'))
)
c.Append()
c.Substitute({'value_var': value_var, 'key': prop.name, 'src': src})
return c
def _GenerateTypeToValue(self, cpp_namespace, type_):
"""Generates a function that serializes the type into a |DictionaryValue|.
E.g. for type "Foo" generates Foo::ToValue()
"""
c = Code()
(c.Sblock('scoped_ptr<DictionaryValue> %s::ToValue() const {' %
cpp_namespace)
.Append('scoped_ptr<DictionaryValue> value(new DictionaryValue());')
.Append()
)
for prop in type_.properties.values():
if prop.type_ == PropertyType.ADDITIONAL_PROPERTIES:
c.Append('value->MergeDictionary(&%s);' % prop.unix_name)
else:
if prop.optional:
if prop.type_ == PropertyType.ENUM:
c.Sblock('if (%s != %s)' %
(prop.unix_name,
self._cpp_type_generator.GetEnumNoneValue(prop)))
else:
c.Sblock('if (%s.get())' % prop.unix_name)
c.Append('value->SetWithoutPathExpansion("%s", %s);' % (
prop.name,
self._CreateValueFromProperty(prop, 'this->' + prop.unix_name)))
if prop.optional:
c.Eblock()
(c.Append()
.Append('return value.Pass();')
.Eblock('}')
)
return c
def _GenerateFunction(self, cpp_namespace, function):
"""Generates the definitions for function structs.
"""
c = Code()
# Params::Populate function
if function.params:
c.Concat(self._GeneratePropertyFunctions(cpp_namespace + '::Params',
function.params))
(c.Append('%(cpp_namespace)s::Params::Params() {}')
.Append('%(cpp_namespace)s::Params::~Params() {}')
.Append()
.Concat(self._GenerateFunctionParamsCreate(cpp_namespace, function))
.Append()
)
# Result::Create function
if function.callback:
c.Concat(self._GenerateFunctionResultCreate(cpp_namespace, function))
c.Substitute({'cpp_namespace': cpp_namespace})
return c
def _GenerateCreateEnumValue(self, cpp_namespace, prop):
"""Generates CreateEnumValue() that returns the |StringValue|
representation of an enum.
"""
c = Code()
c.Append('// static')
c.Sblock('scoped_ptr<Value> %(cpp_namespace)s::CreateEnumValue(%(arg)s) {')
c.Sblock('switch (%s) {' % prop.unix_name)
if prop.optional:
(c.Append('case %s: {' % self._cpp_type_generator.GetEnumNoneValue(prop))
.Append(' return scoped_ptr<Value>();')
.Append('}')
)
for enum_value in prop.enum_values:
(c.Append('case %s: {' %
self._cpp_type_generator.GetEnumValue(prop, enum_value))
.Append(' return scoped_ptr<Value>(Value::CreateStringValue("%s"));' %
enum_value)
.Append('}')
)
(c.Append('default: {')
.Append(' return scoped_ptr<Value>();')
.Append('}')
)
c.Eblock('}')
c.Eblock('}')
c.Substitute({
'cpp_namespace': cpp_namespace,
'arg': cpp_util.GetParameterDeclaration(
prop, self._cpp_type_generator.GetType(prop))
})
return c
def _CreateValueFromProperty(self, prop, var):
"""Creates a Value given a property. Generated code passes ownership
to caller.
var: variable or variable*
E.g for std::string, generate Value::CreateStringValue(var)
"""
if prop.type_ == PropertyType.CHOICES:
# CHOICES conversion not implemented. If needed, write something to
# generate a function that returns a scoped_ptr<Value> and put it in
# _GeneratePropertyFunctions, then use it here. Look at CreateEnumValue()
# for reference.
raise NotImplementedError(
'Conversion of CHOICES to Value not implemented')
if self._IsObjectOrObjectRef(prop):
if prop.optional:
return '%s->ToValue().release()' % var
else:
return '%s.ToValue().release()' % var
elif prop.type_ == PropertyType.ANY:
return '%s.DeepCopy()' % self._any_helper.GetValue(prop, var)
elif prop.type_ == PropertyType.ADDITIONAL_PROPERTIES:
return '%s.DeepCopy()' % var
elif prop.type_ == PropertyType.ENUM:
return 'CreateEnumValue(%s).release()' % var
elif self._IsArrayOrArrayRef(prop):
return '%s.release()' % self._util_cc_helper.CreateValueFromArray(
self._cpp_type_generator.GetReferencedProperty(prop), var,
prop.optional)
elif self._IsFundamentalOrFundamentalRef(prop):
if prop.optional:
var = '*' + var
prop = self._cpp_type_generator.GetReferencedProperty(prop)
return {
PropertyType.STRING: 'Value::CreateStringValue(%s)',
PropertyType.BOOLEAN: 'Value::CreateBooleanValue(%s)',
PropertyType.INTEGER: 'Value::CreateIntegerValue(%s)',
PropertyType.DOUBLE: 'Value::CreateDoubleValue(%s)',
}[prop.type_] % var
else:
raise NotImplementedError('Conversion of %s to Value not '
'implemented' % repr(prop.type_))
def _GenerateParamsCheck(self, function, var):
"""Generates a check for the correct number of arguments when creating
Params.
"""
c = Code()
num_required = 0
for param in function.params:
if not param.optional:
num_required += 1
if num_required == len(function.params):
c.Append('if (%(var)s.GetSize() != %(total)d)')
elif not num_required:
c.Append('if (%(var)s.GetSize() > %(total)d)')
else:
c.Append('if (%(var)s.GetSize() < %(required)d'
' || %(var)s.GetSize() > %(total)d)')
c.Append(' return scoped_ptr<Params>();')
c.Substitute({
'var': var,
'required': num_required,
'total': len(function.params),
})
return c
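# For a function with one required and one optional parameter, the template
# above is substituted into roughly:
#   if (args.GetSize() < 1 || args.GetSize() > 2)
#     return scoped_ptr<Params>();
# (illustrative; the exact layout comes from Code's formatting).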
def _GenerateFunctionParamsCreate(self, cpp_namespace, function):
"""Generate function to create an instance of Params. The generated
function takes a ListValue of arguments.
E.g for function "Bar", generate Bar::Params::Create()
"""
c = Code()
(c.Append('// static')
.Sblock('scoped_ptr<%(cpp_namespace)s::Params> '
'%(cpp_namespace)s::Params::Create(const ListValue& args) {')
.Concat(self._GenerateParamsCheck(function, 'args'))
.Append('scoped_ptr<Params> params(new Params());')
)
c.Substitute({'cpp_namespace': cpp_namespace})
for param in function.params:
c.Concat(self._InitializePropertyToDefault(param, 'params'))
for i, param in enumerate(function.params):
# Any failure will cause this function to return. If any argument is
# incorrect or missing, those following it are not processed. Note that
# for optional arguments, we allow missing arguments and proceed because
# there may be other arguments following it.
failure_value = 'scoped_ptr<Params>()'
c.Append()
value_var = param.unix_name + '_value'
(c.Append('Value* %(value_var)s = NULL;')
.Append('if (args.Get(%(i)s, &%(value_var)s) && '
'!%(value_var)s->IsType(Value::TYPE_NULL))')
.Sblock('{')
.Concat(self._GeneratePopulatePropertyFromValue(
param, value_var, 'params', failure_value))
.Eblock('}')
)
if not param.optional:
(c.Sblock('else {')
.Append('return %s;' % failure_value)
.Eblock('}')
)
c.Substitute({'value_var': value_var, 'i': i})
(c.Append()
.Append('return params.Pass();')
.Eblock('}')
.Append()
)
return c
def _GeneratePopulatePropertyFromValue(
self, prop, value_var, dst, failure_value, check_type=True):
"""Generates code to populate a model.Property given a Value*. The
existence of data inside the Value* is assumed so checks for existence
should be performed before the code this generates.
prop: the property the code is populating.
value_var: a Value* that should represent |prop|.
dst: the object with |prop| as a member.
failure_value: the value to return if |prop| cannot be extracted from
|value_var|
check_type: if true, will check if |value_var| is the correct Value::Type
"""
c = Code()
| |
import logging
import os
import sys
import traceback
import threading
import asyncio
import time
import numpy as np
import torch
import ray
from collections import namedtuple, defaultdict
from concurrent.futures import ThreadPoolExecutor
from readerwriterlock import rwlock
from torch._C import dtype
from malib import settings
from malib.utils.errors import OversampleError
from malib.utils.general import BufferDict, iter_many_dicts_recursively
from malib.utils.logger import Log, Logger
from malib.utils.typing import (
BufferDescription,
PolicyID,
AgentID,
Dict,
List,
Any,
Union,
Tuple,
Status,
)
import pickle as pkl
def _gen_table_name(env_id, main_id, pid):
res = f"{env_id}"
if main_id:
if isinstance(main_id, List):
main_id = "_".join(sorted(main_id))
res += f"_{main_id}"
if pid:
if isinstance(pid, List):
pid = "_".join(sorted(pid))
res += f"_{pid}"
return res
DATASET_TABLE_NAME_GEN = _gen_table_name
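# e.g. _gen_table_name("env", ["agent_1", "agent_0"], "ppo_0")
#      -> "env_agent_0_agent_1_ppo_0" (list arguments are sorted, then joined)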
Batch = namedtuple("Batch", "identity, data")
class Empty(Exception):
pass
class Full(Exception):
pass
def _start_loop(loop: asyncio.BaseEventLoop):
asyncio.set_event_loop(loop)
if not loop.is_running():
loop.run_forever()
def get_or_create_eventloop():
try:
return asyncio.get_event_loop()
except RuntimeError as ex:
if "There is no current event loop in thread" in str(ex):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return asyncio.get_event_loop()
class _QueueActor:
def __init__(self, maxsize, event_loop):
self.maxsize = maxsize
self.queue = asyncio.Queue(self.maxsize, loop=event_loop)
def qsize(self):
return self.queue.qsize()
def empty(self):
return self.queue.empty()
def full(self):
return self.queue.full()
async def put(self, item, timeout=None):
try:
await asyncio.wait_for(self.queue.put(item), timeout)
except asyncio.TimeoutError:
raise Full
async def get(self, timeout=None):
try:
return await asyncio.wait_for(self.queue.get(), timeout)
except asyncio.TimeoutError:
raise Empty
def put_nowait(self, item):
self.queue.put_nowait(item)
def put_nowait_batch(self, items):
# If maxsize is 0, queue is unbounded, so no need to check size.
if self.maxsize > 0 and len(items) + self.qsize() > self.maxsize:
raise Full(
f"Cannot add {len(items)} items to queue of size "
f"{self.qsize()} and maxsize {self.maxsize}."
)
for item in items:
self.queue.put_nowait(item)
def get_nowait(self):
return self.queue.get_nowait()
def get_nowait_batch(self, num_items):
if num_items > self.qsize():
raise Empty(
f"Cannot get {num_items} items from queue of size " f"{self.qsize()}."
)
return [self.queue.get_nowait() for _ in range(num_items)]
def shutdown(self):
self.queue = None
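# --- Illustrative sketch (not part of the module) -----------------------------
# The queue actor is used below as a bounded pool of buffer-slot indices handed
# between producer and consumer; only the synchronous batch methods are shown.
def _example_queue_actor():
    loop = get_or_create_eventloop()
    q = _QueueActor(maxsize=4, event_loop=loop)
    q.put_nowait_batch([0, 1, 2, 3])   # mark four slots as writable/ready
    return q.get_nowait_batch(2)       # claim two slot indices -> [0, 1]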
class Table:
def __init__(
self,
capacity: int,
fragment_length: int,
data_shapes: Dict[AgentID, Dict[str, Tuple]] = None,
data_dtypes: Dict[AgentID, Dict[str, Tuple]] = None,
sample_start_size: int = 0,
event_loop: asyncio.BaseEventLoop = None,
name: str = "",
mode: str = "queue",
):
"""One table for one episode."""
self._name = name
self._threading_lock = threading.Lock()
self._rwlock = rwlock.RWLockFairD()
self._consumer_queue = None
self._producer_queue = None
self._is_fixed = False
self._sample_start_size = sample_start_size
self._size = 0
self._flag = 0
self._capacity = capacity
self._fragment_length = fragment_length
self._data_shapes = data_shapes
self._mode = mode
if mode == "queue":
self._consumer_queue = _QueueActor(maxsize=capacity, event_loop=event_loop)
self._producer_queue = _QueueActor(maxsize=capacity, event_loop=event_loop)
# ready index
self._producer_queue.put_nowait_batch([i for i in range(capacity)])
else:
self._consumer_queue = None
self._producer_queue = None
# build episode
if data_shapes is not None:
self._buffer = BufferDict()
for agent, _dshapes in data_shapes.items():
# if agent not in keys:
# continue
t = BufferDict()
for dk, dshape in _dshapes.items():
# XXX(ming): use fragment length for RNN?
# XXX(ziyu): For the case where a full episode with every timestep in order is
# needed, we add fragment_length + 1 to the shape; the '+1' is because
# truncated mode needs one extra step for the bootstrap value.
t[dk] = np.zeros(
(capacity,)
+ ((fragment_length + 1,) if self._fragment_length > 0 else ())
+ dshape,
dtype=data_dtypes[agent][dk],
)
self._buffer[agent] = t
else:
self._buffer = None
@property
def is_fixed(self):
return self._is_fixed
@property
def is_multi_agent(self) -> bool:
# always true
return True # len(self.buffer)
@property
def buffer(self) -> BufferDict:
return self._buffer
@property
def size(self):
return self._size
@property
def flag(self):
return self._flag
@property
def capacity(self):
return self._capacity
@property
def name(self):
return self._name
def build_buffer_from_samples(self, sample: Dict):
self._buffer = BufferDict()
for agent, _buff in sample.items():
t = BufferDict()
for dk, v in _buff.items():
t[dk] = np.zeros((self.capacity,) + v.shape[1:], dtype=v.dtype)
self._buffer[agent] = t
def sample_activated(self) -> bool:
return self._consumer_queue.qsize() >= self._sample_start_size
def fix_table(self):
self._is_fixed = True
if self._mode == "queue":
self._producer_queue.shutdown()
self._consumer_queue.shutdown()
def get_producer_index(self, buffer_size: int) -> Union[List[int], None]:
buffer_size = min(self._producer_queue.qsize(), buffer_size)
if buffer_size <= 0:
return None
else:
return self._producer_queue.get_nowait_batch(int(buffer_size))
def get_consumer_index(self, buffer_size: int) -> Union[List[int], None]:
buffer_size = min(self._consumer_queue.qsize(), buffer_size)
if buffer_size <= 0:
return None
else:
return self._consumer_queue.get_nowait_batch(int(buffer_size))
def free_consumer_index(self, indices: List[int]):
self._producer_queue.put_nowait_batch(indices)
def free_producer_index(self, indices: List[int]):
self._consumer_queue.put_nowait_batch(indices)
@staticmethod
def gen_table_name(*args, **kwargs):
return DATASET_TABLE_NAME_GEN(*args, **kwargs)
def insert(
self, data: List[Dict[str, Any]], indices: List[int] = None, size: int = None
):
assert isinstance(data, List), type(data)
if self.buffer is None:
self.build_buffer_from_samples(data[0])
if indices is None:
# generate indices
indices = np.arange(self._flag, self._flag + size) % self._capacity
shuffle_idx = np.arange(len(indices))
np.random.shuffle(shuffle_idx)
for d_list, k, value_list in iter_many_dicts_recursively(*data):
head_d = d_list[0]
batch_sizes = [v.shape[0] for v in value_list]
merged_shape = (sum(batch_sizes),) + value_list[0].shape[1:]
_placeholder = np.zeros(merged_shape, dtype=head_d[k].dtype)
index = 0
for batch_size, value in zip(batch_sizes, value_list):
_placeholder[index : index + batch_size] = value[:]
index += batch_size
assert len(_placeholder) >= len(indices), (
len(_placeholder),
len(indices),
_placeholder.shape,
k,
value_list[0].shape,
len(value_list),
)
head_d[k] = _placeholder[shuffle_idx]
# assert indices is not None, "indices: {}".format(indices)
self._buffer.set_data(indices, data[0])
self._size += len(indices)
self._size = min(self._size, self._capacity)
self._flag = (self._flag + len(indices)) % self._capacity
def sample(self, indices: List[int] = None, size: int = None) -> Dict[str, Any]:
if indices is None:
indices = np.random.choice(self.size, size)
return self._buffer.index(indices)
@staticmethod
def _save_helper_func(obj, fp, candidate_name=""):
if os.path.isdir(fp):
try:
os.makedirs(fp)
except:
pass
tfp = os.path.join(fp, candidate_name + ".tpkl")
else:
paths = os.path.split(fp)[0]
try:
os.makedirs(paths)
except:
pass
tfp = fp + ".tpkl"
with open(tfp, "wb") as f:
pkl.dump(obj, f, protocol=settings.PICKLE_PROTOCOL_VER)
def dump(self, fp, name=None):
if name is None:
name = self._name
with self._threading_lock:
serial_dict = {
"fragment_length": self._fragment_length,
"multi_agent": self.is_multi_agent,
"sample_start_size": self._sample_start_size,
"data_shapes": self._data_shapes,
"data": self._buffer,
"name": self._name,
}
self._save_helper_func(serial_dict, fp, name)
@classmethod
def load(cls, fp, event_loop=None):
with open(fp, "rb") as f:
serial_dict = pkl.load(f)
buffer = serial_dict["data"]
dtypes = {}
for agent, agent_data in buffer.items():
agent_dtypes = {}
for cname, cdata in agent_data.items():
agent_dtypes[cname] = cdata.dtype
dtypes[agent] = agent_dtypes
table = Table(
capacity=buffer.capacity,
fragment_length=serial_dict["fragment_length"],
data_shapes=serial_dict["data_shapes"],
data_dtypes=dtypes,
sample_start_size=serial_dict["sample_start_size"],
event_loop=event_loop,
name=serial_dict.get("name", ""),
)
table._buffer = buffer
return table
def to_csv(self, fp):
def _dump_episode(fname, episode: BufferDict):
class _InternalColumnGenerator:
def __init__(self, column_data_dict):
self.idx = 0
self.data = column_data_dict
self.columns = column_data_dict.keys()
self.length = len(next(iter(column_data_dict.values())))
def getlines(self):
column_info = "/".join([str(col) for col in self.columns]) + "\n"
yield column_info
while self.idx < self.length:
line = []
for c in self.columns:
line.append(str(self.data[c][self.idx].tolist()))
line = "/".join(line) + "\n"
self.idx += 1
yield line
lines = _InternalColumnGenerator(episode).getlines()
with open(fname, "w") as f:
f.writelines(lines)
with self._threading_lock:
try:
os.makedirs(fp)
except:
pass
assert self.is_multi_agent
for aid in self._buffer.keys():
episode = self._buffer[aid]
_dump_episode(os.path.join(fp, str(aid)), episode)
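# --- Rough usage sketch (not part of the module) ------------------------------
# The shapes, dtypes and the nested {agent: {column: batch_array}} sample layout
# below are assumptions for illustration; they mirror what
# build_buffer_from_samples and insert expect.
def _example_table_roundtrip():
    shapes = {"agent_0": {"obs": (4,), "rew": ()}}
    dtypes = {"agent_0": {"obs": np.float32, "rew": np.float32}}
    table = Table(capacity=8, fragment_length=0, data_shapes=shapes,
                  data_dtypes=dtypes, event_loop=get_or_create_eventloop())
    sample = {"agent_0": {"obs": np.zeros((2, 4), np.float32),
                          "rew": np.zeros((2,), np.float32)}}
    table.insert([sample], size=2)   # writes rows 0..1 of the ring buffer
    return table.sample(size=2)      # BufferDict indexed at 2 random rows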
@ray.remote
class OfflineDataset:
def __init__(
self, dataset_config: Dict[str, Any], exp_cfg: Dict[str, Any], test_mode=False
):
self._episode_capacity = dataset_config.get(
"episode_capacity", settings.DEFAULT_EPISODE_CAPACITY
)
self._fragment_length = dataset_config.get("fragment_length")
self._learning_start = dataset_config.get("learning_start", 64)
self._tables: Dict[str, Table] = dict()
self._threading_lock = threading.Lock()
self._threading_pool = ThreadPoolExecutor()
loop = get_or_create_eventloop()
self.event_loop = loop
self.event_thread = threading.Thread(target=_start_loop, args=(loop,))
self.event_thread.setDaemon(True)
self.event_thread.start()
# parse init tasks
init_job_config = dataset_config.get("init_job", {})
if init_job_config.get("load_when_start"):
path = init_job_config.get("path")
if path:
self.load(path)
# # Read-only proxies for external offline dataset
# external_resource_config = dataset_config.get("extern")
# self.external_proxy: List[ExternalDataset] = []
# if external_resource_config:
# for external_config, sample_rate in zip(
# external_resource_config["links"],
# external_resource_config["sample_rates"],
# ):
# if not external_config["write"]:
# dataset = ExternalReadOnlyDataset(
# name=external_config["name"],
# path=external_config["path"],
# sample_rate=sample_rate,
# )
# self.external_proxy.append(dataset)
# else:
# raise NotImplementedError(
# "External writable dataset is not supported"
# )
# quitting job
quit_job_config = dataset_config.get("quit_job", {})
self.dump_when_closed = quit_job_config.get("dump_when_closed")
self.dump_path = quit_job_config.get("path")
Logger.info(
"dataset server initialized with (table_capacity={} table_learning_start={})".format(
self._episode_capacity, self._learning_start
)
)
def lock(self, lock_type: str, desc: Dict[AgentID, BufferDescription]) -> str:
"""Lock table ready to push or pull and return the table status."""
env_id = list(desc.values())[0].env_id
main_ids = sorted(list(desc.keys()))
table_name = Table.gen_table_name(
env_id=env_id,
main_id=main_ids,
pid=[desc[aid].policy_id for aid in main_ids],
)
# check it is multi-agent or not
# self.check_table(table_name, None, is_multi_agent=len(main_ids) > 1)
# table = self._tables[table_name]
# status = table.lock_push_pull(lock_type)
# return status
if table_name in self._tables:
return Status.SUCCESS
else:
return Status.FAILED
def unlock(self, lock_type: str, desc: Dict[AgentID, BufferDescription]):
env_id = list(desc.values())[0].env_id
main_ids = sorted(list(desc.keys()))
table_name = Table.gen_table_name(
env_id=env_id,
main_id=main_ids,
pid=[desc[aid].policy_id for aid in main_ids],
)
# self.check_table(table_name, None, is_multi_agent=len(main_ids) > 1)
# table = self._tables[table_name]
# status = table.unlock_push_pull(lock_type)
# return status
if table_name in self._tables:
return Status.SUCCESS
else:
return Status.FAILED
def create_table(self, buffer_desc: BufferDescription):
name = Table.gen_table_name(
env_id=buffer_desc.env_id,
main_id=buffer_desc.agent_id,
pid=buffer_desc.policy_id,
)
if name in self._tables:
raise Warning("Repeated table definition: {}".format(name))
# return None
else:
self._tables[name] | |
with a specific UUID.
:param userID: the users's UUID
:type userID: string
:rtype: list of dicts representing publishers
"""
# https://api.relayr.io/users/%s/publishers
url = '{0}/users/{1}/publishers'.format(self.host, userID)
_, data = self.perform_request('GET', url, headers=self.headers)
return data
def get_user_apps(self, userID):
"""
Get all apps installed for a user with a specific UUID.
:param userID: the users's UUID
:type userID: string
:rtype: list of dicts ... with UUIDs and secrets
"""
# https://api.relayr.io/users/%s/apps
url = '{0}/users/{1}/apps'.format(self.host, userID)
_, data = self.perform_request('GET', url, headers=self.headers)
return data
def get_user_transmitters(self, userID):
"""
Get all transmitters for a user with a specific UUID.
:param userID: the users's UUID
:type userID: string
:rtype: list of dicts with UUIDs and secrets
"""
# https://api.relayr.io/users/%s/transmitters
url = '{0}/users/{1}/transmitters'.format(self.host, userID)
_, data = self.perform_request('GET', url, headers=self.headers)
return data
def get_user_devices(self, userID):
"""
Get all devices registered for a user with a specific UUID.
:param userID: the users's UUID
:type userID: string
:rtype: list of dicts ...
"""
# https://api.relayr.io/users/%s/devices
url = '{0}/users/{1}/devices'.format(self.host, userID)
_, data = self.perform_request('GET', url, headers=self.headers)
return data
def get_user_devices_filtered(self, userID, meaning):
"""
Get all devices registered for a specific user filtered by meaning.
:param userID: the users's UUID
:type userID: string
:param meaning: a meaning used for filtering results
:type meaning: string
:rtype: list of dicts representing devices
"""
# https://api.relayr.io/users/%s/devices?meaning=%s
url = '{0}/users/{1}/devices?meaning={2}'.format(self.host, userID, meaning)
_, data = self.perform_request('GET', url, headers=self.headers)
return data
def get_user_devices_bookmarks(self, userID):
"""
Get a list of devices bookmarked by a specific user.
:param userID: the users's UUID
:type userID: string
:rtype: list of dicts, each representing a device
Sample result (anonymized UUIDs)::
[{u'firmwareVersion': u'1.0.0',
u'id': '...',
u'model': '...',
u'name': 'My Wunderbar Microphone',
u'owner': '...',
u'public': True,
u'secret': '238885'}]
"""
# https://api.relayr.io/users/%s/devices/bookmarks
url = '{0}/users/{1}/devices/bookmarks'.format(self.host, userID)
_, data = self.perform_request('GET', url, headers=self.headers)
return data
def post_user_devices_bookmark(self, userID, deviceID):
"""
Bookmark a specific public device for a specific user.
:param userID: the users's UUID
:type userID: string
:param deviceID: the UUID of the device to be bookmarked
:type deviceID: string
:rtype: dict describing the created bookmark
Sample result (anonymized UUIDs)::
{'createdAt': '2014-11-05T16:31:06.429Z',
'deviceId': '...',
'userId': '...'}
"""
# https://api.relayr.io/users/%s/devices/bookmarks
url = '{0}/users/{1}/devices/{2}/bookmarks'.format(self.host, userID, deviceID)
_, data = self.perform_request('POST', url, headers=self.headers)
return data
def delete_user_devices_bookmark(self, userID, deviceID):
"""
Delete a bookmark for a specific user and device.
:param userID: the users's UUID
:type userID: string
:param deviceID: the device's UUID
:type deviceID: string
:rtype: None
"""
# https://api.relayr.io/users/%s/devices/%s/bookmarks
url = '{0}/users/{1}/devices/{2}/bookmarks'.format(self.host, userID, deviceID)
_, data = self.perform_request('DELETE', url, headers=self.headers)
return data
def post_user_wunderbar(self, userID):
"""
Get the UUIDs and secrets of the master module and sensor modules.
:param userID: the users's UUID
:type userID: string
:rtype: dict with information about master and sensor modules/devices
Sample result (abbreviated, some values anonymized)::
{
"bridge": { ... },
"microphone": {
"name": "My Wunderbar Microphone",
"public": False,
"secret": "......",
"owner": "...",
"model": {
"readings": [
{
"meaning": "noise_level",
"unit": "dba"
}
],
"manufacturer": "Relayr GmbH",
"id": "...",
"name": "Wunderbar Microphone"
},
"id": "...",
"firmwareVersion": "1.0.0"
},
"light": { ... },
"masterModule": {
"owner": "...",
"secret": "............",
"id": "...",
"name": "My Wunderbar Master Module"
},
"infrared": { ... },
"thermometer": { ... },
"gyroscope": { ... }
}
"""
# https://api.relayr.io/users/%s/wunderbar
url = '{0}/users/{1}/wunderbar'.format(self.host, userID)
_, data = self.perform_request('POST', url, headers=self.headers)
return data
def delete_wunderbar(self, transmitterID):
"""
Delete a WunderBar identified by its master module from the relayr
cloud. This means that in addition to the transmitter (the master
module) all devices (sensors) associated with it are being deleted.
:param transmitterID: the UUID of the master module
:type transmitterID: string
"""
# https://api.relayr.io/wunderbars/%s
url = '{0}/wunderbars/{1}'.format(self.host, transmitterID)
_, data = self.perform_request('DELETE', url, headers=self.headers)
return data
def post_users_destroy(self, userID):
"""
Delete all WunderBars associated with a specific user from the relayr cloud.
:param userID: the users's UUID
:type userID: string
"""
# https://api.relayr.io/users/%s/destroy-everything-i-love
url = '{0}/users/{1}/destroy-everything-i-love'.format(self.host, userID)
_, data = self.perform_request('POST', url, headers=self.headers)
return data
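# Usage sketch (assuming this client class is instantiated elsewhere, e.g. as
# `api`, with a valid OAuth token already present in self.headers):
#
#   devices = api.get_user_devices(user_id)
#   noisy = api.get_user_devices_filtered(user_id, 'noise_level')
#   api.post_user_devices_bookmark(user_id, devices[0]['id'])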
# ..............................................................................
# Applications
# ..............................................................................
def get_public_apps(self):
"""
Get a list of all public relayr applications on the relayr platform.
:rtype: list of dicts, each representing a relayr application
"""
# https://api.relayr.io/apps
url = '{0}/apps'.format(self.host)
_, data = self.perform_request('GET', url, headers=self.headers)
return data
def post_app(self, appName, publisherID, redirectURI, appDescription):
"""
Register a new application on the relayr platform.
:rtype: list of dicts, each representing a relayr application
"""
data = {
"name": appName,
"publisher": publisherID,
"redirectUri": redirectURI,
"description": appDescription
}
# https://api.relayr.io/apps
url = '{0}/apps'.format(self.host)
_, data = self.perform_request('POST', url, data=data, headers=self.headers)
return data
def get_app_info(self, appID):
"""
Get information about an app with a specific UUID.
:param appID: the app's UUID
:type appID: string
Sample result (anonymized token value)::
{
"id": "...",
"name": "My App",
"description": "My Wunderbar app",
...
}
"""
# https://api.relayr.io/apps/<appID>
url = '{0}/apps/{1}'.format(self.host, appID)
_, data = self.perform_request('GET', url, headers=self.headers)
return data
def get_app_info_extended(self, appID):
"""
Get extended information about the app with a specific UUID.
:param appID: the app's UUID
:type appID: string
Sample result (some values anonymized)::
{
"id": "...",
"name": "My App",
"publisher": "...",
"clientId": "...",
"clientSecret": "...",
"description": "My Wunderbar app",
"redirectUri": https://relayr.io
}
"""
# https://api.relayr.io/apps/<appID>/extended
url = '{0}/apps/{1}/extended'.format(self.host, appID)
_, data = self.perform_request('GET', url, headers=self.headers)
return data
def patch_app(self, appID, description=None, name=None, redirectUri=None):
"""
Update one or more attributes of an app with a specific UUID.
:param appID: the application's UUID
:type appID: string
:param description: the user name to be set
:type description: string
:param name: the user email to be set
:type name: string
:param redirectUri: the redirect URI to be set
:type redirectUri: string
Sample result (some values anonymized)::
{
"id": "...",
"name": "<NAME>",
"publisher": "...",
"clientId": "...",
"clientSecret": "...",
"description": "My Wunderbar app",
"redirectUri": https://relayr.io
}
"""
data = {}
if name is not None:
data.update(name=name)
if description is not None:
data.update(description=description)
if redirectUri is not None:
data.update(redirectUri=redirectUri)
# https://api.relayr.io/apps/<appID>
url = '{0}/apps/{1}'.format(self.host, appID)
_, data = self.perform_request('PATCH', url, data=data, headers=self.headers)
return data
def delete_app(self, appID):
"""
Delete an application from the relayr platform.
:param appID: the application's UUID
:type appID: string
"""
# https://api.relayr.io/apps/<appID>
url = '{0}/apps/{1}'.format(self.host, appID)
_, data = self.perform_request('DELETE', url, headers=self.headers)
return data
def get_oauth2_app_info(self):
"""
Get info about the app initiating the request (the one in the token).
Sample result (anonymized token value)::
{
"id": "...",
"name": "<NAME>",
"description": "My Wunderbar app"
}
"""
# https://api.relayr.io/oauth2/app-info
url = '{0}/oauth2/app-info'.format(self.host)
_, data = self.perform_request('GET', url, headers=self.headers)
return data
# ..............................................................................
# Publishers
# ..............................................................................
def get_public_publishers(self):
"""
Get a list of all publishers on the relayr platform.
:rtype: list of dicts, each representing a relayr publisher
"""
# https://api.relayr.io/publishers
url = '{0}/publishers'.format(self.host)
_, data = self.perform_request('GET', url, headers=self.headers)
return data
def post_publisher(self, userID, name):
"""
Register a new publisher.
:param userID: the user UUID of the publisher
:type userID: string
:param name: the publisher name
:type name: string
:rtype: a dict with fields describing the new publisher
"""
# https://api.relayr.io/publishers
data = {'owner': userID, 'name': name}
url = '{0}/publishers'.format(self.host)
_, data = self.perform_request('POST', url, data=data, headers=self.headers)
return data
def delete_publisher(self, publisherID):
"""
Delete a specific publisher from the relayr platform.
:param publisherID: the publisher UUID
:type publisherID: string
:rtype: an empty dict(?)
"""
# https://api.relayr.io/publishers
url = '{0}/publishers/{1}'.format(self.host, publisherID)
_, data = self.perform_request('DELETE', url, headers=self.headers)
return data
def get_publisher_apps(self, publisherID):
"""
Return a list of apps published by a specific publisher.
:param publisherID: the publisher UUID
:type publisherID: string
:rtype: A list of apps.
"""
# https://api.relayr.io/publishers/<id>/apps
url = '{0}/publishers/{1}/apps'.format(self.host, publisherID)
_, data = self.perform_request('GET', url, headers=self.headers)
return data
def get_publisher_apps_extended(self, publisherID):
"""
Return a list with extended information about the publisher's apps.
:param publisherID: the publisher UUID
:type publisherID: string
:rtype: A list of apps.
"""
# https://api.relayr.io/publishers/<id>/apps/extended
url = '{0}/publishers/{1}/apps/extended'.format(self.host, publisherID)
_, data = self.perform_request('GET', url, headers=self.headers)
return data
def patch_publisher(self, publisherID, name=None):
"""
Update name attribute of a specific publisher.
:param publisherID: the publisher's UUID
| |
# -*- coding:utf-8 -*-
# Copyright xmuspeech (Author: JFZhou 2019-11-18)
# Update xmuspeech (Author: <NAME> 2020-12-31)
import scipy
import numpy as np
import math
import os
import kaldi_io
import sys
import logging
'''
Modified from the code available at https://github.com/vzxxbacq/PLDA/blob/master/plda.py
'''
# Logger
logger = logging.getLogger('libs')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s [%(pathname)s:%(lineno)s] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
M_LOG_2PI = 1.8378770664093454835606594728112
class ClassInfo(object):
def __init__(self, weight=0, num_example=0, mean=0):
self.weight = weight
self.num_example = num_example
self.mean = mean
class PldaStats(object):
def __init__(self, dim):
self.dim_ = dim
self.num_example = 0
self.num_classes = 0
self.class_weight = 0
self.example_weight = 0
self.sum = np.zeros([dim,1])
self.offset_scatter= np.zeros([dim,dim])
self.classinfo = list()
def add_samples(self, weight, group):
# Each row represent an utts of the same speaker.
n = group.shape[0]
mean = np.mean(group, axis=0)
mean=mean.reshape((-1,1))
self.offset_scatter += weight * np.matmul(group.T,group)
self.offset_scatter += -n * weight * np.matmul(mean,mean.T)
self.classinfo.append(ClassInfo(weight, n, mean))
self.num_example += n
self.num_classes += 1
self.class_weight += weight
self.example_weight += weight * n
self.sum += weight * mean
#@property
def is_sorted(self):
for i in range(self.num_classes-1):
if self.classinfo[i+1].num_example < self.classinfo[i].num_example:
return False
return True
def sort(self):
for i in range(self.num_classes-1):
for j in range(i+1,self.num_classes):
if self.classinfo[i].num_example > self.classinfo[j].num_example:
self.classinfo[i],self.classinfo[j] = self.classinfo[j],self.classinfo[i]
return
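# --- Minimal sketch (not part of the module) -----------------------------------
# Accumulate PLDA sufficient statistics for a couple of "speakers" from random
# embeddings; dimensions, weights and utterance counts are illustrative only.
def _example_plda_stats(dim=16):
    stats = PldaStats(dim)
    for _ in range(2):                   # two speakers
        group = np.random.randn(5, dim)  # 5 utterances per speaker
        stats.add_samples(1.0, group)
    stats.sort()                         # estimation below expects sorted stats
    return stats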
class PLDA(object):
def __init__(self, normalize_length=True, simple_length_norm=False):
self.mean = 0
self.transform = 0
self.psi = 0
self.dim = 0
self.normalize_length = normalize_length
self.simple_length_norm = simple_length_norm
def transform_ivector(self,ivector, num_example):
self.dim = ivector.shape[-1]
transformed_ivec = self.offset
transformed_ivec = 1.0 * np.matmul(self.transform, ivector) + 1.0 * transformed_ivec  # offset is precomputed in compute_derived_vars()
if(self.simple_length_norm):
normalization_factor = math.sqrt(self.dim) / np.linalg.norm(transformed_ivec)
else:
normalization_factor = self.get_normalization_factor(transformed_ivec,
num_example)
if(self.normalize_length):
transformed_ivec = normalization_factor*transformed_ivec
return transformed_ivec
def log_likelihood_ratio(self, transform_train_ivector, num_utts,
transform_test_ivector):
self.dim = transform_train_ivector.shape[0]
mean = np.zeros([self.dim,1])
variance = np.zeros([self.dim,1])
for i in range(self.dim):
mean[i] = num_utts * self.psi[i] / (num_utts * self.psi[i] + 1.0) * transform_train_ivector[i]  # element-wise nΨ/(nΨ+I) ū^g
variance[i] = 1.0 + self.psi[i] / (num_utts * self.psi[i] + 1.0)
logdet = np.sum(np.log(variance)) #ln|Ψ/(nΨ+I)+I|
transform_test_ivector=transform_test_ivector.reshape(-1,1)
sqdiff = transform_test_ivector - mean  # u^p - nΨ/(nΨ+I) ū^g
sqdiff=sqdiff.reshape(1,-1)
sqdiff = np.power(sqdiff, 2.0)
variance = np.reciprocal(variance)
loglike_given_class = -0.5 * (logdet + M_LOG_2PI * self.dim + np.dot(sqdiff, variance))
sqdiff = transform_test_ivector
sqdiff = np.power(sqdiff, np.full(sqdiff.shape, 2.0))
sqdiff=sqdiff.reshape(1,-1)
variance = self.psi + 1.0
logdet = np.sum(np.log(variance))
variance = np.reciprocal(variance)  # reciprocal of the variance, i.e. (Ψ+I)^(-1)
variance=variance.reshape(-1,1)
loglike_without_class = -0.5 * (logdet + M_LOG_2PI * self.dim + np.dot(sqdiff, variance))
loglike_ratio = loglike_given_class - loglike_without_class
return loglike_ratio
def smooth_within_class_covariance(self, smoothing_factor):
within_class_covar = np.ones(self.dim)
smooth = np.full(self.dim,smoothing_factor*within_class_covar*self.psi.T)
within_class_covar = np.add(within_class_covar,
smooth)
self.psi = np.divide(self.psi, within_class_covar)
within_class_covar = np.power(within_class_covar,
np.full(within_class_covar.shape, -0.5))
self.transform = np.diag(within_class_covar) * self.transform
self.compute_derived_vars()
def compute_derived_vars(self):
self.offset = np.zeros(self.dim)
self.offset = -1.0 * np.matmul(self.transform,self.mean)
return self.offset
def get_normalization_factor(self, transform_ivector, num_example):
transform_ivector_sq = np.power(transform_ivector, 2.0)
inv_covar = self.psi + 1.0/num_example
inv_covar = np.reciprocal(inv_covar)
dot_prob = np.dot(inv_covar, transform_ivector_sq)
return math.sqrt(self.dim/dot_prob)
def plda_read(self,plda):
with kaldi_io.open_or_fd(plda,'rb') as f:
for key,vec in kaldi_io.read_vec_flt_ark(f):
if key == 'mean':
self.mean = vec.reshape(-1,1)
self.dim = self.mean.shape[0]
elif key == 'within_var':
self.within_var = vec.reshape(self.dim, self.dim)
else:
self.between_var = vec.reshape(self.dim, self.dim)
def compute_normalizing_transform(self,covar):
c = np.linalg.cholesky(covar)
c = np.linalg.inv(c)
return c
def get_output(self):
transform1 = self.compute_normalizing_transform(self.within_var)
'''
// now transform is a matrix that if we project with it,
// within_var_ becomes unit.
// between_var_proj is between_var after projecting with transform1.
'''
between_var_proj =transform1.dot(self.between_var).dot(transform1.T)
'''
// Do symmetric eigenvalue decomposition between_var_proj = U diag(s) U^T,
// where U is orthogonal.
'''
s, U = np.linalg.eig(between_var_proj)
        # Sort the eigenvalues in descending order and reorder the eigenvectors to match
sorted_indices = np.argsort(s)
U = U[:,sorted_indices[:-len(sorted_indices)-1:-1]]
s = s[sorted_indices[:-len(sorted_indices)-1:-1]]
        assert s.min() > 0  # for safety: all between-class eigenvalues must be positive
'''
// The transform U^T will make between_var_proj diagonal with value s
// (i.e. U^T U diag(s) U U^T = diag(s)). The final transform that
// makes within_var_ unit and between_var_ diagonal is U^T transform1,
// i.e. first transform1 and then U^T.
'''
self.transform = np.matmul(U.T,transform1)
self.psi = s
self.compute_derived_vars()
def plda_trans_write(self,plda):
with open(plda,'w') as f:
f.write('<Plda> [ '+' '.join(list(map(str,list(self.mean.reshape(self.mean.shape[0])))))+' ]\n')
f.write(' [')
for i in range(len(self.transform)):
f.write('\n '+' '.join(list(map(str,list(self.transform[i])))))
f.write(' ]')
f.write('\n [ '+' '.join(list(map(str,list(self.psi.reshape(self.psi.shape[0])))))+' ]\n')
f.write('</Plda> ')
class PldaEstimation(object):
def __init__(self, Pldastats):
self.mean = 0
self.stats = Pldastats
is_sort = self.stats.is_sorted()
if not is_sort:
            logger.info('The stats are not sorted by class size...')
sys.exit()
self.dim = Pldastats.dim_
self.between_var =np.eye(self.dim)
self.between_var_stats = np.zeros([self.dim,self.dim])
self.between_var_count = 0
self.within_var = np.eye(self.dim)
self.within_var_stats = np.zeros([self.dim,self.dim])
self.within_var_count = 0
def estimate(self,num_em_iters=10):
for i in range(num_em_iters):
logger.info("iteration times:{}".format(i))
self.estimate_one_iter()
self.mean = (1.0 / self.stats.class_weight) * self.stats.sum
def estimate_one_iter(self):
self.reset_per_iter_stats()
self.get_stats_from_intraclass()
self.get_stats_from_class_mean()
self.estimate_from_stats()
def reset_per_iter_stats(self):
self.within_var_stats = np.zeros([self.dim,self.dim])
self.within_var_count = 0
self.between_var_stats = np.zeros([self.dim,self.dim])
self.between_var_count = 0
def get_stats_from_intraclass(self):
self.within_var_stats += self.stats.offset_scatter
self.within_var_count += (self.stats.example_weight - self.stats.class_weight)
def get_stats_from_class_mean(self):
within_var_inv = np.linalg.inv(self.within_var)
between_var_inv =np.linalg.inv(self.between_var)
for i in range(self.stats.num_classes):
info = self.stats.classinfo[i]
weight = info.weight
if info.num_example:
n = info.num_example
                mix_var = np.linalg.inv(between_var_inv + n * within_var_inv)  # (Φ_b^(-1) + n Φ_w^(-1))^(-1)
                m = info.mean - (self.stats.sum / self.stats.class_weight)  # m_k
                m = m.reshape((-1, 1))
                temp = n * np.matmul(within_var_inv, m)  # n Φ_w^(-1) m_k
                w = np.matmul(mix_var, temp)  # w = (Φ_b^(-1) + n Φ_w^(-1))^(-1) n Φ_w^(-1) m_k
                w = w.reshape(-1, 1)
                m_w = m - w
                m_w = m_w.reshape(-1, 1)
                self.between_var_stats += weight * mix_var  # accumulate (Φ_b^(-1) + n Φ_w^(-1))^(-1)
                self.between_var_stats += weight * np.matmul(w, w.T)  # ... + w w^T
                self.between_var_count += weight
                self.within_var_stats += weight * n * mix_var  # accumulate n_k (Φ_b^(-1) + n Φ_w^(-1))^(-1)
                self.within_var_stats += weight * n * np.matmul(m_w, m_w.T)  # ... + n_k (w_k - m_k)(w_k - m_k)^T
                self.within_var_count += weight
    def estimate_from_stats(self):
        self.within_var = (1.0 / self.within_var_count) * self.within_var_stats  # Φ_w = (1/K) Σ_k n_k (mix_var_k + (w_k - m_k)(w_k - m_k)^T)
        self.between_var = (1.0 / self.between_var_count) * self.between_var_stats  # Φ_b = (1/K) Σ_k (mix_var_k + w_k w_k^T)
def get_output(self):
Plda_output = PLDA()
        # Plda_output.mean = (1.0 / self.stats.class_weight) * self.stats.sum  # global mean
Plda_output.mean = self.mean
        transform1 = self.compute_normalizing_transform(self.within_var)  # inverse Cholesky factor of the within-class covariance
'''
// now transform is a matrix that if we project with it,
// within_var_ becomes unit.
// between_var_proj is between_var after projecting with transform1.
'''
        between_var_proj = np.matmul(np.matmul(transform1, self.between_var), transform1.T)  # project the between-class covariance; its eigen-decomposition yields the diagonal Ψ
'''
// Do symmetric eigenvalue decomposition between_var_proj = U diag(s) U^T,
// where U is orthogonal.
'''
        s, U = np.linalg.eig(between_var_proj)  # eigenvalues/eigenvectors of the projected between-class covariance (np.linalg.eig gives no ordering guarantee)
assert s.min()>0
'''
// The transform U^T will make between_var_proj diagonal with value s
// (i.e. U^T U diag(s) U U^T = diag(s)). The final transform that
// makes within_var_ unit and between_var_ diagonal is U^T transform1,
// i.e. first transform1 and then U^T.
'''
Plda_output.transform = np.matmul(U.T,transform1)
Plda_output.psi = s
Plda_output.compute_derived_vars()
return Plda_output
def compute_normalizing_transform(self,covar):
        c = np.linalg.cholesky(covar)  # Cholesky factor whose inverse (by congruence) simultaneously diagonalizes the within- and between-class covariances
c = np.linalg.inv(c)
return c
    def plda_write(self, plda):
with kaldi_io.open_or_fd(plda,'wb') as f:
kaldi_io.write_vec_flt(f, self.mean, key='mean')
kaldi_io.write_vec_flt(f, self.within_var.reshape(-1,1), key='within_var')
kaldi_io.write_vec_flt(f, self.between_var.reshape(-1,1), key='between_var')
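
# A minimal, hedged usage sketch (not part of the original pipeline) tying the two classes above
# together. It assumes `stats` is an already-populated, sorted PldaStats object with 1-D class
# means (so the derived model mean/offset stay 1-D) and that enroll_ivec/test_ivec are 1-D numpy
# arrays of matching dimension; names and the iteration count are illustrative only.
def _plda_train_and_score_sketch(stats, enroll_ivec, test_ivec, num_enroll_utts=3):
    estimator = PldaEstimation(stats)
    estimator.estimate(num_em_iters=10)    # EM over the within/between-class covariances
    plda = estimator.get_output()          # diagonalizing transform, psi and offset
    enroll = plda.transform_ivector(enroll_ivec, num_enroll_utts)
    test = plda.transform_ivector(test_ivec, 1)
    return plda.log_likelihood_ratio(enroll, num_enroll_utts, test)
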
class PldaUnsupervisedAdaptor(object):
"""
通过Add_stats将新的数据添加进来,通过update_plda进行更新
"""
def __init__(self,
mean_diff_scale=1.0,
within_covar_scale=0.3,
between_covar_scale=0.7):
self.tot_weight = 0
self.mean_stats = 0
self.variance_stats = 0
self.mean_diff_scale = mean_diff_scale
self.within_covar_scale = within_covar_scale
self.between_covar_scale = between_covar_scale
def add_stats(self, weight, ivector):
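        # Accumulate weighted zeroth/first/second-order statistics of the adaptation i-vectors.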
ivector = np.reshape(ivector,(-1,1))
if type(self.mean_stats)==int:
self.mean_stats = np.zeros(ivector.shape)
self.variance_stats = np.zeros((ivector.shape[0],ivector.shape[0]))
self.tot_weight += weight
self.mean_stats += weight * ivector
self.variance_stats += weight * np.matmul(ivector,ivector.T)
def update_plda(self, plda):
dim = self.mean_stats.shape[0]
#TODO:Add assert
'''
// mean_diff of the adaptation data from the training data. We optionally add
// this to our total covariance matrix
'''
mean = (1.0 / self.tot_weight) * self.mean_stats
'''
D(x)= E[x^2]-[E(x)]^2
'''
variance = (1.0 / self.tot_weight) * self.variance_stats - np.matmul(mean,mean.T)
'''
// update the plda's mean data-member with our adaptation-data mean.
'''
mean_diff = mean - plda.mean
variance += self.mean_diff_scale * np.matmul(mean_diff,mean_diff.T)
plda.mean = mean
transform_mod = plda.transform
'''
// transform_model_ is a row-scaled version of plda->transform_ that
// transforms into the space where the total covariance is 1.0. Because
// plda->transform_ transforms into a space where the within-class covar is
        // 1.0 and the between-class covar is diag(plda->psi_), we need to scale
// each dimension i by 1.0 / sqrt(1.0 + plda->psi_(i))
'''
for i in range(dim):
transform_mod[i] *= 1.0 / math.sqrt(1.0 + plda.psi[i])
'''
// project the variance of the adaptation set into this space where
// the total covariance is unit.
'''
variance_proj = np.matmul(np.matmul(transform_mod, variance),transform_mod.T)
'''
// Do eigenvalue decomposition of variance_proj; this will tell us the
// directions in which the adaptation-data covariance is more than
// the training-data covariance.
'''
s, P = np.linalg.eig(variance_proj)
'''
// W, B are the (within,between)-class covars in the space transformed by
// transform_mod.
'''
W = np.zeros([dim, dim])
B = np.zeros([dim, dim])
for i in range(dim):
W[i][i] | |
import http.cookies
import grpc
import pytest
from google.protobuf import empty_pb2, wrappers_pb2
from sqlalchemy.sql import delete, func
from couchers import errors
from couchers.crypto import hash_password, random_hex
from couchers.db import session_scope
from couchers.models import (
ContributeOption,
ContributorForm,
LoginToken,
PasswordResetToken,
SignupFlow,
User,
UserSession,
)
from couchers.sql import couchers_select as select
from proto import api_pb2, auth_pb2
from tests.test_fixtures import ( # noqa
api_session,
auth_api_session,
db,
fast_passwords,
generate_user,
real_api_session,
testconfig,
)
@pytest.fixture(autouse=True)
def _(testconfig, fast_passwords):
pass
def get_session_cookie_token(metadata_interceptor):
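    # Pull the "couchers-sesh" session token out of the Set-Cookie header captured by the test interceptor.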
return http.cookies.SimpleCookie(metadata_interceptor.latest_headers["set-cookie"])["couchers-sesh"].value
def test_UsernameValid(db):
with auth_api_session() as (auth_api, metadata_interceptor):
assert auth_api.UsernameValid(auth_pb2.UsernameValidReq(username="test")).valid
with auth_api_session() as (auth_api, metadata_interceptor):
assert not auth_api.UsernameValid(auth_pb2.UsernameValidReq(username="")).valid
def test_signup_incremental(db):
with auth_api_session() as (auth_api, metadata_interceptor):
res = auth_api.SignupFlow(
auth_pb2.SignupFlowReq(
basic=auth_pb2.SignupBasic(name="testing", email="<EMAIL>"),
)
)
flow_token = res.flow_token
assert res.flow_token
assert not res.HasField("auth_res")
assert not res.need_basic
assert res.need_account
assert res.need_feedback
assert res.need_verify_email
assert res.need_accept_community_guidelines
# read out the signup token directly from the database for now
with session_scope() as session:
flow = session.execute(select(SignupFlow).where(SignupFlow.flow_token == flow_token)).scalar_one()
assert flow.email_sent
assert not flow.email_verified
email_token = flow.email_token
with auth_api_session() as (auth_api, metadata_interceptor):
res = auth_api.SignupFlow(auth_pb2.SignupFlowReq(flow_token=flow_token))
assert res.flow_token == flow_token
assert not res.HasField("auth_res")
assert not res.need_basic
assert res.need_account
assert res.need_feedback
assert res.need_verify_email
assert res.need_accept_community_guidelines
# Add feedback
with auth_api_session() as (auth_api, metadata_interceptor):
res = auth_api.SignupFlow(
auth_pb2.SignupFlowReq(
flow_token=flow_token,
feedback=auth_pb2.ContributorForm(
ideas="I'm a robot, incapable of original ideation",
features="I love all your features",
experience="I haven't done couch surfing before",
contribute=auth_pb2.CONTRIBUTE_OPTION_YES,
contribute_ways=["serving", "backend"],
expertise="I'd love to be your server: I can compute very fast, but only simple opcodes",
),
)
)
assert res.flow_token == flow_token
assert not res.HasField("auth_res")
assert not res.need_basic
assert res.need_account
assert not res.need_feedback
assert res.need_verify_email
assert res.need_accept_community_guidelines
# Agree to community guidelines
with auth_api_session() as (auth_api, metadata_interceptor):
res = auth_api.SignupFlow(
auth_pb2.SignupFlowReq(
flow_token=flow_token,
accept_community_guidelines=wrappers_pb2.BoolValue(value=True),
)
)
assert res.flow_token == flow_token
assert not res.HasField("auth_res")
assert not res.need_basic
assert res.need_account
assert not res.need_feedback
assert res.need_verify_email
assert not res.need_accept_community_guidelines
# Verify email
with auth_api_session() as (auth_api, metadata_interceptor):
res = auth_api.SignupFlow(
auth_pb2.SignupFlowReq(
flow_token=flow_token,
email_token=email_token,
)
)
assert res.flow_token == flow_token
assert not res.HasField("auth_res")
assert not res.need_basic
assert res.need_account
assert not res.need_feedback
assert not res.need_verify_email
assert not res.need_accept_community_guidelines
# Finally finish off account info
with auth_api_session() as (auth_api, metadata_interceptor):
res = auth_api.SignupFlow(
auth_pb2.SignupFlowReq(
flow_token=flow_token,
account=auth_pb2.SignupAccount(
username="frodo",
password="<PASSWORD>",
birthdate="1970-01-01",
gender="Bot",
hosting_status=api_pb2.HOSTING_STATUS_MAYBE,
city="New York City",
lat=40.7331,
lng=-73.9778,
radius=500,
accept_tos=True,
),
)
)
assert not res.flow_token
assert res.HasField("auth_res")
assert res.auth_res.user_id
assert not res.auth_res.jailed
assert not res.need_basic
assert not res.need_account
assert not res.need_feedback
assert not res.need_verify_email
assert not res.need_accept_community_guidelines
user_id = res.auth_res.user_id
sess_token = get_session_cookie_token(metadata_interceptor)
with api_session(sess_token) as api:
res = api.GetUser(api_pb2.GetUserReq(user=str(user_id)))
assert res.username == "frodo"
assert res.gender == "Bot"
assert res.hosting_status == api_pb2.HOSTING_STATUS_MAYBE
assert res.city == "New York City"
assert res.lat == 40.7331
assert res.lng == -73.9778
assert res.radius == 500
with session_scope() as session:
form = session.execute(select(ContributorForm)).scalar_one()
assert form.ideas == "I'm a robot, incapable of original ideation"
assert form.features == "I love all your features"
assert form.experience == "I haven't done couch surfing before"
assert form.contribute == ContributeOption.yes
assert form.contribute_ways == ["serving", "backend"]
assert form.expertise == "I'd love to be your server: I can compute very fast, but only simple opcodes"
def _quick_signup():
with auth_api_session() as (auth_api, metadata_interceptor):
res = auth_api.SignupFlow(
auth_pb2.SignupFlowReq(
basic=auth_pb2.SignupBasic(name="testing", email="<EMAIL>"),
account=auth_pb2.SignupAccount(
username="frodo",
password="<PASSWORD>",
birthdate="1970-01-01",
gender="Bot",
hosting_status=api_pb2.HOSTING_STATUS_CAN_HOST,
city="New York City",
lat=40.7331,
lng=-73.9778,
radius=500,
accept_tos=True,
),
feedback=auth_pb2.ContributorForm(),
accept_community_guidelines=wrappers_pb2.BoolValue(value=True),
)
)
flow_token = res.flow_token
assert res.flow_token
assert not res.HasField("auth_res")
assert not res.need_basic
assert not res.need_account
assert not res.need_feedback
assert res.need_verify_email
# read out the signup token directly from the database for now
with session_scope() as session:
flow = session.execute(select(SignupFlow).where(SignupFlow.flow_token == flow_token)).scalar_one()
assert flow.email_sent
assert not flow.email_verified
email_token = flow.email_token
with auth_api_session() as (auth_api, metadata_interceptor):
res = auth_api.SignupFlow(auth_pb2.SignupFlowReq(email_token=email_token))
assert not res.flow_token
assert res.HasField("auth_res")
assert res.auth_res.user_id
assert not res.auth_res.jailed
assert not res.need_basic
assert not res.need_account
assert not res.need_feedback
assert not res.need_verify_email
user_id = res.auth_res.user_id
# make sure we got the right token in a cookie
with session_scope() as session:
token = (
session.execute(
select(UserSession).join(User, UserSession.user_id == User.id).where(User.username == "frodo")
).scalar_one()
).token
assert get_session_cookie_token(metadata_interceptor) == token
def test_signup(db):
_quick_signup()
def test_basic_login(db):
# Create our test user using signup
_quick_signup()
with auth_api_session() as (auth_api, metadata_interceptor):
reply = auth_api.Login(auth_pb2.LoginReq(user="frodo"))
assert reply.next_step == auth_pb2.LoginRes.LoginStep.NEED_PASSWORD
with auth_api_session() as (auth_api, metadata_interceptor):
auth_api.Authenticate(auth_pb2.AuthReq(user="frodo", password="<PASSWORD>"))
reply_token = get_session_cookie_token(metadata_interceptor)
with session_scope() as session:
token = (
session.execute(
select(UserSession)
.join(User, UserSession.user_id == User.id)
.where(User.username == "frodo")
.where(UserSession.token == reply_token)
).scalar_one_or_none()
).token
assert token
# log out
with auth_api_session() as (auth_api, metadata_interceptor):
auth_api.Deauthenticate(empty_pb2.Empty(), metadata=(("cookie", f"couchers-sesh={reply_token}"),))
def test_basic_login_without_password(db):
# Create our test user using signup
_quick_signup()
with session_scope() as session:
user = session.execute(select(User)).scalar_one()
        user.hashed_password = None
with auth_api_session() as (auth_api, metadata_interceptor):
reply = auth_api.Login(auth_pb2.LoginReq(user="frodo"))
assert reply.next_step == auth_pb2.LoginRes.LoginStep.SENT_LOGIN_EMAIL
# backdoor to find login token
with session_scope() as session:
entry = session.execute(select(LoginToken)).scalar_one()
login_token = entry.token
with auth_api_session() as (auth_api, metadata_interceptor):
auth_api.CompleteTokenLogin(auth_pb2.CompleteTokenLoginReq(login_token=login_token))
reply_token = get_session_cookie_token(metadata_interceptor)
with session_scope() as session:
token = (
session.execute(
select(UserSession)
.join(User, UserSession.user_id == User.id)
.where(User.username == "frodo")
.where(UserSession.token == reply_token)
).scalar_one_or_none()
).token
assert token
# log out
with auth_api_session() as (auth_api, metadata_interceptor):
auth_api.Deauthenticate(empty_pb2.Empty(), metadata=(("cookie", f"couchers-sesh={reply_token}"),))
def test_login_tokens_invalidate_after_use(db):
_quick_signup()
with session_scope() as session:
user = session.execute(select(User)).scalar_one()
        user.hashed_password = None
with auth_api_session() as (auth_api, metadata_interceptor):
reply = auth_api.Login(auth_pb2.LoginReq(user="frodo"))
assert reply.next_step == auth_pb2.LoginRes.LoginStep.SENT_LOGIN_EMAIL
with session_scope() as session:
login_token = session.execute(select(LoginToken)).scalar_one().token
with auth_api_session() as (auth_api, metadata_interceptor):
auth_api.CompleteTokenLogin(auth_pb2.CompleteTokenLoginReq(login_token=login_token))
session_token = get_session_cookie_token(metadata_interceptor)
with auth_api_session() as (auth_api, metadata_interceptor), pytest.raises(grpc.RpcError):
# check we can't login again
auth_api.CompleteTokenLogin(auth_pb2.CompleteTokenLoginReq(login_token=login_token))
def test_banned_user(db):
_quick_signup()
with auth_api_session() as (auth_api, metadata_interceptor):
reply = auth_api.Login(auth_pb2.LoginReq(user="frodo"))
assert reply.next_step == auth_pb2.LoginRes.LoginStep.NEED_PASSWORD
with session_scope() as session:
session.execute(select(User)).scalar_one().is_banned = True
with auth_api_session() as (auth_api, metadata_interceptor):
with pytest.raises(grpc.RpcError) as e:
auth_api.Authenticate(auth_pb2.AuthReq(user="frodo", password="<PASSWORD>"))
assert e.value.details() == "Your account is suspended."
def test_banned_user_without_password(db):
_quick_signup()
with session_scope() as session:
user = session.execute(select(User)).scalar_one()
user.hashed_password = None
with auth_api_session() as (auth_api, metadata_interceptor):
reply = auth_api.Login(auth_pb2.LoginReq(user="frodo"))
assert reply.next_step == auth_pb2.LoginRes.LoginStep.SENT_LOGIN_EMAIL
with session_scope() as session:
login_token = session.execute(select(LoginToken)).scalar_one().token
with session_scope() as session:
session.execute(select(User)).scalar_one().is_banned = True
with auth_api_session() as (auth_api, metadata_interceptor):
with pytest.raises(grpc.RpcError) as e:
auth_api.CompleteTokenLogin(auth_pb2.CompleteTokenLoginReq(login_token=login_token))
assert e.value.details() == "Your account is suspended."
def test_deleted_user(db):
_quick_signup()
with session_scope() as session:
session.execute(select(User)).scalar_one().is_deleted = True
with auth_api_session() as (auth_api, metadata_interceptor):
with pytest.raises(grpc.RpcError) as e:
reply = auth_api.Login(auth_pb2.LoginReq(user="frodo"))
assert e.value.code() == grpc.StatusCode.NOT_FOUND
assert e.value.details() == errors.USER_NOT_FOUND
def test_invalid_token(db):
user1, token1 = generate_user()
user2, token2 = generate_user()
wrong_token = random_hex(32)
with real_api_session(wrong_token) as api, pytest.raises(grpc.RpcError) as e:
res = api.GetUser(api_pb2.GetUserReq(user=user2.username))
assert e.value.code() == grpc.StatusCode.UNAUTHENTICATED
assert e.value.details() == "Unauthorized"
def test_password_reset(db):
    user, token = generate_user(hashed_password=hash_password("<PASSWORD>"))
with auth_api_session() as (auth_api, metadata_interceptor):
res = auth_api.ResetPassword(
auth_pb2.ResetPasswordReq(
user=user.username,
)
)
with session_scope() as session:
token = session.execute(select(PasswordResetToken)).scalar_one().token
with auth_api_session() as (auth_api, metadata_interceptor):
res = auth_api.CompletePasswordReset(auth_pb2.CompletePasswordResetReq(password_reset_token=token))
with session_scope() as session:
user = session.execute(select(User)).scalar_one()
assert not user.has_password
def test_password_reset_no_such_user(db):
user, token = generate_user()
with auth_api_session() as (auth_api, metadata_interceptor):
res = auth_api.ResetPassword(
auth_pb2.ResetPasswordReq(
user="nonexistentuser",
)
)
with session_scope() as session:
res = session.execute(select(PasswordResetToken)).scalar_one_or_none()
assert res is None
def test_password_reset_invalid_token(db):
    password = random_hex()
    user, token = generate_user(hashed_password=hash_password(password))
with auth_api_session() as (auth_api, metadata_interceptor):
res = auth_api.ResetPassword(
auth_pb2.ResetPasswordReq(
user=user.username,
)
)
with auth_api_session() as (auth_api, metadata_interceptor), pytest.raises(grpc.RpcError) as e:
res = auth_api.CompletePasswordReset(auth_pb2.CompletePasswordResetReq(password_reset_token="wrong<PASSWORD>"))
assert e.value.code() == grpc.StatusCode.NOT_FOUND
assert e.value.details() == errors.INVALID_TOKEN
with session_scope() as session:
user = session.execute(select(User)).scalar_one()
        assert user.hashed_password == hash_password(password)
def test_logout_invalid_token(db):
# Create our test user using signup
_quick_signup()
with auth_api_session() as (auth_api, metadata_interceptor):
reply = auth_api.Login(auth_pb2.LoginReq(user="frodo"))
assert reply.next_step == auth_pb2.LoginRes.LoginStep.NEED_PASSWORD
with auth_api_session() as (auth_api, metadata_interceptor):
auth_api.Authenticate(auth_pb2.AuthReq(user="frodo", password="<PASSWORD>"))
reply_token = get_session_cookie_token(metadata_interceptor)
# delete all login tokens
with session_scope() as session:
session.execute(delete(LoginToken))
# log out with non-existent token should still return a valid result
with auth_api_session() as (auth_api, metadata_interceptor):
auth_api.Deauthenticate(empty_pb2.Empty(), metadata=(("cookie", f"couchers-sesh={reply_token}"),))
reply_token = get_session_cookie_token(metadata_interceptor)
# make sure we set an empty cookie
assert reply_token == ""
def test_signup_without_password(db):
with auth_api_session() as (auth_api, metadata_interceptor):
with pytest.raises(grpc.RpcError) as e:
auth_api.SignupFlow(
auth_pb2.SignupFlowReq(
basic=auth_pb2.SignupBasic(name="Räksmörgås", email="<EMAIL>"),
account=auth_pb2.SignupAccount(
username="frodo",
password="<PASSWORD>",
city="Minas Tirith",
birthdate="9999-12-31", # arbitrary future birthdate
gender="Robot",
hosting_status=api_pb2.HOSTING_STATUS_CAN_HOST,
lat=1,
lng=1,
radius=100,
accept_tos=True,
),
feedback=auth_pb2.ContributorForm(),
)
)
assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
assert e.value.details() == errors.PASSWORD_TOO_SHORT
def test_signup_invalid_birthdate(db):
with auth_api_session() as (auth_api, metadata_interceptor):
with pytest.raises(grpc.RpcError) as e:
auth_api.SignupFlow(
auth_pb2.SignupFlowReq(
basic=auth_pb2.SignupBasic(name="Räksmörgås", email="<EMAIL>"),
account=auth_pb2.SignupAccount(
username="frodo",
password="<PASSWORD>",
city="Minas Tirith",
birthdate="9999-12-31", # arbitrary future birthdate
gender="Robot",
hosting_status=api_pb2.HOSTING_STATUS_CAN_HOST,
lat=1,
lng=1,
radius=100,
accept_tos=True,
),
feedback=auth_pb2.ContributorForm(),
)
)
assert e.value.code() == grpc.StatusCode.FAILED_PRECONDITION
assert e.value.details() == errors.INVALID_BIRTHDATE
res = auth_api.SignupFlow(
auth_pb2.SignupFlowReq(
basic=auth_pb2.SignupBasic(name="Christopher", email="<EMAIL>"),
account=auth_pb2.SignupAccount(
username="ceelo",
password="<PASSWORD>",
city="New York City",
birthdate="2000-12-31", # arbitrary birthdate older than 18 years
gender="Helicopter",
hosting_status=api_pb2.HOSTING_STATUS_CAN_HOST,
lat=1,
lng=1,
| |
<reponame>ultimateabhi719/ChIA-PIPE
#!/usr/bin/env python
"""
test_oldindex.py
Used to run tests on the test files found in /samples/old_index2/
From root, execute using `python test/test.py`
First, ensure you have fully installed the pypairix package:
`pip install pypairix --user`
OR
`sudo python setup.py install`
If you're having trouble running this file, try installing
python-dev and zlib1g-dev.
Note: the tests anticipate either juicer-formatted or 4DN-formatted pairs files.
The columns (given in the form <attribute> [col#]):
Juicer: chr1[1] pos1[2] chr2[5] pos2[6]
4DN: chr1[1] pos1[2] chr2[3] pos2[4]
"""
from __future__ import unicode_literals
import unittest
import gzip
import sys
import pypairix
import warnings
TEST_FILE_2D = 'samples/old_index2/merged_nodup.tab.chrblock_sorted.txt.gz'
TEST_FILE_2D_4DN = 'samples/old_index2/4dn.bsorted.chr21_22_only.pairs.gz'
TEST_FILE_2D_4DN_2 = 'samples/old_index2/test_4dn.pairs.gz'
TEST_FILE_2D_4DN_NOT_TRIANGLE = 'samples/old_index2/4dn.bsorted.chr21_22_only.nontriangle.pairs.gz'
TEST_FILE_1D = 'samples/old_index2/SRR1171591.variants.snp.vqsr.p.vcf.gz'
TEST_FILE_2D_SPACE = 'samples/old_index2/merged_nodups.space.chrblock_sorted.subsample1.txt.gz'
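
# A minimal, hedged sketch of the pypairix 2D query pattern exercised throughout the tests below
# (not invoked by the suite itself). The default query values are illustrative only; the query
# string format is 'chr1:start1-end1|chr2:start2-end2'.
def _example_querys2D(filename=TEST_FILE_2D_4DN, query='chr21:1-48129895|chr22:1-51304566'):
    pr = pypairix.open(filename)  # open the bgzipped, pairix-indexed pairs file
    return [list(rec) for rec in pr.querys2D(query)]
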
def get_header(filename, meta_char='#'):
"""Read gzipped file and retrieve lines beginning with '#'."""
retval = []
for line in gzip.open(filename):
try:
line = line.decode('utf-8')
except AttributeError:
pass
if line.startswith(meta_char):
retval.append(line.rstrip())
return retval
def get_chromsize(filename):
"""Read gzipped file and retrieve chromsize."""
retval = []
for line in gzip.open(filename):
try:
line = line.decode('utf-8')
except AttributeError:
pass
if line.startswith('#chromsize: '):
            fields = line.rstrip().split()  # split on arbitrary whitespace
chrname = fields[1]
chrsize = fields[2]
retval.append([chrname, chrsize])
return retval
def read_vcf(filename):
"""Read a VCF file and return a list of [chrom, start, end] items."""
retval = []
for line in gzip.open(filename):
try:
line = line.decode('utf-8')
except AttributeError:
pass
fields = line.rstrip().split('\t')
chrom = fields[0]
start = fields[1]
end = fields[1]
retval.append([chrom, start, end])
return retval
def find_pairs_type(filename, delimiter='\t'):
"""Attempt to determine if input pairs file is of type: juicer, 4DN,
or undetermined. Do this by testing string values of """
is_juicer = False
is_4DN = False
for line in gzip.open(filename):
try:
line = line.decode('utf-8')
except AttributeError:
pass
fields = line.rstrip().split(delimiter)
        if len(fields) >= 7 and is_str(fields[2]) and is_str(fields[6]):  # need at least 7 columns to read fields[6]
is_juicer = True
if is_str(fields[2]) and is_str(fields[4]):
is_4DN = True
if not is_juicer and is_4DN:
return '4DN'
elif is_juicer:
return 'juicer'
return 'undetermined'
def is_str(s):
"""Helper function to see if a string is an int. Return True if so"""
try:
int(s)
return True
except ValueError:
return False
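
# A small, hedged illustration of the detection rule above, using synthetic rows that are not
# taken from the sample files: juicer rows carry integer positions in columns 2 and 6, 4DN rows
# in columns 2 and 4, so the integer checks in find_pairs_type would classify the first layout
# as 'juicer' and the second as '4DN'.
_EXAMPLE_JUICER_FIELDS = ['0', '10', '25944', '1', '0', '20', '50000123', '2']
_EXAMPLE_4DN_FIELDS = ['.', 'chr21', '25944', 'chr22', '50000123', '+', '-']
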
def read_pairs(filename, file_type='undetermined', delimiter='\t'):
"""Read a pairs file and return a list of [chrom1, start1, end1, chrom2, start2, end2] items."""
# handle this a different way?
if file_type == 'undetermined':
return []
retval = []
for line in gzip.open(filename):
try:
line = line.decode('utf-8')
except AttributeError:
pass
if line.startswith('#'):
continue
fields = line.rstrip().split(delimiter)
if file_type == 'juicer':
chrom1 = fields[1]
start1 = fields[2]
chrom2 = fields[5]
start2 = fields[6]
elif file_type == '4DN':
chrom1 = fields[1]
start1 = fields[2]
chrom2 = fields[3]
start2 = fields[4]
retval.append([chrom1, start1, start1, chrom2, start2, start2])
return retval
def overlap1(a0, a1, b0, b1):
return int(a0) <= int(b1) and int(a1) >= int(b0)
def get_result(regions, chrom, start, end):
retval = []
for r in regions:
if r[0] == chrom and overlap1(r[1], r[2], start, end):
retval.append(r)
return retval
def get_result_2D(regions, chrom, start, end, chrom2, start2, end2):
retval = []
for reg in regions:
if reg[0] == chrom and overlap1(reg[1], reg[2], start, end) and reg[3] == chrom2 and overlap1(reg[4], reg[5], start2, end2):
retval.append(reg)
return retval
def get_result_1D_on_2D(regions, chrom, start, end, chrom2, start2, end2):
retval = []
for reg in regions:
if reg[0] == chrom and overlap1(reg[2], reg[2], start, end) and reg[3] == chrom2 and overlap1(reg[4], reg[4], start2, end2):
retval.append(reg)
return retval
def build_it_result(it, f_type):
"""Build results using the pairix iterator based on the filetype"""
if f_type == 'juicer':
pr_result = [[x[1], x[2], x[2], x[5], x[6], x[6]] for x in it]
elif f_type == '4DN':
pr_result = [[x[1], x[2], x[2], x[3], x[4], x[4]] for x in it]
elif f_type == 'undetermined':
pr_result = []
return pr_result
## 1D query on 1D indexed file
class PairixTest(unittest.TestCase):
regions = read_vcf(TEST_FILE_1D)
chrom = 'chr10'
start = 25944
end = 27000000
result = get_result(regions, chrom, start, end)
pr = pypairix.open(TEST_FILE_1D)
def test_query(self):
it = self.pr.query(self.chrom, self.start, self.end)
pr_result = [[x[0], x[1], x[1]] for x in it]
self.assertEqual(self.result, pr_result)
def test_querys(self):
query = '{}:{}-{}'.format(self.chrom, self.start, self.end)
it = self.pr.querys(query)
pr_result = [[x[0], x[1], x[1]] for x in it]
self.assertEqual(self.result, pr_result)
## semi-2D query on 2D indexed file
class PairixTest_2(unittest.TestCase):
f_type = find_pairs_type(TEST_FILE_2D)
regions = read_pairs(TEST_FILE_2D, f_type)
chrom = '10'
start = 25944
end = 27000000
chrom2 = '20'
result = get_result_2D(regions, chrom, start, end, chrom2, 0, sys.maxsize)
pr = pypairix.open(TEST_FILE_2D)
def test_querys(self):
query = '{}:{}-{}|{}'.format(self.chrom, self.start, self.end, self.chrom2)
it = self.pr.querys2D(query)
pr_result = build_it_result(it, self.f_type)
self.assertEqual(self.result, pr_result)
## 2D query on 2D indexed file
class PairixTest2D(unittest.TestCase):
f_type = find_pairs_type(TEST_FILE_2D)
regions = read_pairs(TEST_FILE_2D, f_type)
chrom = '10'
start = 1
end = 1000000
chrom2 = '20'
start2 = 50000000
end2 = 60000000
result = get_result_2D(regions, chrom, start, end, chrom2, start2, end2)
pr = pypairix.open(TEST_FILE_2D)
def test_query2(self):
it = self.pr.query2D(self.chrom, self.start, self.end, self.chrom2, self.start2, self.end2)
pr_result = build_it_result(it, self.f_type)
self.assertEqual(self.result, pr_result)
def test_querys_2(self):
query = '{}:{}-{}|{}:{}-{}'.format(self.chrom, self.start, self.end, self.chrom2, self.start2, self.end2)
it = self.pr.querys2D(query)
pr_result = build_it_result(it, self.f_type)
self.assertEqual(self.result, pr_result)
def test_querys_2_bad_order(self):
# build the query with coordinates in the wrong order
query = '{}:{}-{}|{}:{}-{}'.format(self.chrom, self.end, self.start, self.chrom2, self.start2, self.end2)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# trigger a warning
self.pr.querys2D(query)
# verify some things about the warning
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, pypairix.PairixWarning))
## 2D query on 2D indexed file with chromosomes input in reverse order
class PairixTest2D_reverse(unittest.TestCase):
f_type = find_pairs_type(TEST_FILE_2D)
regions = read_pairs(TEST_FILE_2D, f_type)
chrom2 = '10'
start2 = 1
end2 = 1000000
chrom = '20'
start = 50000000
end = 60000000
# reverse reversed results to get them in the required order here
result = get_result_2D(regions, chrom2, start2, end2, chrom, start, end)
pr = pypairix.open(TEST_FILE_2D)
def test_query2_rev(self):
# 1 is included as last argument to test flipping chromosome order
it = self.pr.query2D(self.chrom, self.start, self.end, self.chrom2, self.start2, self.end2, 1)
pr_result = build_it_result(it, self.f_type)
self.assertEqual(self.result, pr_result)
def test_querys_2_rev(self):
query = '{}:{}-{}|{}:{}-{}'.format(self.chrom, self.start, self.end, self.chrom2, self.start2, self.end2)
# 1 is included as last argument to test flipping chromosome order
it = self.pr.querys2D(query, 1)
pr_result = build_it_result(it, self.f_type)
self.assertEqual(self.result, pr_result)
def test_query2_rev_fail(self):
# do not include 1 to test flipped order of chrs; expect this to hit a PairixWarning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# trigger a warning
self.pr.query2D(self.chrom, self.start, self.end, self.chrom2, self.start2, self.end2)
# verify some things about the warning
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, pypairix.PairixWarning))
## 2D query on 2D indexed file with chromosomes using a 4DN pairs file
class PairixTest2D_4DN(unittest.TestCase):
f_type = find_pairs_type(TEST_FILE_2D_4DN)
regions = read_pairs(TEST_FILE_2D_4DN, f_type)
chrom = 'chr21'
start = 1
end = 48129895
chrom2 = 'chr22'
start2 = 1
end2 = 51304566
# reverse reversed results to get them in the required order here
result = get_result_2D(regions, chrom, start, end, chrom2, start2, end2)
pr = pypairix.open(TEST_FILE_2D_4DN)
def test_query2_4dn(self):
it = self.pr.query2D(self.chrom, self.start, self.end, self.chrom2, self.start2, self.end2)
pr_result = build_it_result(it, self.f_type)
self.assertEqual(self.result, pr_result)
def test_querys_2_4dn(self):
query = '{}:{}-{}|{}:{}-{}'.format(self.chrom, self.start, self.end, self.chrom2, self.start2, self.end2)
it = self.pr.querys2D(query)
pr_result = build_it_result(it, self.f_type)
self.assertEqual(self.result, pr_result)
## 2D query on 2D indexed space-delimited file
class PairixTest2DSpace(unittest.TestCase):
f_type = find_pairs_type(TEST_FILE_2D_SPACE, ' ')
regions = read_pairs(TEST_FILE_2D_SPACE, f_type, ' ')
chrom = '10'
start = 1
end = 1000000
chrom2 = '20'
start2 = 50000000
end2 = 60000000
result = get_result_2D(regions, chrom, start, end, chrom2, start2, end2)
pr = pypairix.open(TEST_FILE_2D_SPACE)
def test_query2(self):
it = self.pr.query2D(self.chrom, self.start, self.end, self.chrom2, self.start2, self.end2)
pr_result = build_it_result(it, self.f_type)
self.assertEqual(self.result, pr_result)
def test_querys_2(self):
query = '{}:{}-{}|{}:{}-{}'.format(self.chrom, self.start, self.end, self.chrom2, self.start2, self.end2)
it = self.pr.querys2D(query)
pr_result = build_it_result(it, self.f_type)
self.assertEqual(self.result, pr_result)
## 1D query on 2D indexed file
class PairixTest_1_on_2(unittest.TestCase):
f_type='4DN'
regions = read_pairs(TEST_FILE_2D_4DN_2, f_type)
chrom = 'chrY'
start = 1
end = 2000000
chrom2 = chrom
start2 = start
end2 = end
result = get_result_1D_on_2D(regions, chrom, start, end, chrom2, start2, end2)
pr = pypairix.open(TEST_FILE_2D_4DN_2)
def test_querys(self):
query = '{}:{}-{}'.format(self.chrom, self.start, self.end)
it = self.pr.querys2D(query)
pr_result = build_it_result(it, self.f_type)
self.assertEqual(self.result, pr_result)
class PairixTestBlocknames(unittest.TestCase):
def test_blocknames(self):
# block list obtained from get_blocknames()
pr = pypairix.open(TEST_FILE_2D)
retrieved_blocklist = pr.get_blocknames()
retrieved_blocklist.sort()
# true block list
blocklist=[]
f_type = find_pairs_type(TEST_FILE_2D)
regions = read_pairs(TEST_FILE_2D, f_type)
for a in regions:
blocklist.append(a[0] + '|' + a[3])
blocklist_uniq = list(set(blocklist))
blocklist_uniq.sort()
self.assertEqual(retrieved_blocklist, blocklist_uniq)
class PairixTestGetColumnIndex(unittest.TestCase):
def test_columnindex(self):
pr = pypairix.open(TEST_FILE_2D)
pr2 = pypairix.open(TEST_FILE_2D_4DN)
self.assertEqual(pr.get_chr1_col(),1)
self.assertEqual(pr.get_chr2_col(),5)
self.assertEqual(pr.get_startpos1_col(),2)
self.assertEqual(pr.get_startpos2_col(),6)
self.assertEqual(pr.get_endpos1_col(),2)
self.assertEqual(pr.get_endpos2_col(),6)
self.assertEqual(pr2.get_chr1_col(),1)
self.assertEqual(pr2.get_chr2_col(),3)
self.assertEqual(pr2.get_startpos1_col(),2)
self.assertEqual(pr2.get_startpos2_col(),4)
self.assertEqual(pr2.get_endpos1_col(),2)
self.assertEqual(pr2.get_endpos2_col(),4)
class PairixTestExists(unittest.TestCase):
def test_exists(self):
pr = pypairix.open(TEST_FILE_2D_4DN)
self.assertEqual(pr.exists("chr21|chr21"),1)
self.assertEqual(pr.exists("chr21|chr22"),1)
self.assertEqual(pr.exists("chr22|chr22"),1)
self.assertEqual(pr.exists("chr22|chr21"),0)
self.assertEqual(pr.exists("chr1|chr2"),0)
self.assertEqual(pr.exists("chr21"),0)
self.assertEqual(pr.exists("1|2"),0)
class PairixTestExists2(unittest.TestCase):
def test_exists2(self):
pr = pypairix.open(TEST_FILE_2D_4DN)
self.assertEqual(pr.exists2("chr21","chr21"),1)
self.assertEqual(pr.exists2("chr21","chr22"),1)
| |
#standard
import time
import unittest
import os
import sys
import xml.etree.ElementTree as ET
from copy import deepcopy
#TCA
from TCACore import logger, curdir, control_values, strategy_values
class ControlFiles():
"""
Call for reading and validating all Control Files
"""
def __init__(self, control_file = 'TCA_input.xml', TCA_version = 'standalone' ):
"""
Set all key default values for Control and Strategy file
:param control_file: string of master control file
:param TCA_version: string version of the TCA_version
"""
self.control_values = deepcopy(control_values)
self.strategy_values = deepcopy(strategy_values)
self.control_file = control_file
if sys.platform in ['win32']:
if ':' in control_file:
self.control_file = control_file
else:
self.control_file = curdir + control_file
else:
self.control_file = control_file
self.TCA_version = TCA_version
self.Control = {}
self.Strategy = {}
def Error_count(self):
"""
Counts the number of errors in control files
:return count: integer with the number of errors
"""
count = 0
for key in self.control_values:
if self.control_values[key][2] != '':
count +=1
for key in self.strategy_values:
if self.strategy_values[key][2] != '':
count +=1
return count
def map_dictionary(self):
"""
        Maps the first element (the value) of each control/strategy entry into the flat Control and Strategy dictionaries
"""
for key in self.control_values:
self.Control[key] = self.control_values[key][0]
for key in self.strategy_values:
self.Strategy[key] = self.strategy_values[key][0]
def Load_files(self):
"""
Main method to read control files
"""
self.Load_Control()
if self.control_values["StrategyFile"][0] != None:
self.Load_Strategy()
self.Create_input_summary_file()
if self.Error_count() >= 1.0:
logger.error('There is an error in the input values. Please check TCA_Input_Summary.csv for more information')
sys.exit(0)
self.map_dictionary()
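    # A minimal, hedged usage sketch of this class (illustrative only; uses the default control file name):
    #
    #   CF = ControlFiles('TCA_input.xml', TCA_version='standalone')
    #   CF.Load_files()                        # parse + validate the control/strategy XML
    #   output_level = CF.Control['OutputLevel']
    #   strategy = CF.Strategy                 # flat dict of strategy values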
def int_check(self, value, key):
"""
        Checks that the value is an integer; if not, reports an error
:param value: value of given input
:param key: key name of input
:return: value as int and string of error
"""
try:
int(value)
return int(value), ''
except:
return None, 'Error: %s value must be an integer' % (key)
def float_check(self, value, key):
"""
        Checks that the value is a float; if not, reports an error
:param value: value of the given input
:param key: key name of input
:return: value as float and string of error
"""
try:
float(value)
return float(value), ''
except:
return None, 'Error: %s value must be a float' % (key)
def file_check(self, value):
"""
:param value: string value of the file
:return:
"""
if sys.platform in ['win32']:
if ':' in value:
return value
else:
return curdir + value
else:
return value
def Load_Control(self):
"""
Loads and checks control file values
"""
try:
tree = ET.parse(self.control_file)
root = tree.getroot()
except:
if self.control_file != curdir + 'not a file.xml':
logger.info("Error: cannot find or invalid format for control_values file %s" % self.control_file)
raise
for key, value in self.control_values.iteritems():
if root.find(value[4]) != None:
if value[3] == 'int':
self.control_values[key][0], self.control_values[key][2] = self.int_check(root.find(value[4]).text, key)
elif value[3] == 'List_int':
if len(str(root.find(value[4]).text).split(',')) > 0:
self.control_values[key][0] = str(root.find(value[4]).text).split(',')
try:
self.control_values[key][0] = [ int(i) for i in self.control_values[key][0]]
except:
pass
elif value[3] == 'file':
if root.find(value[4]).text != 'None':
self.control_values[key][0] = self.file_check(root.find(value[4]).text)
elif value[3] == 'Upper':
self.control_values[key][0] = root.find(value[4]).text.upper()
else:
self.control_values[key][0] = root.find(value[4]).text
self.control_values[key][1] = 'User_Defined'
if self.control_values["OutputLevel"][0] !=0:
if self.control_file == "TCAinput.xml":
logger.info("Using default file name: TCAinput.xml")
logger.info("TCA Version 2.3 created by Noblis")
logger.info("Start time %s" % (time.strftime('%X', time.localtime(time.time()))))
logger.info("Loading control_values file %s" % (self.control_file))
        # Additional error checking
if self.control_values["OutputLevel"][0] not in [0,1,2,3]:
self.control_values["OutputLevel"][2] = 'Error: OutputLevel can only be values 0,1,2,3'
if (self.control_values["PDMMarketPenetration"][0] < 0 or self.control_values["PDMMarketPenetration"][0] > 100) \
and (self.control_values["PDMMarketPenetration"][0] != 0):
self.control_values["PDMMarketPenetration"][2] = 'Error: PDMMarketPenetration is less than 0 or greater than 100'
if (self.control_values["BSMMarketPenetration"][0] < 0 or self.control_values["BSMMarketPenetration"][0] > 100) \
and (self.control_values["BSMMarketPenetration"][0] != 0):
self.control_values["BSMMarketPenetration"][2] = 'Error: BSMMarketPenetration is less than 0 or greater than 100'
if (self.control_values["PDMDSRCMarketPenetration"][0] < 0 or self.control_values["PDMDSRCMarketPenetration"][0] > 100) \
and (self.control_values["PDMDSRCMarketPenetration"][0] != 0):
self.control_values["PDMDSRCMarketPenetration"][2] = 'Error: PDMDSRCMarketPenetration is less than 0 or greater than 100'
if (self.control_values["CAMMarketPenetration"][0] < 0 or self.control_values["CAMMarketPenetration"][0] > 100) \
and (self.control_values["CAMMarketPenetration"][0] != 0):
self.control_values["CAMMarketPenetration"][2] = 'Error: CAMMarketPenetration is less than 0 or greater than 100'
if (self.control_values["SPOTMarketPenetration"][0] < 0 or self.control_values["SPOTMarketPenetration"][0] > 100) \
and (self.control_values["SPOTMarketPenetration"][0] != 0):
self.control_values["SPOTMarketPenetration"][2] = 'Error: SPOTMarketPenetration is less than 0 or greater than 100'
if (self.control_values["PDMCellularMarketPenetration"][0] < 0 or self.control_values["PDMCellularMarketPenetration"][0] > 100) \
and (self.control_values["PDMCellularMarketPenetration"][0] != None):
self.control_values["PDMCellularMarketPenetration"][2] = 'Error: PDMCellularMarketPenetration is less than 0 or greater than 100'
if (self.control_values["PDMDualCommMarketPenetration"][0] < 0 or self.control_values["PDMDualCommMarketPenetration"][0] > 100) \
and (self.control_values["PDMDualCommMarketPenetration"][0] != None):
self.control_values["PDMDualCommMarketPenetration"][2] = 'Error: PDMDualCommMarketPenetration is less than 0 or greater than 100'
if (self.control_values["BSMDSRCMarketPenetration"][0] < 0 or self.control_values["BSMDSRCMarketPenetration"][0] > 100) \
and (self.control_values["BSMDSRCMarketPenetration"][0] != None):
self.control_values["BSMDSRCMarketPenetration"][2] = 'Error: BSMDSRCMarketPenetration is less than 0 or greater than 100'
if (self.control_values["BSMCellularMarketPenetration"][0] < 0 or self.control_values["BSMCellularMarketPenetration"][0] > 100) \
and (self.control_values["BSMCellularMarketPenetration"][0] != None):
self.control_values["BSMCellularMarketPenetration"][2] = 'Error: BSMCellularMarketPenetration is less than 0 or greater than 100'
if (self.control_values["BSMDualCommMarketPenetration"][0] < 0 or self.control_values["BSMDualCommMarketPenetration"][0] > 100) \
and (self.control_values["BSMDualCommMarketPenetration"][0] != None):
self.control_values["BSMDualCommMarketPenetration"][2] = 'Error: BSMDualCommMarketPenetration is less than 0 or greater than 100'
if self.control_values["BSMMarketPenetration"][0] != 0 and self.control_values["PDMMarketPenetration"][0] != 0:
if (self.control_values["BSMMarketPenetration"][0] + self.control_values["PDMMarketPenetration"][0]) > 100:
self.control_values["BSMMarketPenetration"][2] = 'Error: BSM and PDM equipage market penetration is more than 100%'
self.control_values["PDMMarketPenetration"][2] = 'Error: BSM and PDM equipage market penetration is more than 100%'
if self.control_values["PDMMarketPenetration"][0] != 0:
if (len(self.control_values["PDMDSRCVehicleTypes"][0]) > 0) or (len(self.control_values["PDMDSRCVehicleIDs"][0]) > 0) or \
(len(self.control_values["PDMCellularVehicleTypes"][0]) > 0) or (len(self.control_values["PDMCellularVehicleIDs"][0]) > 0):
self.control_values["PDMDSRCVehicleTypes"][2] = 'Error: Must use ONLY MarketPenetration, VehicleTypes, or VehicleIDs'
self.control_values["PDMDSRCVehicleIDs"][2] = 'Error: Must use ONLY MarketPenetration, VehicleTypes, or VehicleIDs'
self.control_values["PDMCellularVehicleTypes"][2] = 'Error: Must use ONLY MarketPenetration, VehicleTypes, or VehicleIDs'
self.control_values["PDMCellularVehicleIDs"][2] = 'Error: Must use ONLY MarketPenetration, VehicleTypes, or VehicleIDs'
if self.control_values["BSMMarketPenetration"][0] != 0:
if (len(self.control_values["BSMDSRCVehicleTypes"][0]) > 0) or (len(self.control_values["BSMDSRCVehicleIDs"][0]) > 0) or \
(len(self.control_values["BSMCellularVehicleTypes"][0]) > 0) or (len(self.control_values["BSMCellularVehicleIDs"][0]) > 0):
self.control_values["BSMDSRCVehicleTypes"][2] = 'Error: Must use ONLY MarketPenetration, VehicleTypes, or VehicleIDs'
self.control_values["BSMDSRCVehicleIDs"][2] = 'Error: Must use ONLY MarketPenetration, VehicleTypes, or VehicleIDs'
self.control_values["BSMCellularVehicleTypes"][2] = 'Error: Must use ONLY MarketPenetration, VehicleTypes, or VehicleIDs'
self.control_values["BSMCellularVehicleIDs"][2] = 'Error: Must use ONLY MarketPenetration, VehicleTypes, or VehicleIDs'
if (self.TCA_version == 'standalone') and (self.control_values["FileType"][0] == 'CSV') and \
((self.control_values["XColumn"][0] == None) or (self.control_values["YColumn"][0] == None) or \
(self.control_values["SpdColumn"][0] == None) or (self.control_values["IDColumn"][0] == None) or \
(self.control_values["TimeColumn"][0] == None)):
self.control_values["XColumn"][2] = 'Error: Missing either XColumn YColumn SpdColumn IDColumn or TimeColumn xml tag'
self.control_values["YColumn"][2] = 'Error: Missing either XColumn YColumn SpdColumn IDColumn or TimeColumn xml tag'
self.control_values["SpdColumn"][2] = 'Error: Missing either XColumn YColumn SpdColumn IDColumn or TimeColumn xml tag'
self.control_values["IDColumn"][2] = 'Error: Missing either XColumn YColumn SpdColumn IDColumn or TimeColumn xml tag'
self.control_values["TimeColumn"][2] = 'Error: Missing either XColumn YColumn SpdColumn IDColumn or TimeColumn xml tag'
if (self.TCA_version == 'standalone') and (self.control_values["PDMMarketPenetration"][0] == 0) and \
(self.control_values["PDMVehicleIDs"][0] == None) and (self.control_values["PDMVehicleTypes"][0] == None) and \
(self.control_values["BSMVehicleIDs"][0] == None) and (self.control_values["BSMVehicleTypes"][0] == None) and \
(self.control_values["BSMMarketPenetration"][0] == None) and (self.control_values["CAMMarketPenetration"][0] == 0) and \
(self.control_values["CAMVehicleIDs"][0] == None) and (self.control_values["CAMVehicleTypes"][0] == None) and \
(self.control_values["SPOTMarketPenetration"][0] == 0) and (self.control_values["SPOTVehicleIDs"][0] == None) and \
(self.control_values["SPOTVehicleTypes"][0] == None) and (self.control_values["DualPDMBSMMarketPenetration"][0] == 0) and \
(self.control_values["DualPDMBSMVehicleIDs"][0] == None) and (self.control_values["DualPDMBSMVehicleTypes"][0] == None):
self.control_values["PDMMarketPenetration"][2] = 'Error: No vehicle equipage detected. Must select either include MarketPenetration, VehicleIDs, ' \
'or VehicleTypes to define equipped vehicles'
self.control_values["BSMMarketPenetration"][2] = 'Error: No vehicle equipage detected. Must select either include MarketPenetration, VehicleIDs, ' \
'or VehicleTypes to define equipped vehicles'
self.control_values["DualPDMBSMMarketPenetration"][2] = 'Error: No vehicle equipage detected. Must select either include MarketPenetration, VehicleIDs, ' \
'or VehicleTypes to define equipped vehicles'
self.control_values["CAMMarketPenetration"][2] = 'Error: No vehicle equipage detected. Must select either include MarketPenetration, VehicleIDs, ' \
'or VehicleTypes to define equipped vehicles'
self.control_values["SPOTMarketPenetration"][2] = 'Error: No vehicle equipage detected. Must select either include MarketPenetration, VehicleIDs, ' \
'or VehicleTypes to define equipped vehicles'
if (self.TCA_version != 'standalone'):
color_values = ['BSMTransColor','PDMPeriodicColor','PDMStopColor','PDMStartColor','PDMDSRCTransColor','PDMCellularTransColor','SpotBehaviorColor','SpotTravelColor',\
'SpotTransColor','DualPDMBSMColor','PDMDefaultColor','BSMDefaultColor','DefaultColor']
for color in color_values:
if len(self.control_values[color][0]) != 3:
self.control_values[color][2] = 'Error: Vehicle color must be specified as a list of three integers RGB values'
def Load_Strategy(self):
"""
        Loads and checks strategy file values
"""
if self.control_values['OutputLevel'][0] >= 1:
logger.info("Loading strategy_values File %s" % (self.control_values["StrategyFile"][0]))
try:
tree = ET.parse(self.control_values["StrategyFile"][0])
root = tree.getroot()
except:
if self.control_file != 'not a file.xml':
logger.info("Error: cannot find or invalid format for strategy_values file %s" % self.control_values["StrategyFile"][0])
print
raise
for key, value in self.strategy_values.iteritems():
if root.find(value[4]) != None:
if value[3] == 'int':
self.strategy_values[key][0], self.strategy_values[key][2] | |
'mount_fuji', 'mount_fuji'),
(b'\xe2\x9b\xb0', 'mountain', 'mountain_vs16'),
(b'\xf0\x9f\x9a\xb5', 'mountain_bicyclist', 'mountain_bicyclist', 'person_mountain_biking'),
(b'\xf0\x9f\x9a\xb5\xf0\x9f\x8f\xbb', 'mountain_bicyclist_tone1', 'mountain_bicyclist_tone1', 'person_mountain_biking_tone1'),
(b'\xf0\x9f\x9a\xb5\xf0\x9f\x8f\xbc', 'mountain_bicyclist_tone2', 'mountain_bicyclist_tone2', 'person_mountain_biking_tone2'),
(b'\xf0\x9f\x9a\xb5\xf0\x9f\x8f\xbd', 'mountain_bicyclist_tone3', 'mountain_bicyclist_tone3', 'person_mountain_biking_tone3'),
(b'\xf0\x9f\x9a\xb5\xf0\x9f\x8f\xbe', 'mountain_bicyclist_tone4', 'mountain_bicyclist_tone4', 'person_mountain_biking_tone4'),
(b'\xf0\x9f\x9a\xb5\xf0\x9f\x8f\xbf', 'mountain_bicyclist_tone5', 'mountain_bicyclist_tone5', 'person_mountain_biking_tone5'),
(b'\xf0\x9f\x9a\xa0', 'mountain_cableway', 'mountain_cableway'),
(b'\xf0\x9f\x9a\x9e', 'mountain_railway', 'mountain_railway'),
(b'\xf0\x9f\x8f\x94', 'mountain_snow', 'mountain_snow_vs16'),
(b'\xf0\x9f\x90\x81', 'mouse2', 'mouse2'),
(b'\xf0\x9f\x90\xad', 'mouse', 'mouse'),
(b'\xf0\x9f\x96\xb1', 'mouse_three_button', 'mouse_three_button_vs16'),
(b'\xf0\x9f\x8e\xa5', 'movie_camera', 'movie_camera'),
(b'\xf0\x9f\x97\xbf', 'moyai', 'moyai'),
(b'\xf0\x9f\x92\xaa', 'muscle', 'muscle'),
(b'\xf0\x9f\x92\xaa\xf0\x9f\x8f\xbb', 'muscle_tone1', 'muscle_tone1'),
(b'\xf0\x9f\x92\xaa\xf0\x9f\x8f\xbc', 'muscle_tone2', 'muscle_tone2'),
(b'\xf0\x9f\x92\xaa\xf0\x9f\x8f\xbd', 'muscle_tone3', 'muscle_tone3'),
(b'\xf0\x9f\x92\xaa\xf0\x9f\x8f\xbe', 'muscle_tone4', 'muscle_tone4'),
(b'\xf0\x9f\x92\xaa\xf0\x9f\x8f\xbf', 'muscle_tone5', 'muscle_tone5'),
(b'\xf0\x9f\x8d\x84', 'mushroom', 'mushroom'),
(b'\xf0\x9f\x8e\xb9', 'musical_keyboard', 'musical_keyboard'),
(b'\xf0\x9f\x8e\xb5', 'musical_note', 'musical_note'),
(b'\xf0\x9f\x8e\xbc', 'musical_score', 'musical_score'),
(b'\xf0\x9f\x94\x87', 'mute', 'mute'),
(b'\xf0\x9f\x92\x85', 'nail_care', 'nail_care'),
(b'\xf0\x9f\x92\x85\xf0\x9f\x8f\xbb', 'nail_care_tone1', 'nail_care_tone1'),
(b'\xf0\x9f\x92\x85\xf0\x9f\x8f\xbc', 'nail_care_tone2', 'nail_care_tone2'),
(b'\xf0\x9f\x92\x85\xf0\x9f\x8f\xbd', 'nail_care_tone3', 'nail_care_tone3'),
(b'\xf0\x9f\x92\x85\xf0\x9f\x8f\xbe', 'nail_care_tone4', 'nail_care_tone4'),
(b'\xf0\x9f\x92\x85\xf0\x9f\x8f\xbf', 'nail_care_tone5', 'nail_care_tone5'),
(b'\xf0\x9f\x93\x9b', 'name_badge', 'name_badge'),
(b'\xf0\x9f\x8f\x9e', 'national_park', 'national_park_vs16'),
(b'\xf0\x9f\xa4\xa2', 'nauseated_face', 'nauseated_face', 'sick'),
(b'\xf0\x9f\x91\x94', 'necktie', 'necktie'),
(b'\xe2\x9d\x8e', 'negative_squared_cross_mark', 'negative_squared_cross_mark'),
(b'\xf0\x9f\xa4\x93', 'nerd', 'nerd', 'nerd_face'),
(b'\xf0\x9f\x86\x95', 'new', 'new'),
(b'\xf0\x9f\x8c\x91', 'new_moon', 'new_moon'),
(b'\xf0\x9f\x8c\x9a', 'new_moon_with_face', 'new_moon_with_face'),
(b'\xf0\x9f\x97\x9e', 'newspaper2', 'newspaper2_vs16'),
(b'\xf0\x9f\x93\xb0', 'newspaper', 'newspaper'),
(b'\xe2\x8f\xad', 'next_track', 'next_track_vs16'),
(b'\xf0\x9f\x86\x96', 'ng', 'ng'),
(b'\xf0\x9f\x8c\x83', 'night_with_stars', 'night_with_stars'),
(b'9\xe2\x83\xa3', 'nine', 'nine_vs16'),
(b'\xf0\x9f\x94\x95', 'no_bell', 'no_bell'),
(b'\xf0\x9f\x9a\xb3', 'no_bicycles', 'no_bicycles'),
(b'\xe2\x9b\x94', 'no_entry', 'no_entry'),
(b'\xf0\x9f\x9a\xab', 'no_entry_sign', 'no_entry_sign'),
(b'\xf0\x9f\x99\x85', 'no_good', 'no_good', 'person_gesturing_no'),
(b'\xf0\x9f\x99\x85\xf0\x9f\x8f\xbb', 'no_good_tone1', 'no_good_tone1', 'person_gesturing_no_tone1'),
(b'\xf0\x9f\x99\x85\xf0\x9f\x8f\xbc', 'no_good_tone2', 'no_good_tone2', 'person_gesturing_no_tone2'),
(b'\xf0\x9f\x99\x85\xf0\x9f\x8f\xbd', 'no_good_tone3', 'no_good_tone3', 'person_gesturing_no_tone3'),
(b'\xf0\x9f\x99\x85\xf0\x9f\x8f\xbe', 'no_good_tone4', 'no_good_tone4', 'person_gesturing_no_tone4'),
(b'\xf0\x9f\x99\x85\xf0\x9f\x8f\xbf', 'no_good_tone5', 'no_good_tone5', 'person_gesturing_no_tone5'),
(b'\xf0\x9f\x93\xb5', 'no_mobile_phones', 'no_mobile_phones'),
(b'\xf0\x9f\x98\xb6', 'no_mouth', 'no_mouth'),
(b'\xf0\x9f\x9a\xb7', 'no_pedestrians', 'no_pedestrians'),
(b'\xf0\x9f\x9a\xad', 'no_smoking', 'no_smoking'),
(b'\xf0\x9f\x9a\xb1', 'non_potable_water', 'non_potable_water'),
(b'\xf0\x9f\x91\x83', 'nose', 'nose'),
(b'\xf0\x9f\x91\x83\xf0\x9f\x8f\xbb', 'nose_tone1', 'nose_tone1'),
(b'\xf0\x9f\x91\x83\xf0\x9f\x8f\xbc', 'nose_tone2', 'nose_tone2'),
(b'\xf0\x9f\x91\x83\xf0\x9f\x8f\xbd', 'nose_tone3', 'nose_tone3'),
(b'\xf0\x9f\x91\x83\xf0\x9f\x8f\xbe', 'nose_tone4', 'nose_tone4'),
(b'\xf0\x9f\x91\x83\xf0\x9f\x8f\xbf', 'nose_tone5', 'nose_tone5'),
(b'\xf0\x9f\x93\x93', 'notebook', 'notebook'),
(b'\xf0\x9f\x93\x94', 'notebook_with_decorative_cover', 'notebook_with_decorative_cover'),
(b'\xf0\x9f\x97\x92', 'notepad_spiral', 'notepad_spiral_vs16'),
(b'\xf0\x9f\x8e\xb6', 'notes', 'notes'),
(b'\xf0\x9f\x94\xa9', 'nut_and_bolt', 'nut_and_bolt'),
(b'\xf0\x9f\x85\xbe', 'o2', 'o2_vs16'),
(b'\xe2\xad\x95', 'o', 'o'),
(b'\xf0\x9f\x8c\x8a', 'ocean', 'ocean'),
(b'\xf0\x9f\x9b\x91', 'octagonal_sign', 'octagonal_sign', 'stop_sign'),
(b'\xf0\x9f\x90\x99', 'octopus', 'octopus'),
(b'\xf0\x9f\x8d\xa2', 'oden', 'oden'),
(b'\xf0\x9f\x8f\xa2', 'office', 'office'),
(b'\xf0\x9f\x9b\xa2', 'oil', 'oil_vs16'),
(b'\xf0\x9f\x86\x97', 'ok', 'ok'),
(b'\xf0\x9f\x91\x8c', 'ok_hand', 'ok_hand'),
(b'\xf0\x9f\x91\x8c\xf0\x9f\x8f\xbb', 'ok_hand_tone1', 'ok_hand_tone1'),
(b'\xf0\x9f\x91\x8c\xf0\x9f\x8f\xbc', 'ok_hand_tone2', 'ok_hand_tone2'),
(b'\xf0\x9f\x91\x8c\xf0\x9f\x8f\xbd', 'ok_hand_tone3', 'ok_hand_tone3'),
(b'\xf0\x9f\x91\x8c\xf0\x9f\x8f\xbe', 'ok_hand_tone4', 'ok_hand_tone4'),
(b'\xf0\x9f\x91\x8c\xf0\x9f\x8f\xbf', 'ok_hand_tone5', 'ok_hand_tone5'),
(b'\xf0\x9f\x99\x86', 'ok_woman', 'ok_woman', 'person_gesturing_ok'),
(b'\xf0\x9f\x99\x86\xf0\x9f\x8f\xbb', 'ok_woman_tone1', 'ok_woman_tone1', 'person_gesturing_ok_tone1'),
(b'\xf0\x9f\x99\x86\xf0\x9f\x8f\xbc', 'ok_woman_tone2', 'ok_woman_tone2', 'person_gesturing_ok_tone2'),
(b'\xf0\x9f\x99\x86\xf0\x9f\x8f\xbd', 'ok_woman_tone3', 'ok_woman_tone3', 'person_gesturing_ok_tone3'),
(b'\xf0\x9f\x99\x86\xf0\x9f\x8f\xbe', 'ok_woman_tone4', 'ok_woman_tone4', 'person_gesturing_ok_tone4'),
(b'\xf0\x9f\x99\x86\xf0\x9f\x8f\xbf', 'ok_woman_tone5', 'ok_woman_tone5', 'person_gesturing_ok_tone5'),
(b'\xf0\x9f\x91\xb4', 'older_man', 'older_man'),
(b'\xf0\x9f\x91\xb4\xf0\x9f\x8f\xbb', 'older_man_tone1', 'older_man_tone1'),
(b'\xf0\x9f\x91\xb4\xf0\x9f\x8f\xbc', 'older_man_tone2', 'older_man_tone2'),
(b'\xf0\x9f\x91\xb4\xf0\x9f\x8f\xbd', 'older_man_tone3', 'older_man_tone3'),
(b'\xf0\x9f\x91\xb4\xf0\x9f\x8f\xbe', 'older_man_tone4', 'older_man_tone4'),
(b'\xf0\x9f\x91\xb4\xf0\x9f\x8f\xbf', 'older_man_tone5', 'older_man_tone5'),
(b'\xf0\x9f\x95\x89', 'om_symbol', 'om_symbol_vs16'),
(b'\xf0\x9f\x94\x9b', 'on', 'on'),
(b'\xf0\x9f\x9a\x98', 'oncoming_automobile', 'oncoming_automobile'),
(b'\xf0\x9f\x9a\x8d', 'oncoming_bus', 'oncoming_bus'),
(b'\xf0\x9f\x9a\x94', 'oncoming_police_car', 'oncoming_police_car'),
(b'\xf0\x9f\x9a\x96', 'oncoming_taxi', 'oncoming_taxi'),
(b'1\xe2\x83\xa3', 'one', 'one_vs16'),
(b'\xf0\x9f\x93\x82', 'open_file_folder', 'open_file_folder'),
(b'\xf0\x9f\x91\x90', 'open_hands', 'open_hands'),
(b'\xf0\x9f\x91\x90\xf0\x9f\x8f\xbb', 'open_hands_tone1', 'open_hands_tone1'),
(b'\xf0\x9f\x91\x90\xf0\x9f\x8f\xbc', 'open_hands_tone2', 'open_hands_tone2'),
(b'\xf0\x9f\x91\x90\xf0\x9f\x8f\xbd', 'open_hands_tone3', 'open_hands_tone3'),
(b'\xf0\x9f\x91\x90\xf0\x9f\x8f\xbe', 'open_hands_tone4', 'open_hands_tone4'),
(b'\xf0\x9f\x91\x90\xf0\x9f\x8f\xbf', 'open_hands_tone5', 'open_hands_tone5'),
(b'\xe2\x9b\x8e', 'ophiuchus', 'ophiuchus'),
(b'\xf0\x9f\x93\x99', 'orange_book', 'orange_book'),
(b'\xe2\x98\xa6', 'orthodox_cross', 'orthodox_cross_vs16'),
(b'\xf0\x9f\x93\xa4', 'outbox_tray', 'outbox_tray'),
(b'\xf0\x9f\xa6\x89', 'owl', 'owl'),
(b'\xf0\x9f\x90\x82', 'ox', 'ox'),
(b'\xf0\x9f\x93\xa6', 'package', 'package'),
(b'\xf0\x9f\xa5\x98', 'paella', 'paella', 'shallow_pan_of_food'),
(b'\xf0\x9f\x93\x84', 'page_facing_up', 'page_facing_up'),
(b'\xf0\x9f\x93\x83', 'page_with_curl', 'page_with_curl'),
(b'\xf0\x9f\x93\x9f', 'pager', 'pager'),
(b'\xf0\x9f\x8c\xb4', 'palm_tree', 'palm_tree'),
(b'\xf0\x9f\xa5\x9e', 'pancakes', 'pancakes'),
(b'\xf0\x9f\x90\xbc', 'panda_face', 'panda_face'),
(b'\xf0\x9f\x93\x8e', 'paperclip', 'paperclip'),
(b'\xf0\x9f\x85\xbf', 'parking', 'parking_vs16'),
(b'\xe3\x80\xbd', 'part_alternation_mark', 'part_alternation_mark_vs16'),
(b'\xe2\x9b\x85', 'partly_sunny', 'partly_sunny'),
(b'\xf0\x9f\x9b\x82', 'passport_control', 'passport_control'),
(b'\xe2\x98\xae', 'peace', 'peace_vs16'),
(b'\xf0\x9f\x8d\x91', 'peach', 'peach'),
(b'\xf0\x9f\xa5\x9c', 'peanuts', 'peanuts', 'shelled_peanut'),
(b'\xf0\x9f\x8d\x90', 'pear', 'pear'),
(b'\xe2\x9c\x8f', 'pencil2', 'pencil2_vs16'),
(b'\xf0\x9f\x93\x9d', 'pencil', 'pencil', 'memo'),
(b'\xf0\x9f\x90\xa7', 'penguin', 'penguin'),
(b'\xf0\x9f\x98\x94', 'pensive', 'pensive'),
(b'\xf0\x9f\x8e\xad', 'performing_arts', 'performing_arts'),
(b'\xf0\x9f\x98\xa3', 'persevere', 'persevere'),
(b'\xf0\x9f\x99\x8d', 'person_frowning', 'person_frowning'),
(b'\xf0\x9f\x99\x8d\xf0\x9f\x8f\xbb', 'person_frowning_tone1', 'person_frowning_tone1'),
(b'\xf0\x9f\x99\x8d\xf0\x9f\x8f\xbc', 'person_frowning_tone2', 'person_frowning_tone2'),
(b'\xf0\x9f\x99\x8d\xf0\x9f\x8f\xbd', 'person_frowning_tone3', 'person_frowning_tone3'),
(b'\xf0\x9f\x99\x8d\xf0\x9f\x8f\xbe', 'person_frowning_tone4', 'person_frowning_tone4'),
(b'\xf0\x9f\x99\x8d\xf0\x9f\x8f\xbf', 'person_frowning_tone5', 'person_frowning_tone5'),
(b'\xf0\x9f\x91\xb1', 'person_with_blond_hair', 'person_with_blond_hair', 'blond_haired_person'),
(b'\xf0\x9f\x91\xb1\xf0\x9f\x8f\xbb', 'person_with_blond_hair_tone1', 'person_with_blond_hair_tone1', 'blond_haired_person_tone1'),
(b'\xf0\x9f\x91\xb1\xf0\x9f\x8f\xbc', 'person_with_blond_hair_tone2', 'person_with_blond_hair_tone2', 'blond_haired_person_tone2'),
(b'\xf0\x9f\x91\xb1\xf0\x9f\x8f\xbd', 'person_with_blond_hair_tone3', 'person_with_blond_hair_tone3', 'blond_haired_person_tone3'),
(b'\xf0\x9f\x91\xb1\xf0\x9f\x8f\xbe', 'person_with_blond_hair_tone4', 'person_with_blond_hair_tone4', 'blond_haired_person_tone4'),
(b'\xf0\x9f\x91\xb1\xf0\x9f\x8f\xbf', 'person_with_blond_hair_tone5', 'person_with_blond_hair_tone5', 'blond_haired_person_tone5'),
(b'\xf0\x9f\x99\x8e', 'person_with_pouting_face', 'person_with_pouting_face', 'person_pouting'),
(b'\xf0\x9f\x99\x8e\xf0\x9f\x8f\xbb', 'person_with_pouting_face_tone1', 'person_with_pouting_face_tone1', 'person_pouting_tone1'),
(b'\xf0\x9f\x99\x8e\xf0\x9f\x8f\xbc', 'person_with_pouting_face_tone2', 'person_with_pouting_face_tone2', 'person_pouting_tone2'),
(b'\xf0\x9f\x99\x8e\xf0\x9f\x8f\xbd', 'person_with_pouting_face_tone3', 'person_with_pouting_face_tone3', 'person_pouting_tone3'),
(b'\xf0\x9f\x99\x8e\xf0\x9f\x8f\xbe', 'person_with_pouting_face_tone4', 'person_with_pouting_face_tone4', 'person_pouting_tone4'),
(b'\xf0\x9f\x99\x8e\xf0\x9f\x8f\xbf', 'person_with_pouting_face_tone5', 'person_with_pouting_face_tone5', 'person_pouting_tone5'),
(b'\xe2\x9b\x8f', 'pick', 'pick_vs16'),
(b'\xf0\x9f\x90\x96', 'pig2', 'pig2'),
(b'\xf0\x9f\x90\xb7', 'pig', 'pig'),
(b'\xf0\x9f\x90\xbd', 'pig_nose', 'pig_nose'),
(b'\xf0\x9f\x92\x8a', 'pill', 'pill'),
(b'\xf0\x9f\x8d\x8d', 'pineapple', 'pineapple'),
(b'\xf0\x9f\x8f\x93', 'ping_pong', 'ping_pong', 'table_tennis'),
(b'\xe2\x99\x93', 'pisces', 'pisces'),
(b'\xf0\x9f\x8d\x95', 'pizza', 'pizza'),
(b'\xf0\x9f\x9b\x90', 'place_of_worship', 'place_of_worship', 'worship_symbol'),
(b'\xe2\x8f\xaf', 'play_pause', 'play_pause_vs16'),
(b'\xf0\x9f\x91\x87', 'point_down', 'point_down'),
(b'\xf0\x9f\x91\x87\xf0\x9f\x8f\xbb', 'point_down_tone1', 'point_down_tone1'),
(b'\xf0\x9f\x91\x87\xf0\x9f\x8f\xbc', 'point_down_tone2', 'point_down_tone2'),
(b'\xf0\x9f\x91\x87\xf0\x9f\x8f\xbd', 'point_down_tone3', 'point_down_tone3'),
(b'\xf0\x9f\x91\x87\xf0\x9f\x8f\xbe', 'point_down_tone4', 'point_down_tone4'),
(b'\xf0\x9f\x91\x87\xf0\x9f\x8f\xbf', 'point_down_tone5', 'point_down_tone5'),
(b'\xf0\x9f\x91\x88', 'point_left', 'point_left'),
(b'\xf0\x9f\x91\x88\xf0\x9f\x8f\xbb', 'point_left_tone1', 'point_left_tone1'),
(b'\xf0\x9f\x91\x88\xf0\x9f\x8f\xbc', 'point_left_tone2', 'point_left_tone2'),
(b'\xf0\x9f\x91\x88\xf0\x9f\x8f\xbd', 'point_left_tone3', 'point_left_tone3'),
(b'\xf0\x9f\x91\x88\xf0\x9f\x8f\xbe', 'point_left_tone4', 'point_left_tone4'),
(b'\xf0\x9f\x91\x88\xf0\x9f\x8f\xbf', 'point_left_tone5', 'point_left_tone5'),
(b'\xf0\x9f\x91\x89', 'point_right', 'point_right'),
(b'\xf0\x9f\x91\x89\xf0\x9f\x8f\xbb', 'point_right_tone1', 'point_right_tone1'),
(b'\xf0\x9f\x91\x89\xf0\x9f\x8f\xbc', 'point_right_tone2', 'point_right_tone2'),
(b'\xf0\x9f\x91\x89\xf0\x9f\x8f\xbd', 'point_right_tone3', 'point_right_tone3'),
(b'\xf0\x9f\x91\x89\xf0\x9f\x8f\xbe', 'point_right_tone4', 'point_right_tone4'),
(b'\xf0\x9f\x91\x89\xf0\x9f\x8f\xbf', 'point_right_tone5', 'point_right_tone5'),
(b'\xe2\x98\x9d', 'point_up', 'point_up_vs16'),
(b'\xe2\x98\x9d\xf0\x9f\x8f\xbb', 'point_up_tone1', 'point_up_tone1'),
(b'\xe2\x98\x9d\xf0\x9f\x8f\xbc', 'point_up_tone2', 'point_up_tone2'),
(b'\xe2\x98\x9d\xf0\x9f\x8f\xbd', 'point_up_tone3', 'point_up_tone3'),
(b'\xe2\x98\x9d\xf0\x9f\x8f\xbe', 'point_up_tone4', 'point_up_tone4'),
(b'\xe2\x98\x9d\xf0\x9f\x8f\xbf', 'point_up_tone5', 'point_up_tone5'),
(b'\xf0\x9f\x91\x86', 'point_up_2', 'point_up_2'),
(b'\xf0\x9f\x91\x86\xf0\x9f\x8f\xbb', 'point_up_2_tone1', 'point_up_2_tone1'),
(b'\xf0\x9f\x91\x86\xf0\x9f\x8f\xbc', 'point_up_2_tone2', 'point_up_2_tone2'),
(b'\xf0\x9f\x91\x86\xf0\x9f\x8f\xbd', 'point_up_2_tone3', 'point_up_2_tone3'),
(b'\xf0\x9f\x91\x86\xf0\x9f\x8f\xbe', 'point_up_2_tone4', 'point_up_2_tone4'),
(b'\xf0\x9f\x91\x86\xf0\x9f\x8f\xbf', 'point_up_2_tone5', 'point_up_2_tone5'),
(b'\xf0\x9f\x9a\x93', 'police_car', 'police_car'),
(b'\xf0\x9f\x90\xa9', 'poodle', 'poodle'),
(b'\xf0\x9f\x8d\xbf', 'popcorn', 'popcorn'),
(b'\xf0\x9f\x8f\xa3', 'post_office', 'post_office'),
(b'\xf0\x9f\x93\xaf', 'postal_horn', 'postal_horn'),
(b'\xf0\x9f\x93\xae', 'postbox', 'postbox'),
(b'\xf0\x9f\x9a\xb0', 'potable_water', 'potable_water'),
(b'\xf0\x9f\xa5\x94', 'potato', 'potato'),
(b'\xf0\x9f\x91\x9d', 'pouch', 'pouch'),
(b'\xf0\x9f\x8d\x97', 'poultry_leg', 'poultry_leg'),
(b'\xf0\x9f\x92\xb7', 'pound', 'pound'),
(b'\xf0\x9f\x98\xbe', 'pouting_cat', 'pouting_cat'),
(b'\xf0\x9f\x99\x8f', 'pray', 'pray'),
(b'\xf0\x9f\x99\x8f\xf0\x9f\x8f\xbb', 'pray_tone1', 'pray_tone1'),
(b'\xf0\x9f\x99\x8f\xf0\x9f\x8f\xbc', 'pray_tone2', 'pray_tone2'),
(b'\xf0\x9f\x99\x8f\xf0\x9f\x8f\xbd', 'pray_tone3', 'pray_tone3'),
(b'\xf0\x9f\x99\x8f\xf0\x9f\x8f\xbe', 'pray_tone4', 'pray_tone4'),
(b'\xf0\x9f\x99\x8f\xf0\x9f\x8f\xbf', 'pray_tone5', 'pray_tone5'),
(b'\xf0\x9f\x93\xbf', 'prayer_beads', 'prayer_beads'),
(b'\xe2\x8f\xae', 'previous_track', 'previous_track_vs16'),
(b'\xf0\x9f\xa4\xb4', 'prince', 'prince'),
(b'\xf0\x9f\xa4\xb4\xf0\x9f\x8f\xbb', 'prince_tone1', 'prince_tone1'),
(b'\xf0\x9f\xa4\xb4\xf0\x9f\x8f\xbc', 'prince_tone2', 'prince_tone2'),
(b'\xf0\x9f\xa4\xb4\xf0\x9f\x8f\xbd', 'prince_tone3', 'prince_tone3'),
(b'\xf0\x9f\xa4\xb4\xf0\x9f\x8f\xbe', 'prince_tone4', 'prince_tone4'),
(b'\xf0\x9f\xa4\xb4\xf0\x9f\x8f\xbf', 'prince_tone5', 'prince_tone5'),
(b'\xf0\x9f\x91\xb8', 'princess', 'princess'),
(b'\xf0\x9f\x91\xb8\xf0\x9f\x8f\xbb', 'princess_tone1', 'princess_tone1'),
(b'\xf0\x9f\x91\xb8\xf0\x9f\x8f\xbc', 'princess_tone2', 'princess_tone2'),
(b'\xf0\x9f\x91\xb8\xf0\x9f\x8f\xbd', 'princess_tone3', 'princess_tone3'),
(b'\xf0\x9f\x91\xb8\xf0\x9f\x8f\xbe', 'princess_tone4', 'princess_tone4'),
(b'\xf0\x9f\x91\xb8\xf0\x9f\x8f\xbf', 'princess_tone5', 'princess_tone5'),
(b'\xf0\x9f\x96\xa8', 'printer', 'printer_vs16'),
(b'\xf0\x9f\x91\x8a', 'punch', 'punch'),
(b'\xf0\x9f\x91\x8a\xf0\x9f\x8f\xbb', 'punch_tone1', 'punch_tone1'),
(b'\xf0\x9f\x91\x8a\xf0\x9f\x8f\xbc', 'punch_tone2', 'punch_tone2'),
(b'\xf0\x9f\x91\x8a\xf0\x9f\x8f\xbd', 'punch_tone3', 'punch_tone3'),
(b'\xf0\x9f\x91\x8a\xf0\x9f\x8f\xbe', 'punch_tone4', 'punch_tone4'),
(b'\xf0\x9f\x91\x8a\xf0\x9f\x8f\xbf', 'punch_tone5', 'punch_tone5'),
(b'\xf0\x9f\x92\x9c', 'purple_heart', 'purple_heart'),
(b'\xf0\x9f\x91\x9b', 'purse', 'purse'),
(b'\xf0\x9f\x93\x8c', 'pushpin', 'pushpin'),
(b'\xf0\x9f\x9a\xae', 'put_litter_in_its_place', 'put_litter_in_its_place'),
(b'\xe2\x9d\x93', 'question', 'question'),
(b'\xf0\x9f\x90\x87', 'rabbit2', 'rabbit2'),
(b'\xf0\x9f\x90\xb0', 'rabbit', 'rabbit'),
(b'\xf0\x9f\x8f\x8e', 'race_car', 'race_car_vs16'),
(b'\xf0\x9f\x90\x8e', 'racehorse', 'racehorse'),
(b'\xf0\x9f\x93\xbb', 'radio', 'radio'),
(b'\xf0\x9f\x94\x98', 'radio_button', 'radio_button'),
(b'\xe2\x98\xa2', 'radioactive', 'radioactive_vs16'),
(b'\xf0\x9f\x9b\xa4', 'railroad_track', 'railroad_track_vs16'),
(b'\xf0\x9f\x9a\x83', 'railway_car', 'railway_car'),
(b'\xf0\x9f\x8c\x88', 'rainbow', 'rainbow'),
(b'\xe2\x9c\x8b', 'raised_hand', 'raised_hand'),
(b'\xe2\x9c\x8b\xf0\x9f\x8f\xbb', 'raised_hand_tone1', 'raised_hand_tone1'),
(b'\xe2\x9c\x8b\xf0\x9f\x8f\xbc', 'raised_hand_tone2', 'raised_hand_tone2'),
(b'\xe2\x9c\x8b\xf0\x9f\x8f\xbd', 'raised_hand_tone3', 'raised_hand_tone3'),
(b'\xe2\x9c\x8b\xf0\x9f\x8f\xbe', 'raised_hand_tone4', 'raised_hand_tone4'),
(b'\xe2\x9c\x8b\xf0\x9f\x8f\xbf', 'raised_hand_tone5', 'raised_hand_tone5'),
(b'\xf0\x9f\x96\x96', 'raised_hand_with_part_between_middle_and_ring_fingers', 'raised_hand_with_part_between_middle_and_ring_fingers', 'vulcan'),
(b'\xf0\x9f\x96\x96\xf0\x9f\x8f\xbb', 'raised_hand_with_part_between_middle_and_ring_fingers_tone1', 'raised_hand_with_part_between_middle_and_ring_fingers_tone1', 'vulcan_tone1'),
(b'\xf0\x9f\x96\x96\xf0\x9f\x8f\xbc', 'raised_hand_with_part_between_middle_and_ring_fingers_tone2', 'raised_hand_with_part_between_middle_and_ring_fingers_tone2', 'vulcan_tone2'),
(b'\xf0\x9f\x96\x96\xf0\x9f\x8f\xbd', 'raised_hand_with_part_between_middle_and_ring_fingers_tone3', 'raised_hand_with_part_between_middle_and_ring_fingers_tone3', 'vulcan_tone3'),
(b'\xf0\x9f\x96\x96\xf0\x9f\x8f\xbe', 'raised_hand_with_part_between_middle_and_ring_fingers_tone4', 'raised_hand_with_part_between_middle_and_ring_fingers_tone4', 'vulcan_tone4'),
(b'\xf0\x9f\x96\x96\xf0\x9f\x8f\xbf', 'raised_hand_with_part_between_middle_and_ring_fingers_tone5', 'raised_hand_with_part_between_middle_and_ring_fingers_tone5', 'vulcan_tone5'),
(b'\xf0\x9f\x99\x8c', 'raised_hands', 'raised_hands'),
(b'\xf0\x9f\x99\x8c\xf0\x9f\x8f\xbb', 'raised_hands_tone1', 'raised_hands_tone1'),
(b'\xf0\x9f\x99\x8c\xf0\x9f\x8f\xbc', 'raised_hands_tone2', 'raised_hands_tone2'),
(b'\xf0\x9f\x99\x8c\xf0\x9f\x8f\xbd', 'raised_hands_tone3', 'raised_hands_tone3'),
(b'\xf0\x9f\x99\x8c\xf0\x9f\x8f\xbe', 'raised_hands_tone4', 'raised_hands_tone4'),
(b'\xf0\x9f\x99\x8c\xf0\x9f\x8f\xbf', 'raised_hands_tone5', 'raised_hands_tone5'),
(b'\xf0\x9f\x99\x8b', 'raising_hand', 'raising_hand', 'person_raising_hand'),
(b'\xf0\x9f\x99\x8b\xf0\x9f\x8f\xbb', 'raising_hand_tone1', 'raising_hand_tone1', 'person_raising_hand_tone1'),
(b'\xf0\x9f\x99\x8b\xf0\x9f\x8f\xbc', 'raising_hand_tone2', 'raising_hand_tone2', 'person_raising_hand_tone2'),
(b'\xf0\x9f\x99\x8b\xf0\x9f\x8f\xbd', 'raising_hand_tone3', 'raising_hand_tone3', 'person_raising_hand_tone3'),
(b'\xf0\x9f\x99\x8b\xf0\x9f\x8f\xbe', 'raising_hand_tone4', 'raising_hand_tone4', 'person_raising_hand_tone4'),
(b'\xf0\x9f\x99\x8b\xf0\x9f\x8f\xbf', 'raising_hand_tone5', 'raising_hand_tone5', 'person_raising_hand_tone5'),
(b'\xf0\x9f\x90\x8f', 'ram', 'ram'),
(b'\xf0\x9f\x8d\x9c', 'ramen', 'ramen'),
(b'\xf0\x9f\x90\x80', 'rat', 'rat'),
(b'\xe2\x8f\xba', 'record_button', 'record_button_vs16'),
(b'\xe2\x99\xbb', 'recycle', 'recycle_vs16'),
(b'\xf0\x9f\x9a\x97', 'red_car', 'red_car'),
(b'\xf0\x9f\x94\xb4', 'red_circle', 'red_circle'),
(b'\xf0\x9f\x87\xa6', 'regional_indicator_a', 'regional_indicator_a'),
(b'\xf0\x9f\x87\xa7', 'regional_indicator_b', 'regional_indicator_b'),
(b'\xf0\x9f\x87\xa8', 'regional_indicator_c', 'regional_indicator_c'),
(b'\xf0\x9f\x87\xa9', 'regional_indicator_d', 'regional_indicator_d'),
(b'\xf0\x9f\x87\xaa', 'regional_indicator_e', 'regional_indicator_e'),
(b'\xf0\x9f\x87\xab', 'regional_indicator_f', 'regional_indicator_f'),
(b'\xf0\x9f\x87\xac', 'regional_indicator_g', 'regional_indicator_g'),
(b'\xf0\x9f\x87\xad', 'regional_indicator_h', 'regional_indicator_h'),
(b'\xf0\x9f\x87\xae', 'regional_indicator_i', 'regional_indicator_i'),
(b'\xf0\x9f\x87\xaf', 'regional_indicator_j', 'regional_indicator_j'),
(b'\xf0\x9f\x87\xb0', 'regional_indicator_k', 'regional_indicator_k'),
(b'\xf0\x9f\x87\xb1', 'regional_indicator_l', 'regional_indicator_l'),
(b'\xf0\x9f\x87\xb2', 'regional_indicator_m', 'regional_indicator_m'),
(b'\xf0\x9f\x87\xb3', 'regional_indicator_n', 'regional_indicator_n'),
(b'\xf0\x9f\x87\xb4', 'regional_indicator_o', 'regional_indicator_o'),
(b'\xf0\x9f\x87\xb5', 'regional_indicator_p', 'regional_indicator_p'),
(b'\xf0\x9f\x87\xb6', 'regional_indicator_q', 'regional_indicator_q'),
(b'\xf0\x9f\x87\xb7', 'regional_indicator_r', 'regional_indicator_r'),
(b'\xf0\x9f\x87\xb8', 'regional_indicator_s', 'regional_indicator_s'),
(b'\xf0\x9f\x87\xb9', 'regional_indicator_t', 'regional_indicator_t'),
(b'\xf0\x9f\x87\xba', 'regional_indicator_u', 'regional_indicator_u'),
(b'\xf0\x9f\x87\xbb', 'regional_indicator_v', 'regional_indicator_v'),
(b'\xf0\x9f\x87\xbc', 'regional_indicator_w', 'regional_indicator_w'),
(b'\xf0\x9f\x87\xbd', 'regional_indicator_x', 'regional_indicator_x'),
(b'\xf0\x9f\x87\xbe', 'regional_indicator_y', 'regional_indicator_y'),
(b'\xf0\x9f\x87\xbf', 'regional_indicator_z', 'regional_indicator_z'),
(b'\xc2\xae', 'registered', 'registered_vs16'),
(b'\xe2\x98\xba', 'relaxed', 'relaxed_vs16'),
(b'\xf0\x9f\x98\x8c', 'relieved', 'relieved'),
(b'\xf0\x9f\x8e\x97', 'reminder_ribbon', 'reminder_ribbon_vs16'),
(b'\xf0\x9f\x94\x81', 'repeat', 'repeat'),
(b'\xf0\x9f\x94\x82', 'repeat_one', 'repeat_one'),
(b'\xf0\x9f\x9a\xbb', 'restroom', 'restroom'),
(b'\xf0\x9f\x92\x9e', 'revolving_hearts', 'revolving_hearts'),
(b'\xe2\x8f\xaa', 'rewind', 'rewind'),
(b'\xf0\x9f\xa6\x8f', 'rhino', 'rhino', 'rhinoceros'),
(b'\xf0\x9f\x8e\x80', 'ribbon', 'ribbon'),
(b'\xf0\x9f\x8d\x9a', 'rice', 'rice'),
(b'\xf0\x9f\x8d\x99', 'rice_ball', 'rice_ball'),
(b'\xf0\x9f\x8d\x98', 'rice_cracker', 'rice_cracker'),
(b'\xf0\x9f\x8e\x91', 'rice_scene', 'rice_scene'),
(b'\xf0\x9f\xa4\x9c', 'right_facing_fist', 'right_facing_fist', 'right_fist'),
(b'\xf0\x9f\xa4\x9c\xf0\x9f\x8f\xbb', 'right_facing_fist_tone1', 'right_facing_fist_tone1', 'right_fist_tone1'),
(b'\xf0\x9f\xa4\x9c\xf0\x9f\x8f\xbc', 'right_facing_fist_tone2', 'right_facing_fist_tone2', 'right_fist_tone2'),
(b'\xf0\x9f\xa4\x9c\xf0\x9f\x8f\xbd', 'right_facing_fist_tone3', 'right_facing_fist_tone3', 'right_fist_tone3'),
(b'\xf0\x9f\xa4\x9c\xf0\x9f\x8f\xbe', 'right_facing_fist_tone4', 'right_facing_fist_tone4', 'right_fist_tone4'),
(b'\xf0\x9f\xa4\x9c\xf0\x9f\x8f\xbf', 'right_facing_fist_tone5', 'right_facing_fist_tone5', 'right_fist_tone5'),
(b'\xf0\x9f\x92\x8d', 'ring', 'ring'),
(b'\xf0\x9f\xa4\x96', 'robot', 'robot', 'robot_face'),
(b'\xf0\x9f\x9a\x80', 'rocket', 'rocket'),
(b'\xf0\x9f\xa4\xa3', 'rofl', 'rofl', 'rolling_on_the_floor_laughing'),
(b'\xf0\x9f\x8e\xa2', 'roller_coaster', 'roller_coaster'),
(b'\xf0\x9f\x90\x93', 'rooster', 'rooster'),
(b'\xf0\x9f\x8c\xb9', 'rose', 'rose'),
(b'\xf0\x9f\x8f\xb5', 'rosette', 'rosette_vs16'),
(b'\xf0\x9f\x9a\xa8', 'rotating_light', 'rotating_light'),
(b'\xf0\x9f\x93\x8d', 'round_pushpin', 'round_pushpin'),
(b'\xf0\x9f\x9a\xa3', 'rowboat', 'rowboat', 'person_rowing_boat'),
(b'\xf0\x9f\x9a\xa3\xf0\x9f\x8f\xbb', 'rowboat_tone1', 'rowboat_tone1', 'person_rowing_boat_tone1'),
(b'\xf0\x9f\x9a\xa3\xf0\x9f\x8f\xbc', 'rowboat_tone2', 'rowboat_tone2', 'person_rowing_boat_tone2'),
(b'\xf0\x9f\x9a\xa3\xf0\x9f\x8f\xbd', 'rowboat_tone3', 'rowboat_tone3', 'person_rowing_boat_tone3'),
(b'\xf0\x9f\x9a\xa3\xf0\x9f\x8f\xbe', 'rowboat_tone4', 'rowboat_tone4', 'person_rowing_boat_tone4'),
(b'\xf0\x9f\x9a\xa3\xf0\x9f\x8f\xbf', 'rowboat_tone5', 'rowboat_tone5', 'person_rowing_boat_tone5'),
(b'\xf0\x9f\x8f\x89', 'rugby_football', 'rugby_football'),
(b'\xf0\x9f\x8f\x83', 'runner', 'runner', 'person_running'),
(b'\xf0\x9f\x8f\x83\xf0\x9f\x8f\xbb', 'runner_tone1', 'runner_tone1', 'person_running_tone1'),
(b'\xf0\x9f\x8f\x83\xf0\x9f\x8f\xbc', 'runner_tone2', 'runner_tone2', 'person_running_tone2'),
(b'\xf0\x9f\x8f\x83\xf0\x9f\x8f\xbd', 'runner_tone3', 'runner_tone3', 'person_running_tone3'),
(b'\xf0\x9f\x8f\x83\xf0\x9f\x8f\xbe', 'runner_tone4', 'runner_tone4', 'person_running_tone4'),
(b'\xf0\x9f\x8f\x83\xf0\x9f\x8f\xbf', 'runner_tone5', 'runner_tone5', 'person_running_tone5'),
(b'\xf0\x9f\x8e\xbd', 'running_shirt_with_sash', 'running_shirt_with_sash'),
(b'\xf0\x9f\x88\x82', 'sa', 'sa_vs16'),
(b'\xe2\x99\x90', 'sagittarius', 'sagittarius'),
(b'\xe2\x9b\xb5', 'sailboat', 'sailboat'),
(b'\xf0\x9f\x8d\xb6', 'sake', 'sake'),
(b'\xf0\x9f\x91\xa1', 'sandal', 'sandal'),
(b'\xf0\x9f\x8e\x85', 'santa', 'santa'),
(b'\xf0\x9f\x8e\x85\xf0\x9f\x8f\xbb', 'santa_tone1', 'santa_tone1'),
(b'\xf0\x9f\x8e\x85\xf0\x9f\x8f\xbc', 'santa_tone2', 'santa_tone2'),
(b'\xf0\x9f\x8e\x85\xf0\x9f\x8f\xbd', 'santa_tone3', 'santa_tone3'),
(b'\xf0\x9f\x8e\x85\xf0\x9f\x8f\xbe', 'santa_tone4', 'santa_tone4'),
(b'\xf0\x9f\x8e\x85\xf0\x9f\x8f\xbf', 'santa_tone5', 'santa_tone5'),
(b'\xf0\x9f\x93\xa1', 'satellite', 'satellite'),
(b'\xf0\x9f\x9b\xb0', 'satellite_orbital', 'satellite_orbital_vs16'),
(b'\xf0\x9f\x8e\xb7', 'saxophone', 'saxophone'),
(b'\xe2\x9a\x96', 'scales', 'scales_vs16'),
(b'\xf0\x9f\x8f\xab', 'school', 'school'),
(b'\xf0\x9f\x8e\x92', 'school_satchel', 'school_satchel'),
(b'\xe2\x9c\x82', 'scissors', 'scissors_vs16'),
(b'\xf0\x9f\x9b\xb4', 'scooter', 'scooter'),
(b'\xf0\x9f\xa6\x82', 'scorpion', 'scorpion'),
(b'\xe2\x99\x8f', 'scorpius', 'scorpius'),
(b'\xf0\x9f\x98\xb1', 'scream', 'scream'),
(b'\xf0\x9f\x99\x80', 'scream_cat', 'scream_cat'),
(b'\xf0\x9f\x93\x9c', 'scroll', 'scroll'),
(b'\xf0\x9f\x92\xba', 'seat', 'seat'),
(b'\xf0\x9f\xa5\x88', 'second_place', 'second_place', 'second_place_medal'),
(b'\xe3\x8a\x99', 'secret', 'secret_vs16'),
(b'\xf0\x9f\x99\x88', 'see_no_evil', 'see_no_evil'),
(b'\xf0\x9f\x8c\xb1', 'seedling', 'seedling'),
(b'\xf0\x9f\xa4\xb3', 'selfie', 'selfie'),
(b'\xf0\x9f\xa4\xb3\xf0\x9f\x8f\xbb', 'selfie_tone1', 'selfie_tone1'),
(b'\xf0\x9f\xa4\xb3\xf0\x9f\x8f\xbc', 'selfie_tone2', 'selfie_tone2'),
(b'\xf0\x9f\xa4\xb3\xf0\x9f\x8f\xbd', 'selfie_tone3', 'selfie_tone3'),
(b'\xf0\x9f\xa4\xb3\xf0\x9f\x8f\xbe', 'selfie_tone4', 'selfie_tone4'),
(b'\xf0\x9f\xa4\xb3\xf0\x9f\x8f\xbf', 'selfie_tone5', 'selfie_tone5'),
(b'7\xe2\x83\xa3', 'seven', 'seven_vs16'),
(b'\xe2\x98\x98', 'shamrock', 'shamrock_vs16'),
(b'\xf0\x9f\xa6\x88', 'shark', 'shark'),
(b'\xf0\x9f\x8d\xa7', 'shaved_ice', 'shaved_ice'),
(b'\xf0\x9f\x90\x91', 'sheep', 'sheep'),
(b'\xf0\x9f\x90\x9a', 'shell', 'shell'),
(b'\xf0\x9f\x9b\xa1', 'shield', 'shield_vs16'),
(b'\xe2\x9b\xa9', 'shinto_shrine', 'shinto_shrine_vs16'),
(b'\xf0\x9f\x9a\xa2', 'ship', 'ship'),
| |
<reponame>N5GEH/FiLiP
"""Vocabulary Models for Ontology Entities"""
from enum import Enum
from pydantic import BaseModel, Field
from typing import List, TYPE_CHECKING, Dict, Union, Set
from .source import DependencyStatement
if TYPE_CHECKING:
from . import \
CombinedObjectRelation, \
CombinedDataRelation, \
CombinedRelation, \
Relation, \
Vocabulary, \
Source
class Entity(BaseModel):
"""
Representing an OWL Entity (Class, Datatype, DataProperty, ObjectProperty,
Individual)
An Entity is characterised by a unique IRI and originates from a source
An Entity needs a unique label (display name), as it is used in FIWARE as a
field key. The user can overwrite the given label.
"""
iri: str = Field(description="Unique Internationalized Resource Identifier")
label: str = Field(
default="",
description="Label (displayname) extracted from source file "
"(multiple Entities could have the same label)")
user_set_label: str = Field(
default="",
description="Given by user and overwrites 'label'."
" Needed to make labels unique")
comment: str = Field(
default="",
description="Comment extracted from the ontology/source")
source_ids: Set[str] = Field(
default=set(),
description="IDs of the sources that influenced this class")
predefined: bool = Field(
default=False,
description="Stats if the entity is not extracted from a source, "
"but predefined in the program (Standard Datatypes)")
def get_label(self) -> str:
""" Get the label for the entity.
If the user has set a label it is returned, else the label extracted
from the source
Returns:
str
"""
if not self.user_set_label == "":
return self.user_set_label
return self.get_original_label()
def set_label(self, label:str):
""" Change the display label of the entity
Args:
label (str): Label that the label should have
"""
self.user_set_label = label
def get_ontology_iri(self) -> str:
""" Get the IRI of the ontology that this entity belongs to
(extracted from IRI)
Returns:
str
"""
index = self.iri.find("#")
return self.iri[:index]
def get_source_names(self, vocabulary: 'Vocabulary') -> List[str]:
""" Get the names of all the sources
Args:
vocabulary (Vocabulary): Vocabulary of the project
Returns:
List[str]
"""
names = [vocabulary.get_source(id).get_name() for
id in self.source_ids]
return names
def get_sources(self, vocabulary: 'Vocabulary') -> List['Source']:
""" Get all the source objects that influenced this entity.
The sources are sorted according to their names
Args:
vocabulary (Vocabulary): Vocabulary of the project
Returns:
List[Source]
"""
sources = [vocabulary.get_source(id) for id in self.source_ids]
sources.sort(key=lambda x: x.source_name, reverse=False)
return sources
def _lists_are_identical(self, a: List, b: List) -> bool:
""" Methode to test if to lists contain the same entries
Args:
a (List): first list
b (List): second list
Returns:
bool
"""
return len(set(a).intersection(b)) == len(set(a)) and len(a) == len(b)
def is_renamed(self) -> bool:
""" Check if the entity was renamed by the user
Returns:
bool
"""
return not self.user_set_label == ""
def get_original_label(self) -> str:
""" Get label as defined in the source
It can be that the label is empty, then extract the label from the iri
Returns:
str
"""
if not self.label == "":
return self.label
index = self.iri.find("#") + 1
return self.iri[index:]
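# A minimal usage sketch of the label fallback above (illustrative only: the IRI and
# the labels are made-up placeholders, not part of any shipped vocabulary).
def _example_entity_labels() -> None:
    entity = Entity(iri="https://example.org/ontology#TemperatureSensor")
    assert entity.get_label() == "TemperatureSensor"  # falls back to the IRI fragment
    entity.set_label("temperature_sensor")            # user override to ensure uniqueness
    assert entity.get_label() == "temperature_sensor"
    assert entity.is_renamed()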
class Class(Entity):
"""
Representation of OWL:CLASS
A class has a set of relations that are combined into CombinedRelations
Instances are instantiations of a class
A class can represent Devices, Agents, None or both
"""
# The objects whose ids/iris are listed here can be looked up in the
# vocabulary of this class
child_class_iris: List[str] = Field(
default=[],
description="All class_iris of classes that inherit from this class")
ancestor_class_iris: List[str] = Field(
default=[],
description="All class_iris of classes from which this class inherits")
parent_class_iris: List[str] = Field(
default=[],
description="All class_iris of classes that are direct parents of this "
"class")
relation_ids: List[str] = Field(
default=[],
description="All ids of relations defined for this class")
combined_object_relation_ids: List[str] = Field(
default=[],
description="All combined_object_relations ids defined for this class")
combined_data_relation_ids: List[str] = Field(
default=[],
description="All combined_data_relations ids defined for this class")
def get_relation_ids(self) -> List[str]:
"""Get all ids of relations belonging to this class
Returns:
List[str]
"""
return self.relation_ids
def get_relations(self, vocabulary: 'Vocabulary') -> List['Relation']:
"""Get all relations belonging to this class
Args:
vocabulary (Vocabulary): Vocabulary of this project
Returns:
List[Relation]
"""
result = []
for id in self.relation_ids:
result.append(vocabulary.get_relation_by_id(id))
return result
def get_combined_object_relations(self, vocabulary: 'Vocabulary') -> \
List['CombinedObjectRelation']:
"""Get all combined object relations belonging to this class
Args:
vocabulary (Vocabulary): Vocabulary of this project
Returns:
List[CombinedObjectRelation]
"""
result = []
for id in self.combined_object_relation_ids:
result.append(vocabulary.get_combined_object_relation_by_id(id))
return result
def get_combined_data_relations(self, vocabulary: 'Vocabulary') -> \
List['CombinedDataRelation']:
"""Get all combined data relations belonging to this class
Args:
vocabulary (Vocabulary): Vocabulary of this project
Returns:
List[CombinedDataRelation]
"""
result = []
for id in self.combined_data_relation_ids:
result.append(vocabulary.get_combined_data_relation_by_id(id))
return result
def get_combined_relations(self, vocabulary: 'Vocabulary') -> \
List['CombinedRelation']:
"""Get all combined relations belonging to this class
Args:
vocabulary (Vocabulary): Vocabulary of this project
Returns:
List[CombinedRelation]
"""
result = self.get_combined_object_relations(vocabulary)
result.extend(self.get_combined_data_relations(vocabulary))
return result
def is_child_of_all_classes(self, target_list: List[str]) -> bool:
"""Tests if this class is a child class for each of the given classes
Args:
target_list (List[str]): List of ancestor class_iris
Returns:
bool
"""
for target in target_list:
if not target == self.iri:
if target not in self.ancestor_class_iris:
return False
return True
def get_combined_object_relation_with_property_iri(
self, obj_prop_iri: str, vocabulary: 'Vocabulary') \
-> 'CombinedObjectRelation':
"""
Get the CombinedObjectRelation of this class that combines the
relations of the given ObjectProperty
Args:
obj_prop_iri (str): Iri of the ObjectProperty
vocabulary (Vocabulary): Vocabulary of this project
Returns:
CombinedObjectRelation
"""
for cor in self.get_combined_object_relations(vocabulary):
if cor.property_iri == obj_prop_iri:
return cor
return None
def get_combined_data_relation_with_property_iri(self, property_iri,
vocabulary):
"""
Get the CombinedDataRelation of this class that combines the
relations of the given DataProperty
Args:
property_iri (str): Iri of the DataProperty
vocabulary (Vocabulary): Vocabulary of this project
Returns:
CombinedDataRelation
"""
for cdr in self.get_combined_data_relations(vocabulary):
if cdr.property_iri == property_iri:
return cdr
return None
def get_combined_relation_with_property_iri(self, property_iri, vocabulary)\
-> Union['CombinedRelation', None]:
"""
Get the CombinedRelation of this class that combines the relations
of the given Property
If possible use the more specific access functions to save runtime.
Args:
property_iri (str): Iri of the Property
vocabulary (Vocabulary): Vocabulary of this project
Returns:
CombinedRelation, None if iri is unknown
"""
for cdr in self.get_combined_data_relations(vocabulary):
if cdr.property_iri == property_iri:
return cdr
for cor in self.get_combined_object_relations(vocabulary):
if cor.property_iri == property_iri:
return cor
return None
def get_ancestor_classes(self, vocabulary: 'Vocabulary') -> List['Class']:
"""Get all ancestor classes of this class
Args:
vocabulary (Vocabulary): Vocabulary of this project
Returns:
List[Class]
"""
ancestors = []
for ancestor_iri in self.ancestor_class_iris:
ancestors.append(vocabulary.get_class_by_iri(ancestor_iri))
return ancestors
def get_parent_classes(self,
vocabulary: 'Vocabulary',
remove_redundancy: bool = False) -> List['Class']:
"""Get all parent classes of this class
Args:
vocabulary (Vocabulary): Vocabulary of this project
remove_redundancy (bool): if true the parents that are child of
other parents are not included
Returns:
List[Class]
"""
parents = []
for parent_iri in self.parent_class_iris:
parents.append(vocabulary.get_class_by_iri(parent_iri))
if remove_redundancy:
child_iris = set()
for parent in parents:
child_iris.update(parent.child_class_iris)
# Rebuild the list instead of removing entries while iterating, which skips items.
parents = [parent for parent in parents if parent.iri not in child_iris]
return parents
def treat_dependency_statements(self, vocabulary: 'Vocabulary') -> \
List[DependencyStatement]:
"""
Purge and list all pointers/iris that are not contained in
the vocabulary
Args:
vocabulary (Vocabulary): Vocabulary of this project
Returns:
List[DependencyStatement]: Dependency statements with the fields
type, class_iri, dependency_iri and fulfilled; unfulfilled dependencies are purged
"""
statements = []
# parent classes:
parents_to_purge = []
for parent_iri in self.parent_class_iris:
found = parent_iri in vocabulary.classes
statements.append(DependencyStatement(type="Parent Class",
class_iri=self.iri,
dependency_iri=parent_iri,
fulfilled=found
))
if not found:
parents_to_purge.append(parent_iri)
for iri in parents_to_purge:
self.parent_class_iris.remove(iri)
# relations
relation_ids_to_purge = set()
for relation in self.get_relations(vocabulary):
relation_statements = relation.get_dependency_statements(
vocabulary, self.get_ontology_iri(), self.iri)
for statement in relation_statements:
if not statement.fulfilled:
relation_ids_to_purge.add(relation.id)
statements.extend(relation_statements)
for id in relation_ids_to_purge:
self.relation_ids.remove(id)
del vocabulary.relations[id]
return statements
def get_next_combined_relation_id(self, current_cr_id: str,
object_relations: bool) -> str:
"""Get the alphabetically(Property label) next CombinedRelation.
If no CR is after the given one, the first is returned
Args:
current_cr_id (str): ID of the CombinedRelation of which the next
should be found
object_relations (bool):
True if Searching for CombinedObjectRelations
Returns:
str: ID of next CR
"""
list_ = self.combined_data_relation_ids
if object_relations:
list_ = self.combined_object_relation_ids
current_index = list_.index(current_cr_id)
res_index = current_index+1
if res_index >= len(list_):
res_index = 0
return list_[res_index]
def get_previous_combined_relation_id(self, current_cr_id: str,
object_relations: bool) -> str:
"""Get the alphabetically(Property label) previous CombinedRelation.
If no CR is before the given one, the last is returned
Args:
| |
import numpy as np
import gudhi as gd
from numpy.lib.stride_tricks import as_strided
import tensorflow as tf
from tensorflow.python.framework import ops
import timeit
def compute_dgm(f, card, hom_dim):
"""
Computes the persistence diagram of an image.
:param f: image
:param card: maximum number of bars kept
:param hom_dim: dimension of homology
:return: persistence diagram, critical pixels
"""
dgm = np.zeros([card, 2], dtype=np.float32)
cof = np.zeros([card, 2], dtype=np.int32)
cc = gd.CubicalComplex(dimensions=f.shape, top_dimensional_cells=f.ravel())
cc.compute_persistence()
# Return zero arrays if no finite bars
num_bars = len(cc.persistence_intervals_in_dimension(hom_dim))
if ((hom_dim == 0) and (num_bars == 1)) or ((hom_dim > 0) and (num_bars == 0)):
return dgm, cof
# These are all the critical pixels
all_cof = cc.cofaces_of_persistence_pairs()[0][hom_dim]
# Generate the persistence diagram
birth_times, death_times = f.flat[all_cof[:, 0]], f.flat[all_cof[:, 1]]
# Return at most param:card bars
min_card = min(len(birth_times), card)
dgm[:min_card, 0], dgm[:min_card, 1] = birth_times[:min_card], death_times[:min_card]
cof[:min_card, :] = all_cof[:min_card, :]
return dgm, cof
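# Quick illustrative call (a sketch assuming gudhi and numpy as imported above; the
# image contents, card and hom_dim values are arbitrary placeholders).
def _example_compute_dgm():
    img = np.random.rand(32, 32).astype(np.float32)
    dgm, cof = compute_dgm(img, card=50, hom_dim=0)
    # Each used row of dgm is a (birth, death) pair; the matching row of cof holds the
    # flat indices of the birth/death pixels. Unused rows stay zero.
    return dgm, cof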
def compute_dgm_grad(grad_dgm, cof, f):
"""
Uses grad_dgm to compute birth/death critical pixels
:param grad_dgm: gradient wrt dgm
:param cof: critical pixels
:param f: input image
:return: gradient of births/deaths wrt f
"""
grad_f_births = np.zeros(f.shape, dtype=np.float32)
grad_f_deaths = np.zeros(f.shape, dtype=np.float32)
# Identify which rows correspond to a persistence dot.
is_nonzero = cof.any(axis=1)
if not np.any(is_nonzero):
return grad_f_births, grad_f_deaths
# Filter by relevant rows
cof_nonzero = cof[is_nonzero, :]
grad_dgm_nonzero = grad_dgm[is_nonzero, :]
# Add gradient at appropriate places.
np.add.at(grad_f_births.ravel(), cof_nonzero[:, 0].ravel(), grad_dgm_nonzero[:, 0].ravel())
np.add.at(grad_f_deaths.ravel(), cof_nonzero[:, 1].ravel(), grad_dgm_nonzero[:, 1].ravel())
return grad_f_births, grad_f_deaths
def compute_thresh_dgm(f, card, hom_dim, pers_region=None):
"""
Computes thresholded persistent homology of an image.
:param f: input image
:param card: max cardinality of persistence diagram
:param hom_dim: degree of homology
:param pers_region: np.array([birth_low, birth_high, lifetime_low, lifetime_high])
:return: persistence diagram and associated critical pixels
"""
dgm = np.zeros([card, 2], dtype=np.float32)
cof = np.zeros([card, 2], dtype=np.int32)
cc = gd.CubicalComplex(dimensions=f.shape, top_dimensional_cells=f.ravel())
cc.compute_persistence()
# Return zero arrays if no finite bars
num_bars = len(cc.persistence_intervals_in_dimension(hom_dim))
if ((hom_dim == 0) and (num_bars == 1)) or ((hom_dim > 0) and (num_bars == 0)):
return dgm, cof
# These are all the critical pixels
all_cof = cc.cofaces_of_persistence_pairs()[0][hom_dim]
# Generate the persistence diagram
birth_times, death_times = f.flat[all_cof[:, 0]], f.flat[all_cof[:, 1]]
# Threshold by persistence region if one was provided
if pers_region is not None:
lifetimes = death_times - birth_times
rel_ind = (pers_region[0] < birth_times) & (birth_times < pers_region[1]) & \
(pers_region[2] < lifetimes) & (lifetimes < pers_region[3])
birth_times, death_times, all_cof = birth_times[rel_ind], death_times[rel_ind], all_cof[rel_ind, :]
min_card = min(len(birth_times), card)
dgm[:min_card, 0], dgm[:min_card, 1] = birth_times[:min_card], death_times[:min_card]
cof[:min_card, :] = all_cof[:min_card, :]
return dgm, cof
def compute_spawn_sw(grad_dgm, dgm, f, card,
hom_dim, kernel_size, pool_mode, noise, samples, M,
pers_region=None):
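    """
    Estimates birth/death "smear" gradients for f: repeatedly perturbs f with uniform
    noise, stochastically pools it, and pairs the thresholded persistence diagram of
    the pooled image with dgm via a fuzzy sliced-Wasserstein matching over M directions.
    :param grad_dgm: gradient with respect to the upstream persistence diagram dgm
    :param dgm: persistence diagram of f (all-zero rows are ignored)
    :param f: input image
    :param card: maximum number of bars kept per downsampled diagram
    :param hom_dim: dimension of homology
    :param kernel_size: kernel size passed to spool()
    :param pool_mode: pooling mode passed to spool() ('max', 'min', 'uniform', 'simplex')
    :param noise: amplitude of the uniform noise added to f in each sample
    :param samples: number of noisy samples to average over
    :param M: number of slicing directions used for the pairing
    :param pers_region: np.array([birth_low, birth_high, lifetime_low, lifetime_high])
    :return: birth smear and death smear, each with the shape of f
    """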
bsm = np.zeros(f.shape, dtype='float32')
dsm = np.zeros(f.shape, dtype='float32')
# Find nonzero rows of dgm
dgm_up_nonzero = dgm.any(axis=1)
if not np.any(dgm_up_nonzero):
return bsm, dsm
dgm_up = dgm[dgm_up_nonzero, :]
grad_dgm_up = grad_dgm[dgm_up_nonzero, :]
# Project nonzero rows of dgm to diagonal
dgm_up_proj = np.column_stack(((dgm_up[:, 0] + dgm_up[:, 1]) / 2, (dgm_up[:, 0] + dgm_up[:, 1]) / 2))
# For each random sample, compute fuzzy sliced-Wasserstein pairing
for t in range(samples):
g = f + np.random.uniform(-noise, noise, size=f.shape)
x_down, switch = spool(g, kernel_size, pool_mode)
# Compute persistence diagram and critical pixels.
dgm_down, cof_down = compute_thresh_dgm(x_down, card, hom_dim, pers_region)
bsm_down, dsm_down = np.zeros(x_down.shape), np.zeros(x_down.shape) # Initialize low-res smears.
# Get nonzero rows of dgm_down
dgm_down_nonzero = dgm_down.any(axis=1)
if not np.any(dgm_down_nonzero): # Skip iteration if downsampled image has no persistent homology.
continue
dgm_down = dgm_down[dgm_down_nonzero, :]
cof_down = cof_down[dgm_down_nonzero, :]
# Project nonzero rows of downsampled dgm onto diagonal
dgm_down_proj = np.column_stack(((dgm_down[:, 0] + dgm_down[:, 1]) / 2, (dgm_down[:, 0] + dgm_down[:, 1]) / 2))
theta = -np.pi / 2
for i in range(M):
theta_vec = np.array([np.cos(theta), np.sin(theta)])
# Symmetrize the pair dgm_up and dgm_down
V1 = np.concatenate([np.dot(dgm_up, theta_vec), np.dot(dgm_down_proj, theta_vec)])
V2 = np.concatenate([np.dot(dgm_down, theta_vec), np.dot(dgm_up_proj, theta_vec)])
V1_sort = V1.argsort()
V2_sort = V2.argsort()
for j in range(len(V1)):
dot1 = V1_sort[j]
dot2 = V2_sort[j]
# Check if pair happened between non-diagonal points
if (dot1 < dgm_up.shape[0]) and (dot2 < dgm_down.shape[0]):
bsm_down.ravel()[cof_down[dot2, 0]] += (grad_dgm_up[dot1, 0] / M)
dsm_down.ravel()[cof_down[dot2, 1]] += (grad_dgm_up[dot1, 1] / M)
theta += np.pi / M
bsm += unspool(bsm_down, kernel_size, switch)
dsm += unspool(dsm_down, kernel_size, switch)
bsm, dsm = bsm / samples, dsm / samples
return bsm, dsm
def robustness_test(f, eps, n, pers_region, p, hom_dim):
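    """
    Measures how the thresholded persistence of f behaves under uniform noise.
    For each amplitude in eps, draws n perturbed copies of f (clipped to [0, 255]),
    keeps the bars whose birth and lifetime fall inside pers_region, and records the
    p-norm of their lifetimes.
    :param f: input image
    :param eps: iterable of noise amplitudes
    :param n: number of noisy samples per amplitude
    :param pers_region: np.array([birth_low, birth_high, lifetime_low, lifetime_high])
    :param p: order of the norm applied to the thresholded lifetimes
    :param hom_dim: dimension of homology
    :return: per-amplitude average, minimum and maximum of the recorded norms
    """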
num_eps = len(eps)
pers_avgs = np.zeros(num_eps)
pers_mins = np.zeros(num_eps)
pers_maxs = np.zeros(num_eps)
for t in range(num_eps):
S = np.zeros(n)
for i in range(n):
g = f + np.random.uniform(low=-eps[t], high=eps[t], size=np.shape(f))
g = np.clip(g, 0, 255)
dgm = compute_dgm(g, 10000, hom_dim)[0]
lifetimes = dgm[:, 1] - dgm[:, 0]
idx = (pers_region[0] < dgm[:, 0]) & (dgm[:, 0] < pers_region[1]) & \
(pers_region[2] < lifetimes) & (lifetimes < pers_region[3])
S[i] = np.linalg.norm(lifetimes[idx], p)
pers_avgs[t] = np.average(S)
pers_mins[t] = np.min(S)
pers_maxs[t] = np.max(S)
return pers_avgs, pers_mins, pers_maxs
def spool(f, kernel_size, pool_mode):
"""
Stochastically pools an image.
:param f: image
:param kernel_size: integer kernel size
:param pool_mode: 'max', 'min', 'uniform', 'simplex'
:return: downsampled image, switch for unspooling
"""
# Set stride to kernel size
stride = kernel_size
# Check that pool_mode is valid
assert pool_mode in ['max', 'min', 'uniform', 'simplex']
# Reshape image according to kernel size and stride
assert not ((f.shape[0] - kernel_size) % stride or (f.shape[1] - kernel_size) % stride), \
'Chosen kernel and stride misses some of the image.'
downsample_shape = ((f.shape[0] - kernel_size) // stride + 1, (f.shape[1] - kernel_size) // stride + 1)
f_window = as_strided(f,
shape=downsample_shape + (kernel_size, kernel_size),
strides=(stride * f.strides[0], stride * f.strides[1]) + f.strides)
# Reshape f_window so each row corresponds to a window.
f_window = f_window.reshape(-1, kernel_size ** 2)
# Choose switch according to pool_mode
if pool_mode == 'max':
switch = np.zeros(f_window.shape, dtype=np.float32)
switch[np.arange(switch.shape[0]), f_window.argmax(1)] = 1
if pool_mode == 'min':
switch = np.zeros(f_window.shape, dtype=np.float32)
switch[np.arange(switch.shape[0]), f_window.argmin(1)] = 1
if pool_mode == 'uniform':
switch = np.zeros(f_window.shape, dtype=np.float32)
switch[np.arange(switch.shape[0]),
np.random.randint(0, switch.shape[1], switch.shape[0])] = 1
if pool_mode == 'simplex':
switch = np.random.uniform(0, 1, f_window.shape).astype('float32')
switch = switch / switch.sum(axis=1)[:, None]
# Get corresponding values and reshape to downsampled image size.
f_down = np.sum(f_window * switch, axis=1).reshape(downsample_shape)
return f_down, switch
def unspool(f, kernel_size, switch):
"""
Deterministically un-pools an image using a switch.
:param f: image
:param kernel_size: kernel_size used in spool()
:param switch: switch output by spool()
:return: upscaled image
"""
stride = kernel_size
# Initialize upsampled image.
f_up = np.zeros(((f.shape[0] - 1) * stride + kernel_size, (f.shape[1] - 1) * stride + kernel_size),
dtype=np.float32)
f_window = as_strided(f_up,
shape=f.shape + (kernel_size, kernel_size),
strides=(stride * f_up.strides[0], stride * f_up.strides[1]) + f_up.strides)
f_window[:, :, :, :] = (switch * f.ravel()[:, None]).reshape(f.shape + (kernel_size, kernel_size))
return f_up
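# Illustrative round trip (a sketch; the 4x4 image and kernel size are arbitrary,
# chosen so the kernel tiles the image exactly as spool() asserts).
def _example_spool_unspool():
    img = np.arange(16, dtype=np.float32).reshape(4, 4)
    down, switch = spool(img, kernel_size=2, pool_mode='max')  # (2, 2) pooled image
    # unspool puts each pooled value back at the pixels selected by the switch.
    up = unspool(down, kernel_size=2, switch=switch)           # (4, 4) again
    return down, up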
# py_func() and Cubical() are modified from GUDHI tutorials here: https://github.com/GUDHI/TDA-tutorial
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
"""
Wraps Python function as TensorFlow op
:param func: Python function
:param inp: inputs to func
:param Tout: types of func's outputs
:param stateful: whether the wrapped op is stateful (forwarded to tf.py_func)
:param name: optional name for the resulting op
:param grad: TensorFlow function computing gradient of func
:return: TensorFlow wrapper of func
"""
rnd_name = "PyFuncGrad" + str(np.random.randint(0, 1e+8))
tf.RegisterGradient(rnd_name)(grad)
g = tf.get_default_graph()
with g.gradient_override_map({"PyFunc": rnd_name}):
return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
def Spool(x, kernel_size, pool_mode, name=None):
"""
TF op that stochastically pools an image.
:param x: image
:param kernel_size: integer kernel size
:param pool_mode: 'max', 'min', 'uniform', 'simplex'
:param name:
:return: TF operation
"""
# Define override gradient
def _Spool(op, grad_xdown, grad_switch):
switch = op.outputs[1]
grad_x = tf.py_func(lambda y, z: unspool(y, kernel_size, z),
[grad_xdown, switch],
[tf.float32])[0]
return grad_x
# Create the operation
with ops.op_scope([x], name, "Spool") as name:
return py_func(lambda y: spool(y, kernel_size, pool_mode),
[x],
[tf.float32, tf.float32],
name=name,
grad=_Spool)
def Cubical(x, card, hom_dim, update_func, name=None):
"""
TF op that computes the persistence diagram of an image.
:param x: image
:param card: maximum number of bars kept
:param hom_dim: dimension of homology
:param update_func: update_func(grad_dgm, dgm, cof, x) gives the direction of update
:param name:
:return: TF operation
"""
# Define override gradient
def _Cubical(op, grad_dgm, grad_cof):
dgm, cof = op.outputs[0], op.outputs[1]
x = op.inputs[0]
grad_x = tf.py_func(lambda a, b, | |
<filename>src/command_modules/azure-cli-storage/azure/cli/command_modules/storage/_help.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.help_files import helps # pylint: disable=unused-import
# pylint: disable=line-too-long, too-many-lines
helps['storage entity insert'] = """
type: command
short-summary: Insert an entity into the table.
long-summary: Inserts an entity into the table. When inserting an entity into a table, you must specify values for the PartitionKey and RowKey system properties. Together, these properties form the primary key and must be unique within the table. Both the PartitionKey and RowKey values may be up to 64 KB in size. If you are using an integer value as a key, you should convert the integer to a fixed-width string, because they are canonically sorted. For example, you should convert the value 1 to 0000001 to ensure proper sorting.
parameters:
- name: --table-name -t
type: string
short-summary: 'The name of the table to insert the entity into.'
- name: --entity -e
type: list
short-summary: 'A space-separated list of key=value pairs. Must contain a PartitionKey and a RowKey.'
- name: --if-exists
type: string
short-summary: 'Specify what should happen if an entity already exists for the specified PartitionKey and RowKey.'
- name: --timeout
short-summary: The server timeout, expressed in seconds.
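    examples:
        - name: Insert an entity into a table (illustrative values; MyTable and the property names are placeholders).
          text: az storage entity insert -t MyTable -e PartitionKey=PK1 RowKey=0000001 Content=example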
"""
helps['storage blob upload'] = """
type: command
short-summary: Upload a specified file to a storage blob.
long-summary: Creates a new blob from a file path, or updates the content of an existing blob, with automatic chunking and progress notifications.
examples:
- name: Upload to a blob with all required fields.
text: az storage blob upload -f /path/to/file -c MyContainer -n MyBlob
"""
helps['storage file upload'] = """
type: command
short-summary: Upload a specified file to a file share that uses the standard SMB 3.0 protocol.
long-summary: Creates or updates an Azure file from a source path with automatic chunking and progress notifications.
examples:
- name: Upload to a file share with all required fields.
text: az storage file upload -s MyShare --source /path/to/file
"""
helps['storage blob show'] = """
type: command
short-summary: Returns properties for a named blob in a container in a storage account.
long-summary: Blob properties only. To show contents of a blob, use az storage blob list
examples:
- name: Show properties of a blob with all required fields.
text: az storage blob show -c MyContainer -n MyBlob
"""
helps['storage blob delete'] = """
type: command
short-summary: Marks the specified blob or snapshot for deletion.
long-summary: The blob is marked for later deletion during garbage collection. Note that in order to delete a blob, you must delete all of its snapshots. You can delete both at the same time with the Delete Blob operation.
examples:
- name: Delete a blob with all required fields.
text: az storage blob delete -c MyContainer -n MyBlob
"""
helps['storage account create'] = """
type: command
short-summary: Creates a storage account.
examples:
- name: Create a storage account with minimal options.
text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --sku Standard_LRS
- name: Create a storage account MyStorageAccount in resource group MyResourceGroup in the westus region with locally redundant storage.
text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --sku Standard_LRS
"""
helps['storage container create'] = """
type: command
short-summary: Creates a container in a storage account.
examples:
- name: Create a storage container in a storage account.
text: az storage container create -n MyStorageContainer
- name: Create a storage container in a storage account and return an error if the container already exists.
text: az storage container create -n MyStorageContainer --fail-on-exist
"""
helps['storage account list'] = """
type: command
short-summary: Lists storage accounts
examples:
- name: List all storage accounts in a subscription.
text: az storage account list
- name: List all storage accounts in a resource group.
text: az storage account list -g MyResourceGroup
"""
helps['storage account show'] = """
type: command
short-summary: Returns storage account properties
examples:
- name: Show properties for a storage account using one or more resource IDs.
text: az storage account show --ids ${storage_account_resource_id}
- name: Show properties for a storage account using an account name and resource group.
text: az storage account show -g MyResourceGroup -n MyStorageAccount
"""
helps['storage blob list'] = """
type: command
short-summary: Lists storage blobs in a container.
examples:
- name: List all storage blobs in a container.
text: az storage blob list -c MyContainer
"""
helps['storage account delete'] = """
type: command
short-summary: Deletes a storage account.
examples:
- name: Delete a storage account using one or more resource IDs.
text: az storage account delete --ids ${storage_account_resource_id}
- name: Delete a storage account using an account name and resource group.
text: az storage account delete -n MyStorageAccount -g MyResourceGroup
"""
helps['storage account show-connection-string'] = """
type: command
short-summary: Returns the connection string for the specified storage account.
examples:
- name: Get a connection string for a storage account.
text: az storage account show-connection-string -g MyResourceGroup -n MyStorageAccount
"""
helps['storage'] = """
type: group
short-summary: Durable, highly available, and massively scalable cloud storage.
"""
helps['storage account'] = """
type: group
short-summary: Manage storage accounts.
"""
helps['storage account update'] = """
type: command
short-summary: Update the properties of a storage account.
"""
helps['storage account keys'] = """
type: group
short-summary: Manage storage account keys.
"""
helps['storage account keys list'] = """
type: command
short-summary: Lists the primary and secondary keys for a storage account.
examples:
- name: List the primary and secondary keys for a storage account.
text: az storage account keys list -g MyResourceGroup -n MyStorageAccount
"""
helps['storage blob'] = """
type: group
short-summary: Object storage for unstructured data.
"""
helps['storage blob exists'] = """
type: command
short-summary: Indicates whether the blob exists.
"""
helps['storage blob list'] = """
type: command
short-summary: List blobs in a given container.
"""
helps['storage blob copy'] = """
type: group
short-summary: Manage blob copy operations.
"""
helps['storage blob lease'] = """
type: group
short-summary: Manage storage blob leases.
"""
helps['storage blob metadata'] = """
type: group
short-summary: Manage blob metadata.
"""
helps['storage blob service-properties'] = """
type: group
short-summary: Manage storage blob service properties.
"""
helps['storage blob copy start-batch'] = """
type: command
short-summary: Copy multiple blobs or files to a blob container.
parameters:
- name: --destination-container
type: string
short-summary: The blob container where the selected source files or blobs to be copied to.
- name: --pattern
type: string
short-summary: The pattern used for globbing files or blobs in the source. The supported patterns are '*', '?', '[seq]', and '[!seq]'.
- name: --dryrun
type: bool
short-summary: List the files or blobs to be copied. No actual data transfer will occur.
- name: --source-account-name
type: string
short-summary: The source storage account from which the files or blobs are copied to the destination. If omitted, it is assumed that source is in the same storage account as destination.
- name: --source-account-key
type: string
short-summary: The account key for the source storage account.
- name: --source-container
type: string
short-summary: The source container from which the blobs are copied to the destination.
- name: --source-share
type: string
short-summary: The source share from which the files are copied to the destination.
- name: --source-uri
type: string
short-summary: A URI that specifies a file share or blob container from which the files or blobs are copied to the destination. If the source is in another account, the source must either be public or be authenticated by using a shared access signature. If the source is public, no authentication is required.
- name: --source-sas
type: string
short-summary: The shared access signature for the source storage account.
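    examples:
        - name: Copy all log blobs from a container in another storage account (illustrative account and container names).
          text: az storage blob copy start-batch --destination-container MyContainer --source-account-name MySourceAccount --source-container MySourceContainer --pattern '*.log'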
"""
helps['storage container'] = """
type: group
short-summary: Manage blob storage containers.
"""
helps['storage container exists'] = """
type: command
short-summary: Indicates whether the container exists.
"""
helps['storage container list'] = """
type: command
short-summary: List containers in a storage account.
"""
helps['storage container lease'] = """
type: group
short-summary: Manage blob storage container leases.
"""
helps['storage container metadata'] = """
type: group
short-summary: Manage container metadata.
"""
helps['storage container policy'] = """
type: group
short-summary: Manage container stored access policies.
"""
helps['storage cors'] = """
| |
from models.base import ImplicitSurface, NeRF, RadianceNet
from utils import rend_util, train_util
import copy
import functools
import numpy as np
from tqdm import tqdm
from typing import Optional
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
# def pdf_phi_s(x: torch.Tensor, s):
# esx = torch.exp(-s*x)
# y = s*esx / ((1+esx) ** 2)
# return y
def cdf_Phi_s(x, s):
# den = 1 + torch.exp(-s*x)
# y = 1./den
# return y
return torch.sigmoid(x*s)
def sdf_to_alpha(sdf: torch.Tensor, s):
# [(B), N_rays, N_pts]
cdf = cdf_Phi_s(sdf, s)
# [(B), N_rays, N_pts-1]
# TODO: check sanity.
opacity_alpha = (cdf[..., :-1] - cdf[..., 1:]) / (cdf[..., :-1] + 1e-10)
opacity_alpha = torch.clamp_min(opacity_alpha, 0)
return cdf, opacity_alpha
def sdf_to_w(sdf: torch.Tensor, s):
device = sdf.device
# [(B), N_rays, N_pts-1]
cdf, opacity_alpha = sdf_to_alpha(sdf, s)
# [(B), N_rays, N_pts]
shifted_transparency = torch.cat(
[
torch.ones([*opacity_alpha.shape[:-1], 1], device=device),
1.0 - opacity_alpha + 1e-10,
], dim=-1)
# [(B), N_rays, N_pts-1]
visibility_weights = opacity_alpha *\
torch.cumprod(shifted_transparency, dim=-1)[..., :-1]
return cdf, opacity_alpha, visibility_weights
def alpha_to_w(alpha: torch.Tensor):
device = alpha.device
# [(B), N_rays, N_pts]
shifted_transparency = torch.cat(
[
torch.ones([*alpha.shape[:-1], 1], device=device),
1.0 - alpha + 1e-10,
], dim=-1)
# [(B), N_rays, N_pts-1]
visibility_weights = alpha *\
torch.cumprod(shifted_transparency, dim=-1)[..., :-1]
return visibility_weights
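# Minimal sanity sketch of the helpers above (illustrative values only): along a ray
# whose SDF samples cross zero, sdf_to_w concentrates the visibility weights on the
# segment that contains the sign change.
def _example_sdf_to_w():
    sdf = torch.tensor([[0.3, 0.1, -0.1, -0.3, -0.5]])  # [N_rays=1, N_pts=5]
    cdf, alpha, w = sdf_to_w(sdf, s=10.0)
    # w has shape [1, 4]; its largest entry lies on the segment between the samples
    # with sdf 0.1 and -0.1, i.e. where the surface is crossed.
    return w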
class NeuS(nn.Module):
def __init__(self,
variance_init=0.05,
speed_factor=1.0,
input_ch=3,
W_geo_feat=-1,
use_outside_nerf=False,
obj_bounding_radius=1.0,
surface_cfg=dict(),
radiance_cfg=dict()):
super().__init__()
self.ln_s = nn.Parameter(data=torch.Tensor([-np.log(variance_init) / speed_factor]), requires_grad=True)
self.speed_factor = speed_factor
#------- surface network
self.implicit_surface = ImplicitSurface(
W_geo_feat=W_geo_feat, input_ch=input_ch, obj_bounding_size=obj_bounding_radius, **surface_cfg)
#------- radiance network
if W_geo_feat < 0:
W_geo_feat = self.implicit_surface.W
self.radiance_net = RadianceNet(
W_geo_feat=W_geo_feat, **radiance_cfg)
#-------- outside nerf++
if use_outside_nerf:
self.nerf_outside = NeRF(input_ch=4, multires=10, multires_view=4, use_view_dirs=True)
def forward_radiance(self, x: torch.Tensor, view_dirs: torch.Tensor):
_, nablas, geometry_feature = self.implicit_surface.forward_with_nablas(x)
radiance = self.radiance_net.forward(x, view_dirs, nablas, geometry_feature)
return radiance
def forward_s(self):
return torch.exp(self.ln_s * self.speed_factor)
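    # Note (interpretation added for clarity, in the spirit of NeuS): forward_s()
    # returns the sharpness s of the logistic CDF used by sdf_to_alpha(); a larger s
    # gives a steeper sigmoid and hence a thinner band of density around the surface.
    # Parameterizing s as exp(ln_s * speed_factor) keeps it positive, and speed_factor
    # effectively rescales the gradient step taken on ln_s.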
def forward(self, x: torch.Tensor, view_dirs: torch.Tensor):
sdf, nablas, geometry_feature = self.implicit_surface.forward_with_nablas(x)
radiances = self.radiance_net.forward(x, view_dirs, nablas, geometry_feature)
return radiances, sdf, nablas
def volume_render(
rays_o,
rays_d,
model: NeuS,
obj_bounding_radius=1.0,
batched = False,
batched_info = {},
# render algorithm config
calc_normal = False,
use_view_dirs = True,
rayschunk = 65536,
netchunk = 1048576,
white_bkgd = False,
near_bypass: Optional[float] = None,
far_bypass: Optional[float] = None,
# render function config
detailed_output = True,
show_progress = False,
# sampling related
perturb = False, # config whether do stratified sampling
fixed_s_recp = 1/64.,
N_samples = 64,
N_importance = 64,
N_outside = 0, # whether to use outside nerf
# upsample related
upsample_algo = 'official_solution',
N_nograd_samples = 2048,
N_upsample_iters = 4,
**dummy_kwargs # just place holder
):
"""
input:
rays_o: [(B,) N_rays, 3]
rays_d: [(B,) N_rays, 3] NOTE: not normalized; contains info about the ratio len(this ray)/len(principal ray)
"""
device = rays_o.device
if batched:
DIM_BATCHIFY = 1
B = rays_d.shape[0] # batch_size
flat_vec_shape = [B, -1, 3]
else:
DIM_BATCHIFY = 0
flat_vec_shape = [-1, 3]
rays_o = torch.reshape(rays_o, flat_vec_shape).float()
rays_d = torch.reshape(rays_d, flat_vec_shape).float()
# NOTE: normalize the ray directions here; the inputs are not guaranteed to be unit length
rays_d = F.normalize(rays_d, dim=-1)
batchify_query = functools.partial(train_util.batchify_query, chunk=netchunk, dim_batchify=DIM_BATCHIFY)
# ---------------
# Render a ray chunk
# ---------------
def render_rayschunk(rays_o: torch.Tensor, rays_d: torch.Tensor):
# rays_o: [(B), N_rays, 3]
# rays_d: [(B), N_rays, 3]
# [(B), N_rays] x 2
near, far = rend_util.near_far_from_sphere(rays_o, rays_d, r=obj_bounding_radius)
if near_bypass is not None:
near = near_bypass * torch.ones_like(near).to(device)
if far_bypass is not None:
far = far_bypass * torch.ones_like(far).to(device)
if use_view_dirs:
view_dirs = rays_d
else:
view_dirs = None
prefix_batch = [B] if batched else []
N_rays = rays_o.shape[-2]
# ---------------
# Sample points on the rays
# ---------------
# ---------------
# Coarse Points
# [(B), N_rays, N_samples]
# d_coarse = torch.linspace(near, far, N_samples).float().to(device)
# d_coarse = d_coarse.view([*[1]*len(prefix_batch), 1, N_samples]).repeat([*prefix_batch, N_rays, 1])
_t = torch.linspace(0, 1, N_samples).float().to(device)
d_coarse = near * (1 - _t) + far * _t
# ---------------
# Up Sampling
with torch.no_grad():
# -------- option 1: directly use
if upsample_algo == 'direct_use': # nerf-like
# [(B), N_rays, N_samples, 3]
pts_coarse = rays_o.unsqueeze(-2) + d_coarse.unsqueeze(-1) * rays_d.unsqueeze(-2)
# query network to get sdf
# [(B), N_rays, N_samples]
sdf_coarse = model.implicit_surface.forward(pts_coarse)
# [(B), N_rays, N_samples-1]
*_, w_coarse = sdf_to_w(sdf_coarse, 1./fixed_s_recp)
# Fine points
# [(B), N_rays, N_importance]
d_fine = rend_util.sample_pdf(d_coarse, w_coarse, N_importance, det=not perturb)
# Gather points
d_all = torch.cat([d_coarse, d_fine], dim=-1)
d_all, d_sort_indices = torch.sort(d_all, dim=-1)
# -------- option 2: just using more points to calculate visibility weights for upsampling
# used config: N_nograd_samples
elif upsample_algo == 'direct_more':
_t = torch.linspace(0, 1, N_nograd_samples).float().to(device)
_d = near * (1 - _t) + far * _t
_pts = rays_o.unsqueeze(-2) + _d.unsqueeze(-1) * rays_d.unsqueeze(-2)
# _sdf = model.implicit_surface.forward(_pts)
_sdf = batchify_query(model.implicit_surface.forward, _pts)
*_, _w = sdf_to_w(_sdf, 1./fixed_s_recp)
d_fine = rend_util.sample_pdf(_d, _w, N_importance, det=not perturb)
# Gather points
d_all = torch.cat([d_coarse, d_fine], dim=-1)
d_all, d_sort_indices = torch.sort(d_all, dim=-1)
# -------- option 3: modified from NeuS official implementation: estimate sdf slopes and middle points' sdf
# https://github.com/Totoro97/NeuS/blob/9dc9275d3a8c7266994a3b9cf9f36071621987dd/models/renderer.py#L131
# used config: N_upsample_iters
elif upsample_algo == 'official_solution':
_d = d_coarse
_sdf = batchify_query(model.implicit_surface.forward, rays_o.unsqueeze(-2) + _d.unsqueeze(-1) * rays_d.unsqueeze(-2))
for i in range(N_upsample_iters):
prev_sdf, next_sdf = _sdf[..., :-1], _sdf[..., 1:]
prev_z_vals, next_z_vals = _d[..., :-1], _d[..., 1:]
mid_sdf = (prev_sdf + next_sdf) * 0.5
dot_val = (next_sdf - prev_sdf) / (next_z_vals - prev_z_vals + 1e-5)
prev_dot_val = torch.cat([torch.zeros_like(dot_val[..., :1], device=device), dot_val[..., :-1]], dim=-1) # jianfei: prev_slope, right shifted
dot_val = torch.stack([prev_dot_val, dot_val], dim=-1) # jianfei: concat prev_slope with slope
dot_val, _ = torch.min(dot_val, dim=-1, keepdim=False) # jianfei: find the minimum of prev_slope and current slope. (forward diff vs. backward diff., or the prev segment's slope vs. this segment's slope)
dot_val = dot_val.clamp(-10.0, 0.0)
dist = (next_z_vals - prev_z_vals)
prev_esti_sdf = mid_sdf - dot_val * dist * 0.5
next_esti_sdf = mid_sdf + dot_val * dist * 0.5
prev_cdf = cdf_Phi_s(prev_esti_sdf, 64 * (2**i))
next_cdf = cdf_Phi_s(next_esti_sdf, 64 * (2**i))
alpha = (prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5)
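                    # Added note: this estimates each section's discrete opacity as
                    # (Phi_s(prev) - Phi_s(next)) / Phi_s(prev), following the NeuS
                    # formulation; the 1e-5 terms guard against division by zero,
                    # and the sharpness 64 * (2**i) doubles every iteration so
                    # successive upsampling rounds concentrate samples near the surface.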
_w = alpha_to_w(alpha)
d_fine = rend_util.sample_pdf(_d, _w, N_importance // N_upsample_iters, det=not perturb)
_d = torch.cat([_d, d_fine], dim=-1)
sdf_fine = batchify_query(model.implicit_surface.forward, rays_o.unsqueeze(-2) + d_fine.unsqueeze(-1) * rays_d.unsqueeze(-2))
_sdf = torch.cat([_sdf, sdf_fine], dim=-1)
_d, d_sort_indices = torch.sort(_d, dim=-1)
_sdf = torch.gather(_sdf, DIM_BATCHIFY+1, d_sort_indices)
d_all = _d
else:
raise NotImplementedError
# ------------------
# Calculate Points
# [(B), N_rays, N_samples+N_importance, 3]
pts = rays_o[..., None, :] + rays_d[..., None, :] * d_all[..., :, None]
# [(B), N_rays, N_pts-1, 3]
# pts_mid = 0.5 * (pts[..., 1:, :] + pts[..., :-1, :])
d_mid = 0.5 * (d_all[..., 1:] + d_all[..., :-1])
pts_mid = rays_o[..., None, :] + rays_d[..., None, :] * d_mid[..., :, None]
# ------------------
# Inside Scene
# ------------------
# sdf, nablas, _ = model.implicit_surface.forward_with_nablas(pts)
sdf, nablas, _ = batchify_query(model.implicit_surface.forward_with_nablas, pts)
        # [(B), N_rays, N_pts], [(B), N_rays, N_pts-1]
cdf, opacity_alpha = sdf_to_alpha(sdf, model.forward_s())
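        # Added note: sdf_to_alpha converts the sampled SDF values into per-section
        # opacities using the learned sharpness model.forward_s(); unlike the
        # fixed-s conversions inside the no_grad upsampling block above, this call
        # keeps gradients enabled for training.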
# radiances = model.forward_radiance(pts_mid, view_dirs_mid)
radiances = batchify_query(model.forward_radiance, pts_mid, view_dirs.unsqueeze(-2).expand_as(pts_mid) if use_view_dirs else None)
# ------------------
# Outside Scene
# ------------------
if N_outside > 0:
_t = torch.linspace(0, 1, N_outside + 2)[..., 1:-1].float().to(device)
d_vals_out = far / torch.flip(_t, dims=[-1])
if perturb:
_mids = .5 * (d_vals_out[..., 1:] + d_vals_out[..., :-1])
_upper = torch.cat([_mids, d_vals_out[..., -1:]], -1)
_lower = torch.cat([d_vals_out[..., :1], _mids], -1)
_t_rand = torch.rand(_upper.shape).float().to(device)
d_vals_out = _lower + (_upper - _lower) * _t_rand
d_vals_out = torch.cat([d_mid, d_vals_out], dim=-1) # already sorted
pts_out = rays_o[..., None, :] + rays_d[..., None, :] * d_vals_out[..., :, None]
r = pts_out.norm(dim=-1, keepdim=True)
x_out = torch.cat([pts_out/r, 1./r], dim=-1)
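            # Added note: (x/r, 1/r) is the inverted-sphere parameterization from
            # NeRF++, mapping the unbounded background outside the unit sphere to a
            # bounded 4D input for self.nerf_outside (hence input_ch=4 above).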
views_out = view_dirs.unsqueeze(-2).expand_as(x_out[..., :3]) if use_view_dirs else None
sigma_out, radiance_out = batchify_query(model.nerf_outside.forward, x_out, views_out)
dists = d_vals_out[..., 1:] - d_vals_out[..., :-1]
dists = torch.cat([dists, 1e10 * torch.ones(dists[..., :1].shape).to(device)], dim=-1)
alpha_out = 1 - torch.exp(-F.softplus(sigma_out) * dists) # use softplus instead of relu as NeuS's official repo
# --------------
# Ray Integration
# --------------
# [(B), N_rays, N_pts-1]
if N_outside > 0:
N_pts_1 = d_mid.shape[-1]
            # [(B), N_rays, N_pts-1]
            mask_inside = (pts_mid.norm(dim=-1) <= obj_bounding_radius)
            # [(B), N_rays, N_pts-1]
            alpha_in = opacity_alpha * mask_inside.float() + alpha_out[..., :N_pts_1] * (~mask_inside).float()
            # [(B), N_rays, N_pts-1 + N_outside]
opacity_alpha = torch.cat([alpha_in, alpha_out[..., N_pts_1:]], dim=-1)
# [(B), | |
\"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_deployment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_deployment`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_deployment`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `delete_namespaced_deployment`")
collection_formats = {}
resource_path = '/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}'.replace('{format}', 'json')
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
if 'grace_period_seconds' in params:
query_params['gracePeriodSeconds'] = params['grace_period_seconds']
if 'orphan_dependents' in params:
query_params['orphanDependents'] = params['orphan_dependents']
if 'propagation_policy' in params:
query_params['propagationPolicy'] = params['propagation_policy']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_stateful_set(self, name, namespace, body, **kwargs):
"""
delete a StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_namespaced_stateful_set(name, namespace, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.delete_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs)
return data
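    # Added note: the public wrapper above forces _return_http_data_only=True, so a
    # synchronous call returns just the deserialized V1Status; when a `callback` is
    # supplied, the *_with_http_info variant returns the request thread and the
    # callback later receives the response object.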
def delete_namespaced_stateful_set_with_http_info(self, name, namespace, body, **kwargs):
"""
delete a StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_namespaced_stateful_set_with_http_info(name, namespace, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_stateful_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_stateful_set`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_stateful_set`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `delete_namespaced_stateful_set`")
collection_formats = {}
resource_path = '/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}'.replace('{format}', 'json')
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
if 'grace_period_seconds' in params:
query_params['gracePeriodSeconds'] = params['grace_period_seconds']
if 'orphan_dependents' in params:
query_params['orphanDependents'] = params['orphan_dependents']
if 'propagation_policy' in params:
query_params['propagationPolicy'] = params['propagation_policy']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_api_resources(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_api_resources_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/apis/apps/v1beta1/'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = | |
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD Style.
import sys
import warnings
import itertools
import operator
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel
from ..base import RegressorMixin
from .base import sparse_center_data, center_data
from ..utils import array2d, atleast2d_or_csc
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..utils.extmath import safe_sparse_dot
from . import cd_fast
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear Model trained with L1 and L2 prior as regularizer
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
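    For example, to apply a = 0.6 on the L1 term and b = 0.4 on the L2 term,
    set alpha = a + b = 1.0 and l1_ratio = a / (a + b) = 0.6.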
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter
alpha = 0 is equivalent to an ordinary least square, solved
        by the LinearRegression object in scikit-learn. For numerical
        reasons, using alpha = 0 with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with 0 <= l1_ratio <= 1. For
l1_ratio = 0 the penalty is an L2 penalty. For l1_ratio = 1 it is an L1
penalty. For 0 < l1_ratio < 1, the penalty is a combination of L1 and
L2.
fit_intercept: bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
normalize : boolean, optional
If True, the regressors X are normalized
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always True to preserve sparsity.
max_iter: int, optional
The maximum number of iterations
    copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
tol: float, optional
The tolerance for the optimization: if the updates are
smaller than 'tol', the optimization code checks the
dual gap for optimality and continues until it is smaller
than tol.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive: bool, optional
When set to True, forces the coefficients to be positive.
Attributes
----------
`coef_` : array, shape = (n_features,)
parameter vector (w in the cost function formula)
`sparse_coef_` : scipy.sparse matrix, shape = (n_features, 1)
`sparse_coef_` is a readonly property derived from `coef_`
`intercept_` : float | array, shape = (n_targets,)
independent term in decision function.
`dual_gap_` : float
the current fit is guaranteed to be epsilon-suboptimal with
epsilon := `dual_gap_`
`eps_` : float
`eps_` is used to check if the fit converged to the requested
`tol`
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a fortran contiguous numpy array.
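    Examples
    --------
    A minimal usage sketch (added for illustration; exact fitted values depend
    on the data and the solver tolerance):
    >>> import numpy as np
    >>> X = np.array([[0., 0.], [1., 1.], [2., 2.]])
    >>> y = np.array([0., 1., 2.])
    >>> clf = ElasticNet(alpha=0.1, l1_ratio=0.7).fit(X, y)  # doctest: +SKIP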
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, copy_X=True,
tol=1e-4, warm_start=False, positive=False, rho=None):
self.alpha = alpha
self.l1_ratio = l1_ratio
if rho is not None:
self.l1_ratio = rho
warnings.warn("rho was renamed to l1_ratio and will be removed "
"in 0.15", DeprecationWarning)
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
def fit(self, X, y, Xy=None, coef_init=None):
"""Fit model with coordinate descent
Parameters
-----------
X: ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y: ndarray, shape = (n_samples,) or (n_samples, n_targets)
Target
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
coef_init: ndarray of shape n_features or (n_targets, n_features)
            The initial coefficients to warm-start the optimization
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a fortran contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this aglorithm does not converge"
"well. You are advised to use the LinearRegression estimator",
stacklevel=2)
X = atleast2d_or_csc(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
# From now on X can be touched inplace
y = np.asarray(y, dtype=np.float64)
# now all computation with X can be done inplace
fit = self._sparse_fit if sparse.isspmatrix(X) else self._dense_fit
fit(X, y, Xy, coef_init)
return self
def _dense_fit(self, X, y, Xy=None, coef_init=None):
X, y, X_mean, y_mean, X_std = center_data(X, y,
self.fit_intercept, self.normalize,
copy=False) # copy was done in fit if necessary
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
precompute = self.precompute
if hasattr(precompute, '__array__') \
and not np.allclose(X_mean, np.zeros(n_features)) \
and not np.allclose(X_std, np.ones(n_features)):
# recompute Gram
precompute = 'auto'
Xy = None
coef_ = self._init_coef(coef_init, n_features, n_targets)
dual_gap_ = np.empty(n_targets)
eps_ = np.empty(n_targets)
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
# precompute if n_samples > n_features
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == True or \
(precompute == 'auto' and n_samples > n_features):
Gram = np.dot(X.T, X)
else:
Gram = None
for k in xrange(n_targets):
if Gram is None:
coef_[k, :], dual_gap_[k], eps_[k] = \
cd_fast.enet_coordinate_descent(coef_[k, :],
l1_reg, l2_reg, X, y[:, k], self.max_iter, self.tol,
self.positive)
else:
Gram = Gram.copy()
if Xy is None:
this_Xy = np.dot(X.T, y[:, k])
else:
this_Xy = Xy[:, k]
coef_[k, :], dual_gap_[k], eps_[k] = \
cd_fast.enet_coordinate_descent_gram(coef_[k, :],
l1_reg, l2_reg, Gram, this_Xy, y[:, k], self.max_iter,
self.tol, self.positive)
if dual_gap_[k] > eps_[k]:
warnings.warn('Objective did not converge for ' +
'target %d, you might want' % k +
' to increase the number of iterations')
self.coef_, self.dual_gap_, self.eps_ = (np.squeeze(a) for a in (
coef_, dual_gap_, eps_))
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
def _sparse_fit(self, X, y, Xy=None, coef_init=None):
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have incompatible shapes.\n" +
"Note: Sparse matrices cannot be indexed w/" +
"boolean masks (use `indices=True` in CV).")
# NOTE: we are explicitly not centering the data the naive way to
# avoid breaking the sparsity of X
X_data, y, X_mean, y_mean, X_std = sparse_center_data(X, y,
self.fit_intercept,
self.normalize)
if y.ndim == 1:
y = y[:, np.newaxis]
n_samples, n_features = X.shape[0], X.shape[1]
n_targets = y.shape[1]
coef_ = self._init_coef(coef_init, n_features, n_targets)
dual_gap_ = np.empty(n_targets)
eps_ = np.empty(n_targets)
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
for k in xrange(n_targets):
coef_[k, :], dual_gap_[k], eps_[k] = \
cd_fast.sparse_enet_coordinate_descent(
coef_[k, :], l1_reg, l2_reg, X_data, X.indices,
X.indptr, y[:, k], X_mean / X_std,
self.max_iter, self.tol, self.positive)
if dual_gap_[k] > eps_[k]:
warnings.warn('Objective did not converge for ' +
'target %d, you might want' % k +
' to increase the number of iterations')
self.coef_, self.dual_gap_, self.eps_ = (np.squeeze(a) for a in (
coef_, dual_gap_, eps_))
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
def _init_coef(self, coef_init, n_features, n_targets):
if coef_init is None:
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64)
else:
coef_ = self.coef_
else:
coef_ | |
# Repository: dopplerchase/GewitterGefahr
"""Unit tests for polygons.py."""
import unittest
import numpy
import shapely.geometry
from gewittergefahr.gg_utils import polygons
from gewittergefahr.gg_utils import projections
TOLERANCE = 1e-6
TOLERANCE_DECIMAL_PLACE = 6
# The following constants are used to test _get_longest_inner_list.
SHORT_LIST = []
MEDIUM_LIST = [0, 1, 2, 3]
LONG_LIST = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]
LIST_OF_LISTS = [SHORT_LIST, MEDIUM_LIST, LONG_LIST]
# The following constants are used to test _get_longest_simple_polygon.
VERTEX_X_METRES_SHORT = numpy.array([0., 4., 2., 0.])
VERTEX_Y_METRES_SHORT = numpy.array([0., 0., 4., 0.])
VERTEX_X_METRES_MEDIUM = numpy.array([0., 2., 4., 4., 2., 0., 0.])
VERTEX_Y_METRES_MEDIUM = numpy.array([0., -1., 0., 2., 3., 2., 0.])
VERTEX_X_METRES_LONG = numpy.array([0., 2., 2., 4., 4., 1., 1., 0., 0.])
VERTEX_Y_METRES_LONG = numpy.array([0., 0., -1., -1., 4., 4., 2., 2., 0.])
NAN_ARRAY = numpy.array([numpy.nan])
VERTEX_X_METRES_COMPLEX = numpy.concatenate((
VERTEX_X_METRES_LONG, NAN_ARRAY, VERTEX_X_METRES_MEDIUM, NAN_ARRAY,
VERTEX_X_METRES_SHORT))
VERTEX_Y_METRES_COMPLEX = numpy.concatenate((
VERTEX_Y_METRES_LONG, NAN_ARRAY, VERTEX_Y_METRES_MEDIUM, NAN_ARRAY,
VERTEX_Y_METRES_SHORT))
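# Added note: the NaN entries act as separators between the three simple polygons
# inside the "complex" vertex arrays; the tests below expect
# _get_longest_simple_polygon to split on them and return the longest piece
# (VERTEX_X_METRES_LONG / VERTEX_Y_METRES_LONG).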
# The following constants are used to test _vertex_arrays_to_list.
VERTEX_LIST_LONG_METRES = [
(0., 0.), (2., 0.), (2., -1.), (4., -1.), (4., 4.), (1., 4.), (1., 2.),
(0., 2.), (0., 0.)]
# The following constants are used to test _get_edge_direction.
FIRST_VERTEX_ROW = 5
FIRST_VERTEX_COLUMN = 5
SECOND_VERTEX_ROW_UP = 4
SECOND_VERTEX_COLUMN_UP = 5
SECOND_VERTEX_ROW_DOWN = 6
SECOND_VERTEX_COLUMN_DOWN = 5
SECOND_VERTEX_ROW_RIGHT = 5
SECOND_VERTEX_COLUMN_RIGHT = 6
SECOND_VERTEX_ROW_LEFT = 5
SECOND_VERTEX_COLUMN_LEFT = 4
SECOND_VERTEX_ROW_UP_RIGHT = 4
SECOND_VERTEX_COLUMN_UP_RIGHT = 6
SECOND_VERTEX_ROW_UP_LEFT = 4
SECOND_VERTEX_COLUMN_UP_LEFT = 4
SECOND_VERTEX_ROW_DOWN_RIGHT = 6
SECOND_VERTEX_COLUMN_DOWN_RIGHT = 6
SECOND_VERTEX_ROW_DOWN_LEFT = 6
SECOND_VERTEX_COLUMN_DOWN_LEFT = 4
# The following constants are used to test _vertices_from_grid_points_to_edges.
VERTEX_ROWS_ONLY_ONE_ORIG = numpy.array([5])
VERTEX_COLUMNS_ONLY_ONE_ORIG = numpy.array([3])
VERTEX_ROWS_ONE_UNIQUE_ORIG = numpy.array([5, 5, 5])
VERTEX_COLUMNS_ONE_UNIQUE_ORIG = numpy.array([3, 3, 3])
VERTEX_ROWS_ONLY_ONE_NEW = numpy.array([5.5, 5.5, 4.5, 4.5, 5.5])
VERTEX_COLUMNS_ONLY_ONE_NEW = numpy.array([2.5, 3.5, 3.5, 2.5, 2.5])
# The following constants are used to test _remove_redundant_vertices,
# _vertices_from_grid_points_to_edges, and fix_probsevere_vertices.
VERTEX_ROWS_GRID_POINTS = numpy.array(
[101, 102, 103, 103, 104, 102, 102, 101, 101])
VERTEX_ROWS_GRID_POINTS_COMPLEX = numpy.array(
[101, 102, 103, 103, 104, 102, 102, 101, 101, numpy.nan, 0, 1, 1, 0, 0])
VERTEX_COLUMNS_GRID_POINTS = numpy.array(
[501, 501, 502, 503, 504, 504, 503, 502, 501])
VERTEX_COLUMNS_GRID_POINTS_COMPLEX = numpy.array(
[501, 501, 502, 503, 504, 504, 503, 502, 501, numpy.nan, 0, 0, 1, 1, 0])
VERTEX_ROWS_GRID_CELL_EDGES_REDUNDANT = numpy.array(
[100.5, 102.5, 102.5, 103.5, 103.5, 103.5, 103.5, 103.5, 104.5, 104.5,
102.5, 103.5, 104.5, 101.5, 101.5, 100.5, 100.5])
VERTEX_COLUMNS_GRID_CELL_EDGES_REDUNDANT = numpy.array(
[500.5, 500.5, 501.5, 501.5, 503.5, 502.5, 501.5, 503.5, 503.5, 504.5,
504.5, 504.5, 504.5, 504.5, 502.5, 502.5, 500.5])
VERTEX_ROWS_GRID_CELL_EDGES_NON_REDUNDANT = numpy.array(
[100.5, 102.5, 102.5, 103.5, 103.5, 104.5, 104.5, 101.5, 101.5, 100.5,
100.5])
VERTEX_COLUMNS_GRID_CELL_EDGES_NON_REDUNDANT = numpy.array(
[500.5, 500.5, 501.5, 501.5, 503.5, 503.5, 504.5, 504.5, 502.5, 502.5,
500.5])
# The following constants are used to test
# _patch_diag_connections_in_binary_matrix.
BINARY_MATRIX_2DIAG_CONNECTIONS = numpy.array([[0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 1, 1]],
dtype=bool)
BINARY_MATRIX_1DIAG_CONNECTION = numpy.array([[0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 1, 1]],
dtype=bool)
BINARY_MATRIX_NO_DIAG_CONNECTIONS = numpy.array([[0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 1, 1]],
dtype=bool)
# The following constants are used to test grid_points_in_poly_to_binary_matrix,
# grid_points_in_poly_to_vertices, and simple_polygon_to_grid_points.
ROW_INDICES_IN_POLYGON = numpy.array(
[101, 101, 102, 102, 102, 102, 103, 103, 103, 104])
COLUMN_INDICES_IN_POLYGON = numpy.array(
[501, 502, 501, 502, 503, 504, 502, 503, 504, 504])
POINT_IN_OR_ON_POLYGON_MATRIX = numpy.array([[0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0]]).astype(bool)
FIRST_ROW_INDEX = 100
FIRST_COLUMN_INDEX = 500
# The following constants are used to test project_latlng_to_xy and
# project_xy_to_latlng.
EXTERIOR_VERTEX_LATITUDES_DEG = numpy.array([49., 49., 60., 60., 53.8, 49.])
EXTERIOR_VERTEX_LONGITUDES_DEG = numpy.array(
[246., 250., 250., 240., 240., 246.])
HOLE1_VERTEX_LATITUDES_DEG = numpy.array(
[51.1, 52.2, 52.2, 53.3, 53.3, 51.1, 51.1])
HOLE1_VERTEX_LONGITUDES_DEG = numpy.array(
[246., 246., 246.1, 246.1, 246.4, 246.4, 246.])
POLYGON_OBJECT_LATLNG = polygons.vertex_arrays_to_polygon_object(
EXTERIOR_VERTEX_LONGITUDES_DEG, EXTERIOR_VERTEX_LATITUDES_DEG,
hole_x_coords_list=[HOLE1_VERTEX_LONGITUDES_DEG],
hole_y_coords_list=[HOLE1_VERTEX_LATITUDES_DEG])
PROJECTION_OBJECT = projections.init_azimuthal_equidistant_projection(
central_latitude_deg=55., central_longitude_deg=245.)
# The following constants are used to test vertex_arrays_to_polygon_object and
# polygon_object_to_vertex_arrays.
EXTERIOR_VERTEX_X_METRES = numpy.array([0., 0., 10., 10., 0.])
EXTERIOR_VERTEX_Y_METRES = numpy.array([0., 10., 10., 0., 0.])
EXTERIOR_VERTEX_METRES_LIST = [
(0., 0.), (0., 10.), (10., 10.), (10., 0.), (0., 0.)]
HOLE1_VERTEX_X_METRES = numpy.array([2., 2., 4., 4., 2.])
HOLE1_VERTEX_Y_METRES = numpy.array([2., 4., 4., 2., 2.])
HOLE1_VERTEX_METRES_LIST = [(2., 2.), (2., 4.), (4., 4.), (4., 2.), (2., 2.)]
HOLE2_VERTEX_X_METRES = numpy.array([6., 6., 8., 8., 6.])
HOLE2_VERTEX_Y_METRES = numpy.array([6., 8., 8., 6., 6.])
HOLE2_VERTEX_METRES_LIST = [(6., 6.), (6., 8.), (8., 8.), (8., 6.), (6., 6.)]
POLYGON_OBJECT_2HOLES_XY_METRES = shapely.geometry.Polygon(
shell=EXTERIOR_VERTEX_METRES_LIST,
holes=(HOLE1_VERTEX_METRES_LIST, HOLE2_VERTEX_METRES_LIST))
# The following constants are used to test simple_polygon_to_grid_points.
VERTEX_ROWS_SIMPLE = numpy.array(
[3.5, 3.5, 4.5, 4.5, -0.5, -0.5, 1.5, 1.5, 3.5])
VERTEX_COLUMNS_SIMPLE = numpy.array(
[-0.5, 1.5, 1.5, 3.5, 3.5, 0.5, 0.5, -0.5, -0.5])
GRID_POINT_ROWS_IN_SIMPLE_POLY = numpy.array(
[0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4])
GRID_POINT_COLUMNS_IN_SIMPLE_POLY = numpy.array(
[1, 2, 3, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 2, 3])
# The following constants are used to test point_in_or_on_polygon and
# buffer_simple_polygon.
SMALL_BUFFER_DIST_METRES = 2.5
LARGE_BUFFER_DIST_METRES = 5.
SMALL_BUFFER_VERTEX_X_METRES = numpy.array([-2.5, -2.5, 12.5, 12.5, -2.5])
SMALL_BUFFER_VERTEX_Y_METRES = numpy.array([-2.5, 12.5, 12.5, -2.5, -2.5])
LARGE_BUFFER_VERTEX_X_METRES = numpy.array([-5., -5., 15., 15., -5.])
LARGE_BUFFER_VERTEX_Y_METRES = numpy.array([-5., 15., 15., -5., -5.])
EXCLUSIVE_BUFFER_EXTERIOR_X_METRES = numpy.array([-5., -5., 15., 15., -5.])
EXCLUSIVE_BUFFER_EXTERIOR_Y_METRES = numpy.array([-5., 15., 15., -5., -5.])
EXCLUSIVE_BUFFER_HOLE_X_METRES = numpy.array([-2.5, -2.5, 12.5, 12.5, -2.5])
EXCLUSIVE_BUFFER_HOLE_Y_METRES = numpy.array([-2.5, 12.5, 12.5, -2.5, -2.5])
POLYGON_OBJECT_EXCL_BUFFER_XY_METRES = polygons.vertex_arrays_to_polygon_object(
EXCLUSIVE_BUFFER_EXTERIOR_X_METRES, EXCLUSIVE_BUFFER_EXTERIOR_Y_METRES,
hole_x_coords_list=[EXCLUSIVE_BUFFER_HOLE_X_METRES],
hole_y_coords_list=[EXCLUSIVE_BUFFER_HOLE_Y_METRES])
X_IN_NESTED_BUFFER = -4.
X_ON_NESTED_BUFFER = -2.5
X_OUTSIDE_NESTED_BUFFER = 3.
Y_IN_NESTED_BUFFER = 5.
Y_ON_NESTED_BUFFER = 5.
Y_OUTSIDE_NESTED_BUFFER = 5.
class PolygonsTests(unittest.TestCase):
"""Each method is a unit test for polygons.py."""
def test_get_longest_inner_list(self):
"""Ensures correct output from _get_longest_inner_list."""
this_longest_list = polygons._get_longest_inner_list(LIST_OF_LISTS)
self.assertTrue(this_longest_list == LONG_LIST)
def test_get_longest_simple_polygon_complex(self):
"""Ensures correct output from _get_longest_simple_polygon.
In this case the input is a complex polygon.
"""
these_vertex_x_metres, these_vertex_y_metres = (
polygons._get_longest_simple_polygon(
VERTEX_X_METRES_COMPLEX, VERTEX_Y_METRES_COMPLEX))
self.assertTrue(numpy.allclose(
these_vertex_x_metres, VERTEX_X_METRES_LONG, atol=TOLERANCE))
self.assertTrue(numpy.allclose(
these_vertex_y_metres, VERTEX_Y_METRES_LONG, atol=TOLERANCE))
def test_get_longest_simple_polygon_simple(self):
"""Ensures correct output from _get_longest_simple_polygon.
        In this case the input is a simple polygon.
"""
these_vertex_x_metres, these_vertex_y_metres = (
polygons._get_longest_simple_polygon(
VERTEX_X_METRES_LONG, VERTEX_Y_METRES_LONG))
self.assertTrue(numpy.allclose(
these_vertex_x_metres, VERTEX_X_METRES_LONG, atol=TOLERANCE))
self.assertTrue(numpy.allclose(
these_vertex_y_metres, VERTEX_Y_METRES_LONG, atol=TOLERANCE))
def test_vertex_arrays_to_list(self):
"""Ensures correct output from _vertex_arrays_to_list."""
this_vertex_list_metres = polygons._vertex_arrays_to_list(
VERTEX_X_METRES_LONG, VERTEX_Y_METRES_LONG)
self.assertTrue(
len(this_vertex_list_metres) == len(VERTEX_LIST_LONG_METRES))
for i in range(len(VERTEX_LIST_LONG_METRES)):
self.assertTrue(numpy.allclose(
numpy.asarray(this_vertex_list_metres[i]),
numpy.asarray(VERTEX_LIST_LONG_METRES[i]), atol=TOLERANCE,
equal_nan=True))
def test_get_edge_direction_up(self):
"""Ensures correct output from _get_edge_direction.
In this case, direction is up.
"""
this_direction_name = polygons._get_edge_direction(
first_row=FIRST_VERTEX_ROW, second_row=SECOND_VERTEX_ROW_UP,
first_column=FIRST_VERTEX_COLUMN,
second_column=SECOND_VERTEX_COLUMN_UP)
self.assertTrue(this_direction_name == polygons.UP_DIRECTION_NAME)
def test_get_edge_direction_down(self):
"""Ensures correct output from _get_edge_direction.
In this case, direction is down.
"""
this_direction_name = polygons._get_edge_direction(
first_row=FIRST_VERTEX_ROW, second_row=SECOND_VERTEX_ROW_DOWN,
first_column=FIRST_VERTEX_COLUMN,
second_column=SECOND_VERTEX_COLUMN_DOWN)
self.assertTrue(this_direction_name == polygons.DOWN_DIRECTION_NAME)
def test_get_edge_direction_left(self):
"""Ensures correct output from _get_edge_direction.
In this case, direction is left.
"""
this_direction_name = polygons._get_edge_direction(
first_row=FIRST_VERTEX_ROW, second_row=SECOND_VERTEX_ROW_LEFT,
first_column=FIRST_VERTEX_COLUMN,
second_column=SECOND_VERTEX_COLUMN_LEFT)
self.assertTrue(this_direction_name == polygons.LEFT_DIRECTION_NAME)
def test_get_edge_direction_right(self):
"""Ensures correct output from _get_edge_direction.
In this case, direction is right.
"""
this_direction_name = polygons._get_edge_direction(
first_row=FIRST_VERTEX_ROW, second_row=SECOND_VERTEX_ROW_RIGHT,
first_column=FIRST_VERTEX_COLUMN,
second_column=SECOND_VERTEX_COLUMN_RIGHT)
self.assertTrue(this_direction_name == polygons.RIGHT_DIRECTION_NAME)
def test_get_edge_direction_up_left(self):
"""Ensures correct output from _get_edge_direction.
In this case, direction is up-left.
"""
this_direction_name = polygons._get_edge_direction(
first_row=FIRST_VERTEX_ROW, second_row=SECOND_VERTEX_ROW_UP_LEFT,
first_column=FIRST_VERTEX_COLUMN,
second_column=SECOND_VERTEX_COLUMN_UP_LEFT)
self.assertTrue(this_direction_name == polygons.UP_LEFT_DIRECTION_NAME)
def test_get_edge_direction_up_right(self):
"""Ensures correct output from _get_edge_direction.
In this case, direction is up-right.
"""
this_direction_name = polygons._get_edge_direction(
first_row=FIRST_VERTEX_ROW, second_row=SECOND_VERTEX_ROW_UP_RIGHT,
first_column=FIRST_VERTEX_COLUMN,
second_column=SECOND_VERTEX_COLUMN_UP_RIGHT)
self.assertTrue(this_direction_name == polygons.UP_RIGHT_DIRECTION_NAME)
def test_get_edge_direction_down_left(self):
"""Ensures correct output from _get_edge_direction.
In this case, direction is down-left.
"""
this_direction_name = polygons._get_edge_direction(
first_row=FIRST_VERTEX_ROW, second_row=SECOND_VERTEX_ROW_DOWN_LEFT,
first_column=FIRST_VERTEX_COLUMN,
second_column=SECOND_VERTEX_COLUMN_DOWN_LEFT)
self.assertTrue(
this_direction_name == polygons.DOWN_LEFT_DIRECTION_NAME)
def test_get_edge_direction_down_right(self):
"""Ensures correct output from _get_edge_direction.
In this case, direction is down-right.
"""
this_direction_name = polygons._get_edge_direction(
first_row=FIRST_VERTEX_ROW, second_row=SECOND_VERTEX_ROW_DOWN_RIGHT,
first_column=FIRST_VERTEX_COLUMN,
second_column=SECOND_VERTEX_COLUMN_DOWN_RIGHT)
self.assertTrue(
this_direction_name == polygons.DOWN_RIGHT_DIRECTION_NAME)
def test_remove_redundant_vertices(self):
"""Ensures correct output from _remove_redundant_vertices."""
these_vertex_rows, these_vertex_columns = (
polygons._remove_redundant_vertices(
VERTEX_ROWS_GRID_CELL_EDGES_REDUNDANT,
VERTEX_COLUMNS_GRID_CELL_EDGES_REDUNDANT))
self.assertTrue(numpy.array_equal(
these_vertex_rows, VERTEX_ROWS_GRID_CELL_EDGES_NON_REDUNDANT))
self.assertTrue(numpy.array_equal(
these_vertex_columns, VERTEX_COLUMNS_GRID_CELL_EDGES_NON_REDUNDANT))
def test_patch_diag_connections_in_binary_matrix_2(self):
"""Ensures correct output from _patch_diag_connections_in_binary_matrix.
In this case there are 2 diagonal connections to patch.
"""
this_binary_matrix = polygons._patch_diag_connections_in_binary_matrix(
BINARY_MATRIX_2DIAG_CONNECTIONS)
self.assertTrue(numpy.array_equal(
this_binary_matrix, BINARY_MATRIX_NO_DIAG_CONNECTIONS))
def test_patch_diag_connections_in_binary_matrix_1(self):
"""Ensures correct output from _patch_diag_connections_in_binary_matrix.
In this case there is one diagonal connection to patch.
"""
this_binary_matrix = polygons._patch_diag_connections_in_binary_matrix(
BINARY_MATRIX_1DIAG_CONNECTION)
self.assertTrue(numpy.array_equal(
this_binary_matrix, BINARY_MATRIX_NO_DIAG_CONNECTIONS))
def test_patch_diag_connections_in_binary_matrix_0(self):
"""Ensures correct output from _patch_diag_connections_in_binary_matrix.
In this case there are no diagonal connections to patch.
"""
this_binary_matrix = polygons._patch_diag_connections_in_binary_matrix(
BINARY_MATRIX_NO_DIAG_CONNECTIONS)
self.assertTrue(numpy.array_equal(
this_binary_matrix, BINARY_MATRIX_NO_DIAG_CONNECTIONS))
def test_vertices_from_grid_points_to_edges_many_inputs(self):
"""Ensures correct output from _vertices_from_grid_points_to_edges.
In | |
# SOURCE: https://github.com/tiangolo/fastapi/issues/1800
# #2: https://github.com/encode/starlette/issues/658
from collections import defaultdict, deque
from datetime import datetime as dt
from asyncpg import create_pool
from json import dumps, loads
from copy import deepcopy
from os import environ
from re import findall
from inspect import iscoroutinefunction
from urllib.parse import quote as urllib_quote
if __package__ is None or __package__ == '': from utilities import text_san
else: from .utilities import text_san
class Database:
def __init__(self):
self.db = {'MAIN_DB':{k.replace('POSTGRES_', ''):environ[k] for k in ['POSTGRES_USER', 'POSTGRES_PASSWORD', 'POSTGRES_SERVER', 'POSTGRES_DB', 'POSTGRES_PORT']}}
if 'EXTRA_DBS' in environ:
dbs = environ['EXTRA_DBS'].split('|')
for adb in dbs:
try:
db_name = findall(r"^\^(.*)\^", adb)[0]
if db_name.find('^') >= 0: db_name = db_name[:db_name.find('^')]
self.db.update({db_name:adb[len(db_name)+2:]})
except IndexError: pass
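        # Added note: each entry of the EXTRA_DBS environment variable is expected
        # to look like ^NAME^DSN (entries separated by '|'); NAME becomes the key
        # in self.db and the remainder is kept as a raw DSN string for asyncpg.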
self._cursor, self._connection_pool, self._con = None, None, None
self._cursor_2nd, self._connection_pool_2nd, self._con_2nd = None, None, None
self.DBQueries = {
"check_if_basic_DB_tables_exist":{
"Query":''' SELECT EXISTS(SELECT FROM information_schema.tables WHERE table_schema = 'public' AND table_name = 'Users') AS "Users", EXISTS(SELECT FROM information_schema.tables WHERE table_schema = 'public' AND table_name = 'Endpoints') AS "Endpoints", EXISTS(SELECT FROM information_schema.tables WHERE table_schema = 'public' AND table_name = 'DBQueries') AS "DBQueries", EXISTS(SELECT FROM information_schema.tables WHERE table_schema = 'public' AND table_name = 'Sessions') AS "Sessions"; '''
},
"create_DBQueries_table":{ # TODO
"Query":[""" DROP SEQUENCE IF EXISTS "DBQueries_ID_seq"; """,
""" DROP SEQUENCE IF EXISTS "Endpoints_ID_seq"; """,
""" CREATE SEQUENCE "DBQueries_ID_seq" INCREMENT 1 MINVALUE 1 MAXVALUE 9223372036854775807 START 1 CACHE 1; """,
""" CREATE TABLE IF NOT EXISTS "public"."DBQueries" (
"ID" integer DEFAULT nextval('"DBQueries_ID_seq"') NOT NULL,
"meta" jsonb NOT NULL,
"Name" text,
"Query" text
) WITH (oids = false); """,
""" INSERT INTO "DBQueries" ("ID", "meta", "Name", "Query") VALUES
(1, '{}', 'create_users_table', 'DROP SEQUENCE IF EXISTS "Users_ID_seq";
CREATE SEQUENCE "Users_ID_seq" INCREMENT 1 MINVALUE 1 MAXVALUE 9223372036854775807 START 1 CACHE 1;
CREATE TABLE IF NOT EXISTS "public"."Users" (
"ID" integer DEFAULT nextval(''"Users_ID_seq"'') NOT NULL,
"username" text NOT NULL,
"password" text NOT NULL,
"roles" jsonb,
"metadata" jsonb
) WITH (oids = false);'
),
(2, '{}', 'create_endpoints_table', 'DROP SEQUENCE IF EXISTS "Endpoints_ID_seq";
CREATE SEQUENCE "Endpoints_ID_seq" INCREMENT 1 MINVALUE 1 MAXVALUE 9223372036854775807 START 1 CACHE 1;
CREATE TABLE IF NOT EXISTS "public"."Endpoints" (
"ID" integer DEFAULT nextval(''"Endpoints_ID_seq"'') NOT NULL,
"endpoint" text NOT NULL,
"roles" jsonb NOT NULL,
"meta" jsonb NOT NULL
) WITH (oids = false);'
),
(3, '{}', 'create_sessions_table', 'DROP SEQUENCE IF EXISTS "Sessions_ID_seq";
CREATE SEQUENCE "Sessions_ID_seq" INCREMENT 1 MINVALUE 1 MAXVALUE 9223372036854775807 START 1 CACHE 1;
CREATE TABLE IF NOT EXISTS "public"."Sessions" (
"ID" integer DEFAULT nextval(''"Sessions_ID_seq"'') NOT NULL,
"metadata" jsonb NOT NULL
) WITH (oids = false);'
),
(4, '{"External": 1}', 'test', 'SELECT * FROM base WHERE "ID" IN (1,2);'),
(5, '{"External": 1}', 'test2', 'SELECT * FROM base WHERE "ID"=3;');"""],
"meta": {"External":0}
},
"load_all_DBQueries":{
"Query":''' SELECT * FROM "DBQueries"; '''
},
"load_endpoints":{
"Query":''' SELECT * FROM "Endpoints"; '''
},
"load_users":{
"Query":''' SELECT * FROM "Users"; '''
},
"load_sessions":{
"Query":''' SELECT "ID", metadata AS "m" FROM "Sessions"; '''
},
"populate_endpoints":{
"Query":''' INSERT INTO "Endpoints"("endpoint", "roles", "meta") VALUES({endpoint}, {roles}, {meta}); ''',
"meta":{"Payload":{"endpoint":"text", "roles":"json", "meta":"json"}}
},
"update_endpoints":{
"Query":''' UPDATE "Endpoints" SET "meta" = {meta} WHERE "ID"={ID}; ''',
"meta":{"Payload":{"ID":"int", "meta":"json"}}
},
"create_new_user":{
"Query":''' INSERT INTO "Users" ("username", "password", "roles", "metadata") VALUES({username}, {hashpass}, {roles}, {metadata}); ''',
"meta":{"Payload":{"username":"text", "hashpass":"text", "roles":"json", "metadata":"json"}}
}
}
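        # Added note: entries in self.DBQueries are templates. db_query() fills the
        # {placeholders} in "Query" using the types declared under meta["Payload"]
        # (each value is validated/escaped by check_type), and only entries whose
        # meta contains "External": 1 may be requested by clients via Query_DB().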
async def connect(self, custom_DB=None):
if not self._connection_pool:
try:
self._connection_pool = await create_pool(min_size=1, max_size=10, command_timeout=60, host=self.db['MAIN_DB']['SERVER'], port=self.db['MAIN_DB']['PORT'], user=self.db['MAIN_DB']['USER'], password=self.db['MAIN_DB']['PASSWORD'], database=self.db['MAIN_DB']['DB'],)
except Exception as e: raise Exception("@MAIN_DB POOL CONNECTION ERROR: "+str(e))
if not self._connection_pool_2nd:
try:
self._connection_pool_2nd = {}
for adb, dsn in self.db.items():
if adb != 'MAIN_DB':
self._connection_pool_2nd[adb] = await create_pool(min_size=1, max_size=10, command_timeout=60, dsn=dsn.replace('#', urllib_quote('#')),)
except Exception as e: raise Exception("@MASSIVE EXTRA_DBS POOL CONNECTIONS - ERROR: "+str(e))
async def fetch_rows(self, queries, pool=None, multiple_queries_splitter=";"):
queries, pool, results = queries if type(queries) == list else [queries], pool if pool is not None else 'MAIN_DB', []
#print('\n-----------------'.join(queries))
if pool == 'MAIN_DB':
if not self._connection_pool: await self.connect()
else:
con = await self._connection_pool.acquire()
for query in queries:
try: results.append(await con.fetch(query))
except Exception as e:
if str(e)=="cannot insert multiple commands into a prepared statement":
if query.find(multiple_queries_splitter) >= 0:
for sub_query in [x for x in query.split(multiple_queries_splitter) if len(x) > 0]:
try: results.append(await con.fetch(sub_query))
except Exception as e:
await self._connection_pool.release(con)
raise Exception(str(e)+"|>THE SUB_QUERY->|"+str(sub_query))
else:
await self._connection_pool.release(con)
raise Exception(str(e)+"|>THE QUERY->|"+str(query))
await self._connection_pool.release(con)
return results
else:
for a_db in pool.keys():
if not self._connection_pool_2nd[a_db]: await self.connect()
else:
con = await self._connection_pool_2nd[a_db].acquire()
for query in queries:
try: results.append({a_db:await con.fetch(query)})
except Exception as e:
if str(e)=="cannot insert multiple commands into a prepared statement":
if query.find(multiple_queries_splitter) >= 0:
for sub_query in [x for x in query.split(multiple_queries_splitter) if len(x) > 0]:
try: results.append(await con.fetch(sub_query))
except Exception as e:
await self._connection_pool_2nd[a_db].release(con)
raise Exception(str(e)+"|>THE SUB_QUERY->|"+str(sub_query))
else:
await self._connection_pool_2nd[a_db].release(con)
raise Exception(str(e)+"|>THE QUERY->|"+str(query))
await self._connection_pool_2nd[a_db].release(con)
return results
async def check_type(the_type, subject):
async def text_san_local(x, forbidden_s=[";", "--"]):
return await text_san(x, forbidden_s)
async def date_checker(date, valid_formats=['%Y-%m-%d %H:%M:%S', '%Y-%m-%d']):
valid_date = False
for f in valid_formats:
try: date, valid_date = dt.strptime(date, f), True
except: pass
if valid_date: return date
else: raise Exception("Invalid Date Format! Acceptable formats are:", ' or '.join(valid_formats))
async def to_json(x):
try:
res = loads(x)
res = x
except:
s = dumps(x).replace("'", '"')
res = str(s[1:-1] if (s[0] == s[-1]) and s.startswith(("'", '"')) else s)
if res == 'None': res = "{}"
return res
types = {
"int" : {"convertor":int, "prefix":"", "endfix":""},
"float" : {"convertor":float, "prefix":"", "endfix":""},
"text" : {"convertor":text_san_local, "prefix":"'", "endfix":"'"},
"raw" : {"convertor":str, "prefix":"", "endfix":""},
"json" : {"convertor":to_json, "prefix":"'", "endfix":"'::jsonb"},
"date" : {"convertor":date_checker, "prefix":"'", "endfix":"'::timestamptz"}
}
if the_type not in types.keys(): raise Exception("We don't Support this Type Yet.")
f = types[the_type]["convertor"]
    processed = str(await f(subject) if iscoroutinefunction(f) else f(subject))
    return types[the_type]['prefix'] + processed + types[the_type]['endfix']
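# Added usage sketch (illustrative; assumes text_san returns its input unchanged
# when no forbidden substring is present):
#   await check_type("int", "5")             -> "5"
#   await check_type("text", "abc")          -> "'abc'"
#   await check_type("json", {"a": 1})       -> the string  '{"a": 1}'::jsonb
#   await check_type("date", "2020-01-02")   -> "'2020-01-02 00:00:00'::timestamptz"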
async def db_query(r_obj, query_name, External, query_payload=None):
queries = deepcopy(r_obj.DBQueries)
if query_payload is not None:
if queries.get(query_name, {}).get('meta', {}).get('Payload', None) is not None:
allowed_types = queries[query_name]['meta']['Payload']
querys_payload_keywords_counter = len(allowed_types.keys())
for var, cont in query_payload.items():
if var in allowed_types.keys():
fin_check = await check_type(allowed_types[var], cont)
if fin_check is False: raise Exception("Illegal Payload Type -> "+str(var)+"|"+str(cont))
queries[query_name]["Query"] = queries[query_name]["Query"].replace("{"+var+"}", fin_check)
querys_payload_keywords_counter -= 1
if querys_payload_keywords_counter != 0: raise Exception("Query's >"+query_name+"< Payload was not complete! ->", querys_payload_keywords_counter, "| query_payload =", query_payload, "| allowed_types =", allowed_types)
if External:
allowed_external_queries = {k:v for k,v in queries.items() if 'meta' in v and 'External' in v['meta'] and v['meta']['External'] == 1}
if query_name in allowed_external_queries:
db_data = await r_obj.fetch_rows(queries=allowed_external_queries[query_name]["Query"], pool=allowed_external_queries[query_name]['meta'].get('DB', None))
if type(db_data[0]) is list: return [{k:v for k,v in rec.items()} for results in db_data for rec in results]
elif type(db_data[0]) is dict: return {DB:[{k:v for k,v in rec.items()} for rec in dbr] for results in db_data for DB, dbr in results.items()}
else: raise Exception("UNKNOWN RETURN RESULTS FROM DB QUERIES!")
return [{"Requested Query":str(query_name), "Result": "Error! Query Name Not Found ~OR~ Not Allowed to be Exposed Externally!."}]
else:
if query_name in queries:
db_data = await r_obj.fetch_rows(queries=queries[query_name]["Query"], pool=queries[query_name].get('DB', None))
return [{k:v for k,v in rec.items()} for results in db_data for rec in results ]
return [{"Requested Query":str(query_name), "Result": "Error! Query Name Not Found."}]
async def Query_DB(payload, request, init_query=None, server_side=False):
db_data = {"DB":{"Queries":[], "Results":[], "query_payload":None}}
if server_side:
db_data["DB"]["Queries"].append(init_query)
db_data["DB"]["query_payload"] = payload
db_data["DB"]["Results"] = {init_query:await db_query(r_obj=request.app.state.db, query_name=init_query, External=False, query_payload=db_data["DB"]["query_payload"])}
return db_data
combos = [('query_names', 'form_data'), ('init_query', 'query_params')]
process_POST_GET = sum([combo[0] in payload[combo[1]] for combo in combos]) > 0
if process_POST_GET or init_query is not None or server_side:
if init_query is not None:
db_data["DB"]["Queries"] += init_query
for combo in combos:
temp_query_payload = {k:v for k, v in payload[combo[1]].items() if k != combo[0] and not k.startswith('-')}
if db_data["DB"]["query_payload"] is None: db_data["DB"]["query_payload"] = temp_query_payload
else: db_data["DB"]["query_payload"].update(temp_query_payload)
for combo in combos:
if combo[0] in payload[combo[1]]:
db_data["DB"]["Queries"] += payload[combo[1]][combo[0]].replace("%20", " ").replace("%22", "").replace("%27", " ").split(',')
temp_query_payload = {k:v for k, v in payload[combo[1]].items() if k != combo[0] and not k.startswith('-')}
if db_data["DB"]["query_payload"] is None: db_data["DB"]["query_payload"] = temp_query_payload
else: db_data["DB"]["query_payload"].update(temp_query_payload)
db_data["DB"]["Queries"] = [x.strip() for x in db_data["DB"]["Queries"]]
db_data["DB"]["Queries"].reverse()
db_data["DB"]["Results"] = {a_query:await db_query(r_obj=request.app.state.db, query_name=a_query, External=True, query_payload=db_data["DB"]["query_payload"]) for a_query in db_data["DB"]["Queries"]}
return db_data
async def generate_basic_DB_tables(db_conn, do_you_want_users, active_endpoints, endpoints=None, reload_all_DBQueries=False):
if reload_all_DBQueries == False:
results = (await db_query(r_obj=db_conn, query_name="check_if_basic_DB_tables_exist", External=False))[0]
if not results['DBQueries']: await db_query(r_obj=db_conn, query_name="create_DBQueries_table", External=False)
all_queries = | |
#! /usr/bin/python3
"""
Datastreams are identified by the address that publishes them, and referenced
in transaction outputs.
For CFD leverage, 1x = 5040, 2x = 10080, etc.: 5040 is a superior highly
composite number and a colossally abundant number, and has 1-10, 12 as factors.
All wagers are in XCP.
Expiring a bet match doesn’t re‐open the constituent bets. (So all bets may be ‘filled’.)
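For example, a stored leverage value of 25200 corresponds to 5x leverage (25200 / 5040 = 5).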
"""
import struct
import decimal
import json
D = decimal.Decimal
import time
import logging
logger = logging.getLogger(__name__)
from counterpartylib.lib import config
from counterpartylib.lib import exceptions
from counterpartylib.lib import util
from counterpartylib.lib import log
from counterpartylib.lib import message_type
FORMAT = '>HIQQdII'
LENGTH = 2 + 4 + 8 + 8 + 8 + 4 + 4
ID = 40
def initialise (db):
cursor = db.cursor()
# Bets.
cursor.execute('''CREATE TABLE IF NOT EXISTS bets(
tx_index INTEGER UNIQUE,
tx_hash TEXT UNIQUE,
block_index INTEGER,
source TEXT,
feed_address TEXT,
bet_type INTEGER,
deadline INTEGER,
wager_quantity INTEGER,
wager_remaining INTEGER,
counterwager_quantity INTEGER,
counterwager_remaining INTEGER,
target_value REAL,
leverage INTEGER,
expiration INTEGER,
expire_index INTEGER,
fee_fraction_int INTEGER,
status TEXT,
FOREIGN KEY (tx_index, tx_hash, block_index) REFERENCES transactions(tx_index, tx_hash, block_index),
PRIMARY KEY (tx_index, tx_hash))
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
block_index_idx ON bets (block_index)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
index_hash_idx ON bets (tx_index, tx_hash)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
expire_idx ON bets (status, expire_index)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
feed_valid_bettype_idx ON bets (feed_address, status, bet_type)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
source_idx ON bets (source)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
status_idx ON bets (status)
''')
# Bet Matches
cursor.execute('''CREATE TABLE IF NOT EXISTS bet_matches(
id TEXT PRIMARY KEY,
tx0_index INTEGER,
tx0_hash TEXT,
tx0_address TEXT,
tx1_index INTEGER,
tx1_hash TEXT,
tx1_address TEXT,
tx0_bet_type INTEGER,
tx1_bet_type INTEGER,
feed_address TEXT,
initial_value INTEGER,
deadline INTEGER,
target_value REAL,
leverage INTEGER,
forward_quantity INTEGER,
backward_quantity INTEGER,
tx0_block_index INTEGER,
tx1_block_index INTEGER,
block_index INTEGER,
tx0_expiration INTEGER,
tx1_expiration INTEGER,
match_expire_index INTEGER,
fee_fraction_int INTEGER,
status TEXT,
FOREIGN KEY (tx0_index, tx0_hash, tx0_block_index) REFERENCES transactions(tx_index, tx_hash, block_index),
FOREIGN KEY (tx1_index, tx1_hash, tx1_block_index) REFERENCES transactions(tx_index, tx_hash, block_index))
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
match_expire_idx ON bet_matches (status, match_expire_index)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
valid_feed_idx ON bet_matches (feed_address, status)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
id_idx ON bet_matches (id)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
tx0_address_idx ON bet_matches (tx0_address)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
tx1_address_idx ON bet_matches (tx1_address)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
status_idx ON bet_matches (status)
''')
# Bet Expirations
cursor.execute('''CREATE TABLE IF NOT EXISTS bet_expirations(
bet_index INTEGER PRIMARY KEY,
bet_hash TEXT UNIQUE,
source TEXT,
block_index INTEGER,
FOREIGN KEY (block_index) REFERENCES blocks(block_index),
FOREIGN KEY (bet_index, bet_hash) REFERENCES bets(tx_index, tx_hash))
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
block_index_idx ON bet_expirations (block_index)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
source_idx ON bet_expirations (source)
''')
# Bet Match Expirations
cursor.execute('''CREATE TABLE IF NOT EXISTS bet_match_expirations(
bet_match_id TEXT PRIMARY KEY,
tx0_address TEXT,
tx1_address TEXT,
block_index INTEGER,
FOREIGN KEY (bet_match_id) REFERENCES bet_matches(id),
FOREIGN KEY (block_index) REFERENCES blocks(block_index))
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
block_index_idx ON bet_match_expirations (block_index)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
tx0_address_idx ON bet_match_expirations (tx0_address)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
tx1_address_idx ON bet_match_expirations (tx1_address)
''')
# Bet Match Resolutions
cursor.execute('''CREATE TABLE IF NOT EXISTS bet_match_resolutions(
bet_match_id TEXT PRIMARY KEY,
bet_match_type_id INTEGER,
block_index INTEGER,
winner TEXT,
settled BOOL,
bull_credit INTEGER,
bear_credit INTEGER,
escrow_less_fee INTEGER,
fee INTEGER,
FOREIGN KEY (bet_match_id) REFERENCES bet_matches(id),
FOREIGN KEY (block_index) REFERENCES blocks(block_index))
''')
def cancel_bet (db, bet, status, block_index):
cursor = db.cursor()
# Update status of bet.
bindings = {
'status': status,
'tx_hash': bet['tx_hash']
}
sql='update bets set status = :status where tx_hash = :tx_hash'
cursor.execute(sql, bindings)
log.message(db, block_index, 'update', 'bets', bindings)
util.credit(db, bet['source'], config.XCP, bet['wager_remaining'], action='recredit wager remaining', event=bet['tx_hash'])
cursor = db.cursor()
def cancel_bet_match (db, bet_match, status, block_index):
# Does not re‐open, re‐fill, etc. constituent bets.
cursor = db.cursor()
# Recredit tx0 address.
util.credit(db, bet_match['tx0_address'], config.XCP,
bet_match['forward_quantity'], action='recredit forward quantity', event=bet_match['id'])
# Recredit tx1 address.
util.credit(db, bet_match['tx1_address'], config.XCP,
bet_match['backward_quantity'], action='recredit backward quantity', event=bet_match['id'])
# Update status of bet match.
bindings = {
'status': status,
'bet_match_id': bet_match['id']
}
sql='update bet_matches set status = :status where id = :bet_match_id'
cursor.execute(sql, bindings)
log.message(db, block_index, 'update', 'bet_matches', bindings)
cursor.close()
def get_fee_fraction (db, feed_address):
'''Get fee fraction from last broadcast from the feed_address address.
'''
cursor = db.cursor()
broadcasts = list(cursor.execute('''SELECT * FROM broadcasts WHERE (status = ? AND source = ?) ORDER BY tx_index ASC''', ('valid', feed_address)))
cursor.close()
if broadcasts:
last_broadcast = broadcasts[-1]
fee_fraction_int = last_broadcast['fee_fraction_int']
if fee_fraction_int: return fee_fraction_int / 1e8
else: return 0
else:
return 0
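# Illustrative sketch of the scaling convention used above (hypothetical value):
# fee fractions are stored as integers scaled by 1e8, so a broadcast with
# fee_fraction_int == 5000000 corresponds to a 5% feed fee.
#
#     5000000 / 1e8   # -> 0.05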
def validate (db, source, feed_address, bet_type, deadline, wager_quantity,
counterwager_quantity, target_value, leverage, expiration, block_index):
problems = []
if leverage is None: leverage = 5040
# For SQLite3
if wager_quantity > config.MAX_INT or counterwager_quantity > config.MAX_INT or bet_type > config.MAX_INT \
or deadline > config.MAX_INT or leverage > config.MAX_INT or block_index + expiration > config.MAX_INT:
problems.append('integer overflow')
# Look at feed to be bet on.
cursor = db.cursor()
broadcasts = list(cursor.execute('''SELECT * FROM broadcasts WHERE (status = ? AND source = ?) ORDER BY tx_index ASC''', ('valid', feed_address)))
cursor.close()
if not broadcasts:
problems.append('feed doesn’t exist')
elif not broadcasts[-1]['text']:
problems.append('feed is locked')
elif broadcasts[-1]['timestamp'] >= deadline:
problems.append('deadline in that feed’s past')
if not bet_type in (0, 1, 2, 3):
problems.append('unknown bet type')
# Valid leverage level?
if leverage != 5040 and bet_type in (2,3): # Equal, NotEqual
problems.append('leverage used with Equal or NotEqual')
if leverage < 5040 and not bet_type in (0,1): # BullCFD, BearCFD (fractional leverage makes sense precisely with CFDs)
problems.append('leverage level too low')
if bet_type in (0,1): # BullCFD, BearCFD
if block_index >= 312350: # Protocol change.
problems.append('CFDs temporarily disabled')
if not isinstance(wager_quantity, int):
problems.append('wager_quantity must be in satoshis')
return problems, leverage
if not isinstance(counterwager_quantity, int):
problems.append('counterwager_quantity must be in satoshis')
return problems, leverage
if not isinstance(expiration, int):
problems.append('expiration must be expressed as an integer block delta')
return problems, leverage
if wager_quantity <= 0: problems.append('non‐positive wager')
if counterwager_quantity <= 0: problems.append('non‐positive counterwager')
if deadline < 0: problems.append('negative deadline')
if expiration < 0: problems.append('negative expiration')
if expiration == 0 and not (block_index >= 317500 or config.TESTNET): # Protocol change.
problems.append('zero expiration')
if target_value:
if bet_type in (0,1): # BullCFD, BearCFD
problems.append('CFDs have no target value')
if target_value < 0:
problems.append('negative target value')
if expiration > config.MAX_EXPIRATION:
problems.append('expiration overflow')
return problems, leverage
def compose (db, source, feed_address, bet_type, deadline, wager_quantity,
counterwager_quantity, target_value, leverage, expiration):
if util.get_balance(db, source, config.XCP) < wager_quantity:
raise exceptions.ComposeError('insufficient funds')
problems, leverage = validate(db, source, feed_address, bet_type, deadline, wager_quantity,
counterwager_quantity, target_value, leverage, expiration, util.CURRENT_BLOCK_INDEX)
if util.date_passed(deadline):
problems.append('deadline passed')
if problems: raise exceptions.ComposeError(problems)
data = message_type.pack(ID)
data += struct.pack(FORMAT, bet_type, deadline,
wager_quantity, counterwager_quantity, target_value,
leverage, expiration)
return (source, [(feed_address, None)], data)
def parse (db, tx, message):
bet_parse_cursor = db.cursor()
# Unpack message.
try:
if len(message) != LENGTH:
raise exceptions.UnpackError
(bet_type, deadline, wager_quantity,
counterwager_quantity, target_value, leverage,
expiration) = struct.unpack(FORMAT, message)
status = 'open'
except (exceptions.UnpackError, struct.error):
(bet_type, deadline, wager_quantity,
counterwager_quantity, target_value, leverage,
expiration, fee_fraction_int) = 0, 0, 0, 0, 0, 0, 0, 0
status = 'invalid: could not unpack'
odds, fee_fraction = 0, 0
feed_address = tx['destination']
if status == 'open':
try:
odds = util.price(wager_quantity, counterwager_quantity)
except ZeroDivisionError:
odds = 0
fee_fraction = get_fee_fraction(db, feed_address)
# Overbet
bet_parse_cursor.execute('''SELECT * FROM balances \
WHERE (address = ? AND asset = ?)''', (tx['source'], config.XCP))
balances = list(bet_parse_cursor)
if not balances:
wager_quantity = 0
else:
balance = balances[0]['quantity']
if balance < wager_quantity:
wager_quantity = balance
counterwager_quantity = int(util.price(wager_quantity, odds))
problems, leverage = validate(db, tx['source'], feed_address, bet_type, deadline, wager_quantity,
counterwager_quantity, target_value, leverage, expiration, tx['block_index'])
if problems: status = 'invalid: ' + '; '.join(problems)
# Debit quantity wagered. (Escrow.)
if status == 'open':
util.debit(db, tx['source'], config.XCP, wager_quantity, action='bet', event=tx['tx_hash'])
# Add parsed transaction to message-type–specific table.
bindings = {
'tx_index': tx['tx_index'],
'tx_hash': tx['tx_hash'],
'block_index': tx['block_index'],
'source': tx['source'],
'feed_address': feed_address,
'bet_type': bet_type,
'deadline': deadline,
'wager_quantity': wager_quantity,
'wager_remaining': wager_quantity,
'counterwager_quantity': counterwager_quantity,
'counterwager_remaining': counterwager_quantity,
'target_value': target_value,
'leverage': leverage,
'expiration': expiration,
'expire_index': tx['block_index'] + expiration,
'fee_fraction_int': fee_fraction * 1e8,
'status': status,
}
if "integer overflow" not in status:
        sql = 'insert into bets values(:tx_index, :tx_hash, :block_index, :source, :feed_address, :bet_type, :deadline, :wager_quantity, :wager_remaining, :counterwager_quantity, :counterwager_remaining, :target_value, :leverage, :expiration, :expire_index, :fee_fraction_int, :status)'
        cursor.execute(sql, bindings)
# Repository: zepheira/versa
# versa.pipeline.other_actions
import itertools
from amara3 import iri
from versa import I, VERSA_BASEIRI, ORIGIN, RELATIONSHIP, TARGET, ATTRIBUTES, VTYPE_REL
#from versa.terms import VFPRINT_REL
#from versa import util
#from versa.util import simple_lookup
from . import context, materialize_entity, create_resource, is_pipeline_action
__all__ = [ 'var', 'extra', 'attr', 'origin', 'rel', 'target', 'values',
'ifexists', 'if_', 'foreach', 'follow', 'toiri', 'lookup',
'regex_match_modify', 'compose', 'ignore', 'replace_from',
'action_template', 'contains', 'SKIP'
]
SKIP = object()
# DEFAULT_ARG = object()
def var(name):
'''
Action function generator to retrieve a variable from context
'''
def _var(ctx):
_name = name(ctx) if is_pipeline_action(name) else name
return ctx.variables.get(_name)
_var.is_pipeline_action = True
return _var
def extra(key, default=None):
'''
Action function generator to retrieve an extra value from context
'''
def _extra(ctx):
_key = key(ctx) if is_pipeline_action(key) else key
_default = default(ctx) if is_pipeline_action(default) else default
return ctx.extras.get(_key, _default)
_extra.is_pipeline_action = True
return _extra
def attr(aid):
'''
Action function generator to retrieve an attribute from the current link
'''
def _attr(ctx):
_aid = aid(ctx) if is_pipeline_action(aid) else aid
return ctx.current_link[ATTRIBUTES].get(_aid)
_attr.is_pipeline_action = True
return _attr
def contains(l, val):
'''
    Action function generator to check whether a list contains a given value
    (or any of a given list of values)
'''
def _contains(ctx):
_l = l(ctx) if is_pipeline_action(l) else l
vlist = val if isinstance(val, list) else [val]
for v in vlist:
if v in _l:
return True
else:
return False
_contains.is_pipeline_action = True
return _contains
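# Illustrative sketch of composing the generators above (`ctx` stands for the Versa
# pipeline context supplied at run time; the variable and attribute names are
# hypothetical). Each call builds a closure that is only evaluated when the pipeline
# invokes it with a context:
#
#     check_genre = contains(var('genres'), 'fiction')   # True if ctx.variables['genres'] contains 'fiction'
#     link_label  = attr('label')                        # the 'label' attribute of the current link
#     fallback    = extra('default-label', 'Untitled')   # ctx.extras lookup with a default
#
#     check_genre(ctx), link_label(ctx), fallback(ctx)   # evaluated later, inside the pipeline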
def origin(fprint=None):
'''
Action function generator to return the origin of the context's current link
Arguments:
        fprint - Used to derive a unique hash key input for the materialized resource.
            May be a list of (key, value) pairs, from which the ID is derived through
            the Versa hash convention, or an action function that returns the ID.
            If a list of pairs is given, the key of the first pair must be the Versa
            type relationship, and its value supplies the type used in the hash generation.
Returns:
origin of the context's current link, or origin computed from provided unique arg
'''
def _origin(ctx):
'''
Versa action function Utility to return the origin of the context's current link
        :param ctx: Versa context used in processing (e.g. includes the prototype link)
:return: origin of the context's current link
'''
o = ctx.current_link[ORIGIN]
if is_pipeline_action(fprint):
o = fprint(ctx)
elif fprint:
# strip None values from computed unique list, including pairs where v is None
typ = None
computed_fprint = []
for k, v in fprint:
if typ is None:
if k != VTYPE_REL:
raise ValueError('Key of the first unique list pair must be the Versa type relationship')
typ = v
if None in (k, v): continue
v = v if isinstance(v, list) else [v]
for subitem in v:
subval = subitem(ctx) if is_pipeline_action(subitem) else subitem
if subval:
subval = subval if isinstance(subval, list) else [subval]
computed_fprint.extend([(k, s) for s in subval])
o = materialize_entity(ctx, typ, fprint=computed_fprint)
# print(o, ctx.extras)
return o
_origin.is_pipeline_action = True
return _origin
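# Illustrative sketch of the fprint convention documented above (the IRIs and the
# 'title' attribute are hypothetical). The first pair must use the Versa type
# relationship; the remaining pairs feed the hash that materializes the new ID:
#
#     book = origin(fprint=[
#         (VTYPE_REL, I('http://example.org/Book')),        # type pair must come first
#         (I('http://example.org/title'), attr('title')),   # contributes to the hashed fingerprint
#     ])
#     book(ctx)   # evaluated by the pipeline; returns the materialized entity ID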
def rel():
'''
Action function generator to return the relationship of the context's current link
    :return: relationship of the context's current link
'''
def _rel(ctx):
'''
Versa action function Utility to return the relationship of the context's current link
        :param ctx: Versa context used in processing (e.g. includes the prototype link)
:return: relationship of the context's current link
'''
return ctx.current_link[RELATIONSHIP]
_rel.is_pipeline_action = True
return _rel
def target():
'''
Action function generator to return the target of the context's current link
:return: target of the context's current link
'''
def _target(ctx):
'''
Versa action function Utility to return the target of the context's current link
        :param ctx: Versa context used in processing (e.g. includes the prototype link)
:return: Target of the context's current link
'''
return ctx.current_link[TARGET]
_target.is_pipeline_action = True
return _target
def values(*rels):
'''
Action function generator to compute a set of relationships from criteria
:param rels: List of relationships to compute
:return: Versa action function to do the actual work
'''
#Action function generator to multiplex a relationship at processing time
def _values(ctx):
'''
Versa action function Utility to specify a list of relationships
        :param ctx: Versa context used in processing (e.g. includes the prototype link)
        :return: List of computed relationships
'''
computed_rels = []
for rel in rels:
if is_pipeline_action(rel):
rel = rel(ctx)
if isinstance(rel, list):
computed_rels.extend(rel)
else:
computed_rels.append(rel)
return computed_rels
_values.is_pipeline_action = True
return _values
def ifexists(test, value, alt=None):
'''
Action function generator providing a limited if/then/else type primitive
:param test: Expression to be tested to determine the branch path
:param value: Expression providing the result if test is true
:param alt: Expression providing the result if test is false
:return: Action representing the actual work
'''
def _ifexists(ctx):
'''
        Versa action function utility implementing a limited if/then/else primitive
:param ctx: Versa context used in processing (e.g. includes the prototype link)
:return: Value computed according to the test expression result
'''
_test = test(ctx) if is_pipeline_action(test) else test
if _test:
return value(ctx) if is_pipeline_action(value) else value
else:
return alt(ctx) if is_pipeline_action(alt) else alt
_ifexists.is_pipeline_action = True
return _ifexists
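# Illustrative sketch (hypothetical attribute name): fall back to a constant when
# the current link carries no 'name' attribute.
#
#     label = ifexists(attr('name'), attr('name'), alt='(unnamed)')
#     label(ctx)   # the 'name' attribute if present, otherwise '(unnamed)'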
def if_(test, iftrue, iffalse=None, vars_=None):
'''
Action function generator providing a fuller if/then/else type primitive
:param test: Expression to be tested to determine the branch path
:param iftrue: Expression to be executed (perhaps for side effects) if test is true
:param iffalse: Expression to be executed (perhaps for side effects) if test is false
    :param vars_: Optional dictionary of variables used when test is a string expression to be evaluated
    :return: Action representing the actual work; it returns the value computed from iftrue if the test evaluates to true, otherwise from iffalse
'''
vars_ = vars_ or {}
def _if_(ctx):
'''
Versa action function utility to execute an if/then/else type primitive
:param ctx: Versa context used in processing (e.g. includes the prototype link)
:return: Value computed according to the test expression result
'''
out_vars = {'target': ctx.current_link[TARGET]}
if isinstance(test, str):
for k, v in vars_.items():
#FIXME: Less crude test
assert isinstance(k, str)
_v = v(ctx) if is_pipeline_action(v) else v
out_vars[k] = _v
_test = eval(test, out_vars, out_vars)
#Test is an expression to be dynamically computed
#for m in ACTION_FUNCTION_PAT.findall(test):
# func_name = m.group(1)
else:
_test = test(ctx) if is_pipeline_action(test) else test
if _test:
return iftrue(ctx) if is_pipeline_action(iftrue) else iftrue
elif iffalse:
return iffalse(ctx) if is_pipeline_action(iffalse) else iffalse
_if_.is_pipeline_action = True
return _if_
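# Illustrative sketch (hypothetical variable and attribute names): when `test` is a
# string it is evaluated with the supplied vars_ plus the current link's target
# bound to the name `target`.
#
#     pick_title = if_('len(isbn) == 13', iftrue=attr('title'),
#                      iffalse=attr('short_title'), vars_={'isbn': attr('isbn')})
#     pick_title(ctx)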
# XXX: The top-level origin/rel/target action functions are shadowed by the parameters here
def foreach(origin=None, rel=None, target=None, attributes=None, action=None):
'''
Action function generator to compute a combination of links
:return: Versa action function to do the actual work
'''
def _foreach(ctx):
'''
Versa action function utility to compute a list of values from a list of expressions
:param ctx: Versa context used in processing (e.g. includes the prototype link)
'''
_origin = origin(ctx) if is_pipeline_action(origin) else origin
_rel = rel(ctx) if is_pipeline_action(rel) else rel
_target = target(ctx) if is_pipeline_action(target) else target
_attributes = attributes(ctx) if is_pipeline_action(attributes) else attributes
(o, r, t, a) = ctx.current_link
o = [o] if _origin is None else (_origin if isinstance(_origin, list) else [_origin])
r = [r] if _rel is None else (_rel if isinstance(_rel, list) else [_rel])
t = [t] if _target is None else (_target if isinstance(_target, list) else [_target])
#a = [a] if _attributes is None else _attributes
a = [a] if _attributes is None else (_attributes if isinstance(_attributes, list) else [_attributes])
# print([(curr_o, curr_r, curr_t, curr_a) for (curr_o, curr_r, curr_t, curr_a)
# in product(o, r, t, a)])
# Assemble the possible context links, ignoring those with blank or None origins
subcontexts = [ ctx.copy(current_link=(curr_o, curr_r, curr_t, curr_a))
for (curr_o, curr_r, curr_t, curr_a)
in itertools.product(o, r, t, a) if curr_o ]
if action:
if not(is_pipeline_action(action)):
raise TypeError('foreach() action arg must be callable')
for subctx in subcontexts:
action(subctx)
else:
return subcontexts
#for (curr_o, curr_r, curr_t, curr_a) in product(origin or [o], rel or [r], target or [t], attributes or [a]):
# newctx = ctx.copy(current_link=(curr_o, curr_r, curr_t, curr_a))
#ctx.output_model.add(I(objid), VTYPE_REL, I(iri.absolutize(_typ, ctx.base)), {})
_foreach.is_pipeline_action = True
return _foreach
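# Illustrative sketch (hypothetical relationship IRIs): fan the current link out
# over several relationships; with no action supplied, the copied contexts are
# returned for further processing.
#
#     fan_out = foreach(rel=[I('http://example.org/name'), I('http://example.org/label')])
#     subcontexts = fan_out(ctx)   # one copied context per (origin, rel, target, attrs) combination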
def follow(*rels, origin=None, action=None):
| |
file_fastq_3,
file_fastq_4,
file_bam_1_1,
file_bam_2_1,
file_tsv_1_2,
mad_quality_metric_1_2,
chip_seq_quality_metric,
chipseq_filter_quality_metric,
analysis_step_run_bam,
analysis_step_version_bam,
analysis_step_bam,
pipeline_bam):
testapp.patch_json(chip_seq_quality_metric['@id'], {'quality_metric_of': [file_bam_1_1['@id']],
'processing_stage': 'unfiltered',
'total': 10000000,
'mapped': 10000000,
'read1': 100, 'read2': 100})
testapp.patch_json(file_fastq_3['@id'], {'read_length': 20})
testapp.patch_json(file_fastq_4['@id'], {'read_length': 100})
testapp.patch_json(file_bam_1_1['@id'], {'step_run': analysis_step_run_bam['@id'],
'assembly': 'mm10',
'derived_from': [file_fastq_3['@id']]})
testapp.patch_json(file_bam_2_1['@id'], {'step_run': analysis_step_run_bam['@id'],
'assembly': 'mm10',
'derived_from': [file_fastq_4['@id']]})
testapp.patch_json(pipeline_bam['@id'], {'title':
'ChIP-seq read mapping'})
testapp.patch_json(biosample_1['@id'], {'donor': mouse_donor_1['@id']})
testapp.patch_json(biosample_2['@id'], {'donor': mouse_donor_1['@id']})
testapp.patch_json(biosample_1['@id'], {'organism': '/organisms/mouse/'})
testapp.patch_json(biosample_2['@id'], {'organism': '/organisms/mouse/'})
testapp.patch_json(biosample_1['@id'], {'model_organism_sex': 'mixed'})
testapp.patch_json(biosample_2['@id'], {'model_organism_sex': 'mixed'})
testapp.patch_json(library_1['@id'], {'biosample': biosample_1['@id']})
testapp.patch_json(library_2['@id'], {'biosample': biosample_2['@id']})
testapp.patch_json(replicate_1_1['@id'], {'library': library_1['@id']})
testapp.patch_json(replicate_2_1['@id'], {'library': library_2['@id']})
testapp.patch_json(base_experiment['@id'], {'status': 'released',
'date_released': '2016-01-01',
'assay_term_name': 'ChIP-seq'})
res = testapp.get(base_experiment['@id'] + '@@index-data')
assert any(error['category'] ==
'missing target' for error in collect_audit_errors(res))
def test_audit_experiment_chip_seq_library_complexity_standards(testapp,
base_experiment,
replicate_1_1,
replicate_2_1,
library_1,
library_2,
biosample_1,
biosample_2,
mouse_donor_1,
file_fastq_3,
file_fastq_4,
file_bam_1_1,
file_bam_2_1,
file_tsv_1_2,
mad_quality_metric_1_2,
chip_seq_quality_metric,
chipseq_filter_quality_metric,
analysis_step_run_bam,
analysis_step_version_bam,
analysis_step_bam,
pipeline_bam,
target_H3K9me3):
testapp.patch_json(chip_seq_quality_metric['@id'], {'quality_metric_of': [file_bam_1_1['@id']],
'processing_stage': 'unfiltered',
'total': 10000000,
'mapped': 10000000,
'read1': 100, 'read2': 100})
testapp.patch_json(file_fastq_3['@id'], {'read_length': 20})
testapp.patch_json(file_fastq_4['@id'], {'read_length': 100})
testapp.patch_json(file_bam_1_1['@id'], {'step_run': analysis_step_run_bam['@id'],
'assembly': 'mm10',
'derived_from': [file_fastq_3['@id']]})
testapp.patch_json(file_bam_2_1['@id'], {'step_run': analysis_step_run_bam['@id'],
'assembly': 'mm10',
'derived_from': [file_fastq_4['@id']]})
testapp.patch_json(pipeline_bam['@id'], {'title':
'ChIP-seq read mapping'})
testapp.patch_json(biosample_1['@id'], {'donor': mouse_donor_1['@id']})
testapp.patch_json(biosample_2['@id'], {'donor': mouse_donor_1['@id']})
testapp.patch_json(biosample_1['@id'], {'organism': '/organisms/mouse/'})
testapp.patch_json(biosample_2['@id'], {'organism': '/organisms/mouse/'})
testapp.patch_json(biosample_1['@id'], {'model_organism_sex': 'mixed'})
testapp.patch_json(biosample_2['@id'], {'model_organism_sex': 'mixed'})
testapp.patch_json(library_1['@id'], {'biosample': biosample_1['@id']})
testapp.patch_json(library_2['@id'], {'biosample': biosample_2['@id']})
testapp.patch_json(replicate_1_1['@id'], {'library': library_1['@id']})
testapp.patch_json(replicate_2_1['@id'], {'library': library_2['@id']})
testapp.patch_json(base_experiment['@id'], {'target': target_H3K9me3['@id'],
'status': 'released',
'date_released': '2016-01-01',
'assay_term_name': 'ChIP-seq'})
res = testapp.get(base_experiment['@id'] + '@@index-data')
assert any(error['category'] ==
'severe bottlenecking' for error in collect_audit_errors(res))
def test_audit_experiment_dnase_low_spot_score(testapp,
base_experiment,
replicate_1_1,
library_1,
biosample_1,
mouse_donor_1,
file_fastq_3,
file_bam_1_1,
mad_quality_metric_1_2,
hotspot_quality_metric,
analysis_step_run_bam,
analysis_step_version_bam,
analysis_step_bam,
file_tsv_1_1,
pipeline_bam):
testapp.patch_json(file_tsv_1_1['@id'], {'output_type': 'hotspots'})
testapp.patch_json(file_fastq_3['@id'], {'read_length': 20})
testapp.patch_json(file_bam_1_1['@id'], {'step_run': analysis_step_run_bam['@id'],
'assembly': 'mm10',
'output_type': 'alignments',
'derived_from': [file_fastq_3['@id']]})
testapp.patch_json(pipeline_bam['@id'], {'title':
'DNase-HS pipeline single-end - Version 2'})
testapp.patch_json(biosample_1['@id'], {'donor': mouse_donor_1['@id']})
testapp.patch_json(biosample_1['@id'], {'organism': '/organisms/mouse/'})
testapp.patch_json(biosample_1['@id'], {'model_organism_sex': 'mixed'})
testapp.patch_json(library_1['@id'], {'biosample': biosample_1['@id']})
testapp.patch_json(replicate_1_1['@id'], {'library': library_1['@id']})
testapp.patch_json(base_experiment['@id'], {'status': 'released',
'date_released': '2016-01-01',
'assay_term_name': 'DNase-seq'})
res = testapp.get(base_experiment['@id'] + '@@index-data')
assert any(error['category'] ==
'low spot score' for error in collect_audit_errors(res))
def test_audit_experiment_dnase_seq_low_read_depth(testapp,
base_experiment,
replicate_1_1,
library_1,
biosample_1,
mouse_donor_1,
file_fastq_3,
file_bam_1_1,
mad_quality_metric_1_2,
chip_seq_quality_metric,
analysis_step_run_bam,
analysis_step_version_bam,
analysis_step_bam,
pipeline_bam):
testapp.patch_json(file_fastq_3['@id'], {'read_length': 20})
testapp.patch_json(file_bam_1_1['@id'], {'step_run': analysis_step_run_bam['@id'],
'assembly': 'mm10',
'output_type': 'alignments',
'derived_from': [file_fastq_3['@id']]})
testapp.patch_json(pipeline_bam['@id'], {'title':
'DNase-HS pipeline single-end - Version 2'})
testapp.patch_json(chip_seq_quality_metric['@id'], {'mapped': 23})
testapp.patch_json(biosample_1['@id'], {'donor': mouse_donor_1['@id']})
testapp.patch_json(biosample_1['@id'], {'organism': '/organisms/mouse/'})
testapp.patch_json(biosample_1['@id'], {'model_organism_sex': 'mixed'})
testapp.patch_json(library_1['@id'], {'biosample': biosample_1['@id']})
testapp.patch_json(replicate_1_1['@id'], {'library': library_1['@id']})
testapp.patch_json(base_experiment['@id'], {'status': 'released',
'date_released': '2016-01-01',
'assay_term_name': 'DNase-seq'})
res = testapp.get(base_experiment['@id'] + '@@index-data')
assert any(error['category'] ==
'extremely low read depth' for error in collect_audit_errors(res))
def test_audit_experiment_wgbs_coverage(testapp,
base_experiment,
replicate_1_1,
replicate_2_1,
library_1,
library_2,
biosample_1,
biosample_2,
mouse_donor_1,
file_fastq_3,
file_fastq_4,
file_bam_1_1,
file_bam_2_1,
file_bed_methyl,
chip_seq_quality_metric,
analysis_step_run_bam,
analysis_step_version_bam,
analysis_step_bam,
pipeline_bam):
testapp.patch_json(file_bam_1_1['@id'], {'step_run': analysis_step_run_bam['@id'],
'assembly': 'mm10',
'derived_from': [file_fastq_3['@id']]})
testapp.patch_json(file_bam_2_1['@id'], {'step_run': analysis_step_run_bam['@id'],
'assembly': 'mm10',
'derived_from': [file_fastq_4['@id']]})
testapp.patch_json(pipeline_bam['@id'], {'title':
'WGBS paired-end pipeline'})
testapp.patch_json(chip_seq_quality_metric['@id'], {'quality_metric_of': [file_bed_methyl['@id']],
'processing_stage': 'filtered',
'total': 30000000,
'mapped': 30000000,
'read1': 100, 'read2': 100})
testapp.patch_json(biosample_1['@id'], {'donor': mouse_donor_1['@id']})
testapp.patch_json(biosample_2['@id'], {'donor': mouse_donor_1['@id']})
testapp.patch_json(biosample_1['@id'], {'organism': '/organisms/mouse/'})
testapp.patch_json(biosample_2['@id'], {'organism': '/organisms/mouse/'})
testapp.patch_json(biosample_1['@id'], {'model_organism_sex': 'mixed'})
testapp.patch_json(biosample_2['@id'], {'model_organism_sex': 'mixed'})
testapp.patch_json(library_1['@id'], {'biosample': biosample_1['@id']})
testapp.patch_json(library_2['@id'], {'biosample': biosample_2['@id']})
testapp.patch_json(replicate_1_1['@id'], {'library': library_1['@id']})
testapp.patch_json(replicate_2_1['@id'], {'library': library_2['@id']})
testapp.patch_json(base_experiment['@id'], {'status': 'released',
'date_released': '2016-01-01',
'assay_term_name': 'whole-genome shotgun bisulfite sequencing'})
res = testapp.get(base_experiment['@id'] + '@@index-data')
assert any(error['category'] ==
'extremely low coverage' for error in collect_audit_errors(res))
def test_audit_experiment_dnase_low_read_length(testapp,
base_experiment,
replicate_1_1,
library_1,
biosample_1,
mouse_donor_1,
file_fastq_3,
file_bam_1_1,
mad_quality_metric_1_2,
chip_seq_quality_metric,
analysis_step_run_bam,
analysis_step_version_bam,
analysis_step_bam,
pipeline_bam):
testapp.patch_json(file_fastq_3['@id'], {'read_length': 20})
testapp.patch_json(file_bam_1_1['@id'], {'step_run': analysis_step_run_bam['@id'],
'assembly': 'mm10',
'output_type': 'alignments',
'derived_from': [file_fastq_3['@id']]})
testapp.patch_json(pipeline_bam['@id'], {'title':
'DNase-HS pipeline single-end - Version 2'})
testapp.patch_json(chip_seq_quality_metric['@id'], {'mapped': 23})
testapp.patch_json(biosample_1['@id'], {'donor': mouse_donor_1['@id']})
testapp.patch_json(biosample_1['@id'], {'organism': '/organisms/mouse/'})
testapp.patch_json(biosample_1['@id'], {'model_organism_sex': 'mixed'})
testapp.patch_json(library_1['@id'], {'biosample': biosample_1['@id']})
testapp.patch_json(replicate_1_1['@id'], {'library': library_1['@id']})
testapp.patch_json(base_experiment['@id'], {'status': 'released',
'date_released': '2016-01-01',
'assay_term_name': 'DNase-seq'})
res = testapp.get(base_experiment['@id'] + '@@index-data')
assert any(error['category'] ==
'insufficient read length' for error in collect_audit_errors(res))
def test_audit_experiment_dnase_low_correlation(testapp,
base_experiment,
replicate_1_1,
replicate_2_1,
library_1,
library_2,
biosample_1,
mouse_donor_1,
file_fastq_3,
bigWig_file,
file_bam_1_1,
correlation_quality_metric,
chip_seq_quality_metric,
analysis_step_run_bam,
analysis_step_version_bam,
analysis_step_bam,
pipeline_bam):
testapp.patch_json(bigWig_file['@id'], {'dataset': base_experiment['@id']})
testapp.patch_json(
correlation_quality_metric['@id'], {'quality_metric_of': [bigWig_file['@id']],
'Pearson correlation': 0.15})
testapp.patch_json(file_bam_1_1['@id'], {'step_run': analysis_step_run_bam['@id'],
'assembly': 'mm10',
'output_type': 'alignments',
'derived_from': [file_fastq_3['@id']]})
testapp.patch_json(pipeline_bam['@id'], {'title':
'DNase-HS pipeline single-end - Version 2'})
testapp.patch_json(chip_seq_quality_metric['@id'], {'mapped': 23})
testapp.patch_json(biosample_1['@id'], {'donor': mouse_donor_1['@id']})
testapp.patch_json(biosample_1['@id'], {'organism': '/organisms/mouse/'})
testapp.patch_json(biosample_1['@id'], {'model_organism_sex': 'mixed'})
testapp.patch_json(library_1['@id'], {'biosample': biosample_1['@id']})
testapp.patch_json(library_2['@id'], {'biosample': biosample_1['@id']})
testapp.patch_json(replicate_1_1['@id'], {'library': library_1['@id']})
testapp.patch_json(replicate_2_1['@id'], {'library': library_2['@id']})
testapp.patch_json(base_experiment['@id'], {'status': 'released',
'date_released': '2016-01-01',
'assay_term_name': 'DNase-seq'})
res = testapp.get(base_experiment['@id'] + '@@index-data')
assert any(error['category'] ==
'insufficient replicate concordance' for error in collect_audit_errors(res))
# duplication rate audit was removed from v54
def test_audit_experiment_dnase_seq_missing_read_depth(testapp,
base_experiment,
replicate_1_1,
library_1,
biosample_1,
mouse_donor_1,
file_fastq_3,
file_bam_1_1,
mad_quality_metric_1_2,
analysis_step_run_bam,
analysis_step_version_bam,
analysis_step_bam,
pipeline_bam):
testapp.patch_json(file_fastq_3['@id'], {'read_length': 20})
testapp.patch_json(file_bam_1_1['@id'], {'step_run': analysis_step_run_bam['@id'],
'assembly': 'mm10',
'output_type': 'alignments',
'derived_from': [file_fastq_3['@id']]})
testapp.patch_json(pipeline_bam['@id'], {'title':
'DNase-HS pipeline single-end - Version 2'})
testapp.patch_json(biosample_1['@id'], {'donor': mouse_donor_1['@id']})
testapp.patch_json(biosample_1['@id'], {'organism': '/organisms/mouse/'})
testapp.patch_json(biosample_1['@id'], {'model_organism_sex': 'mixed'})
testapp.patch_json(library_1['@id'], {'biosample': biosample_1['@id']})
testapp.patch_json(replicate_1_1['@id'], {'library': library_1['@id']})
testapp.patch_json(base_experiment['@id'], {'status': 'released',
'date_released': '2016-01-01',
'assay_term_name': 'DNase-seq'})
res = testapp.get(base_experiment['@id'] + '@@index-data')
assert any(error['category'] ==
'missing read depth' for error in collect_audit_errors(res))
def test_audit_experiment_chip_seq_unfiltered_missing_read_depth(testapp,
base_experiment,
replicate_1_1,
replicate_2_1,
library_1,
library_2,
biosample_1,
biosample_2,
mouse_donor_1,
file_fastq_3,
file_fastq_4,
file_bam_1_1,
file_bam_2_1,
file_tsv_1_2,
mad_quality_metric_1_2,
chip_seq_quality_metric,
chipseq_filter_quality_metric,
analysis_step_run_bam,
analysis_step_version_bam,
analysis_step_bam,
pipeline_bam,
target_H3K9me3):
testapp.patch_json(file_fastq_3['@id'], {'read_length': 20})
testapp.patch_json(file_fastq_4['@id'], {'read_length': 100})
testapp.patch_json(file_bam_1_1['@id'], {'step_run': analysis_step_run_bam['@id'],
'assembly': 'mm10',
'output_type': 'unfiltered alignments',
'derived_from': [file_fastq_3['@id']]})
testapp.patch_json(file_bam_2_1['@id'], {'step_run': analysis_step_run_bam['@id'],
'assembly': 'mm10',
'output_type': 'unfiltered alignments',
'derived_from': [file_fastq_4['@id']]})
testapp.patch_json(pipeline_bam['@id'], {'title':
'ChIP-seq read mapping'})
testapp.patch_json(biosample_1['@id'], {'donor': mouse_donor_1['@id']})
testapp.patch_json(biosample_2['@id'], {'donor': mouse_donor_1['@id']})
testapp.patch_json(biosample_1['@id'], {'organism': '/organisms/mouse/'})
testapp.patch_json(biosample_2['@id'], {'organism': '/organisms/mouse/'})
testapp.patch_json(biosample_1['@id'], {'model_organism_sex': 'mixed'})
testapp.patch_json(biosample_2['@id'], {'model_organism_sex': 'mixed'})
testapp.patch_json(library_1['@id'], {'biosample': biosample_1['@id']})
testapp.patch_json(library_2['@id'], {'biosample': biosample_2['@id']})
testapp.patch_json(replicate_1_1['@id'], {'library': library_1['@id']})
testapp.patch_json(replicate_2_1['@id'], {'library': library_2['@id']})
testapp.patch_json(base_experiment['@id'], {'target': target_H3K9me3['@id'],
'status': 'released',
'date_released': '2016-01-01',
'assay_term_name': 'ChIP-seq'})
res = testapp.get(base_experiment['@id'] + '@@index-data')
assert all(error['category'] !=
'missing read depth' for error in collect_audit_errors(res))
def test_audit_experiment_out_of_date_analysis_added_fastq(testapp,
base_experiment,
replicate_1_1,
replicate_2_1,
file_fastq_3,
file_fastq_4,
file_bam_1_1,
file_bam_2_1):
testapp.patch_json(base_experiment['@id'], {'assay_term_name': 'ChIP-seq'})
testapp.patch_json(file_fastq_4['@id'], {'replicate': replicate_1_1['@id']})
testapp.patch_json(file_bam_1_1['@id'], {'derived_from': [file_fastq_3['@id']]})
testapp.patch_json(file_bam_2_1['@id'], {'derived_from': [file_fastq_3['@id']]})
res = testapp.get(base_experiment['@id'] + '@@index-data')
assert any(error['category'] ==
'out of date analysis' for error in collect_audit_errors(res))
def test_audit_experiment_out_of_date_analysis_removed_fastq(testapp,
base_experiment,
replicate_1_1,
replicate_2_1,
file_fastq_3,
file_fastq_4,
file_bam_1_1,
file_bam_2_1):
testapp.patch_json(base_experiment['@id'], {'assay_term_name': 'ChIP-seq'})
testapp.patch_json(file_bam_1_1['@id'], {'derived_from': [file_fastq_3['@id']]})
testapp.patch_json(file_bam_2_1['@id'], {'derived_from': [file_fastq_4['@id']]})
testapp.patch_json(file_fastq_3['@id'], {'status': 'deleted'})
res = testapp.get(base_experiment['@id'] + '@@index-data')
assert any(error['category'] == 'out of date analysis' for error in collect_audit_errors(res))
def test_audit_experiment_not_out_of_date_analysis_DNase(testapp,
base_experiment,
replicate_1_1,
replicate_1_2,
file_fastq_3,
file_fastq_4,
file_bam_1_1,
file_bam_2_1):
testapp.patch_json(base_experiment['@id'], {'assay_term_name': 'DNase-seq'})
testapp.patch_json(file_bam_1_1['@id'], {'derived_from': [file_fastq_3['@id']]})
testapp.patch_json(file_bam_2_1['@id'], {'derived_from': [file_fastq_4['@id']]})
testapp.patch_json(file_fastq_3['@id'], {'replicate': replicate_1_1['@id']})
testapp.patch_json(file_fastq_4['@id'], {'replicate': replicate_1_2['@id']})
res = testapp.get(base_experiment['@id'] + '@@index-data')
assert all(error['category'] != 'out of date analysis' for error in collect_audit_errors(res))
def test_audit_experiment_out_of_date_analysis_DNase(testapp,
base_experiment,
replicate_1_1,
replicate_1_2,
file_fastq_3,
file_fastq_4,
file_bam_1_1,
file_bam_2_1):
testapp.patch_json(base_experiment['@id'], {'assay_term_name': 'DNase-seq'})
testapp.patch_json(file_bam_1_1['@id'], {'derived_from': [file_fastq_3['@id']]})
testapp.patch_json(file_bam_2_1['@id'], {'derived_from': [file_fastq_4['@id']]})
testapp.patch_json(file_fastq_3['@id'], {'replicate': replicate_1_1['@id'],
'status': 'deleted'})
testapp.patch_json(file_fastq_4['@id'], {'replicate': replicate_1_2['@id']})
res = testapp.get(base_experiment['@id'] + '@@index-data')
assert any(error['category'] == 'out of date analysis' for error in collect_audit_errors(res))
def test_audit_experiment_no_out_of_date_analysis(testapp,
base_experiment,
replicate_1_1,
replicate_2_1,
file_fastq_3,
file_fastq_4,
file_bam_1_1,
file_bam_2_1):
testapp.patch_json(file_bam_1_1['@id'], {'derived_from': [file_fastq_3['@id']]})
testapp.patch_json(file_bam_2_1['@id'], {'derived_from': [file_fastq_4['@id']]})
res = testapp.get(base_experiment['@id'] + '@@index-data')
assert all(error['category'] !=
'out of date analysis' for error in collect_audit_errors(res))
# def test_audit_experiment_modERN_control_missing_files() removed from v54
# def test_audit_experiment_modERN_experiment_missing_files() removed from v54
def test_audit_experiment_wgbs_standards(testapp,
base_experiment,
replicate_1_1,
replicate_2_1,
library_1,
library_2,
biosample_1,
biosample_2,
mouse_donor_1,
file_fastq_3,
file_fastq_4,
file_bam_1_1,
file_bam_2_1,
file_tsv_1_2,
file_bed_methyl,
wgbs_quality_metric,
analysis_step_run_bam,
analysis_step_version_bam,
analysis_step_bam,
pipeline_bam,
target_H3K9me3):
testapp.patch_json(file_fastq_3['@id'], {'read_length': 20})
testapp.patch_json(file_fastq_4['@id'], {'read_length': 100})
testapp.patch_json(file_bam_1_1['@id'], {'step_run': analysis_step_run_bam['@id'],
'assembly': 'mm10',
'derived_from': [file_fastq_3['@id']]})
testapp.patch_json(file_bam_2_1['@id'], {'step_run': analysis_step_run_bam['@id'],
'assembly': 'mm10',
'derived_from': [file_fastq_4['@id']]})
testapp.patch_json(pipeline_bam['@id'], {'title':
'WGBS paired-end pipeline'})
testapp.patch_json(biosample_1['@id'], {'donor': mouse_donor_1['@id']})
testapp.patch_json(biosample_2['@id'], {'donor': mouse_donor_1['@id']})
testapp.patch_json(biosample_1['@id'], {'organism': '/organisms/mouse/'})
testapp.patch_json(biosample_2['@id'], {'organism': '/organisms/mouse/'})
testapp.patch_json(biosample_1['@id'], {'model_organism_sex': 'mixed'})
testapp.patch_json(biosample_2['@id'], {'model_organism_sex': 'mixed'})
testapp.patch_json(library_1['@id'], {'biosample': biosample_1['@id']})
testapp.patch_json(library_2['@id'], {'biosample': biosample_2['@id']})
testapp.patch_json(replicate_1_1['@id'], {'library': library_1['@id']})
testapp.patch_json(replicate_2_1['@id'], {'library': library_2['@id']})
testapp.patch_json(base_experiment['@id'], {'status': 'released',
'date_released': '2016-01-01',
'assay_term_name': 'whole-genome shotgun bisulfite sequencing'})
res = testapp.get(base_experiment['@id'] + '@@index-data')
assert any(error['category'] ==
'high lambda C methylation ratio' for error in collect_audit_errors(res))
def test_audit_experiment_modern_chip_seq_standards(testapp,
base_experiment,
replicate_1_1,
replicate_2_1,
library_1,
library_2,
biosample_1,
biosample_2,
mouse_donor_1,
file_fastq_3,
file_fastq_4,
file_bam_1_1,
file_bam_2_1,
file_tsv_1_2,
mad_quality_metric_1_2,
chip_seq_quality_metric,
analysis_step_run_bam,
analysis_step_version_bam,
analysis_step_bam,
pipeline_bam,
target,
award_modERN):
testapp.patch_json(chip_seq_quality_metric['@id'], {'quality_metric_of': [file_bam_1_1['@id']],
'processing_stage': 'filtered',
'total': 100000,
'mapped': 100000,
'read1': 100, 'read2': 100})
testapp.patch_json(file_fastq_3['@id'], {'read_length': 100})
testapp.patch_json(file_fastq_4['@id'], {'read_length': 100})
testapp.patch_json(file_bam_1_1['@id'], {'step_run': analysis_step_run_bam['@id'],
'assembly': 'mm10',
'derived_from': [file_fastq_3['@id']]})
testapp.patch_json(file_bam_2_1['@id'], {'step_run': analysis_step_run_bam['@id'],
'assembly': 'mm10',
'derived_from': [file_fastq_4['@id']]})
testapp.patch_json(pipeline_bam['@id'], {'title':
'Transcription factor ChIP-seq pipeline (modERN)'})
testapp.patch_json(biosample_1['@id'], {'donor': mouse_donor_1['@id']})
testapp.patch_json(biosample_2['@id'], {'donor': mouse_donor_1['@id']})
testapp.patch_json(biosample_1['@id'], {'organism': '/organisms/mouse/'})
testapp.patch_json(biosample_2['@id'], {'organism': '/organisms/mouse/'})
testapp.patch_json(biosample_1['@id'], {'model_organism_sex': 'mixed'})
testapp.patch_json(biosample_2['@id'], {'model_organism_sex': 'mixed'})
testapp.patch_json(library_1['@id'], {'biosample': biosample_1['@id']})
testapp.patch_json(library_2['@id'], {'biosample': biosample_2['@id']})
testapp.patch_json(replicate_1_1['@id'], {'library': library_1['@id']})
testapp.patch_json(replicate_2_1['@id'], {'library': library_2['@id']})
testapp.patch_json(base_experiment['@id'], {'target': target['@id'],
'status': 'released',
'date_released': '2016-01-01',
'assay_term_name': 'ChIP-seq',
'award': award_modERN['@id']})
res = testapp.get(base_experiment['@id'] + '@@index-data')
assert any(error['category'] ==
'insufficient read depth' for error in collect_audit_errors(res))
def test_audit_experiment_missing_genetic_modification(
testapp,
base_experiment,
recombinant_target,
replicate_1_1,
replicate_2_1,
library_1,
library_2,
biosample_1,
biosample_2,
donor_1,
donor_2):
testapp.patch_json(biosample_1['@id'], {'biosample_term_name': 'K562',
'biosample_term_id': 'EFO:0002067',
'biosample_type': 'cell line',
'donor': donor_1['@id']})
testapp.patch_json(biosample_2['@id'], {'biosample_term_name': 'K562',
'biosample_term_id': 'EFO:0002067',
'biosample_type': 'cell line',
'donor': donor_2['@id']})
testapp.patch_json(library_1['@id'], {'biosample': biosample_1['@id']})
testapp.patch_json(library_2['@id'], {'biosample': biosample_2['@id']})
testapp.patch_json(replicate_1_1['@id'], {'library': library_1['@id']})
testapp.patch_json(replicate_2_1['@id'], {'library': library_2['@id']})
testapp.patch_json(base_experiment['@id'], {'assay_term_name': 'ChIP-seq',
'target': | |
# cogs/misc.py
import datetime
import asyncio
import strawpy
import random
import re
import requests
from PythonGists import PythonGists
from appuselfbot import bot_prefix
from discord.ext import commands
from discord import utils
from cogs.utils.checks import *
from bs4 import BeautifulSoup
from pyfiglet import figlet_format
from urllib.request import Request, urlopen
'''Module for miscellaneous commands'''
class Misc:
def __init__(self, bot):
self.bot = bot
self.regionals = {'a': '\N{REGIONAL INDICATOR SYMBOL LETTER A}', 'b': '\N{REGIONAL INDICATOR SYMBOL LETTER B}', 'c': '\N{REGIONAL INDICATOR SYMBOL LETTER C}',
'd': '\N{REGIONAL INDICATOR SYMBOL LETTER D}', 'e': '\N{REGIONAL INDICATOR SYMBOL LETTER E}', 'f': '\N{REGIONAL INDICATOR SYMBOL LETTER F}',
'g': '\N{REGIONAL INDICATOR SYMBOL LETTER G}', 'h': '\N{REGIONAL INDICATOR SYMBOL LETTER H}', 'i': '\N{REGIONAL INDICATOR SYMBOL LETTER I}',
'j': '\N{REGIONAL INDICATOR SYMBOL LETTER J}', 'k': '\N{REGIONAL INDICATOR SYMBOL LETTER K}', 'l': '\N{REGIONAL INDICATOR SYMBOL LETTER L}',
'm': '\N{REGIONAL INDICATOR SYMBOL LETTER M}', 'n': '\N{REGIONAL INDICATOR SYMBOL LETTER N}', 'o': '\N{REGIONAL INDICATOR SYMBOL LETTER O}',
'p': '\N{REGIONAL INDICATOR SYMBOL LETTER P}', 'q': '\N{REGIONAL INDICATOR SYMBOL LETTER Q}', 'r': '\N{REGIONAL INDICATOR SYMBOL LETTER R}',
's': '\N{REGIONAL INDICATOR SYMBOL LETTER S}', 't': '\N{REGIONAL INDICATOR SYMBOL LETTER T}', 'u': '\N{REGIONAL INDICATOR SYMBOL LETTER U}',
'v': '\N{REGIONAL INDICATOR SYMBOL LETTER V}', 'w': '\N{REGIONAL INDICATOR SYMBOL LETTER W}', 'x': '\N{REGIONAL INDICATOR SYMBOL LETTER X}',
'y': '\N{REGIONAL INDICATOR SYMBOL LETTER Y}', 'z': '\N{REGIONAL INDICATOR SYMBOL LETTER Z}', '0': '0⃣', '1': '1⃣', '2': '2⃣', '3': '3⃣',
'4': '4⃣', '5': '5⃣', '6': '6⃣', '7': '7⃣', '8': '8⃣', '9': '9⃣', '!': '\u2757', '?': '\u2753'}
self.emoji_reg = re.compile(r'<:.+?:([0-9]{15,21})>')
self.ball = ['It is certain', 'It is decidedly so', 'Without a doubt', 'Yes definitely', 'You may rely on it', 'As I see it, yes', 'Most likely', 'Outlook good', 'Yes', 'Signs point to yes', 'Reply hazy try again',
'Ask again later', 'Better not tell you now', 'Cannot predict now', 'Concentrate and ask again', 'Don\'t count on it', 'My reply is no', 'My sources say no', 'Outlook not so good', 'Very doubtful']
emoji_dict = { # these arrays are in order of "most desirable". Put emojis that most convincingly correspond to their letter near the front of each array.
'a' : ['🇦','🅰','🍙','🔼','4⃣'],
'b' : ['🇧','🅱','8⃣'],
'c' : ['🇨','©','🗜'],
'd' : ['🇩','↩'],
'e' : ['🇪','3⃣','📧','💶'],
'f' : ['🇫','🎏'],
'g' : ['🇬','🗜','6⃣','9⃣','⛽'],
'h' : ['🇭','♓'],
'i' : ['🇮','ℹ','🚹','1⃣'],
'j' : ['🇯','🗾'],
'k' : ['🇰','🎋'],
'l' : ['🇱','1⃣','🇮','👢','💷'],
'm' : ['🇲','Ⓜ','📉'],
'n' : ['🇳','♑','🎵'],
'o' : ['🇴','🅾','0⃣','⭕','🔘','⏺','⚪','⚫','🔵','🔴','💫'],
'p' : ['🇵','🅿'],
'q' : ['🇶','♌'],
'r' : ['🇷','®'],
's' : ['🇸','💲','5⃣','⚡','💰','💵'],
't' : ['🇹', '✝','➕','🎚','🌴','7⃣'],
'u' : ['🇺','⛎','🐉'],
'v' : ['🇻','♈','☑'],
'w' : ['🇼','〰','📈'],
'x' : ['🇽','❎','✖','❌','⚒'],
'y' : ['🇾','✌','💴'],
'z' : ['🇿','2⃣'],
'0' : ['0⃣','🅾','0⃣','⭕','🔘','⏺','⚪','⚫','🔵','🔴','💫'],
'1' : ['1⃣','🇮'],
'2' : ['2⃣','🇿'],
'3' : ['3⃣'],
'4' : ['4⃣'],
'5' : ['5⃣','🇸','💲','⚡'],
'6' : ['6⃣'],
'7' : ['7⃣'],
'8' : ['8⃣','🎱','🇧','🅱'],
'9' : ['9⃣'],
'?' : ['❓'],
'!' : ['❗','❕','⚠','❣'],
#emojis that contain more than one letter can also help us react
#letters that we are trying to replace go in front, emoji to use second
#
#if there is any overlap between characters that could be replaced,
#e.g. 💯 vs 🔟, both could replace "10",
#the longest ones & most desirable ones should go at the top
#else you'll have "100" -> "🔟0" instead of "100" -> "💯".
'combination' : [['cool','🆒'],
['back','🔙'],
['soon','🔜'],
['free','🆓'],
['end','🔚'],
['top','🔝'],
['abc','🔤'],
['atm','🏧'],
['new','🆕'],
['sos','🆘'],
['100','💯'],
['loo','💯'],
['zzz','💤'],
['...','💬'],
['ng','🆖'],
['id','🆔'],
['vs','🆚'],
['wc','🚾'],
['ab','🆎'],
['cl','🆑'],
['ok','🆗'],
['up','🆙'],
['10','🔟'],
['11','⏸'],
['ll','⏸'],
['ii','⏸'],
['tm','™'],
['on','🔛'],
['oo','🈁'],
['!?','⁉'],
['!!','‼'],
['21','📅'],
]
}
# used in >react, checks if it's possible to react with the duper string or not
def has_dupe(duper):
        # Remove '<' because those are used to denote a written-out emoji, and there might be
        # more than one of those requested that are not necessarily the same one. The keycap
        # combining character '⃣' appears twice in the number emojis, so it must be stripped too.
        collect_my_duper = list(filter(lambda x : x != '<' and x != '⃣', duper))
return len(set(collect_my_duper)) != len(collect_my_duper)
# used in >react, replaces e.g. 'ng' with '🆖'
def replace_combos(react_me):
for combo in Misc.emoji_dict['combination']:
if combo[0] in react_me:
react_me = react_me.replace(combo[0],combo[1],1)
return react_me
# used in >react, replaces e.g. 'aaaa' with '🇦🅰🍙🔼'
def replace_letters(react_me):
for char in "abcdefghijklmnopqrstuvwxyz0123456789!?":
char_count = react_me.count(char)
if char_count > 1: # there's a duplicate of this letter:
if len(Misc.emoji_dict[char]) >= char_count: # if we have enough different ways to say the letter to complete the emoji chain
i = 0
while i < char_count: # moving goal post necessitates while loop instead of for
if Misc.emoji_dict[char][i] not in react_me:
react_me = react_me.replace(char, Misc.emoji_dict[char][i],1)
else:
char_count += 1 # skip this one because it's already been used by another replacement (e.g. circle emoji used to replace O already, then want to replace 0)
i += 1
else:
if char_count == 1:
react_me = react_me.replace(char, Misc.emoji_dict[char][0])
return react_me
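    # Illustrative sketch of the helpers above (values reflect the emoji_dict as
    # defined in this class; run outside the bot for demonstration):
    #
    #     Misc.replace_combos('cool')   # -> '🆒'
    #     Misc.replace_letters('hi')    # -> '🇭🇮'
    #     Misc.has_dupe('aab')          # -> True (can't react with the same emoji twice)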
@commands.command(pass_context=True)
async def about(self, ctx, txt: str = None):
"""Links to the bot's github page."""
if embed_perms(ctx.message) and txt != 'short':
em = discord.Embed(color=0xad2929, title='\ud83e\udd16 Appu\'s Discord Selfbot', description='**Features:**\n- Custom commands/reactions\n- Save last x images in a channel to your computer\n- Keyword notifier\n'
'- Set/cycle your game status and your avatar\n- Google web and image search\n- MyAnimeList search\n- Spoiler tagging\n'
'- Server info commands\n- Quoting, calculator, creating polls, and much more')
em.add_field(name='\ud83d\udd17 Link to download', value='[Github link](https://github.com/appu1232/Discord-Selfbot/tree/master)')
em.add_field(name='\ud83c\udfa5Quick examples:', value='[Simple commands](http://i.imgur.com/3H9zpop.gif)')
em.set_footer(text='Made by appu1232#2569', icon_url='https://i.imgur.com/RHagTDg.png')
await self.bot.send_message(ctx.message.channel, content=None, embed=em)
else:
await self.bot.send_message(ctx.message.channel, 'https://github.com/appu1232/Selfbot-for-Discord')
await self.bot.delete_message(ctx.message)
@commands.group(aliases=['status'], pass_context=True)
async def stats(self, ctx):
"""Bot stats."""
uptime = (datetime.datetime.now() - self.bot.uptime)
hours, rem = divmod(int(uptime.total_seconds()), 3600)
minutes, seconds = divmod(rem, 60)
days, hours = divmod(hours, 24)
if days:
time = '%s days, %s hours, %s minutes, and %s seconds' % (days, hours, minutes, seconds)
else:
time = '%s hours, %s minutes, and %s seconds' % (hours, minutes, seconds)
try:
game = self.bot.game
except:
game = 'None'
channel_count = 0
for i in self.bot.servers:
channel_count += len(i.channels)
if embed_perms(ctx.message):
em = discord.Embed(title='Bot Stats', color=0x32441c)
em.add_field(name=u'\U0001F553 Uptime', value=time, inline=False)
em.add_field(name=u'\U0001F4E4 Messages sent', value=str(self.bot.icount))
            em.add_field(name=u'\U0001F4E5 Messages received', value=str(self.bot.message_count))
em.add_field(name=u'\u2757 Mentions', value=str(self.bot.mention_count))
em.add_field(name=u'\u2694 Servers', value=str(len(self.bot.servers)))
em.add_field(name=u'\ud83d\udcd1 Channels', value=str(channel_count))
em.add_field(name=u'\u270F Keywords logged', value=str(self.bot.keyword_log))
em.add_field(name=u'\U0001F3AE Game', value=game)
mem_usage = '{:.2f} MiB'.format(__import__('psutil').Process().memory_full_info().uss / 1024**2)
em.add_field(name=u'\U0001F4BE Memory usage:', value=mem_usage)
try:
g = git.cmd.Git(working_dir=os.getcwd())
g.execute(["git", "fetch", "origin", "master"])
version = g.execute(["git", "rev-list", "--right-only", "--count", "master...origin/master"])
commits = g.execute(["git", "rev-list", "--max-count=%s" % version, "origin/master"])
if version == '0':
status = 'Up to date.'
else:
latest = g.execute(["git", "log", "--pretty=oneline", "--abbrev-commit", "--stat", "--pretty", "-%s" % version, "origin/master"])
gist_latest = PythonGists.Gist(description='Latest changes for the selfbot.', content=latest, name='latest.txt')
if version == '1':
status = 'Behind by 1 release. [Latest update.](%s)' % gist_latest
else:
status = '%s releases behind. [Latest updates.](%s)' % (version, gist_latest)
em.add_field(name=u'\U0001f4bb Update status:', value=status)
except:
pass
await self.bot.send_message(ctx.message.channel, content=None, embed=em)
else:
            msg = '**Bot Stats:** ```Uptime: %s\nMessages Sent: %s\nMessages Received: %s\nMentions: %s\nServers: %s\nKeywords logged: %s\nGame: %s```' % (time, str(self.bot.icount), str(self.bot.message_count), str(self.bot.mention_count), str(len(self.bot.servers)), str(self.bot.keyword_log), game)
await self.bot.send_message(ctx.message.channel, bot_prefix + msg)
await self.bot.delete_message(ctx.message)
# 8ball
@commands.command(pass_context=True, aliases=['8ball'])
async def ball8(self, ctx, *, msg: str):
"""Let the 8ball decide your fate. Ex: >8ball Will I get good?"""
answer = random.randint(0, 19)
if embed_perms(ctx.message):
if answer < 10:
color = 0x008000
elif 10 <= answer < 15:
color = 0xFFD700
else:
color = 0xFF0000
em = discord.Embed(color=color)
em.add_field(name='\u2753 Question', value=msg)
em.add_field(name='\ud83c\udfb1 8ball', value=self.ball[answer], inline=False)
await self.bot.send_message(ctx.message.channel, content=None, embed=em)
await self.bot.delete_message(ctx.message)
else:
await self.bot.send_message(ctx.message.channel, '\ud83c\udfb1 ``{}``'.format(random.choice(self.ball)))
# Embeds the message
@commands.command(pass_context=True)
async def embed(self, ctx, *, msg: str = None):
"""Embed given text. Ex: Do >embed for more help
Example: >embed title=test this | description=some words | color=3AB35E | field=name=test value=test
You do NOT need to specify every property, only the ones you want.
**All properties and the syntax (put your custom stuff in place of the <> stuff):
- title=<words>
- description=<words>
- color=<hex_value>
- image=<url_to_image> (must be https)
- thumbnail=<url_to_image>
- author=<words> **OR** author=name=<words> icon=<url_to_image>
- footer=<words> **OR** footer=name=<words> icon=<url_to_image>
- field=name=<words> value=<words> (you can add as many fields as you want)
- ptext=<words>
| |
# coding: utf-8
# Consider the data in the files a100.csv, b100.csv, s057.csv. Try to determine the
# underlying probability distributions of each data set.
# In[3]:
# Using pandas library for CSV reading and table manipulation
import pandas
import matplotlib.pyplot as plt
# In[293]:
# Reading a100.csv dataset from workspace folder and storing into variable a100
a100 = pandas.read_csv('/home/idies/workspace/AS.171.205/data/a100.csv', header=None)
b100 = pandas.read_csv('/home/idies/workspace/AS.171.205/data/b100.csv', header=None)
s057 = pandas.read_csv('/home/idies/workspace/AS.171.205/data/s057.csv', header=None)
# In[294]:
# Quick data exploration of 100, will print top 10 rows
a100.head(10)
# In[295]:
# Summary of numerical fields of all 100 rows
a100.describe()
# In[305]:
# A raw histogram of a100.csv using the default matplotlib histogram method, at 25 bins for clearer distribution
a100.hist(bins=25)
# A raw histogram of b100.csv using the default matplotlib histogram method, at 100 bins for clearer distribution
b100.hist(bins=100)
# A raw histogram of s057.csv using the default matplotlib histogram method, at 15 bins for clearer distribution
s057.hist(bins=15)
# In[338]:
# A raw histogram of the default iPython histogram method, at 15 bins for clearer distribution
# Stepfilled, and axes labeled
plt.figure(figsize=(12,8))
a100.hist(bins=15,histtype='stepfilled', normed=True, color='r', alpha=.5, label='Log Norm')
plt.title("a100 Normal Distribution Histogram")
plt.xlabel("Value")
plt.ylabel("Probability")
plt.legend()
plt.show()
# In[341]:
# A raw histogram of the default iPython histogram method, at 100 bins for clearer distribution
# Stepfilled, and axes labeled for b100
b100.hist(bins=100, histtype='stepfilled', normed=True, color='r', alpha=.5, label='Gaussian/Normal')
plt.title("b100 (Log Norm) Histogram")
plt.xlabel("Value")
plt.ylabel("Probability")
plt.legend()
plt.show()
# In[344]:
# A raw histogram of the default iPython histogram method, at 15 bins for clearer distribution
# Stepfilled, and axes labeled for s057
s057.hist(bins=15, histtype='stepfilled', normed=True, color='r', alpha=.5, label='Binomial')
plt.title("s057 Binomial Distribution Histogram")
plt.xlabel("Value")
plt.ylabel("Probability")
plt.legend()
plt.show()
# ## I found an interesting set of code from StackOverflow user, "tmthydvnprt"
# http://stackoverflow.com/questions/6620471/fitting-empirical-distribution-to-theoretical-ones-with-scipy-python
#
# With his code, every possible scipy.stats distribution is checked through a loop, and the data is plotted with every line from those distributions. Then, whichever distribution fits best is plotted independently. I have slightly modified his code and ran the three csv files through it, but changed the bin size for each csv to make the graph more readable. The computation time for the loop is slow (takes about 30 seconds), so it will take time to print the graphs.
#
# ## a100.csv
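# A minimal sketch of the idea behind the loop below (synthetic data, one candidate
# distribution only): fit the candidate and score it by the sum of squared errors
# against the normalized histogram; the full loop repeats this for every scipy.stats
# distribution and keeps the smallest SSE.
#
#     import numpy as np, scipy.stats as st
#     data = np.random.normal(loc=0, scale=1, size=1000)
#     y, edges = np.histogram(data, bins=50, normed=True)
#     x = (edges[:-1] + edges[1:]) / 2.0
#     loc, scale = st.norm.fit(data)
#     sse = np.sum((y - st.norm.pdf(x, loc=loc, scale=scale)) ** 2)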
# In[37]:
get_ipython().magic('matplotlib inline')
import warnings
import numpy as np
import pandas as pd
import scipy.stats as st
import statsmodels as sm
import matplotlib
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
matplotlib.rcParams['figure.figsize'] = (16.0, 12.0)
matplotlib.style.use('ggplot')
# Create models from data
def best_fit_distribution(data, bins=75, ax=None):
"""Model data by finding best fit distribution to data"""
# Get histogram of original data
y, x = np.histogram(data, bins=bins, normed=True)
x = (x + np.roll(x, -1))[:-1] / 2.0
# Distributions to check
DISTRIBUTIONS = [
st.alpha,st.anglit,st.arcsine,st.beta,st.betaprime,st.bradford,st.burr,st.cauchy,st.chi,st.chi2,st.cosine,
st.dgamma,st.dweibull,st.erlang,st.expon,st.exponnorm,st.exponweib,st.exponpow,st.f,st.fatiguelife,st.fisk,
st.foldcauchy,st.foldnorm,st.frechet_r,st.frechet_l,st.genlogistic,st.genpareto,st.gennorm,st.genexpon,
st.genextreme,st.gausshyper,st.gamma,st.gengamma,st.genhalflogistic,st.gilbrat,st.gompertz,st.gumbel_r,
st.gumbel_l,st.halfcauchy,st.halflogistic,st.halfnorm,st.halfgennorm,st.hypsecant,st.invgamma,st.invgauss,
st.invweibull,st.johnsonsb,st.johnsonsu,st.ksone,st.kstwobign,st.laplace,st.levy,st.levy_l,st.levy_stable,
st.logistic,st.loggamma,st.loglaplace,st.lognorm,st.lomax,st.maxwell,st.mielke,st.nakagami,st.ncx2,st.ncf,
st.nct,st.norm,st.pareto,st.pearson3,st.powerlaw,st.powerlognorm,st.powernorm,st.rdist,st.reciprocal,
st.rayleigh,st.rice,st.recipinvgauss,st.semicircular,st.t,st.triang,st.truncexpon,st.truncnorm,st.tukeylambda,
st.uniform,st.vonmises,st.vonmises_line,st.wald,st.weibull_min,st.weibull_max,st.wrapcauchy
]
# Best holders
best_distribution = st.norm
best_params = (0.0, 1.0)
best_sse = np.inf
# Estimate distribution parameters from data
for distribution in DISTRIBUTIONS:
# Try to fit the distribution
try:
# Ignore warnings from data that can't be fit
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
# fit dist to data
params = distribution.fit(data)
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Calculate fitted PDF and error with fit in distribution
pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
sse = np.sum(np.power(y - pdf, 2.0))
# if an axis was passed in, add this fitted PDF to the plot
try:
if ax:
pd.Series(pdf, x).plot(ax=ax)
except Exception:
pass
# identify if this distribution is better
if best_sse > sse > 0:
best_distribution = distribution
best_params = params
best_sse = sse
except Exception:
pass
return (best_distribution.name, best_params)
def make_pdf(dist, params, size=10000):
"""Generate distributions's Propbability Distribution Function """
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Get sane start and end points of distribution
start = dist.ppf(0.01, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.01, loc=loc, scale=scale)
end = dist.ppf(0.99, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.99, loc=loc, scale=scale)
# Build PDF and turn into pandas Series
x = np.linspace(start, end, size)
y = dist.pdf(x, loc=loc, scale=scale, *arg)
pdf = pd.Series(y, x)
return pdf
# Load data from the a100.csv file
data = pd.read_csv('/home/idies/workspace/AS.171.205/data/a100.csv')
# Plot for comparison
plt.figure(figsize=(12,8))
ax = data.plot(kind='hist', bins=75, normed=True, alpha=0.5, color=plt.rcParams['axes.color_cycle'][1])
# Save plot limits
dataYLim = ax.get_ylim()
# Find best fit distribution
best_fit_name, best_fit_params = best_fit_distribution(data, 200, ax)
best_dist = getattr(st, best_fit_name)
# Update plots
ax.set_ylim(dataYLim)
ax.set_title(u'a100.csv.\n All Fitted Distributions')
ax.set_xlabel(u'Value')
ax.set_ylabel('Probability')
# Make PDF
pdf = make_pdf(best_dist, best_fit_params)
# Display
plt.figure(figsize=(12,8))
ax = pdf.plot(lw=2, label='PDF', legend=True)
data.plot(kind='hist', bins=100, normed=True, alpha=0.5, label='Data', legend=True, ax=ax)
param_names = (best_dist.shapes + ', loc, scale').split(', ') if best_dist.shapes else ['loc', 'scale']
param_str = ', '.join(['{}={:0.2f}'.format(k,v) for k,v in zip(param_names, best_fit_params)])
dist_str = '{}({})'.format(best_fit_name, param_str)
ax.set_title(u'a100.csv. with best fit distribution \n' + 'Best fit is: ' + dist_str)
ax.set_xlabel(u'Value')
ax.set_ylabel('Probability')
# ## Continued, with b100:
#
# Same approach as for a100.csv above (the StackOverflow best-fit loop): every scipy.stats distribution is fitted to the data, the best one by SSE is plotted on its own, and only the histogram bin size changes to keep the graph readable.
#
# ## b100.csv with bins = 50
# In[42]:
get_ipython().magic('matplotlib inline')
import warnings
import numpy as np
import pandas as pd
import scipy.stats as st
import statsmodels as sm
import matplotlib
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
matplotlib.rcParams['figure.figsize'] = (16.0, 12.0)
matplotlib.style.use('ggplot')
# Create models from data
def best_fit_distribution(data, bins=75, ax=None):
"""Model data by finding best fit distribution to data"""
# Get histogram of original data
y, x = np.histogram(data, bins=bins, normed=True)
x = (x + np.roll(x, -1))[:-1] / 2.0
# Distributions to check
DISTRIBUTIONS = [
st.alpha,st.anglit,st.arcsine,st.beta,st.betaprime,st.bradford,st.burr,st.cauchy,st.chi,st.chi2,st.cosine,
st.dgamma,st.dweibull,st.erlang,st.expon,st.exponnorm,st.exponweib,st.exponpow,st.f,st.fatiguelife,st.fisk,
st.foldcauchy,st.foldnorm,st.frechet_r,st.frechet_l,st.genlogistic,st.genpareto,st.gennorm,st.genexpon,
st.genextreme,st.gausshyper,st.gamma,st.gengamma,st.genhalflogistic,st.gilbrat,st.gompertz,st.gumbel_r,
st.gumbel_l,st.halfcauchy,st.halflogistic,st.halfnorm,st.halfgennorm,st.hypsecant,st.invgamma,st.invgauss,
st.invweibull,st.johnsonsb,st.johnsonsu,st.ksone,st.kstwobign,st.laplace,st.levy,st.levy_l,st.levy_stable,
st.logistic,st.loggamma,st.loglaplace,st.lognorm,st.lomax,st.maxwell,st.mielke,st.nakagami,st.ncx2,st.ncf,
st.nct,st.norm,st.pareto,st.pearson3,st.powerlaw,st.powerlognorm,st.powernorm,st.rdist,st.reciprocal,
st.rayleigh,st.rice,st.recipinvgauss,st.semicircular,st.t,st.triang,st.truncexpon,st.truncnorm,st.tukeylambda,
st.uniform,st.vonmises,st.vonmises_line,st.wald,st.weibull_min,st.weibull_max,st.wrapcauchy
]
# Best holders
best_distribution = st.norm
best_params = (0.0, 1.0)
best_sse = np.inf
# Estimate distribution parameters from data
for distribution in DISTRIBUTIONS:
# Try to fit the distribution
try:
# Ignore warnings from data that can't be fit
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
# fit dist to data
params = distribution.fit(data)
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Calculate fitted PDF and error with fit in distribution
pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
sse = np.sum(np.power(y - pdf, 2.0))
# if an axis was passed in, add this fitted PDF to the plot
try:
if ax:
pd.Series(pdf, x).plot(ax=ax)
except Exception:
pass
# identify if this distribution is better
if best_sse > sse > 0:
best_distribution = distribution
best_params = params
best_sse = sse
except Exception:
pass
return (best_distribution.name, best_params)
def make_pdf(dist, params, size=10000):
"""Generate distributions's Propbability Distribution Function """
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Get sane start and end points of distribution
start = dist.ppf(0.01, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.01, loc=loc, scale=scale)
end = dist.ppf(0.99, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.99, loc=loc, scale=scale)
# Build PDF and turn into pandas Series
x = np.linspace(start, end, size)
y = dist.pdf(x, loc=loc, scale=scale, *arg)
pdf = pd.Series(y, x)
return pdf
# Load data from the b100.csv file
data = pd.read_csv('/home/idies/workspace/AS.171.205/data/b100.csv')
# Plot for comparison
plt.figure(figsize=(12,8))
ax = data.plot(kind='hist', bins=75, normed=True, alpha=0.5, color=plt.rcParams['axes.color_cycle'][1])
# Save plot limits
dataYLim = ax.get_ylim()
# Find best fit distribution
best_fit_name, best_fit_params = best_fit_distribution(data, 200, ax)
best_dist = getattr(st, best_fit_name)
# Update plots
ax.set_ylim(dataYLim)
ax.set_title(u'b100.csv.\n All Fitted Distributions')
ax.set_xlabel(u'Value')
ax.set_ylabel('Probability')
# Make PDF
pdf = make_pdf(best_dist, best_fit_params)
# Display
plt.figure(figsize=(12,8))
ax = pdf.plot(lw=2, label='PDF', legend=True)
data.plot(kind='hist', bins=50, normed=True, alpha=0.5, label='Data', legend=True, ax=ax)
param_names = (best_dist.shapes + ', loc, scale').split(', ') if best_dist.shapes else ['loc', 'scale']
param_str = ', '.join(['{}={:0.2f}'.format(k,v) for k,v in zip(param_names, best_fit_params)])
dist_str = '{}({})'.format(best_fit_name, param_str)
ax.set_title(u'b100.csv. with best fit distribution \n' + 'Best fit is: ' + dist_str)
ax.set_xlabel(u'Value')
ax.set_ylabel('Probability')
# ## Continued, with s057.csv:
#
# Same approach as above, applied to s057.csv, with the bin size again adjusted to keep the graph readable. The fitting loop is slow (about 30 seconds), so the graphs take a while to appear.
2857: SChildCategory(b=array([24]), m=array([200]), lbl='프린터 공유기'),
2858: SChildCategory(b=array([14]), m=array([290]), lbl='암워머'),
2859: SChildCategory(b=array([54]), m=array([502]), lbl='미용실'),
2860: SChildCategory(b=array([1]), m=array([1]), lbl='보드게임용품'),
2861: SChildCategory(b=array([34]), m=array([332]), lbl='차량용 접지'),
2862: SChildCategory(b=array([11]), m=array([493]), lbl='인테리어 몰딩'),
2863: SChildCategory(b=array([56]), m=array([510]), lbl='중국(상해)'),
2864: SChildCategory(b=array([27]), m=array([370]), lbl='생크림/휘핑크림'),
2865: SChildCategory(b=array([51]), m=array([449]), lbl='낙지'),
2866: SChildCategory(b=array([42]), m=array([299]), lbl='냉장고 소모품'),
2867: SChildCategory(b=array([11]), m=array([493]), lbl='기타 건축자재'),
2868: SChildCategory(b=array([56]), m=array([504]), lbl='캐나다(기타)'),
2869: SChildCategory(b=array([20]), m=array([348]), lbl='유아동 귀걸이'),
2870: SChildCategory(b=array([12]), m=array([471]), lbl='바다루어대'),
2871: SChildCategory(b=array([1]), m=array([498]), lbl='젬베'),
2872: SChildCategory(b=array([44]), m=array([437]), lbl='모터사이클 오일'),
2873: SChildCategory(b=array([12]), m=array([479]), lbl='바다낚시세트'),
2874: SChildCategory(b=array([34]), m=array([409]), lbl='차량용 재떨이'),
2875: SChildCategory(b=array([34]), m=array([247]), lbl='차량용 가습기'),
2876: SChildCategory(b=array([27]), m=array([284]), lbl='짜장'),
2877: SChildCategory(b=array([8]), m=array([80]), lbl='마이크로화이바이불솜'),
2878: SChildCategory(b=array([10]), m=array([457]), lbl='수의/장례용품'),
2879: SChildCategory(b=array([10]), m=array([457]), lbl='휠체어'),
2880: SChildCategory(b=array([39]), m=array([372]), lbl='CD/DVD 복사기'),
2881: SChildCategory(b=array([10]), m=array([9]), lbl='기타 건강측정용품'),
2882: SChildCategory(b=array([56]), m=array([504]), lbl='멕시코'),
2883: SChildCategory(b=array([34]), m=array([483]), lbl='시동경보기'),
2884: SChildCategory(b=array([22]), m=array([341]), lbl='백일/돌잔치 용품'),
2885: SChildCategory(b=array([1]), m=array([426]), lbl='메트로놈'),
2886: SChildCategory(b=array([34]), m=array([332]), lbl='차량용 배선'),
2887: SChildCategory(b=array([27]), m=array([284]), lbl='간편 도시락'),
2888: SChildCategory(b=array([12]), m=array([482]), lbl='민물낚시바늘'),
2889: SChildCategory(b=array([16]), m=array([305]), lbl='상커버/부속품'),
2890: SChildCategory(b=array([26]), m=array([158]), lbl='연수기'),
2891: SChildCategory(b=array([44]), m=array([117]), lbl='모터사이클 신발'),
2892: SChildCategory(b=array([51]), m=array([438]), lbl='삼치'),
2893: SChildCategory(b=array([36]), m=array([135]), lbl='스텝스툴'),
2894: SChildCategory(b=array([56]), m=array([510]), lbl='중국(기타)'),
2895: SChildCategory(b=array([56]), m=array([503]), lbl='태국(푸켓)'),
2896: SChildCategory(b=array([12]), m=array([534]), lbl='중층찌케이스'),
2897: SChildCategory(b=array([49]), m=array([193]), lbl='태블릿PC 충전기'),
2898: SChildCategory(b=array([8]), m=array([180]), lbl='메모리폼베개'),
2899: SChildCategory(b=array([34]), m=array([262]), lbl='선루프'),
2900: SChildCategory(b=array([40]), m=array([125]), lbl='빨래삶통'),
2901: SChildCategory(b=array([56]), m=array([495]), lbl='오스트리아'),
2902: SChildCategory(b=array([51]), m=array([449]), lbl='기타 수산물'),
2903: SChildCategory(b=array([35]), m=array([395]), lbl='스크랩북'),
2904: SChildCategory(b=array([43]), m=array([442]), lbl='호신스프레이/가스총'),
2905: SChildCategory(b=array([51]), m=array([240]), lbl='노가리'),
2906: SChildCategory(b=array([1]), m=array([309]), lbl='코딩 로봇'),
2907: SChildCategory(b=array([34]), m=array([136]), lbl='킥매트'),
2908: SChildCategory(b=array([34]), m=array([381]), lbl='기타 오일/용품'),
2909: SChildCategory(b=array([51]), m=array([474]), lbl='양배추/양상추'),
2910: SChildCategory(b=array([2]), m=array([199]), lbl='동물로봇'),
2911: SChildCategory(b=array([12]), m=array([475]), lbl='봉돌'),
2912: SChildCategory(b=array([34]), m=array([430]), lbl='차량용 스피커'),
2913: SChildCategory(b=array([12]), m=array([170]), lbl='등산GPS'),
2914: SChildCategory(b=array([10]), m=array([236]), lbl='금연보조용품'),
2915: SChildCategory(b=array([27]), m=array([270]), lbl='닭가슴살캔'),
2916: SChildCategory(b=array([47]), m=array([541]), lbl='패밀리룩'),
2917: SChildCategory(b=array([48]), m=array([307]), lbl='아기두유'),
2918: SChildCategory(b=array([51]), m=array([474]), lbl='미나리'),
2919: SChildCategory(b=array([27]), m=array([270]), lbl='연어통조림'),
2920: SChildCategory(b=array([13]), m=array([347]), lbl='럭비용품'),
2921: SChildCategory(b=array([34]), m=array([292]), lbl='네온/무드램프'),
2922: SChildCategory(b=array([51]), m=array([438]), lbl='가자미'),
2923: SChildCategory(b=array([26]), m=array([494]), lbl='진동 클렌저'),
2924: SChildCategory(b=array([56]), m=array([511]), lbl='규슈'),
2925: SChildCategory(b=array([12]), m=array([482]), lbl='민물낚시세트'),
2926: SChildCategory(b=array([51]), m=array([474]), lbl='옥수수'),
2927: SChildCategory(b=array([56]), m=array([507]), lbl='사이판'),
2928: SChildCategory(b=array([27]), m=array([211]), lbl='식빵'),
2929: SChildCategory(b=array([47]), m=array([154]), lbl='외국 전통의상'),
2930: SChildCategory(b=array([30]), m=array([478]), lbl='중고 골프풀세트'),
2931: SChildCategory(b=array([51]), m=array([447]), lbl='생닭/부분육'),
2932: SChildCategory(b=array([34]), m=array([381]), lbl='부동액'),
2933: SChildCategory(b=array([1]), m=array([339]), lbl='쪽가위'),
2934: SChildCategory(b=array([13]), m=array([221]), lbl='야구용품'),
2935: SChildCategory(b=array([14]), m=array([487]), lbl='남자가발'),
2936: SChildCategory(b=array([51]), m=array([407]), lbl='영지버섯'),
2937: SChildCategory(b=array([34]), m=array([483]), lbl='차량용 폴딩키'),
2938: SChildCategory(b=array([51]), m=array([449]), lbl='문어'),
2939: SChildCategory(b=array([21]), m=array([25]), lbl='가구바퀴'),
2940: SChildCategory(b=array([13]), m=array([257]), lbl='스포츠 다리토시'),
2941: SChildCategory(b=array([56]), m=array([495]), lbl='체코'),
2942: SChildCategory(b=array([51]), m=array([544]), lbl='생식/선식'),
2943: SChildCategory(b=array([54]), m=array([501]), lbl='치킨/야식'),
2944: SChildCategory(b=array([2]), m=array([2]), lbl='헬스'),
2945: SChildCategory(b=array([51]), m=array([474]), lbl='믹스 채소'),
2946: SChildCategory(b=array([32]), m=array([446]), lbl='일반카메라 필름'),
2947: SChildCategory(b=array([22]), m=array([242]), lbl='스키장비'),
2948: SChildCategory(b=array([27]), m=array([365]), lbl='스테이크'),
2949: SChildCategory(b=array([51]), m=array([447]), lbl='계란'),
2950: SChildCategory(b=array([51]), m=array([217]), lbl='마카다미아'),
2951: SChildCategory(b=array([27]), m=array([385]), lbl='식용색소'),
2952: SChildCategory(b=array([56]), m=array([503]), lbl='필리핀(보라카이)'),
2953: SChildCategory(b=array([21]), m=array([189]), lbl='마네킹'),
2954: SChildCategory(b=array([41]), m=array([303]), lbl='바닐라시럽'),
2955: SChildCategory(b=array([51]), m=array([474]), lbl='호박'),
2956: SChildCategory(b=array([37]), m=array([90]), lbl='팩도구'),
2957: SChildCategory(b=array([51]), m=array([407]), lbl='새송이버섯'),
2958: SChildCategory(b=array([51]), m=array([219, 447, 515]), lbl='양념육'),
2959: SChildCategory(b=array([51]), m=array([378]), lbl='감말랭이'),
2960: SChildCategory(b=array([56]), m=array([511]), lbl='홋카이도(북해도)'),
2961: SChildCategory(b=array([12]), m=array([534]), lbl='중층찌'),
2962: SChildCategory(b=array([23]), m=array([271]), lbl='칫솔/치약세트'),
2963: SChildCategory(b=array([44]), m=array([137]), lbl='자전거 바구니'),
2964: SChildCategory(b=array([14]), m=array([290]), lbl='패딩장갑'),
2965: SChildCategory(b=array([51]), m=array([474]), lbl='브로콜리/셀러리'),
2966: SChildCategory(b=array([12]), m=array([482]), lbl='민물낚시릴대'),
2967: SChildCategory(b=array([2]), m=array([300]), lbl='보행기'),
2968: SChildCategory(b=array([1]), m=array([339]), lbl='와펜'),
2969: SChildCategory(b=array([51]), m=array([438]), lbl='갈치'),
2970: SChildCategory(b=array([37]), m=array([90]), lbl='샤프너'),
2971: SChildCategory(b=array([27]), m=array([284]), lbl='햄버그스테이크/미트볼'),
2972: SChildCategory(b=array([56]), m=array([495]), lbl='스위스'),
2973: SChildCategory(b=array([30]), m=array([478]), lbl='중고 페어웨이우드'),
2974: SChildCategory(b=array([1]), m=array([339]), lbl='골무'),
2975: SChildCategory(b=array([23]), m=array([246]), lbl='세탁 표백제'),
2976: SChildCategory(b=array([12]), m=array([534]), lbl='중층낚싯대'),
2977: SChildCategory(b=array([26]), m=array([321]), lbl='무전원식 비데'),
2978: SChildCategory(b=array([16]), m=array([184]), lbl='제면기'),
2979: SChildCategory(b=array([13]), m=array([87]), lbl='수중랜턴'),
2980: SChildCategory(b=array([2]), m=array([399]), lbl='콩콩이'),
2981: SChildCategory(b=array([56]), m=array([503]), lbl='베트남(다낭)'),
2982: SChildCategory(b=array([22]), m=array([242]), lbl='보드장비'),
2983: SChildCategory(b=array([41]), m=array([102]), lbl='프림'),
2984: SChildCategory(b=array([2]), m=array([23]), lbl='카메라놀이'),
2985: SChildCategory(b=array([23]), m=array([190]), lbl='놀이방매트용품'),
2986: SChildCategory(b=array([42]), m=array([259]), lbl='전기밥솥액세서리'),
2987: SChildCategory(b=array([42]), m=array([254]), lbl='업소용 싱크대/작업대'),
2988: SChildCategory(b=array([56]), m=array([507]), lbl='호주(시드니)'),
2989: SChildCategory(b=array([27]), m=array([420]), lbl='락교/초생강'),
2990: SChildCategory(b=array([51]), m=array([474]), lbl='어린잎/새싹채소'),
2991: SChildCategory(b=array([56]), m=array([504]), lbl='라스베가스'),
2992: SChildCategory(b=array([51]), m=array([438]), lbl='홍어'),
2993: SChildCategory(b=array([56]), m=array([510]), lbl='중국(북경)'),
2994: SChildCategory(b=array([41]), m=array([303]), lbl='메이플시럽'),
2995: SChildCategory(b=array([1]), m=array([116]), lbl='페이퍼토이'),
2996: SChildCategory(b=array([33]), m=array([269]), lbl='수소수제조기'),
2997: SChildCategory(b=array([56]), m=array([503]), lbl='필리핀(세부)'),
2998: SChildCategory(b=array([23]), m=array([379]), lbl='삼륜/쌍둥이형유모차'),
2999: SChildCategory(b=array([44]), m=array([175]), lbl='자전거 스프라켓'),
3000: SChildCategory(b=array([23]), m=array([379]), lbl='유모차 햇빛가리개'),
3001: SChildCategory(b=array([2]), m=array([399]), lbl='지붕차'),
3002: SChildCategory(b=array([23]), m=array([444]), lbl='산모방석'),
3003: SChildCategory(b=array([34]), m=array([406]), lbl='차량용 면도기'),
3004: SChildCategory(b=array([37]), m=array([167]), lbl='볼륨업크림'),
3005: SChildCategory(b=array([45]), m=array([127]), lbl='의자부품/소품'),
3006: SChildCategory(b=array([2]), m=array([134]), lbl='곤충채집용품'),
3007: SChildCategory(b=array([34]), m=array([262]), lbl='안테나볼'),
3008: SChildCategory(b=array([20]), m=array([250]), lbl='남성 귀걸이'),
3009: SChildCategory(b=array([13]), m=array([491]), lbl='볼링가방'),
3010: SChildCategory(b=array([27]), m=array([365]), lbl='족발/편육/바비큐'),
3011: SChildCategory(b=array([39]), m=array([372]), lbl='블루레이 드라이브'),
3012: SChildCategory(b=array([54]), m=array([547]), lbl='배달 도시락'),
3013: SChildCategory(b=array([54]), m=array([547]), lbl='배달 음료'),
3014: SChildCategory(b=array([56]), m=array([495]), lbl='북유럽'),
3015: SChildCategory(b=array([35]), m=array([327]), lbl='화구박스'),
3016: SChildCategory(b=array([11]), m=array([493]), lbl='도어/창호'),
3017: SChildCategory(b=array([1]), m=array([336]), lbl='아코디언'),
3018: SChildCategory(b=array([11]), m=array([415]), lbl='시너'),
3019: SChildCategory(b=array([51]), m=array([515]), lbl='[시리얼] 장조림/카레용'),
3020: SChildCategory(b=array([56]), m=array([504]), lbl='하와이'),
3021: SChildCategory(b=array([10]), m=array([236]), lbl='수면장애관리용품'),
3022: SChildCategory(b=array([51]), m=array([334]), lbl='절임배추/김치속'),
3023: SChildCategory(b=array([1]), m=array([336]), lbl='일반피아노'),
3024: SChildCategory(b=array([55]), m=array([524, 529]), lbl='패키지'),
3025: SChildCategory(b=array([30]), m=array([478]), lbl='중고 유틸리티우드'),
3026: SChildCategory(b=array([51]), m=array([515]), lbl='[시리얼] 갈비/찜/바비큐'),
3027: SChildCategory(b=array([24]), m=array([73]), lbl='트랙볼 마우스'),
3028: SChildCategory(b=array([27]), m=array([337]), lbl='해바라기씨유'),
3029: SChildCategory(b=array([1]), m=array([434]), lbl='기타 관악기'),
3030: SChildCategory(b=array([13]), m=array([123]), lbl='승마운동기'),
3031: SChildCategory(b=array([51]), m=array([352]), lbl='감/홍시'),
3032: SChildCategory(b=array([22]), m=array([26]), lbl='멜빵/벨트'),
3033: SChildCategory(b=array([13]), m=array([380]), lbl='스키/보드 스티커'),
3034: SChildCategory(b=array([13]), m=array([123]), lbl='진동운동기구'),
3035: SChildCategory(b=array([54]), m=array([514]), lbl='카페푸드/도넛/떡'),
3036: SChildCategory(b=array([23]), m=array([295]), lbl='보낭'),
3037: SChildCategory(b=array([8]), m=array([214]), lbl='생활커버류'),
3038: SChildCategory(b=array([25]), m=array([435]), lbl='CDP'),
3039: SChildCategory(b=array([14]), m=array([232]), lbl='수트케이스'),
3040: SChildCategory(b=array([34]), m=array([62]), lbl='룸미러 커버'),
3041: SChildCategory(b=array([56]), m=array([504]), lbl='워싱턴'),
3042: SChildCategory(b=array([10]), m=array([427]), lbl='기타 렌즈용품'),
3043: SChildCategory(b=array([2]), m=array([399]), lbl='시소'),
3044: SChildCategory(b=array([56]), m=array([503]), lbl='베트남(하노이)'),
3045: SChildCategory(b=array([35]), m=array([395]), lbl='포토박스/용품'),
3046: SChildCategory(b=array([54]), m=array([523]), lbl='교육'),
3047: SChildCategory(b=array([12]), m=array([471]), lbl='와이어베이트'),
3048: SChildCategory(b=array([48]), m=array([382]), lbl='4단계분유'),
3049: SChildCategory(b=array([54]), m=array([502]), lbl='기타 헤어/뷰티'),
3050: SChildCategory(b=array([16]), m=array([218]), lbl='시리얼 디스펜서'),
3051: SChildCategory(b=array([56]), m=array([503]), lbl='캄보디아'),
3052: SChildCategory(b=array([1]), m=array([498]), lbl='트라이앵글'),
3053: SChildCategory(b=array([34]), m=array([198]), lbl='성에제거제'),
3054: SChildCategory(b=array([56]), m=array([495]), lbl='러시아'),
3055: SChildCategory(b=array([26]), m=array([549]), lbl='두피관리기'),
3056: SChildCategory(b=array([34]), m=array([262]), lbl='폴대'),
3057: SChildCategory(b=array([56]), m=array([511]), lbl='오키나와'),
3058: SChildCategory(b=array([12]), m=array([471]), lbl='루어낚시바늘'),
3059: SChildCategory(b=array([56]), m=array([503]), lbl='싱가포르'),
3060: SChildCategory(b=array([51]), m=array([449]), lbl='어란'),
3061: SChildCategory(b=array([1]), m=array([336]), lbl='그랜드피아노'),
3062: SChildCategory(b=array([51]), m=array([438]), lbl='장어'),
3063: SChildCategory(b=array([52]), m=array([451]), lbl='매립형내비게이션'),
3064: SChildCategory(b=array([27]), m=array([284]), lbl='하이라이스'),
3065: SChildCategory(b=array([48]), m=array([382]), lbl='특수분유'),
3066: SChildCategory(b=array([47]), m=array([500]), lbl='웨딩 티아라'),
3067: SChildCategory(b=array([51]), m=array([449]), lbl='굴'),
3068: SChildCategory(b=array([34]), m=array([381]), lbl='브레이크오일'),
3069: SChildCategory(b=array([41]), m=array([303]), lbl='설탕시럽'),
3070: SChildCategory(b=array([23]), m=array([29]), lbl='발진크림'),
3071: SChildCategory(b=array([28]), m=array([267]), lbl='밴딩기'),
3072: SChildCategory(b=array([27]), m=array([370]), lbl='마가린'),
3073: SChildCategory(b=array([10]), m=array([356]), lbl='적외선치료기'),
3074: SChildCategory(b=array([37]), m=array([349]), lbl='풋스프레이'),
3075: SChildCategory(b=array([56]), m=array([503]), lbl='말레이시아'),
3076: SChildCategory(b=array([55]), m=array([512]), lbl='제주 렌터카'),
3077: SChildCategory(b=array([13]), m=array([354]), lbl='완력기'),
3078: SChildCategory(b=array([24]), m=array([204]), lbl='OHP/전자칠판'),
3079: SChildCategory(b=array([13]), m=array([380]), lbl='스키/보드 관리용품'),
3080: SChildCategory(b=array([9]), m=array([489]), lbl='커플비치웨어'),
3081: SChildCategory(b=array([44]), m=array([366]), lbl='픽시 자전거'),
3082: SChildCategory(b=array([13]), m=array([535]), lbl='기타검도용품'),
3083: SChildCategory(b=array([23]), m=array([228]), lbl='유축기'),
3084: SChildCategory(b=array([51]), m=array([544]), lbl='수산축산 분말/분태'),
3085: SChildCategory(b=array([34]), m=array([291]), lbl='차량용 국기봉'),
3086: SChildCategory(b=array([16]), m=array([38]), lbl='에그컵'),
3087: SChildCategory(b=array([26]), m=array([278]), lbl='면도기/제모기 소모품'),
3088: SChildCategory(b=array([27]), m=array([392]), lbl='기타 면류'),
3089: SChildCategory(b=array([47]), m=array([500]), lbl='웨딩 꽃팔찌'),
3090: SChildCategory(b=array([54]), m=array([550]), lbl='음료'),
3091: SChildCategory(b=array([27]), m=array([224]), lbl='떡볶이'),
3092: SChildCategory(b=array([33]), m=array([269]), lbl='이온수기'),
3093: SChildCategory(b=array([34]), m=array([406]), lbl='기타 안전/편의용품'),
3094: SChildCategory(b=array([1]), m=array([339]), lbl='실/바늘'),
3095: SChildCategory(b=array([12]), m=array([170]), lbl='등산헬멧'),
3096: SChildCategory(b=array([23]), m=array([425]), lbl='안전문'),
3097: SChildCategory(b=array([3]), m=array([164]), lbl='휴대폰 핸즈프리'),
3098: SChildCategory(b=array([1]), m=array([472]), lbl='장구'),
3099: SChildCategory(b=array([23]), m=array([322]), lbl='투약기/흡입기'),
3100: SChildCategory(b=array([13]), m=array([354]), lbl='파워볼'),
3101: SChildCategory(b=array([51]), m=array([474]), lbl='마/야콘'),
3102: SChildCategory(b=array([13]), m=array([363]), lbl='체육관용품'),
3103: SChildCategory(b=array([56]), m=array([510]), lbl='마카오'),
3104: SChildCategory(b=array([17]), m=array([455]), lbl='부츠삭스'),
3105: SChildCategory(b=array([13]), m=array([452]), lbl='보드데크'),
3106: SChildCategory(b=array([13]), m=array([520]), lbl='스키플레이트'),
3107: SChildCategory(b=array([54]), m=array([551]), lbl='데이터/로밍'),
3108: SChildCategory(b=array([2]), m=array([66]), lbl='유아동 컴퓨터'),
3109: SChildCategory(b=array([26]), m=array([194]), lbl='안마의자 렌탈'),
3110: SChildCategory(b=array([13]), m=array([212]), lbl='격파용품'),
3111: SChildCategory(b=array([51]), m=array([474]), lbl='오이/가지'),
3112: SChildCategory(b=array([42]), m=array([146]), lbl='죽/두부제조기'),
3113: SChildCategory(b=array([26]), m=array([549]), lbl='전기헤어캡'),
3114: SChildCategory(b=array([26]), m=array([34]), lbl='헤어스타일러 소모품'),
3115: SChildCategory(b=array([41]), m=array([249]), lbl='바나나우유'),
3116: SChildCategory(b=array([14]), m=array([251]), lbl='가방 스트랩'),
3117: SChildCategory(b=array([12]), m=array([475]), lbl='어군탐지기'),
3118: SChildCategory(b=array([10]), m=array([356]), lbl='저주파패드'),
3119: SChildCategory(b=array([51]), m=array([544]), lbl='과일채소 분말/분태'),
3120: SChildCategory(b=array([51]), m=array([474]), lbl='우엉/연근'),
3121: SChildCategory(b=array([24]), m=array([459]), lbl='TV카드/DMB수신기'),
3122: SChildCategory(b=array([56]), m=array([503]), lbl='태국(기타)'),
3123: SChildCategory(b=array([13]), m=array([293]), lbl='스키/보드복 세트'),
3124: SChildCategory(b=array([25]), m=array([440]), lbl='노래방 액세서리'),
3125: SChildCategory(b=array([56]), m=array([503]), lbl='인도네시아'),
3126: SChildCategory(b=array([56]), m=array([504]), lbl='로스앤젤레스'),
3127: SChildCategory(b=array([51]), m=array([449]), lbl='대게/킹크랩'),
3128: SChildCategory(b=array([6]), m=array([274]), lbl='배구복'),
3129: SChildCategory(b=array([15]), m=array([359]), lbl='가스온수기'),
3130: SChildCategory(b=array([23]), m=array([115]), lbl='기저귀 교환대'),
3131: SChildCategory(b=array([55]), m=array([524]), lbl='골프'),
3132: SChildCategory(b=array([12]), m=array([234]), lbl='등산원피스'),
3133: SChildCategory(b=array([41]), m=array([552]), lbl='전통주'),
3134: SChildCategory(b=array([51]), m=array([474]), lbl='시금치'),
3135: SChildCategory(b=array([39]), m=array([230]), lbl='SSHD'),
3136: SChildCategory(b=array([47]), m=array([500]), lbl='웨딩 장갑'),
3137: SChildCategory(b=array([18]), m=array([266]), lbl='선파우더'),
3138: SChildCategory(b=array([44]), m=array([137]), lbl='자전거 트레일러'),
3139: SChildCategory(b=array([26]), m=array([494]), lbl='LED 마스크'),
3140: SChildCategory(b=array([23]), m=array([29]), lbl='유아 립밤/립케어'),
3141: SChildCategory(b=array([56]), m=array([511]), lbl='도쿄'),
3142: SChildCategory(b=array([13]), m=array([101]), lbl='물놀이매트'),
3143: SChildCategory(b=array([2]), m=array([399]), lbl='스프링카'),
3144: SChildCategory(b=array([13]), m=array([491]), lbl='볼링용품'),
3145: SChildCategory(b=array([25]), m=array([302]), lbl='어학학습기'),
3146: SChildCategory(b=array([51]), m=array([219]), lbl='[시리얼] 갈비/찜/탕용'),
3147: SChildCategory(b=array([1]), m=array([434]), lbl='트럼본'),
3148: SChildCategory(b=array([10]), m=array([476]), lbl='임신/배란테스트기'),
3149: SChildCategory(b=array([54]), m=array([551]), lbl='게임'),
3150: SChildCategory(b=array([27]), m=array([224]), lbl='꽃빵'),
3151: SChildCategory(b=array([51]), m=array([474]), lbl='콩나물/숙주'),
3152: SChildCategory(b=array([24]), m=array([31]), lbl='플로터'),
3153: SChildCategory(b=array([13]), m=array([452]), lbl='보드세트'),
3154: SChildCategory(b=array([51]), m=array([449]), lbl='꽃게'),
3155: SChildCategory(b=array([42]), m=array([254]), lbl='업소용 가스레인지'),
3156: SChildCategory(b=array([55]), m=array([524]), lbl='자유여행'),
3157: SChildCategory(b=array([10]), m=array([9]), | |
<reponame>cpressey/grin
#!/usr/bin/env python
""" grin searches text files.
"""
from __future__ import print_function
import bisect
import fnmatch
import gzip
import itertools
import os
import re
import shlex
import stat
import sys
from io import UnsupportedOperation
import argparse
if sys.version_info[0] > 2:
to_str = lambda s : s.decode('latin1')
ints2bytes = bytes
else:
to_str = str
ints2bytes = lambda ints : ''.join(map(chr, ints))
#### Constants ####
__version__ = '1.3.1'
# Maintain the numerical order of these constants. We use them for sorting.
PRE = -1
MATCH = 0
POST = 1
# Use file(1)'s choices for what's text and what's not.
TEXTCHARS = ints2bytes([7,8,9,10,12,13,27] + list(range(0x20, 0x100)))
ALLBYTES = ints2bytes(range(256))
COLOR_TABLE = ['black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan',
'white', 'default']
COLOR_STYLE = {
'filename': dict(fg="green", bold=True),
'searchterm': dict(fg="black", bg="yellow"),
}
# gzip magic header bytes.
GZIP_MAGIC = b'\037\213'
# Target amount of data to read into memory at a time.
READ_BLOCKSIZE = 16 * 1024 * 1024
def is_binary_string(bytes):
""" Determine if a string is classified as binary rather than text.
Parameters
----------
bytes : str
Returns
-------
is_binary : bool
"""
nontext = bytes.translate(ALLBYTES, TEXTCHARS)
return bool(nontext)
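# Illustrative note, not part of grin: control bytes outside TEXTCHARS mark a chunk as
# binary, while ordinary text does not, e.g.
#   is_binary_string(b'\x00\x01\x02\x03')  -> True
#   is_binary_string(b'plain text\n')      -> False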
def get_line_offsets(block):
""" Compute the list of offsets in DataBlock 'block' which correspond to
the beginnings of new lines.
Returns: (offset list, count of lines in "current block")
"""
# Note: this implementation based on string.find() benchmarks about twice as
# fast as a list comprehension using re.finditer().
line_offsets = [0]
line_count = 0 # Count of lines inside range [block.start, block.end) *only*
s = block.data
while True:
next_newline = s.find('\n', line_offsets[-1])
if next_newline < 0:
# Tack on a final "line start" corresponding to EOF, if not done already.
# This makes it possible to determine the length of each line by computing
# a difference between successive elements.
if line_offsets[-1] < len(s):
line_offsets.append(len(s))
return (line_offsets, line_count)
else:
line_offsets.append(next_newline + 1)
# Keep track of the count of lines within the "current block"
if next_newline >= block.start and next_newline < block.end:
line_count += 1
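# Illustrative note, not part of grin: for a block whose data is 'ab\ncd\n' with
# start=0 and end=6, get_line_offsets returns ([0, 3, 6], 2) -- the offset of each
# line start plus a final entry at EOF, and the two newlines inside [start, end).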
def colorize(s, fg=None, bg=None, bold=False, underline=False, reverse=False):
""" Wraps a string with ANSI color escape sequences corresponding to the
style parameters given.
All of the color and style parameters are optional.
Parameters
----------
s : str
fg : str
Foreground color of the text. One of (black, red, green, yellow, blue,
magenta, cyan, white, default)
bg : str
Background color of the text. Color choices are the same as for fg.
bold : bool
Whether or not to display the text in bold.
underline : bool
Whether or not to underline the text.
reverse : bool
Whether or not to show the text in reverse video.
Returns
-------
A string with embedded color escape sequences.
"""
style_fragments = []
if fg in COLOR_TABLE:
# Foreground colors go from 30-39
style_fragments.append(COLOR_TABLE.index(fg) + 30)
if bg in COLOR_TABLE:
# Background colors go from 40-49
style_fragments.append(COLOR_TABLE.index(bg) + 40)
if bold:
style_fragments.append(1)
if underline:
style_fragments.append(4)
if reverse:
style_fragments.append(7)
style_start = '\x1b[' + ';'.join(map(str,style_fragments)) + 'm'
style_end = '\x1b[0m'
return style_start + s + style_end
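# Illustrative usage sketch, not part of grin: the 'filename' style defined above would
# be applied as colorize('setup.py', fg='green', bold=True), which returns
# '\x1b[32;1m' + 'setup.py' + '\x1b[0m' (green foreground, bold, then reset).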
class Options(dict):
""" Simple options.
"""
def __init__(self, *args, **kwds):
dict.__init__(self, *args, **kwds)
self.__dict__ = self
def default_options():
""" Populate the default options.
"""
opt = Options(
before_context = 0,
after_context = 0,
show_line_numbers = True,
show_match = True,
show_filename = True,
show_emacs = False,
skip_hidden_dirs=False,
skip_hidden_files=False,
skip_backup_files=True,
skip_dirs=set(),
skip_exts=set(),
skip_symlink_dirs=True,
skip_symlink_files=True,
binary_bytes=4096,
)
return opt
class DataBlock(object):
""" This class holds a block of data read from a file, along with
some preceding and trailing context.
Attributes
----------
data : byte string
start : int
Offset into 'data' where the "current block" begins; everything
prior to this is 'before' context bytes
end : int
Offset into 'data' where the "current block" ends; everything
after this is 'after' context bytes
before_count : int
Number of lines contained in data[:start]
is_last : bool
True if this is the final block in the file
"""
def __init__(self, data='', start=0, end=0, before_count=0, is_last=False):
self.data = data
self.start = start
self.end = end
self.before_count = before_count
self.is_last = is_last
EMPTY_DATABLOCK = DataBlock()
class GrepText(object):
""" Grep a single file for a regex by iterating over the lines in a file.
Attributes
----------
regex : compiled regex
options : Options or similar
"""
def __init__(self, regex, options=None):
# The compiled regex.
self.regex = regex
# An equivalent regex with multiline enabled.
self.regex_m = re.compile(regex.pattern, regex.flags | re.MULTILINE)
# The options object from parsing the configuration and command line.
if options is None:
options = default_options()
self.options = options
def read_block_with_context(self, prev, fp, fp_size):
""" Read a block of data from the file, along with some surrounding
context.
Parameters
----------
prev : DataBlock, or None
The result of the previous application of read_block_with_context(),
or None if this is the first block.
fp : filelike object
The source of block data.
fp_size : int or None
Size of the file in bytes, or None if the size could not be
determined.
Returns
-------
A DataBlock representing the "current" block along with context.
"""
if fp_size is None:
target_io_size = READ_BLOCKSIZE
block_main = to_str(fp.read(target_io_size))
is_last_block = len(block_main) < target_io_size
else:
remaining = max(fp_size - fp.tell(), 0)
target_io_size = min(READ_BLOCKSIZE, remaining)
block_main = to_str(fp.read(target_io_size))
is_last_block = target_io_size == remaining
if prev is None:
if is_last_block:
# FAST PATH: the entire file fits into a single block, so we
# can avoid the overhead of locating lines of 'before' and
# 'after' context.
result = DataBlock(
data = block_main,
start = 0,
end = len(block_main),
before_count = 0,
is_last = True,
)
return result
else:
prev = EMPTY_DATABLOCK
# SLOW PATH: handle the general case of a large file which is split
# across multiple blocks.
# Look back into 'preceding' for some lines of 'before' context.
if prev.end == 0:
before_start = 0
before_count = 0
else:
before_start = prev.end - 1
before_count = 0
for i in range(self.options.before_context):
ofs = prev.data.rfind('\n', 0, before_start)
before_start = ofs
before_count += 1
if ofs < 0:
break
before_start += 1
before_lines = prev.data[before_start:prev.end]
# Using readline() to force this block out to a newline boundary...
curr_block = (prev.data[prev.end:] + block_main +
('' if is_last_block else to_str(fp.readline())))
# Read in some lines of 'after' context.
if is_last_block:
after_lines = ''
else:
after_lines_list = [to_str(fp.readline())
for i in range(self.options.after_context)]
after_lines = ''.join(after_lines_list)
result = DataBlock(
data = before_lines + curr_block + after_lines,
start = len(before_lines),
end = len(before_lines) + len(curr_block),
before_count = before_count,
is_last = is_last_block,
)
return result
def do_grep(self, fp):
""" Do a full grep.
Parameters
----------
fp : filelike object
An open filelike object.
Returns
-------
A list of 4-tuples (lineno, type (POST/PRE/MATCH), line, spans). For
each tuple of type MATCH, **spans** is a list of (start,end) positions
of substrings that matched the pattern.
"""
context = []
line_count = 0
if isinstance(fp, gzip.GzipFile):
fp_size = None # gzipped data is usually longer than the file
else:
try:
file_no = fp.fileno()
except (AttributeError, UnsupportedOperation): # doesn't support fileno()
fp_size = None
else:
status = os.fstat(file_no)
if stat.S_ISREG(status.st_mode):
fp_size = status.st_size
else:
fp_size = None
block = self.read_block_with_context(None, fp, fp_size)
while block.end > block.start:
(block_line_count, block_context) = self.do_grep_block(block,
line_count - block.before_count)
context += block_context
if block.is_last:
break
next_block = self.read_block_with_context(block, fp, fp_size)
if next_block.end > next_block.start:
if block_line_count is None:
# If the file contains N blocks, then in the best case we
# will need to compute line offsets for the first N-1 blocks.
# Most files will fit within a single block, so if there are
# no regex matches then we can typically avoid computing *any*
# line offsets.
(_, block_line_count) = get_line_offsets(block)
line_count += block_line_count
block = next_block
unique_context = self.uniquify_context(context)
return unique_context
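# Illustrative sketch, not part of grin, of the structure do_grep() returns: one
# 4-tuple per reported line, e.g. a matching line whose pattern spans characters
# 4-8 would appear as (lineno, MATCH, 'the matching line\n', [(4, 8)]), alongside
# PRE/POST tuples for any surrounding context lines.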
def do_grep_block(self, block, line_num_offset):
""" Grep a single block of file content.
Parameters
----------
block : DataBlock
A chunk of file data.
line_num_offset: int
The number of lines preceding block.data.
Returns
-------
Tuple of the form
(line_count, list of (lineno, type (POST/PRE/MATCH), line, spans).
'line_count' is either the | |
import unittest
from cpuinfo import *
import helpers
class MockDataSource(object):
bits = '64bit'
cpu_count = 4
is_windows = False
arch_string_raw = 's390x'
uname_string_raw = ''
can_cpuid = False
@staticmethod
def has_proc_cpuinfo():
return True
@staticmethod
def has_dmesg():
return True
@staticmethod
def has_lscpu():
return True
@staticmethod
def cat_proc_cpuinfo():
returncode = 0
output = r'''
vendor_id : IBM/S390
# processors : 4
bogomips per cpu: 2913.00
max thread id : 0
features : esan3 zarch stfle msa ldisp eimm dfp edat etf3eh highgprs te sie
cache0 : level=1 type=Data scope=Private size=96K line_size=256 associativity=6
cache1 : level=1 type=Instruction scope=Private size=64K line_size=256 associativity=4
cache2 : level=2 type=Data scope=Private size=1024K line_size=256 associativity=8
cache3 : level=2 type=Instruction scope=Private size=1024K line_size=256 associativity=8
cache4 : level=3 type=Unified scope=Shared size=49152K line_size=256 associativity=12
cache5 : level=4 type=Unified scope=Shared size=393216K line_size=256 associativity=24
processor 0: version = FF, identification = 14C047, machine = 2827
processor 1: version = FF, identification = 14C047, machine = 2827
processor 2: version = FF, identification = 14C047, machine = 2827
processor 3: version = FF, identification = 14C047, machine = 2827
cpu number : 0
cpu MHz dynamic : 5504
cpu MHz static : 5504
cpu number : 1
cpu MHz dynamic : 5504
cpu MHz static : 5504
cpu number : 2
cpu MHz dynamic : 5504
cpu MHz static : 5504
cpu number : 3
cpu MHz dynamic : 5504
cpu MHz static : 5504
'''
return returncode, output
@staticmethod
def lscpu():
returncode = 0
output = r'''
Architecture: s390x
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Big Endian
CPU(s): 4
On-line CPU(s) list: 0-3
Thread(s) per core: 1
Core(s) per socket: 1
Socket(s) per book: 1
Book(s) per drawer: 1
Drawer(s): 4
Vendor ID: IBM/S390
Machine type: 2827
CPU dynamic MHz: 5504
CPU static MHz: 5504
BogoMIPS: 2913.00
Hypervisor: z/VM 5.4.0
Hypervisor vendor: IBM
Virtualization type: full
Dispatching mode: horizontal
L1d cache: 96K
L1i cache: 64K
L2d cache: 1024K
L2i cache: 1024K
L3 cache: 49152K
L4 cache: 393216K
Flags: esan3 zarch stfle msa ldisp eimm dfp etf3eh highgprs sie
'''
return returncode, output
@staticmethod
def dmesg_a():
returncode = 0
output = r'''
[623985.026158] 000003ffda1f9118 00e1526ff184ab35 00000000800008a0 000003ffda1f90f0
[623985.026161] 0000000080000740 0000000000000000 000002aa4b1cf0a0 000003ffaa476f30
[623985.026165] 000003ffaa428f58 000002aa4b1bf6b0 000003ffa9e22b9e 000003ffda1f8ee0
[623985.026175] User Code: 0000000080000828: c0f4ffffffc0 brcl 15,800007a8
[623985.026175] 000000008000082e: 0707 bcr 0,%r7
[623985.026175] #0000000080000830: a7f40001 brc 15,80000832
[623985.026175] >0000000080000834: 0707 bcr 0,%r7
[623985.026175] 0000000080000836: 0707 bcr 0,%r7
[623985.026175] 0000000080000838: eb7ff0380024 stmg %r7,%r15,56(%r15)
[623985.026175] 000000008000083e: e3f0ff60ff71 lay %r15,-160(%r15)
[623985.026175] 0000000080000844: b9040092 lgr %r9,%r2
[623985.026211] Last Breaking-Event-Address:
[623985.026214] [<0000000080000830>] 0x80000830
[624418.306980] User process fault: interruption code 0038 ilc:3 in libstdc++.so.6.0.23[3ff9d000000+1b9000]
[624418.306992] Failing address: 46726f6200005000 TEID: 46726f6200005800
[624418.306994] Fault in primary space mode while using user ASCE.
[624418.306997] AS:0000000081d081c7 R3:0000000000000024
[624418.307003] CPU: 3 PID: 56744 Comm: try-catch-2.exe Not tainted 4.8.15-300.fc25.s390x #1
[624418.307005] Hardware name: IBM 2827 H43 400 (z/VM)
[624418.307009] task: 00000000f74c1c80 task.stack: 00000000ab6f0000
[624418.307012] User PSW : 0705000180000000 000003ff9d0a7f58
[624418.307016] R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:1 AS:0 CC:0 PM:0 RI:0 EA:3
[624418.307016] User GPRS: 0000000000000000 46726f6200005465 0000000080003528 000003ff9d1bba00
[624418.307024] 000003fff8278e88 000003fff8278dc0 000000008000187a fffffffffffffffd
[624418.307028] 000003ff00000000 000003fff8278e88 0000000080003528 000003ff9d1bba00
[624418.307032] 0000000080003428 000003ff9d172658 000003ff9d0a7f32 000003fff8278d20
[624418.307050] User Code: 000003ff9d0a7f4a: e310a0000004 lg %r1,0(%r10)
[624418.307050] 000003ff9d0a7f50: b904003b lgr %r3,%r11
[624418.307050] #000003ff9d0a7f54: b904002a lgr %r2,%r10
[624418.307050] >000003ff9d0a7f58: e31010200004 lg %r1,32(%r1)
[624418.307050] 000003ff9d0a7f5e: a7590001 lghi %r5,1
[624418.307050] 000003ff9d0a7f62: 4140f0a0 la %r4,160(%r15)
[624418.307050] 000003ff9d0a7f66: 0de1 basr %r14,%r1
[624418.307050] 000003ff9d0a7f68: ec280009007c cgij %r2,0,8,3ff9d0a7f7a
[624418.307061] Last Breaking-Event-Address:
[624418.307065] [<000003ff9d0a7f32>] 0x3ff9d0a7f32
[624418.806616] User process fault: interruption code 0038 ilc:3 in libstdc++.so.6.0.23[3ffac780000+1b9000]
[624418.806627] Failing address: 5465737473756000 TEID: 5465737473756800
[624418.806629] Fault in primary space mode while using user ASCE.
[624418.806633] AS:00000000a44441c7 R3:0000000000000024
[624418.806638] CPU: 3 PID: 56971 Comm: try-catch-9.exe Not tainted 4.8.15-300.fc25.s390x #1
[624418.806641] Hardware name: IBM 2827 H43 400 (z/VM)
[624418.806644] task: 0000000001a9b900 task.stack: 0000000082968000
[624418.806647] User PSW : 0705000180000000 000003ffac827f58
[624418.806650] R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:1 AS:0 CC:0 PM:0 RI:0 EA:3
[624418.806650] User GPRS: 0000000000000000 5465737473756974 00000000800032a4 000003ffac93ba00
[624418.806658] 000003ffdd4f8bb0 000003ffdd4f8ae8 0000000080001338 0000000000000000
[624418.806662] 000003ff00000000 000003ffdd4f8bb0 00000000800032a4 000003ffac93ba00
[624418.806666] 0000000087919e90 000003ffac8f2658 000003ffac827f32 000003ffdd4f8a48
[624418.806683] User Code: 000003ffac827f4a: e310a0000004 lg %r1,0(%r10)
[624418.806683] 000003ffac827f50: b904003b lgr %r3,%r11
[624418.806683] #000003ffac827f54: b904002a lgr %r2,%r10
[624418.806683] >000003ffac827f58: e31010200004 lg %r1,32(%r1)
[624418.806683] 000003ffac827f5e: a7590001 lghi %r5,1
[624418.806683] 000003ffac827f62: 4140f0a0 la %r4,160(%r15)
[624418.806683] 000003ffac827f66: 0de1 basr %r14,%r1
[624418.806683] 000003ffac827f68: ec280009007c cgij %r2,0,8,3ffac827f7a
[624418.806694] Last Breaking-Event-Address:
[624418.806697] [<000003ffac827f32>] 0x3ffac827f32
[624457.542811] User process fault: interruption code 0038 ilc:3 in libstdc++.so.6.0.23[3ffbc080000+1b9000]
[624457.542823] Failing address: 46726f6200005000 TEID: 46726f6200005800
[624457.542825] Fault in primary space mode while using user ASCE.
[624457.542829] AS:0000000002e701c7 R3:0000000000000024
[624457.542834] CPU: 2 PID: 6763 Comm: try-catch-2.exe Not tainted 4.8.15-300.fc25.s390x #1
[624457.542837] Hardware name: IBM 2827 H43 400 (z/VM)
[624457.542840] task: 00000000f7aa0000 task.stack: 0000000003530000
[624457.542844] User PSW : 0705000180000000 000003ffbc127f58
[624457.542847] R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:1 AS:0 CC:0 PM:0 RI:0 EA:3
[624457.542847] User GPRS: 0000000000000000 46726f6200005465 0000000080003528 000003ffbc23ba00
[624457.542856] 000003ffc14f8dd8 000003ffc14f8d10 000000008000187a fffffffffffffffd
[624457.542859] 000003ff00000000 000003ffc14f8dd8 0000000080003528 000003ffbc23ba00
[624457.542863] 0000000080003428 000003ffbc1f2658 000003ffbc127f32 000003ffc14f8c70
[624457.542882] User Code: 000003ffbc127f4a: e310a0000004 lg %r1,0(%r10)
[624457.542882] 000003ffbc127f50: b904003b lgr %r3,%r11
[624457.542882] #000003ffbc127f54: b904002a lgr %r2,%r10
[624457.542882] >000003ffbc127f58: e31010200004 lg %r1,32(%r1)
[624457.542882] 000003ffbc127f5e: a7590001 lghi %r5,1
[624457.542882] 000003ffbc127f62: 4140f0a0 la %r4,160(%r15)
[624457.542882] 000003ffbc127f66: 0de1 basr %r14,%r1
[624457.542882] 000003ffbc127f68: ec280009007c cgij %r2,0,8,3ffbc127f7a
[624457.542893] Last Breaking-Event-Address:
[624457.542896] [<000003ffbc127f32>] 0x3ffbc127f32
[624458.013783] User process fault: interruption code 0038 ilc:3 in libstdc++.so.6.0.23[3ff94f00000+1b9000]
[624458.013795] Failing address: 5465737473756000 TEID: 5465737473756800
[624458.013797] Fault in primary space mode while using user ASCE.
[624458.013801] AS:0000000004be41c7 R3:0000000000000024
[624458.013806] CPU: 1 PID: 6896 Comm: try-catch-9.exe Not tainted 4.8.15-300.fc25.s390x #1
[624458.013809] Hardware name: IBM 2827 H43 400 (z/VM)
[624458.013812] task: 00000000f5b4b900 task.stack: 00000000061f4000
[624458.013815] User PSW : 0705000180000000 000003ff94fa7f58
[624458.013818] R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:1 AS:0 CC:0 PM:0 RI:0 EA:3
[624458.013818] User GPRS: 0000000000000000 5465737473756974 00000000800032a4 000003ff950bba00
[624458.013826] 000003ffd0df96f0 000003ffd0df9628 0000000080001338 0000000000000000
[624458.013830] 000003ff00000000 000003ffd0df96f0 00000000800032a4 000003ff950bba00
[624458.013834] 00000000a19d4e90 000003ff95072658 000003ff94fa7f32 000003ffd0df9588
[624458.013852] User Code: 000003ff94fa7f4a: e310a0000004 lg %r1,0(%r10)
[624458.013852] 000003ff94fa7f50: b904003b lgr %r3,%r11
[624458.013852] #000003ff94fa7f54: b904002a lgr %r2,%r10
[624458.013852] >000003ff94fa7f58: e31010200004 lg %r1,32(%r1)
[624458.013852] 000003ff94fa7f5e: a7590001 lghi %r5,1
[624458.013852] 000003ff94fa7f62: 4140f0a0 la %r4,160(%r15)
[624458.013852] 000003ff94fa7f66: 0de1 basr %r14,%r1
[624458.013852] 000003ff94fa7f68: ec280009007c cgij %r2,0,8,3ff94fa7f7a
[624458.013863] Last Breaking-Event-Address:
[624458.013866] [<000003ff94fa7f32>] 0x3ff94fa7f32
[682281.933336] User process fault: interruption code 003b ilc:3 in cmsysTestProcess[2aa16200000+9000]
[682281.933347] Failing address: 0000000000000000 TEID: 0000000000000400
[682281.933349] Fault in primary space mode while using user ASCE.
[682281.933353] AS:00000000829e01c7 R3:0000000000000024
[682281.933358] CPU: 0 PID: 29755 Comm: cmsysTestProces Not tainted 4.8.15-300.fc25.s390x #1
[682281.933362] Hardware name: IBM 2827 H43 400 (z/VM)
[682281.933365] task: 00000000f5f13900 task.stack: 00000000c2610000
[682281.933368] User PSW : 0705000180000000 000002aa162027a2
[682281.933371] R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:1 AS:0 CC:0 PM:0 RI:0 EA:3
[682281.933371] User GPRS: 0000000000000000 000003ff00000000 0000000000000000 0000000000000001
[682281.933380] 000000000000002e 000003ff7f848c88 000002aa16207430 000003ffe33ff0a0
[682281.933383] 000002aa1620769e 0000000000000000 000003ff7f848d70 000003ff7f848d68
[682281.933388] 000003ff7f928f58 000002aa16207df0 000002aa16202794 000003ffe33feb68
[682281.934367] User Code: 000002aa16202794: e350a0000004 lg %r5,0(%r10)
[682281.934367] 000002aa1620279a: a749002e lghi %r4,46
[682281.934367] #000002aa1620279e: a7390001 lghi %r3,1
[682281.934367] >000002aa162027a2: e54c00040000 mvhi 4,0
[682281.934367] 000002aa162027a8: c02000002867 larl %r2,2aa16207876
[682281.934367] 000002aa162027ae: c0e5fffffabd brasl %r14,2aa16201d28
[682281.934367] 000002aa162027b4: e350b0000004 lg %r5,0(%r11)
[682281.934367] 000002aa162027ba: a749002e lghi %r4,46
[682281.934379] Last Breaking-Event-Address:
[682281.934382] [<000003ff7f6fccb8>] 0x3ff7f6fccb8
[682281.935888] User process fault: interruption code 003b ilc:3 in cmsysTestProcess[2aa36500000+9000]
[682281.935896] Failing address: 0000000000000000 TEID: 0000000000000400
[682281.935900] Fault in primary space mode while using user ASCE.
[682281.935910] AS:00000000ab3f01c7 R3:0000000000000024
[682281.935917] CPU: 0 PID: 29759 Comm: cmsysTestProces Not tainted 4.8.15-300.fc25.s390x #1
[682281.935940] Hardware name: IBM 2827 H43 400 (z/VM)
[682281.935941] task: 0000000083025580 task.stack: 00000000bebf4000
[682281.935942] User PSW : 0705000180000000 000002aa365027a2
[682281.935943] R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:1 AS:0 CC:0 PM:0 RI:0 EA:3
[682281.935943] User GPRS: 0000000000000000 000003ff00000000 0000000000000000 0000000000000001
[682281.935946] 000000000000002e 000003ff9ce48c88 000002aa36507430 000003ffd60febe0
[682281.935947] 000002aa3650769e 0000000000000000 000003ff9ce48d70 000003ff9ce48d68
[682281.935948] 000003ff9cf28f58 000002aa36507df0 000002aa36502794 000003ffd60fe6a8
[682281.935954] User Code: 000002aa36502794: e350a0000004 lg %r5,0(%r10)
[682281.935954] 000002aa3650279a: a749002e lghi %r4,46
[682281.935954] #000002aa3650279e: a7390001 lghi %r3,1
[682281.935954] >000002aa365027a2: e54c00040000 mvhi 4,0
[682281.935954] 000002aa365027a8: c02000002867 larl %r2,2aa36507876
[682281.935954] 000002aa365027ae: c0e5fffffabd brasl %r14,2aa36501d28
[682281.935954] 000002aa365027b4: e350b0000004 lg %r5,0(%r11)
[682281.935954] 000002aa365027ba: a749002e lghi %r4,46
[682281.935964] Last Breaking-Event-Address:
[682281.935965] [<000003ff9ccfccb8>] 0x3ff9ccfccb8
[682695.568959] User process fault: interruption code 0010 ilc:3 in Crash[1000000+1000]
[682695.568971] Failing address: 0000000000000000 TEID: 0000000000000400
[682695.568973] Fault in primary space mode while using user ASCE.
[682695.568977] AS:00000000549a41c7 R3:000000006654c007 S:0000000000000020
[682695.568983] CPU: 0 PID: 6485 Comm: Crash Not tainted 4.8.15-300.fc25.s390x #1
[682695.568986] Hardware name: IBM 2827 H43 400 (z/VM)
[682695.568989] task: 00000000f81fb900 task.stack: 0000000004058000
[682695.568992] User PSW : 0705100180000000 0000000001000776
[682695.568995] R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:1 AS:0 CC:1 PM:0 RI:0 EA:3
[682695.568995] User GPRS: 0000000000000000 0000000000000000 0000000000000001 000003ffd4cfe438
[682695.569003] 000003ffd4cfe448 0090305969303276 0000000001000800 000003ffd4cfe420
[682695.569007] 0000000001000668 0000000000000000 000002aa3e31b1f0 000003ffd4cfe168
[682695.569011] 000003ff91328f58 000002aa3e3251f0 000003ff90d22b9e 000003ffd4cfe168
[682695.572673] User Code: 0000000001000766: b90400bf lgr %r11,%r15
[682695.572673] 000000000100076a: e548b0a00000 mvghi 160(%r11),0
[682695.572673] #0000000001000770: e310b0a00004 lg %r1,160(%r11)
[682695.572673] >0000000001000776: e54c10000001 mvhi 0(%r1),1
[682695.572673] 000000000100077c: a7180000 lhi %r1,0
[682695.572673] 0000000001000780: b9140011 lgfr %r1,%r1
[682695.572673] 0000000001000784: b9040021 lgr %r2,%r1
[682695.572673] 0000000001000788: b3cd00b2 lgdr %r11,%f2
[682695.572686] Last Breaking-Event-Address:
[682695.572690] [<000003ff90d22b9c>] 0x3ff90d22b9c
[699521.918071] User process fault: interruption code 0004 ilc:3 in conftest[1000000+c5000]
[699521.918083] Failing address: 00000000010c6000 TEID: 00000000010c6404
[699521.918085] Fault in primary space mode while using user ASCE.
[699521.918089] AS:00000000a80d41c7 R3:00000000a462c007 S:000000008267e000 P:00000000918ff21d
[699521.918095] CPU: 2 PID: 42951 Comm: conftest Not tainted 4.8.15-300.fc25.s390x #1
[699521.918098] Hardware name: IBM 2827 H43 400 (z/VM)
[699521.918101] task: 00000000f4a41c80 task.stack: 0000000082ff0000
[699521.918104] User PSW : 0705000180000000 000000000100de62
[699521.918107] R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:1 AS:0 CC:0 PM:0 RI:0 EA:3
[699521.918107] User GPRS: fffffffffffffff0 0000000000000000 000003ffde67c020 0000000000000001
[699521.918116] 000003ffde67c0d8 000000000100e590 000000000100e638 000003ffde67c0c0
[699521.918120] 000000000100dca8 000002aa3f932170 0000000000000000 000002aa3f9d0e10
[699521.918124] 000000000100e590 000002aa3f9d1010 000000000100dce6 000003ffde67beb0
[699521.918140] User Code: 000000000100de54: a71affff ahi %r1,-1
[699521.918140] 000000000100de58: 8810001f srl %r1,31
[699521.918140] #000000000100de5c: c41f0005d5a6 strl %r1,10c89a8
[699521.918140] >000000000100de62: c42b0005c7ff stgrl %r2,10c6e60
[699521.918140] 000000000100de68: e310f0a00004 lg %r1,160(%r15)
[699521.918140] 000000000100de6e: ec21000100d9 aghik %r2,%r1,1
[699521.918140] 000000000100de74: eb220003000d sllg %r2,%r2,3
[699521.918140] 000000000100de7a: e320f0a80008 ag %r2,168(%r15)
[699521.918152] Last Breaking-Event-Address:
[699521.918155] [<000000000100dce0>] 0x100dce0
[701836.544344] User process fault: interruption code 0004 ilc:3 in conftest[1000000+c5000]
[701836.544354] Failing address: 00000000010c6000 TEID: 00000000010c6404
[701836.544357] Fault in primary space mode while using user ASCE.
[701836.544360] AS:00000000ef6401c7 R3:00000000b52c0007 S:00000000a9721000 P:00000000ce7c021d
[701836.544367] CPU: 3 PID: 48640 Comm: conftest Not tainted 4.8.15-300.fc25.s390x #1
[701836.544370] Hardware name: IBM 2827 H43 400 (z/VM)
[701836.544374] task: 00000000f5b4b900 task.stack: 000000008287c000
[701836.544377] User PSW : 0705000180000000 000000000100de62
[701836.544380] R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:1 AS:0 CC:0 PM:0 RI:0 EA:3
[701836.544380] User GPRS: fffffffffffffff0 0000000000000000 000003ffeaf7bfa0 0000000000000001
[701836.544389] 000003ffeaf7c058 000000000100e590 000000000100e638 000003ffeaf7c040
[701836.544393] 000000000100dca8 000002aa48a418c0 0000000000000000 000002aa48a4b240
[701836.544397] 000000000100e590 000002aa48a52730 000000000100dce6 000003ffeaf7be30
[701836.544414] User Code: 000000000100de54: a71affff ahi %r1,-1
[701836.544414] 000000000100de58: 8810001f srl %r1,31
[701836.544414] #000000000100de5c: c41f0005d5a6 strl %r1,10c89a8
[701836.544414] >000000000100de62: c42b0005c7ff stgrl %r2,10c6e60
[701836.544414] 000000000100de68: e310f0a00004 lg %r1,160(%r15)
[701836.544414] 000000000100de6e: ec21000100d9 aghik %r2,%r1,1
[701836.544414] 000000000100de74: eb220003000d sllg %r2,%r2,3
[701836.544414] 000000000100de7a: e320f0a80008 ag %r2,168(%r15)
[701836.544427] Last Breaking-Event-Address:
[701836.544429] [<000000000100dce0>] 0x100dce0
[702856.049112] User process fault: interruption code 0004 ilc:3 in conftest[1000000+c5000]
[702856.049125] Failing address: 00000000010c6000 TEID: 00000000010c6404
[702856.049127] Fault in primary space mode while using user ASCE.
[702856.049131] AS:00000000801581c7 R3:00000000a7da4007 S:00000000802e9000 P:00000000a540621d
[702856.049138] CPU: 2 PID: 53342 Comm: conftest Not tainted 4.8.15-300.fc25.s390x #1
[702856.049141] Hardware name: IBM 2827 H43 400 (z/VM)
[702856.049144] task: 00000000f5b49c80 | |
<reponame>tinyg/ironpython2
import sys, itertools, unittest
from test import test_support
from StringIO import StringIO
import ast
import types
def to_tuple(t):
if t is None or isinstance(t, (basestring, int, long, complex)):
return t
elif isinstance(t, list):
return [to_tuple(e) for e in t]
result = [t.__class__.__name__]
if hasattr(t, 'lineno') and hasattr(t, 'col_offset'):
result.append((t.lineno, t.col_offset))
if t._fields is None:
return tuple(result)
for f in t._fields:
result.append(to_tuple(getattr(t, f)))
return tuple(result)
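# Illustrative sketch, not part of the original test file: to_tuple flattens an AST node
# into nested tuples of (class name, (lineno, col_offset), fields...). For example, under
# Python 2, to_tuple(ast.parse("v = 1").body[0]) yields roughly
#   ('Assign', (1, 0), [('Name', (1, 0), 'v', ('Store',))], ('Num', (1, 4), 1))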
# These tests are compiled through "exec"
# There should be at least one test per statement
exec_tests = [
# FunctionDef
"def f(): pass",
"def f(a): pass",
"def f(a=1): pass",
"def f(*args, **kwargs): pass",
# ClassDef
"class C:pass",
# Return
"def f():return 1",
# Delete
"del v",
# Assign
"v = 1",
# AugAssign
"v += 1",
# Print
"print >>f, 1, ",
# For
"for v in v:pass",
# While
"while v:pass",
# If
"if v:pass",
# If elif else
"if v:pass\nelif u:pass\nelse: pass",
# Raise
"raise Exception, 'string'",
# TryExcept
"try:\n pass\nexcept Exception:\n pass",
# TryFinally
"try:\n pass\nfinally:\n pass",
# Assert
"assert v",
# Import
"import sys",
# ImportFrom
"from sys import v",
# Exec
"exec 'v'",
# Global
"global v",
# Expr
"1",
# Pass,
"pass",
# Break
# "break", doesn't work outside a loop
"while x: break",
# Continue
# "continue", doesn't work outside a loop
"while x: continue",
# for statements with naked tuples (see http://bugs.python.org/issue6704)
"for a,b in c: pass",
"[(a,b) for a,b in c]",
"((a,b) for a,b in c)",
# yield makes no sense outside function
"def f(): yield 1",
# CP35001
"def f(): yield",
# comment
"#"
]
# These are compiled through "single"
# because of overlap with "eval", it just tests what
# can't be tested with "eval"
single_tests = [
"1+2"
]
# These are compiled through "eval"
# It should test all expressions
eval_tests = [
# BoolOp
"a and b",
# BinOp
"a + b",
# UnaryOp
"not v",
# Lambda
"lambda:None",
"lambda x: x",
"lambda x: (yield x)",
# Dict
"{ 1:2 }",
# ListComp
"[a for b in c if d]",
# GeneratorExp
"(a for b in c for d in e for f in g)",
"(a for b in c if d)",
"(a for b in c for c in d)",
# Yield
"((yield i) for i in range(5))",
# Compare
"1 < 2 < 3",
# Call
"f(1,2,c=3,*d,**e)",
# Repr
"`v`",
# Num
"10L",
# Str
"'string'",
# Attribute
"a.b",
# Subscript
"a[b:c]",
# Name
"v",
# List
"[1,2,3]",
# Tuple
"1,2,3",
# Combination
"a.b.c.d(a.b[1:2])",
# ellipsis
"a[...]",
# index
"a[1]",
# set
"{a,b,c}",
# DictComp
"{k:v for k,v in li}",
# SetComp
"{e for e in li}",
]
# TODO: expr_context, slice, boolop, operator, unaryop, cmpop, comprehension
# excepthandler, arguments, keywords, alias
class AST_Tests(unittest.TestCase):
def _assertTrueorder(self, ast_node, parent_pos):
if not isinstance(ast_node, ast.AST) or ast_node._fields is None:
return
if isinstance(ast_node, (ast.expr, ast.stmt, ast.excepthandler)):
node_pos = (ast_node.lineno, ast_node.col_offset)
self.assertTrue(node_pos >= parent_pos)
parent_pos = (ast_node.lineno, ast_node.col_offset)
for name in ast_node._fields:
value = getattr(ast_node, name)
if isinstance(value, list):
for child in value:
self._assertTrueorder(child, parent_pos)
elif value is not None:
self._assertTrueorder(value, parent_pos)
def test_compile_from_ast_001(self):
p = ast.parse("-1", mode="eval")
c = compile(p,"<unknown>", mode="eval" )
self.assertEqual( eval(c), -1)
def test_compile_from_ast_002(self):
p = ast.parse("+1", mode="eval")
c = compile(p,"<unknown>", mode="eval" )
self.assertEqual( eval(c), 1)
def test_compile_from_ast_003(self):
p = ast.parse("not True", mode="eval")
c = compile(p,"<unknown>", mode="eval" )
self.assertEqual( eval(c), False)
def test_compile_from_ast_004(self):
p = ast.parse("2+2", mode="eval")
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), 4 )
def test_compile_from_ast_005(self):
p = ast.parse("5-1", mode="eval")
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), 4 )
def test_compile_from_ast_006(self):
p = ast.parse("42%13", mode="eval")
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), 3 )
def test_compile_from_ast_007(self):
p = ast.parse("2**8", mode="eval")
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), 256 )
def test_compile_from_ast_008(self):
p = ast.parse("True or False", mode="eval")
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), True )
def test_compile_from_ast_009(self):
p = ast.parse("True and False", mode="eval")
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), False )
def test_compile_from_ast_010(self):
p = ast.parse("'a'", mode="eval")
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), "a" )
def test_compile_from_ast_011(self):
p = ast.parse("42", mode="eval")
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), 42 )
def test_compile_from_ast_012(self):
p = ast.parse("None", mode="eval")
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), None )
def test_compile_from_ast_013(self):
p = ast.parse("[1,2,3]", mode="eval")
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), [1,2,3] )
def test_compile_from_ast_014(self):
p = ast.parse("{1,2,3}", mode="eval")
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), {1,2,3} )
def test_compile_from_ast_015(self):
p = ast.parse("{1:'a', 2:'b'}", mode="eval")
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), {1:'a',2:'b'} )
def test_compile_from_ast_016(self):
p = ast.parse("1,2", mode="eval")
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), (1,2) )
def test_compile_from_ast_017(self):
p = ast.parse("dict()", mode="eval") # trivial call expression
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), {} )
# parenthesis ?
def test_compile_from_ast_018(self):
p = ast.parse("(1)", mode="eval") # failed attempt to generate parenthesis expression
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), 1 )
def test_compile_from_ast_019(self):
p = ast.parse("[x for x in range(2)]", mode="eval") # list comprehension
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), [0,1] )
def test_compile_from_ast_020(self):
# list comprehension
p = ast.parse("[(x, y, z) for x in [1,2,3] if x!=2 for y in [3,1,4] for z in [7,8,9] if x != y]", mode="eval")
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), [(1, 3, 7), (1, 3, 8), (1, 3, 9), (1, 4, 7),
(1, 4, 8), (1, 4, 9), (3, 1, 7), (3, 1, 8),
(3, 1, 9), (3, 4, 7), (3, 4, 8), (3, 4, 9)] )
def test_compile_from_ast_021(self):
p = ast.parse("2>1", mode="eval") # Compare
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), True )
def test_compile_from_ast_022(self):
p = ast.parse("2>1<3==3", mode="eval") # All comparisons evaluate to True
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), True )
p = ast.parse("1>1<3==3", mode="eval") # False at first position
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), False )
p = ast.parse("2>1<0==0", mode="eval") # False at second position
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), False )
p = ast.parse("2>1<3==1", mode="eval") # False at third position
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), False )
def test_compile_from_ast_023(self):
p = ast.parse("{x for x in range(3) if x!=2 }", mode="eval") # set comprehension
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), {0,1} )
def test_compile_from_ast_024(self):
p = ast.parse("{ x : ord(x) for x in ['a','b'] }", mode="eval") # dict comprehension
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), {'a':97, 'b':98 } )
def test_compile_from_ast_025(self):
p = ast.parse("'a'.upper()", mode="eval") # attribute
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), 'A' )
def test_compile_from_ast_026(self):
p = ast.parse("lambda x: x", mode="eval") # lambda
c = compile(p,"<unknown>", mode="eval")
f = eval(c)
self.assertEqual( f(42),42)
def test_compile_from_ast_027(self):
p = ast.parse("lambda x=42: x", mode="eval") # default argument
c = compile(p,"<unknown>", mode="eval")
f = eval(c)
self.assertEqual( f(),42)
def test_compile_from_ast_028(self):
p = ast.parse("1 if True else 2", mode="eval") # conditional expression
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), 1 )
p = ast.parse("1 if False else 2", mode="eval") # conditional expression
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), 2 )
def test_compile_from_ast_029(self):
p = ast.parse("(x for x in [1,2,3] if x!=1)", mode="eval") # generator
c = compile(p,"<unknown>", mode="eval")
g = eval(c)
self.assertEqual( g.next(), 2 )
self.assertEqual( g.next(), 3 )
self.assertRaises( StopIteration, g.next )
def test_compile_from_ast_030(self):
p = ast.parse("`101`", mode="eval") # repr
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), '101' )
def test_compile_from_ast_031(self):
p = ast.parse("range(13)[10]", mode="eval") # index
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), 10 )
def test_compile_from_ast_032(self):
p = ast.parse("range(42)[1:5]", mode="eval") # slice
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), range(1,5))
def test_compile_from_ast_033(self):
p = ast.parse("range(42)[1:5:2]", mode="eval") # extended? slice
c = compile(p,"<unknown>", mode="eval")
self.assertEqual( eval(c), [1,3])
def test_compile_from_ast_034(self):
# plain generator
page = [ "line1 aaaaa bbb cccccc ddddddddd", "line2 xxxxxxxx yyyyyyy zzzzzz", "line3 ssssss ttttttttttt uuuu" ]
p = ast.parse("(word for line in page for word in line.split())", mode="eval")
c = compile(p,"<unknown>", mode="eval")
g = eval(c)
self.assertEqual( g.next(), 'line1' )
self.assertEqual( g.next(), 'aaaaa' )
self.assertEqual( g.next(), 'bbb')
self.assertEqual( g.next(), 'cccccc' )
self.assertEqual( g.next(), 'ddddddddd' )
self.assertEqual( g.next(), 'line2' )
self.assertEqual( g.next(), 'xxxxxxxx' )
self.assertEqual( g.next(), 'yyyyyyy' )
self.assertEqual( g.next(), 'zzzzzz' )
self.assertEqual( g.next(), 'line3' )
self.assertEqual( g.next(), 'ssssss' )
self.assertEqual( g.next(), 'ttttttttttt' )
        self.assertEqual( g.next(), 'uuuu' )
<reponame>dsaharov/psychojs-server
import json
from logger import log
import os
import tempfile
from werkzeug.utils import secure_filename
from import_study import import_study
import datetime
import shutil
import subprocess
from contextlib import contextmanager
from dateutil.parser import parse as parse_datestr
import urllib
import random
EXPERIMENT_SESSION_TIMEOUT = datetime.timedelta(hours=1)
class ExperimentSession():
def __init__(self, run, token, data_path, session_args={}):
self.token = token
self.run = run
self.data_path = data_path
self.data = {}
self.start_time = datetime.datetime.now()
self.has_data = False
self.is_complete = False
self.session_args = session_args
def log(self, msg, **kwargs):
self.run.log(msg, token=self.token, **kwargs)
def accept_data(self, key, value, save_format):
if key == '_exp_info':
self.log('Got exp_info')
json_value = json.loads(value)
self.log('{} keys: {}'.format(
len(json_value), [x for x in json_value]
))
self.session_args.update(json_value)
else:
self.log('Incoming data, format {}'.format(save_format))
if value is None or len(value) < 1 or value == '\n':
self.log('Data is empty, ignoring.')
else:
self.data[key] = value
self.has_data = True
def save_data(self):
for key in self.data:
if any([s in key for s in ['..', '/', '~']]):
self.log('Skipping {}'.format(key))
continue
target_file = os.path.join(
self.data_path,
key
)
with open(target_file, 'w') as f:
f.write(self.data[key])
self.log('Wrote {}'.format(key))
def set_completed(self, completed):
self.is_complete = completed
def close(self, bulk=False):
self.log('Closing session ({})'.format(
'complete' if self.is_complete else 'incomplete'
))
if self.has_data and \
(self.is_complete or self.run.save_incomplete_data):
self.log('Saving data...')
self.save_data()
self.run.on_session_closed(self, bulk)
def fill_url_params(self, url):
#TODO: super hacky
try:
if '{SECRET_URL}' in url:
url = url.replace(
'{SECRET_URL}', self.run.experiment.secret_url)
if '{' in url and '}' in url:
url = url.format(**self.session_args)
except:
self.log('Error parsing url {}'.format(url))
return url
def get_redirect_url_override(self):
if self.is_complete:
return self.fill_url_params(self.run.completion_url)
else:
return self.fill_url_params(self.run.cancel_url)
class ExperimentRun():
# A run is one deployment of an experiment
# It is expected to yield N sessions of data
# There might be no limit to the number of sessions
# Each run should store its data seperately
# The run might be closed at any time by the researcher
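    # Illustrative lifecycle (a sketch; assumes `exp` is a configured
    # PsychoJsExperiment and the run data directory exists):
    #   run = ExperimentRun(exp, id=1, data_path='data/run_1', size=10)
    #   session = run.open_session(params={})
    #   session.accept_data('results.csv', csv_text, 'CSV')  # csv_text: uploaded file contents
    #   session.set_completed(True)
    #   session.close()  # saves the data and may finish the run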
def __init__(self,
exp,
id,
data_path,
size=None,
access_type='invite-only',
completion_url=None,
cancel_url=None,
save_incomplete_data=True,
briefing_url=None,
debriefing_url=None,
session_args={}):
self.experiment = exp
self.id = id
self.data_path = data_path
# Sessions
self.next_session_token = 0
self.sessions = {}
# Access
self.size = size
self.num_sessions = 0
self.access_type = access_type
# Redirect URLs
self.completion_url = completion_url
self.cancel_url = cancel_url
# Data settings
self.save_incomplete_data = save_incomplete_data
# Briefing
self.briefing_url = briefing_url
self.debriefing_url = debriefing_url
# Session URL params
self.session_args = session_args
self.arg_counts = {}
def to_dict(self):
obj = {
'id' : self.id,
'data_path': self.data_path,
'size': self.size,
'num_sessions': self.num_sessions,
'access_type': self.access_type,
'completion_url': self.completion_url,
'cancel_url': self.cancel_url,
'save_incomplete_data': self.save_incomplete_data,
'briefing_url': self.briefing_url,
'debriefing_url': self.debriefing_url,
'session_args': json.dumps(self.session_args),
'arg_counts': json.dumps(self.arg_counts)
}
return obj
def from_dict(exp, obj):
run = ExperimentRun(
exp,
obj['id'],
obj['data_path'],
obj.get('size'),
obj.get('access_type', 'invite-only'),
obj.get('completion_url'),
obj.get('cancel_url'),
obj.get('save_incomplete_data', True),
obj.get('briefing_url', None),
obj.get('debriefing_url', None),
json.loads(obj.get('session_args', '{}'))
)
run.arg_counts = json.loads(obj.get('arg_counts', '{}'))
run.num_sessions = obj['num_sessions']
return run
def log(self, msg, **kwargs):
self.experiment.log(msg, run=self.id, **kwargs)
def get_remaining_sessions(self):
if self.size is None:
return None
return self.size - self.num_sessions
def get_next_session_token(self):
self.next_session_token += 1
return str(self.next_session_token)
def finish_run(self):
self.log('Run complete')
self.experiment.on_run_finished(self)
def on_session_closed(self, session, bulk=False):
if session.has_data and (
session.is_complete or self.save_incomplete_data):
self.num_sessions += 1
else:
# This submission does not count, undo its session args contribution
for key in session.session_args:
val = session.session_args[key]
self.decrement_arg_count(key, val)
if self.arg_counts[key][val] == 0:
del self.arg_counts[key][val]
token = session.token
del self.sessions[token]
self.experiment.on_session_closed(session, bulk=bulk)
if self.size is not None and self.get_remaining_sessions() == 0:
self.finish_run()
elif not bulk: #TODO: HACK to prevent extra file writes
self.experiment.save_metadata()
def has_session(self, token):
return token in self.sessions
def get_session(self, token):
return self.sessions[token]
def close_all_sessions(self):
for session in list(self.sessions.values()):
#TODO: HACK to prevent extra file writes
session.close(bulk=True)
def timeout_old_sessions(self):
now = datetime.datetime.now()
expired_sessions = [
t for t in self.sessions
if now - self.sessions[t].start_time
> EXPERIMENT_SESSION_TIMEOUT
]
for token in expired_sessions:
self.log('Closing expired session.', token=token)
self.sessions[token].close()
def get_arg_count(self, key, value):
if key not in self.arg_counts:
self.arg_counts[key] = {}
if value not in self.arg_counts[key]:
self.arg_counts[key][value] = 0
return self.arg_counts[key][value]
def set_arg_count(self, key, value, new_count):
self.arg_counts[key][value] = new_count
def increment_arg_count(self, key, value):
self.set_arg_count(key, value, self.get_arg_count(key, value) + 1)
def decrement_arg_count(self, key, value):
self.set_arg_count(key, value, self.get_arg_count(key, value) - 1)
def get_key_counts(self, key):
return [
(v, self.arg_counts[key][v]) for v in self.arg_counts.get(key, {})
]
def parse_arg_string(self, key, value, params):
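        # Value forms handled below (descriptive summary):
        #   'URL(key)'       -> use the incoming request parameter `key`, if present
        #   'uniform(a,b,c)' -> pick the option used least often so far (ties broken at random)
        #   anything else    -> returned unchanged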
if value is None:
return None
if type(value) is str and value.endswith(')'):
if value.startswith('URL('):
url_key = value[len('URL('):-1]
if url_key in params:
return params[url_key]
return None
elif value.startswith('uniform('):
value_list = [x.strip() for x in
value[len('uniform('):-1].split(',')]
lowest_count = None
least_common_values = None
for v in value_list:
count = self.get_arg_count(key, v)
if lowest_count is None or count < lowest_count:
lowest_count = count
least_common_values = [v]
elif count == lowest_count:
least_common_values.append(v)
return random.choice(least_common_values)
return value
def open_session(self, params):
token = self.get_next_session_token()
# Resolve session arguments from given params
session_args = {}
for key in self.session_args:
value = self.parse_arg_string(key, self.session_args[key], params)
if value is not None:
session_args[key] = value
self.increment_arg_count(key, value)
self.log('Opening session', token=token, **session_args)
experiment_session = ExperimentSession(
self,
token,
self.data_path,
session_args
)
self.sessions[token] = experiment_session
return experiment_session
def cancel(self):
self.close_all_sessions()
self.finish_run()
class PsychoJsExperiment():
def __init__(self, server, id, data_path):
self.server = server
self.id = id
self.config = {}
# Filesystem location
self.data_path = data_path
# Current run
self.next_run_id = 1
self.run = None
# Permissions
self.admins = set()
self.secret_url = None
# Group
self.group = None
def log(self, msg, **kwargs):
self.server.log(msg, study=self.id, **kwargs)
def load_config(self):
self.config = {
'experiment': {
'name': self.id,
'fullpath': ';;;;' #TODO: unused
},
'psychoJsManager': { 'URL': './server/' },
'saveFormat': 'CSV' #TODO: unused
}
def is_active(self):
return self.run is not None
def get_access_type(self):
if not self.is_active():
raise ValueError()
return self.run.access_type
def has_session(self, token):
if not self.is_active():
return False
return self.run.has_session(token)
def get_session(self, token):
if not self.is_active():
raise ValueError()
return self.run.get_session(token)
def timeout_old_sessions(self):
if not self.is_active():
raise ValueError()
self.run.timeout_old_sessions()
def get_next_run_id(self):
id = self.next_run_id
self.next_run_id += 1
return id
def start_run(self, **kwargs):
if self.is_active():
raise ValueError()
while True:
id = self.get_next_run_id()
run_data_path = os.path.join(
self.data_path, 'run_{}'.format(id)
)
#TODO: HACK
if not os.path.exists(run_data_path):
break
self.log('Starting run {}'.format(id))
self.run = ExperimentRun(
self,
id,
run_data_path,
**kwargs
)
os.makedirs(run_data_path)
self.save_metadata()
def cancel_run(self):
if not self.is_active():
raise ValueError()
self.run.cancel()
def open_session(self, params):
if not self.is_active():
raise ValueError()
return self.run.open_session(params)
def on_session_closed(self, session, bulk=False):
self.server.on_session_closed(self, session, bulk=bulk)
def get_remaining_sessions(self):
if not self.is_active():
return 0
return self.run.get_remaining_sessions()
def get_total_sessions(self):
if not self.is_active():
return 0
return self.run.size
def get_num_sessions(self):
if not self.is_active():
return 0
return self.run.num_sessions
def has_session_limit(self):
return self.get_remaining_sessions() is not None
def on_run_finished(self, run):
self.run = None
if self.has_secret_url():
self.server.remove_secret_url_code(self.secret_url)
self.save_metadata()
def is_editable_by(self, user):
return user in self.admins
def add_admin(self, user):
self.admins.add(user)
def remove_admin(self, user):
self.admins.remove(user)
def meta_to_json_str(self):
meta = {}
meta['admins'] = list(self.admins)
meta['next_run_id'] = self.next_run_id
meta['group'] = self.group
if self.is_active():
meta['run'] = self.run.to_dict()
return json.dumps(meta)
def load_meta_json_str(self, json_str):
meta = json.loads(json_str)
self.admins = set(meta.get('admins', []))
self.next_run_id = meta.get('next_run_id', 1)
self.group = meta.get('group', None)
if 'run' in meta:
self.run = ExperimentRun.from_dict(self, meta['run'])
def save_metadata(self):
self.log('Saving metadata')
self.server.save_study_metadata(self.id)
def has_secret_url(self):
return self.secret_url is not None
def remove_secret_url(self):
self.secret_url = None
def set_secret_url(self, code):
if self.has_secret_url():
self.server.remove_secret_url_code(self.secret_url)
self.log('New secret url',code=code)
self.secret_url = code
def get_secret_url(self):
return self.secret_url
def has_completion_url(self):
return self.run.completion_url is not None
def get_completion_url(self):
return self.run.completion_url
def has_cancel_url(self):
return self.run.cancel_url is not None
def get_cancel_url(self):
return self.run.cancel_url
def has_briefing_url(self):
return self.run.briefing_url is not None
def get_briefing_url(self):
return self.run.briefing_url
def has_debriefing_url(self):
return self.run.debriefing_url is not None
def get_debriefing_url(self):
return self.run.debriefing_url
def has_session_args(self):
return len(self.run.session_args.keys()) > 0
def get_session_args(self):
return self.run.session_args
def get_key_value_counts(self, key):
return self.run.get_key_counts(key)
class ExperimentServer():
def __init__(self, data_path, study_path, code_generator_fn):
self.experiments = {}
self.data_path = data_path
self.study_path = study_path
# Participant codes
self.participant_codes = {}
# Code generation
self.code_generator_fn = code_generator_fn
# HTTP Session to Experiment Session map
self.user_session_map = {}
self.session_user_map = {}
def log(self, msg, **kwargs):
log(msg, **kwargs)
def get_metafile_path(self, study):
return os.path.join(
self.study_path,
study,
'meta.json'
)
def save_study_metadata(self, study):
with open(self.get_metafile_path(study), 'w') as md:
md.write(self.experiments[study].meta_to_json_str())
def load_study_metadata(self, study):
with open(self.get_metafile_path(study)) as md:
self.experiments[study].load_meta_json_str(
md.read()
)
def load_server_metadata(self, meta_json):
if 'participant_codes' in meta_json:
for props in meta_json['participant_codes']:
code = props['code']
study = props['study']
kwargs = {}
if props.get('is_secret_url', False):
self.get_experiment(study).set_secret_url(code)
kwargs['is_secret_url'] = True
else:
kwargs['timeout'] = parse_datestr(props['timeout'])
kwargs['unique_session'] = props['unique_session']
kwargs['session_limit'] = props['session_limit']
kwargs['session_count'] = props.get('session_count', 0)
self.add_participant_code(study, code=code, **kwargs)
def save_server_metadata(self):
metadata = {}
metadata['participant_codes'] = []
        for
self.lineEdit_predSample = QtWidgets.QLineEdit(self)
self.button_importKeys = QtWidgets.QPushButton('Import Keys', self)
self.button_predBP = QtWidgets.QPushButton('Pred-NMAG', self)
self.button_bpMatSaveDir = QtWidgets.QPushButton('NMAG-SaveDir', self)
self.lineEdit_bpMatSaveDir = QtWidgets.QLineEdit(self)
self.button_startBP = QtWidgets.QPushButton('Save-NMAG', self)
        # Vegetation index (VI) results
self.button_viSaveDir = QtWidgets.QPushButton('VI-SaveDir', self)
self.lineEdit_viSaveDir = QtWidgets.QLineEdit(self)
self.button_viCal = QtWidgets.QPushButton('VI-Cal', self)
        # RGB visualization results
self.groupbox_mat2rgb = QtWidgets.QGroupBox('mat-RGB-Combination', self)
self.button_rMatInput = QtWidgets.QPushButton('R-Mat', self)
self.button_gMatInput = QtWidgets.QPushButton('G-Mat', self)
self.button_bMatInput = QtWidgets.QPushButton('B-Mat', self)
self.lineEdit_rMatDir = QtWidgets.QLineEdit(self)
self.lineEdit_gMatDir = QtWidgets.QLineEdit(self)
self.lineEdit_bMatDir = QtWidgets.QLineEdit(self)
        self.button_matMode = QtWidgets.QPushButton('mat-Mode', self) # composite from pre- or post-correction values
self.cmobox_matMode = QtWidgets.QComboBox(self)
self.button_rgbSaveDir = QtWidgets.QPushButton('RGB-SaveDir', self)
self.lineEdit_rgbSaveDir = QtWidgets.QLineEdit(self)
self.button_startConvert = QtWidgets.QPushButton('Start-Convert', self)
        # Initialization
self.lineEdit_inputRefMatDir.setText('./mat/B5测试样/B5S1000.mat')
self.lineEdit_inputPIFsMatDir.setText('./mat/B5测试样/PIFs-slopes-改.mat')
self.cmobox_rrnMethod.addItems(['NMAG'])
self.cmobox_initValue.addItems(['init', 'maxStds', 'maxMeans'])
self.cmobox_outydValue.addItems(['mean', 'minSSE'])
#
self.cmobox_contrastMethod.addItems(['NMAG'])
self.lineEdit_trainSample.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_testSample.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_predSample.setAlignment(QtCore.Qt.AlignCenter)
self.cmobox_matMode.addItems(['校正前(ref_Values)', '校正后(BP_CorrValues)'])
#
self.table_trainModelKey.setRowCount(4)
self.table_trainModelKey.setColumnCount(1)
self.table_trainModelKey.setHorizontalHeaderLabels(['Key'])
self.table_trainModelKey.setVerticalHeaderLabels(['BpInNode', 'HideLyNum', 'BpHideNode', 'BpOutNode'])
self.table_trainModelKey.setItem(0, 0, QtWidgets.QTableWidgetItem(str('1')))
self.table_trainModelKey.setItem(1, 0, QtWidgets.QTableWidgetItem(str('1')))
self.table_trainModelKey.setItem(2, 0, QtWidgets.QTableWidgetItem(str('10')))
self.table_trainModelKey.setItem(3, 0, QtWidgets.QTableWidgetItem(str('1')))
self.table_trainModelKey.setDisabled(True)
#
self.button_initValue.setDisabled(True)
self.cmobox_initValue.setDisabled(True)
self.button_outydValue.setDisabled(True)
self.cmobox_outydValue.setDisabled(True)
#
self.button_trainBP.setDisabled(True)
self.button_keySave.setDisabled(True)
self.button_imgProcessMap.setDisabled(True)
self.button_testBp.setDisabled(True)
self.button_predBP.setDisabled(True)
self.button_matMode.setDisabled(True)
self.button_startConvert.setDisabled(True)
self.button_viCal.setDisabled(True)
#
self.button_trainBP.setStyleSheet("background-color: blue")
self.button_testBp.setStyleSheet("background-color: blue")
self.button_predBP.setStyleSheet("background-color: blue")
self.button_startConvert.setStyleSheet("background-color: blue")
self.button_viCal.setStyleSheet("background-color: blue")
def layoutUI(self):
#
grid = QtWidgets.QGridLayout(self)
grid.addWidget(self.groupbox_inputMat, 0, 0, 2, 4)
grid.addWidget(self.groupbox_MethodChoose, 2, 0, 2, 4)
grid.addWidget(self.groupbox_Train, 4, 0, 5, 4)
grid.addWidget(self.groupbox_Test, 9, 0, 5, 4)
grid.addWidget(self.groupbox_Pred, 14, 0, 4, 4)
grid.addWidget(self.groupbox_mat2rgb, 0, 6, 6, 4)
grid.addWidget(self.view, 6, 6, 12 ,4)
self.view.setFixedWidth(500)
self.view.setFixedHeight(400)
self.groupbox_Test.setFixedWidth(500)
#
grid_inputMat = QtWidgets.QGridLayout(self.groupbox_inputMat)
grid_rrnChoose = QtWidgets.QGridLayout(self.groupbox_MethodChoose)
grid_train = QtWidgets.QGridLayout(self.groupbox_Train)
grid_test = QtWidgets.QGridLayout(self.groupbox_Test)
grid_pred = QtWidgets.QGridLayout(self.groupbox_Pred)
grid_mat2rgb = QtWidgets.QGridLayout(self.groupbox_mat2rgb)
#
        grid_inputMat.addWidget(self.button_inputRefMatDir, 0, 0, 1, 1) # input-path text boxes
grid_inputMat.addWidget(self.lineEdit_inputRefMatDir, 0, 1, 1, 5)
grid_inputMat.addWidget(self.button_inputPIFsMatDir, 1, 0, 1, 1)
grid_inputMat.addWidget(self.lineEdit_inputPIFsMatDir, 1, 1, 1, 5)
#
        grid_rrnChoose.addWidget(self.button_rrnMethod, 0, 0, 1, 1) # method-selection block
grid_rrnChoose.addWidget(self.cmobox_rrnMethod, 0, 1, 1, 3)
grid_rrnChoose.addWidget(self.button_initValue, 1, 0, 1, 1)
grid_rrnChoose.addWidget(self.cmobox_initValue, 1, 1, 1, 1)
grid_rrnChoose.addWidget(self.button_outydValue, 1, 2, 1, 1)
grid_rrnChoose.addWidget(self.cmobox_outydValue, 1, 3, 1, 1)
#
        grid_train.addWidget(self.button_trainSample, 0, 0, 1, 1) # BP training block
grid_train.addWidget(self.lineEdit_trainSample, 0, 1, 1, 1)
grid_train.addWidget(self.button_trainBP, 1, 0, 1, 1)
grid_train.addWidget(self.button_keySave, 1, 1, 1, 1)
grid_train.addWidget(self.lineEdit_KeySaveDir, 2, 0, 1, 2)
grid_train.addWidget(self.button_imgProcessMap, 3, 0, 1, 1)
grid_train.addWidget(self.lineEdit_imgProcess, 3, 1, 1, 1)
grid_train.addWidget(self.lineEdit_corrOrder, 4, 0, 1, 4)
grid_train.addWidget(self.table_trainModelKey, 0, 2, 4, 2)
#
        grid_pred.addWidget(self.button_predSample, 0, 0, 1, 1) # BP prediction block
grid_pred.addWidget(self.lineEdit_predSample, 0, 1, 1, 1)
grid_pred.addWidget(self.button_importKeys, 0, 2, 1, 1)
grid_pred.addWidget(self.button_predBP, 0, 3, 1, 1)
grid_pred.addWidget(self.button_bpMatSaveDir, 1, 0, 1, 1)
grid_pred.addWidget(self.lineEdit_bpMatSaveDir, 1, 1, 1, 3)
grid_pred.addWidget(self.button_startBP, 2, 3, 1, 1)
#
        grid_test.addWidget(self.button_contrastMethod, 0, 0, 1, 1) # BP test block
grid_test.addWidget(self.cmobox_contrastMethod, 0, 1, 1, 3)
grid_test.addWidget(self.button_testSample, 1, 0, 1, 1)
grid_test.addWidget(self.lineEdit_testSample, 1, 1, 1, 1)
grid_test.addWidget(self.button_testImportKeys, 1, 2, 1, 1)
grid_test.addWidget(self.button_testBp, 1, 3, 1, 1)
grid_test.addWidget(self.button_rmseCal, 2, 0, 1, 1)
grid_test.addWidget(self.lineEdit_rmseCal, 2, 1, 1, 3)
#
grid_mat2rgb.addWidget(self.button_matMode, 0, 0, 1, 1)
grid_mat2rgb.addWidget(self.cmobox_matMode, 0, 1, 1, 1)
grid_mat2rgb.addWidget(self.button_rMatInput, 1, 0, 1, 1)
grid_mat2rgb.addWidget(self.button_gMatInput, 2, 0, 1, 1)
grid_mat2rgb.addWidget(self.button_bMatInput, 3, 0, 1, 1)
grid_mat2rgb.addWidget(self.lineEdit_rMatDir, 1, 1, 1, 3)
grid_mat2rgb.addWidget(self.lineEdit_gMatDir, 2, 1, 1, 3)
grid_mat2rgb.addWidget(self.lineEdit_bMatDir, 3, 1, 1, 3)
grid_mat2rgb.addWidget(self.button_rgbSaveDir, 4, 0, 1, 1)
grid_mat2rgb.addWidget(self.lineEdit_rgbSaveDir, 4, 1, 1, 2)
grid_mat2rgb.addWidget(self.button_startConvert, 4, 3, 1, 1)
grid_mat2rgb.addWidget(self.button_viSaveDir, 5, 0, 1, 1)
grid_mat2rgb.addWidget(self.lineEdit_viSaveDir, 5, 1, 1, 2)
grid_mat2rgb.addWidget(self.button_viCal, 5, 3, 1, 1)
pass
def single_slot(self):
self.button_inputRefMatDir.clicked.connect(self.slot_buttonInputRefDir)
self.button_inputPIFsMatDir.clicked.connect(self.slot_buttonInputPIFsDir)
#
        self.button_trainSample.clicked.connect(self.slot_buttonTrainSample) # load the training samples
        self.button_trainBP.clicked.connect(self.slot_buttonTrainBp) # train the model
        self.button_keySave.clicked.connect(self.slot_buttonKeySave) # save the weights and biases
        self.button_imgProcessMap.clicked.connect(self.slot_buttonImgProcessMap) # monitor the fitting process
        #
        self.button_testSample.clicked.connect(self.slot_buttonTestSample) # load the test samples
        self.button_testImportKeys.clicked.connect(self.slot_buttonImportKeys) # load the BP parameters
        self.button_testBp.clicked.connect(self.slot_buttonTestBp) # compute BP outputs for the test set
        self.button_rmseCal.clicked.connect(self.slot_buttonRMSECal) # RMSE calculation
        #
        self.button_predSample.clicked.connect(self.slot_buttonPredSample) # load the prediction samples
        self.button_importKeys.clicked.connect(self.slot_buttonImportKeys) # load the parameters (same as test)
        self.button_predBP.clicked.connect(self.slot_buttonPredBp) # compute BP outputs for the prediction set
        self.button_startBP.clicked.connect(self.slot_buttonBpStartDir) # save the results
        self.button_bpMatSaveDir.clicked.connect(self.slot_buttonBpMatSaveDir)
        #
        self.button_viSaveDir.clicked.connect(self.slot_buttonVIsaveDir) # choose the NDVI save path
        self.button_viCal.clicked.connect(self.slot_buttonVICal) # compute and save the NDVI values
        #
        self.button_rMatInput.clicked.connect(self.slot_buttonRMatInput) # load the .mat data for each band
        self.button_gMatInput.clicked.connect(self.slot_buttonGMatInput)
        self.button_bMatInput.clicked.connect(self.slot_buttonBMatInput)
        self.button_rgbSaveDir.clicked.connect(self.slot_buttonRgbSaveDir) # choose the RGB save path
        self.button_startConvert.clicked.connect(self.slot_buttonStartConvert) # start the conversion
pass
def slot_buttonInputRefDir(self):
#
        # Load the clipped time-series reflectance data
open_filename = QtWidgets.QFileDialog.getOpenFileName(filter='*.mat')[0]
self.lineEdit_inputRefMatDir.setText(open_filename)
pass
def slot_buttonInputPIFsDir(self):
#
open_filename = QtWidgets.QFileDialog.getOpenFileName(filter='*.mat')[0]
self.lineEdit_inputPIFsMatDir.setText(open_filename)
def slot_buttonLsmCorredSaveDir(self):
#
corredSaveDir = QtWidgets.QFileDialog.getSaveFileName(self, '校正后波段反射率数据', './mat/', '*.mat')[0]
self.lineEdit_lsmCorredSaveDir.setText(corredSaveDir)
'''
    NMAG slot functions
'''
def slot_buttonTrainSample(self):
self.pifsValues = scipy.io.loadmat(self.lineEdit_inputPIFsMatDir.text())['PIFs_Refvalues'] # [num,23]
print(np.shape(np.array(self.pifsValues)))
self.lineEdit_trainSample.setText(str(np.shape(np.array(self.pifsValues))[0]))
self.button_trainBP.setDisabled(False)
self.button_keySave.setDisabled(False)
self.table_trainModelKey.setDisabled(False)
def slot_buttonTrainBp(self):
#
        # Variable initialization
bpInNode = int(self.table_trainModelKey.item(0, 0).text())
bpHideNode = int(self.table_trainModelKey.item(2, 0).text())
bpOutNode = int(self.table_trainModelKey.item(3, 0).text())
#
self.ih_w = []
self.ho_w = []
self.hide_b0 = []
self.out_b0 = []
#
        # Plain BP
        # self.lossFig = myFigure()
        # BP + greedy algorithm (NMAG)
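        # Descriptive summary of the greedy loop below: at each step the code picks
        # the not-yet-corrected image whose PIF values have the smallest summed
        # squared difference to the already-corrected set, trains a one-input,
        # one-output BP net that maps it onto that set, and appends the predictions
        # as additional reference values for the next step.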
if self.cmobox_rrnMethod.currentIndex() == 0:
#
            # Initialization
            m_Outsample = []
            self.corrOrder = [] # correction-order mapping
self.pifsCorrValues = []
#
array_ih_w = np.zeros([len(self.doys), bpInNode, bpHideNode])
array_ho_w = np.zeros([len(self.doys), bpHideNode, bpOutNode])
array_hide_b0 = np.zeros([len(self.doys), bpHideNode])
array_out_b0 = np.zeros([len(self.doys), bpOutNode])
#
            # Choose the initial image
            # 1. add the initial date 20171023
            self.corrOrder.append(0)
            #
            m_Outsample.append(np.array(self.pifsValues)[:, self.corrOrder[0]]) # initial reference values
#
print('First Map is : %s' % self.doys[int(self.corrOrder[0])])
print(np.array(m_Outsample)[0, :])
#
for i in range(1, len(self.doys)):
#
                # Find the index with the smallest expected error
                rmse_sum = np.zeros([len(self.doys)])
                array_Outsample = np.array(m_Outsample) # array of the predicted-value sets
for j in range(len(self.doys)):
if j in self.corrOrder:
rmse_sum[j] = 99999
else:
#
sum = 0.0
for p in range(np.shape(array_Outsample)[0]):
z0 = np.array(self.pifsValues)[:, j] - array_Outsample[p, :] # xi - f(xj)
sum += np.sum(z0 * z0) # sum((xi-f(xj))*(xi-(f(xj)))
rmse_sum[j] = sum
#
index = np.argsort(rmse_sum)[0]
print('\nNow input %s data' % self.doys[index])
self.corrOrder.append(index)
#
                # Input layer
                m_Insample = np.array(self.pifsValues)[:, index] # input values to be corrected
print('Insam:', m_Insample)
#
                train_bp = ag.bpNet(bpInNode, bpHideNode, bpOutNode, imgNum=i, Greedy=True) ## build the loss function directly and run gradient descent
train_bp.bpInitNetFunc()
#
times = 0
err = []
err_time = []
while train_bp.totalErr > 0.0001 and times < 1000:
times += 1
train_bp.bpNetTrainFunc(m_Insample, m_Outsample, imgNum=i, Greedy=True)
if (times + 1) % 10 == 0:
print('Doys %s BP %5d DT:%10.5f\n' % (self.doys[index], (times + 1), train_bp.totalErr))
err.append(train_bp.totalErr)
err_time.append(times + 1)
#
                # Plot the loss curve
                plt.plot(err_time, err)
                #
                # Save the error matrix
                scipy.io.savemat(f'./mat/{self.doys[index]}_HideNode_{bpHideNode}.mat',{'err_time':err_time,'error':err})
                #
                # Append the computed results
#
corrValue = []
for h in range(np.shape(np.array(m_Insample))[0]):
train_bp.bpNetRecognizeFunc(m_Insample[h])
corrValue.extend(train_bp.out_y0.tolist())
#
print('out_y0:', np.array(corrValue))
                m_Outsample.append(corrValue) # add the predictions as reference values to keep the solution locally optimal
                #
                array_ih_w[index, :, :] = train_bp.ih_w # expected shape: [21,1,10]
array_ho_w[index, :, :] = train_bp.ho_w # [21,10,1]
array_hide_b0[index, :] = train_bp.hide_b0 # [21,10]
array_out_b0[index, :] = train_bp.out_b0 # [21,1]
#
            # Save the variables
self.ih_w = array_ih_w.tolist()
self.ho_w = array_ho_w.tolist()
self.hide_b0 = array_hide_b0.tolist()
self.out_b0 = array_out_b0.tolist()
self.pifsCorrValues = m_Outsample
print(np.shape(np.array(m_Outsample)))
print(self.corrOrder)
pass
#
        # Show the figure
xlabel = [10, 50, 100, 300, 500, 1000]
plt.xticks(xlabel, fontsize=5,rotation=45)
ylabel = [0, 1, 2, 3,4,5]
plt.yticks(ylabel, fontsize=5)
self.lossFig.ax.set_title('loss')
self.scence = QtWidgets.QGraphicsScene()
self.scence.addWidget(self.lossFig)
self.view.setScene(self.scence)
def slot_buttonKeySave(self):
#
        # Save the model parameters
#
saveDir = QtWidgets.QFileDialog.getSaveFileName(self, 'BPNet', './mat/', '*.mat')[0]
self.lineEdit_KeySaveDir.setText(saveDir)
#
if saveDir:
            if self.cmobox_rrnMethod.currentIndex() == 1: # for the two-image (pairwise) case
scipy.io.savemat(saveDir,
{'ih_w': self.ih_w, 'ho_w': self.ho_w,
'hide_b0': self.hide_b0, 'out_b0': self.out_b0})
if self.cmobox_rrnMethod.currentIndex() == 3 or self.cmobox_rrnMethod.currentIndex() == 4 or self.cmobox_rrnMethod.currentIndex() == 5:
scipy.io.savemat(saveDir,
{'ih_w': self.ih_w, 'ho_w': self.ho_w,
'hide_b0': self.hide_b0, 'out_b0': self.out_b0, 'corrOrder': self.corrOrder,
'pifsCorrValues': self.pifsCorrValues})
else:
print('Wrong Input!')
    def slot_buttonImgProcessMap(self): # monitor the fitting process
#
bpInNode = int(self.table_trainModelKey.item(0, 0).text())
bpHideNode = int(self.table_trainModelKey.item(2, 0).text())
bpOutNode = int(self.table_trainModelKey.item(3, 0).text())
#
        # Get inSam
        index = int(self.lineEdit_imgProcess.text()) # the image index Xi (x-axis)
inSam = np.array(self.pifsValues)[:, index]
#
        # Get out_y0
train_bp = ag.bpNet(bpInNode, bpHideNode, bpOutNode)
train_bp.bpInitNetFunc()
#
if self.cmobox_rrnMethod.currentIndex() == 3:
tt = index
#
            out_yd = np.array(self.pifsValues)[:, 0] # ground truth is the initial values
            outSam = []
            train_bp.ih_w = np.array(self.ih_w[tt]).astype(float) # fix the parameters
train_bp.ho_w = np.array(self.ho_w[tt]).astype(float)
train_bp.out_b0 = np.array(self.out_b0[tt]).astype(float)
train_bp.hide_b0 = np.array(self.hide_b0[tt]).astype(float)
m_Insample = np.array(self.pifsValues)[:, index]
for h in range(np.shape(np.array(m_Insample))[0]):
train_bp.bpNetRecognizeFunc(m_Insample[h])
outSam.extend(train_bp.out_y0.tolist())
#
print('DOY:', self.doys[index])
print('Insam:', np.array(inSam))
print('out_yd:', np.array(out_yd))
print('out_y0:', np.array(outSam))
if self.cmobox_rrnMethod.currentIndex() == 5:
if self.corrOrder:
corrOrder = self.corrOrder
else:
corrOrder = [0, 1, 6, 17, 20, 7, 16, 2, 15, 5, 3, 4, 19, 18, 14, 9, 13, 8, 11, 12, 10]
self.lineEdit_corrOrder.setText(str(corrOrder))
#
m_Outsample = []
m_Outsample.append(np.array(self.pifsValues)[:, 0])
for i in range(1, len(corrOrder)):
#
                # Get out_yd:
                if index == corrOrder[i]:
                    out_yd = np.mean(m_Outsample, axis=0) # ground truth is the mean of the references
tt = corrOrder[i]
outSam = []
                    train_bp.ih_w = np.array(self.ih_w[tt]).astype(float) # fix the parameters
train_bp.ho_w = np.array(self.ho_w[tt]).astype(float)
train_bp.out_b0 = np.array(self.out_b0[tt]).astype(float)
train_bp.hide_b0 = np.array(self.hide_b0[tt]).astype(float)
m_Insample = np.array(self.pifsValues)[:, tt]
for h in range(np.shape(np.array(m_Insample))[0]):
train_bp.bpNetRecognizeFunc(m_Insample[h])
                        outSam.extend(train_bp.out_y0.tolist()) # tolist() is required here, otherwise the stored values change later
##
print('DOY:', self.doys[index])
print('Insam:', np.array(inSam))
print('out_yd:', np.array(out_yd))
print('out_y0:', np.array(outSam))
#
                    # actual LSM test for comparison
p0 = [1, 20]
keys = leastsq(self.error, p0, args=(m_Insample, out_yd))[0]
#
outSam_lsm = np.round(m_Insample * keys[0] + keys[1], 5)
#
erro = np.sum((outSam_lsm - out_yd) * (outSam_lsm - out_yd)) / 2.0
print(erro)
break
#
tt = corrOrder[i]
corrValue = []
                train_bp.ih_w = np.array(self.ih_w[tt]).astype(float) # fix the parameters
train_bp.ho_w = np.array(self.ho_w[tt]).astype(float)
train_bp.out_b0 = np.array(self.out_b0[tt]).astype(float)
train_bp.hide_b0 = np.array(self.hide_b0[tt]).astype(float)
m_Insample = np.array(self.pifsValues)[:, tt]
for h in range(np.shape(np.array(m_Insample))[0]):
train_bp.bpNetRecognizeFunc(m_Insample[h])
                    corrValue.extend(train_bp.out_y0.tolist()) # tolist() is required here, otherwise the stored values change later
#
                m_Outsample.append(corrValue) # add the predictions as reference values to keep the solution locally optimal
#
        # Plot
#
self.progressFig = myFigure()
m = []
m.append(inSam)
m.append(out_yd)
m.append(outSam)
if self.cmobox_rrnMethod.currentIndex() == 5:
m.append(outSam_lsm)
index222 = np.lexsort([np.array(m)[0, :]])
m = np.array(m)[:, index222]
plt.scatter(m[0, :], m[1, :], s=1, c='b')
plt.plot(m[0, :], m[2, :], c='r')
if self.cmobox_rrnMethod.currentIndex() == 5:
            plt.plot(m[0, :], m[3,
ID (required)
:param DeviceDataSourceInstance body: (required)
:return: DeviceDataSourceInstance
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_device_datasource_instance_with_http_info(device_id, hds_id, body, **kwargs) # noqa: E501
else:
(data) = self.add_device_datasource_instance_with_http_info(device_id, hds_id, body, **kwargs) # noqa: E501
return data
def add_device_datasource_instance_with_http_info(self, device_id, hds_id, body, **kwargs): # noqa: E501
"""add device instance # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_device_datasource_instance_with_http_info(device_id, hds_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int device_id: (required)
:param int hds_id: The device-datasource ID (required)
:param DeviceDataSourceInstance body: (required)
:return: DeviceDataSourceInstance
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['device_id', 'hds_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_device_datasource_instance" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `add_device_datasource_instance`") # noqa: E501
# verify the required parameter 'hds_id' is set
if ('hds_id' not in params or
params['hds_id'] is None):
raise ValueError("Missing the required parameter `hds_id` when calling `add_device_datasource_instance`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `add_device_datasource_instance`") # noqa: E501
if 'device_id' in params and not re.search('\d+', params['device_id'] if type(params['device_id']) is str else str(params['device_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `device_id` when calling `add_device_datasource_instance`, must conform to the pattern `/\d+/`") # noqa: E501
if 'hds_id' in params and not re.search('\d+', params['hds_id'] if type(params['hds_id']) is str else str(params['hds_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `hds_id` when calling `add_device_datasource_instance`, must conform to the pattern `/\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
if 'hds_id' in params:
path_params['hdsId'] = params['hds_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/device/devices/{deviceId}/devicedatasources/{hdsId}/instances', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceDataSourceInstance', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def add_device_datasource_instance_group(self, device_id, device_ds_id, body, **kwargs): # noqa: E501
"""add device datasource instance group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_device_datasource_instance_group(device_id, device_ds_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int device_id: (required)
:param int device_ds_id: The device-datasource ID you'd like to add an instance group for (required)
:param DeviceDataSourceInstanceGroup body: (required)
:return: DeviceDataSourceInstanceGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_device_datasource_instance_group_with_http_info(device_id, device_ds_id, body, **kwargs) # noqa: E501
else:
(data) = self.add_device_datasource_instance_group_with_http_info(device_id, device_ds_id, body, **kwargs) # noqa: E501
return data
def add_device_datasource_instance_group_with_http_info(self, device_id, device_ds_id, body, **kwargs): # noqa: E501
"""add device datasource instance group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_device_datasource_instance_group_with_http_info(device_id, device_ds_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int device_id: (required)
:param int device_ds_id: The device-datasource ID you'd like to add an instance group for (required)
:param DeviceDataSourceInstanceGroup body: (required)
:return: DeviceDataSourceInstanceGroup
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['device_id', 'device_ds_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_device_datasource_instance_group" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `add_device_datasource_instance_group`") # noqa: E501
# verify the required parameter 'device_ds_id' is set
if ('device_ds_id' not in params or
params['device_ds_id'] is None):
raise ValueError("Missing the required parameter `device_ds_id` when calling `add_device_datasource_instance_group`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `add_device_datasource_instance_group`") # noqa: E501
if 'device_id' in params and not re.search('\d+', params['device_id'] if type(params['device_id']) is str else str(params['device_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `device_id` when calling `add_device_datasource_instance_group`, must conform to the pattern `/\d+/`") # noqa: E501
if 'device_ds_id' in params and not re.search('\d+', params['device_ds_id'] if type(params['device_ds_id']) is str else str(params['device_ds_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `device_ds_id` when calling `add_device_datasource_instance_group`, must conform to the pattern `/\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
if 'device_ds_id' in params:
path_params['deviceDsId'] = params['device_ds_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/device/devices/{deviceId}/devicedatasources/{deviceDsId}/groups', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceDataSourceInstanceGroup', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def add_device_group(self, body, **kwargs): # noqa: E501
"""add device group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_device_group(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DeviceGroup body: (required)
:return: DeviceGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_device_group_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.add_device_group_with_http_info(body, **kwargs) # noqa: E501
return data
def add_device_group_with_http_info(self, body, **kwargs): # noqa: E501
"""add device group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_device_group_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DeviceGroup body: (required)
:return: DeviceGroup
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_device_group" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `add_device_group`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/device/groups', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceGroup', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def add_device_group_cluster_alert_conf(self, device_group_id, body, **kwargs): # noqa: E501
"""Add cluster alert configuration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_device_group_cluster_alert_conf(device_group_id, body, async_req=True)
        >>> result = thread.get()
"""
Elements
========
Defines an :class:`.Atom` class for each element.
"""
from __future__ import annotations
import typing
from .atom import Atom
_T = typing.TypeVar('_T', bound='AtomImpl')
class AtomImpl(Atom):
"""
An implementation of the :class:`.Atom` interface.
"""
_atomic_number: typing.ClassVar[int]
def __init_subclass__(cls: type[AtomImpl], **kwargs) -> None:
cls._elements[cls._atomic_number] = cls
def __init__(
self,
id: int,
charge: int = 0,
) -> None:
self._id = id
self._charge = charge
def get_id(self) -> int:
return self._id
def _with_id(self: _T, id: int) -> _T:
"""
Modify the atom.
"""
self._id = id
return self
def with_id(self, id: int) -> AtomImpl:
return self.clone()._with_id(id)
def get_atomic_number(self) -> int:
return self._atomic_number
def get_charge(self) -> int:
return self._charge
def clone(self) -> AtomImpl:
return type(self)(self._id, self._charge)
def __repr__(self) -> str:
charge = (
f', charge={self._charge}' if self._charge != 0 else ''
)
return f'{self.__class__.__name__}({self._id}{charge})'
def __str__(self) -> str:
return repr(self)
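# Illustrative usage (follows directly from the methods above):
#   h = H(0)                 # a hydrogen atom with id 0 and charge 0
#   h.get_atomic_number()    # -> 1
#   h.with_id(5)             # -> a new H with id 5; the original is unchanged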
class H(AtomImpl):
_atomic_number = 1
def clone(self) -> H:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> H:
return self.clone()._with_id(id)
class He(AtomImpl):
_atomic_number = 2
def clone(self) -> He:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> He:
return self.clone()._with_id(id)
class Li(AtomImpl):
_atomic_number = 3
def clone(self) -> Li:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Li:
return self.clone()._with_id(id)
class Be(AtomImpl):
_atomic_number = 4
def clone(self) -> Be:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Be:
return self.clone()._with_id(id)
class B(AtomImpl):
_atomic_number = 5
def clone(self) -> B:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> B:
return self.clone()._with_id(id)
class C(AtomImpl):
_atomic_number = 6
def clone(self) -> C:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> C:
return self.clone()._with_id(id)
class N(AtomImpl):
_atomic_number = 7
def clone(self) -> N:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> N:
return self.clone()._with_id(id)
# "O" is a valid elemental symbol.
class O(AtomImpl): # noqa
_atomic_number = 8
def clone(self) -> O: # noqa
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> O: # noqa
return self.clone()._with_id(id)
class F(AtomImpl):
_atomic_number = 9
def clone(self) -> F:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> F:
return self.clone()._with_id(id)
class Ne(AtomImpl):
_atomic_number = 10
def clone(self) -> Ne:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Ne:
return self.clone()._with_id(id)
class Na(AtomImpl):
_atomic_number = 11
def clone(self) -> Na:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Na:
return self.clone()._with_id(id)
class Mg(AtomImpl):
_atomic_number = 12
def clone(self) -> Mg:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Mg:
return self.clone()._with_id(id)
class Al(AtomImpl):
_atomic_number = 13
def clone(self) -> Al:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Al:
return self.clone()._with_id(id)
class Si(AtomImpl):
_atomic_number = 14
def clone(self) -> Si:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Si:
return self.clone()._with_id(id)
class P(AtomImpl):
_atomic_number = 15
def clone(self) -> P:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> P:
return self.clone()._with_id(id)
class S(AtomImpl):
_atomic_number = 16
def clone(self) -> S:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> S:
return self.clone()._with_id(id)
class Cl(AtomImpl):
_atomic_number = 17
def clone(self) -> Cl:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Cl:
return self.clone()._with_id(id)
class Ar(AtomImpl):
_atomic_number = 18
def clone(self) -> Ar:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Ar:
return self.clone()._with_id(id)
class K(AtomImpl):
_atomic_number = 19
def clone(self) -> K:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> K:
return self.clone()._with_id(id)
class Ca(AtomImpl):
_atomic_number = 20
def clone(self) -> Ca:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Ca:
return self.clone()._with_id(id)
class Sc(AtomImpl):
_atomic_number = 21
def clone(self) -> Sc:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Sc:
return self.clone()._with_id(id)
class Ti(AtomImpl):
_atomic_number = 22
def clone(self) -> Ti:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Ti:
return self.clone()._with_id(id)
class V(AtomImpl):
_atomic_number = 23
def clone(self) -> V:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> V:
return self.clone()._with_id(id)
class Cr(AtomImpl):
_atomic_number = 24
def clone(self) -> Cr:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Cr:
return self.clone()._with_id(id)
class Mn(AtomImpl):
_atomic_number = 25
def clone(self) -> Mn:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Mn:
return self.clone()._with_id(id)
class Fe(AtomImpl):
_atomic_number = 26
def clone(self) -> Fe:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Fe:
return self.clone()._with_id(id)
class Co(AtomImpl):
_atomic_number = 27
def clone(self) -> Co:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Co:
return self.clone()._with_id(id)
class Ni(AtomImpl):
_atomic_number = 28
def clone(self) -> Ni:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Ni:
return self.clone()._with_id(id)
class Cu(AtomImpl):
_atomic_number = 29
def clone(self) -> Cu:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Cu:
return self.clone()._with_id(id)
class Zn(AtomImpl):
_atomic_number = 30
def clone(self) -> Zn:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Zn:
return self.clone()._with_id(id)
class Ga(AtomImpl):
_atomic_number = 31
def clone(self) -> Ga:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Ga:
return self.clone()._with_id(id)
class Ge(AtomImpl):
_atomic_number = 32
def clone(self) -> Ge:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Ge:
return self.clone()._with_id(id)
class As(AtomImpl):
_atomic_number = 33
def clone(self) -> As:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> As:
return self.clone()._with_id(id)
class Se(AtomImpl):
_atomic_number = 34
def clone(self) -> Se:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Se:
return self.clone()._with_id(id)
class Br(AtomImpl):
_atomic_number = 35
def clone(self) -> Br:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Br:
return self.clone()._with_id(id)
class Kr(AtomImpl):
_atomic_number = 36
def clone(self) -> Kr:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Kr:
return self.clone()._with_id(id)
class Rb(AtomImpl):
_atomic_number = 37
def clone(self) -> Rb:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Rb:
return self.clone()._with_id(id)
class Sr(AtomImpl):
_atomic_number = 38
def clone(self) -> Sr:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Sr:
return self.clone()._with_id(id)
class Y(AtomImpl):
_atomic_number = 39
def clone(self) -> Y:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Y:
return self.clone()._with_id(id)
class Zr(AtomImpl):
_atomic_number = 40
def clone(self) -> Zr:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Zr:
return self.clone()._with_id(id)
class Nb(AtomImpl):
_atomic_number = 41
def clone(self) -> Nb:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Nb:
return self.clone()._with_id(id)
class Mo(AtomImpl):
_atomic_number = 42
def clone(self) -> Mo:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Mo:
return self.clone()._with_id(id)
class Tc(AtomImpl):
_atomic_number = 43
def clone(self) -> Tc:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Tc:
return self.clone()._with_id(id)
class Ru(AtomImpl):
_atomic_number = 44
def clone(self) -> Ru:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Ru:
return self.clone()._with_id(id)
class Rh(AtomImpl):
_atomic_number = 45
def clone(self) -> Rh:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Rh:
return self.clone()._with_id(id)
class Pd(AtomImpl):
_atomic_number = 46
def clone(self) -> Pd:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Pd:
return self.clone()._with_id(id)
class Ag(AtomImpl):
_atomic_number = 47
def clone(self) -> Ag:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Ag:
return self.clone()._with_id(id)
class Cd(AtomImpl):
_atomic_number = 48
def clone(self) -> Cd:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Cd:
return self.clone()._with_id(id)
class In(AtomImpl):
_atomic_number = 49
def clone(self) -> In:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> In:
return self.clone()._with_id(id)
class Sn(AtomImpl):
_atomic_number = 50
def clone(self) -> Sn:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Sn:
return self.clone()._with_id(id)
class Sb(AtomImpl):
_atomic_number = 51
def clone(self) -> Sb:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Sb:
return self.clone()._with_id(id)
class Te(AtomImpl):
_atomic_number = 52
def clone(self) -> Te:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Te:
return self.clone()._with_id(id)
# "I" is a valid elemental symbol.
class I(AtomImpl): # noqa
_atomic_number = 53
def clone(self) -> I: # noqa
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> I: # noqa
return self.clone()._with_id(id)
class Xe(AtomImpl):
_atomic_number = 54
def clone(self) -> Xe:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Xe:
return self.clone()._with_id(id)
class Cs(AtomImpl):
_atomic_number = 55
def clone(self) -> Cs:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Cs:
return self.clone()._with_id(id)
class Ba(AtomImpl):
_atomic_number = 56
def clone(self) -> Ba:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Ba:
return self.clone()._with_id(id)
class La(AtomImpl):
_atomic_number = 57
def clone(self) -> La:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> La:
return self.clone()._with_id(id)
class Ce(AtomImpl):
_atomic_number = 58
def clone(self) -> Ce:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Ce:
return self.clone()._with_id(id)
class Pr(AtomImpl):
_atomic_number = 59
def clone(self) -> Pr:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Pr:
return self.clone()._with_id(id)
class Nd(AtomImpl):
_atomic_number = 60
def clone(self) -> Nd:
return type(self)(self._id, self._charge)
def with_id(self, id: int) -> Nd:
return self.clone()._with_id(id)
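# Hedged usage sketch for the element classes above. It assumes AtomImpl's
# constructor takes (id, charge) positionally, as clone() implies, and that
# _with_id() applies the new id to the clone; both details are assumptions
# not shown in this listing.
def _clone_with_id_sketch() -> None:
    original = Fe(0, 0)
    renumbered = original.with_id(5)
    # with_id() operates on a clone, so the original instance is untouched.
    assert renumbered is not original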
point_cloud = list()
point_cloud.append((4.157333 ,5.020408 ,6.752996))
point_cloud.append((2.641909 ,5.020408 ,6.808028))
point_cloud.append((2.642918 ,4.106757 ,6.802344))
point_cloud.append((4.155944 ,4.106757 ,6.747401))
point_cloud.append((1.685477 ,5.020408 ,-0.064397))
point_cloud.append((3.157898 ,5.020408 ,-0.428619))
point_cloud.append((3.158093 ,4.106757 ,-0.422854))
point_cloud.append((1.687998 ,4.106757 ,-0.059209))
point_cloud.append((1.695509 ,1.385707 ,-0.043759))
point_cloud.append((3.158668 ,1.385707 ,-0.405691))
point_cloud.append((3.158852 ,0.518685 ,-0.400222))
point_cloud.append((1.697904 ,0.518685 ,-0.038836))
point_cloud.append((0.203099 ,1.385707 ,5.132862))
point_cloud.append((-0.307195 ,1.385707 ,3.714816))
point_cloud.append((-0.301774 ,0.518685 ,3.714062))
point_cloud.append((0.207746 ,0.518685 ,5.129967))
point_cloud.append((2.646883 ,0.518685 ,6.780034))
point_cloud.append((2.645925 ,1.385707 ,6.785423))
point_cloud.append((1.251037 ,1.385707 ,6.215363))
point_cloud.append((1.254098 ,0.518685 ,6.210833))
point_cloud.append((-0.203067 ,1.385707 ,2.212009))
point_cloud.append((0.508332 ,1.385707 ,0.883271))
point_cloud.append((0.512518 ,0.518685 ,0.886794))
point_cloud.append((-0.197806 ,0.518685 ,2.213525))
point_cloud.append((0.183619 ,5.020408 ,5.145003))
point_cloud.append((-0.329907 ,5.020408 ,3.717978))
point_cloud.append((-0.324200 ,4.106757 ,3.717183))
point_cloud.append((0.188516 ,4.106757 ,5.141951))
point_cloud.append((0.193356 ,3.203756 ,5.138935))
point_cloud.append((-0.318555 ,3.203756 ,3.716398))
point_cloud.append((-0.312960 ,2.308404 ,3.715619))
point_cloud.append((0.198153 ,2.308404 ,5.135944))
point_cloud.append((2.644905 ,2.308404 ,6.791161))
point_cloud.append((2.643916 ,3.203756 ,6.796730))
point_cloud.append((1.244608 ,3.203756 ,6.224862))
point_cloud.append((1.247773 ,2.308404 ,6.220184))
point_cloud.append((1.238187 ,5.020408 ,6.234357))
point_cloud.append((1.241418 ,4.106757 ,6.229582))
point_cloud.append((-0.225116 ,5.020408 ,2.205656))
point_cloud.append((0.490787 ,5.020408 ,0.868504))
point_cloud.append((0.495195 ,4.106757 ,0.872216))
point_cloud.append((-0.219577 ,4.106757 ,2.207253))
point_cloud.append((0.499557 ,3.203756 ,0.875884))
point_cloud.append((0.503879 ,2.308404 ,0.879522))
point_cloud.append((-0.208665 ,2.308404 ,2.210397))
point_cloud.append((-0.214097 ,3.203756 ,2.208831))
point_cloud.append((3.158284 ,3.203756 ,-0.417160))
point_cloud.append((3.158474 ,2.308404 ,-0.411511))
point_cloud.append((1.692964 ,2.308404 ,-0.048998))
point_cloud.append((1.690492 ,3.203756 ,-0.054082))
point_cloud.append((6.707332 ,5.020408 ,1.972985))
point_cloud.append((6.918824 ,5.020408 ,3.474906))
point_cloud.append((6.913069 ,4.106757 ,3.474495))
point_cloud.append((6.701913 ,4.106757 ,1.974951))
point_cloud.append((6.895933 ,1.385707 ,3.473274))
point_cloud.append((6.890467 ,0.518685 ,3.472884))
point_cloud.append((6.680625 ,0.518685 ,1.982669))
point_cloud.append((6.685767 ,1.385707 ,1.980803))
point_cloud.append((4.641633 ,1.385707 ,-0.142216))
point_cloud.append((5.890574 ,1.385707 ,0.701569))
point_cloud.append((5.886631 ,0.518685 ,0.705367))
point_cloud.append((4.639576 ,0.518685 ,-0.137145))
point_cloud.append((4.650253 ,5.020408 ,-0.163476))
point_cloud.append((5.907104 ,5.020408 ,0.685652))
point_cloud.append((5.902949 ,4.106757 ,0.689653))
point_cloud.append((4.648087 ,4.106757 ,-0.158131))
point_cloud.append((5.898841 ,3.203756 ,0.693607))
point_cloud.append((5.894772 ,2.308404 ,0.697529))
point_cloud.append((4.643820 ,2.308404 ,-0.147612))
point_cloud.append((4.645946 ,3.203756 ,-0.152850))
point_cloud.append((6.907381 ,3.203756 ,3.474090))
point_cloud.append((6.901742 ,2.308404 ,3.473688))
point_cloud.append((6.691242 ,2.308404 ,1.978818))
point_cloud.append((6.696553 ,3.203756 ,1.976892))
point_cloud.append((4.150497 ,0.518685 ,6.725431))
point_cloud.append((4.151813 ,1.385707 ,6.730737))
point_cloud.append((6.476025 ,1.385707 ,4.919978))
point_cloud.append((5.508497 ,1.385707 ,6.075462))
point_cloud.append((5.505131 ,0.518685 ,6.071143))
point_cloud.append((6.471198 ,0.518685 ,4.917403))
point_cloud.append((6.496260 ,5.020408 ,4.930770))
point_cloud.append((5.522605 ,5.020408 ,6.093571))
point_cloud.append((5.519058 ,4.106757 ,6.089018))
point_cloud.append((6.491173 ,4.106757 ,4.928057))
point_cloud.append((5.515553 ,3.203756 ,6.084520))
point_cloud.append((5.512079 ,2.308404 ,6.080058))
point_cloud.append((6.481163 ,2.308404 ,4.922717))
point_cloud.append((6.486146 ,3.203756 ,4.925376))
point_cloud.append((4.153216 ,2.308404 ,6.736388))
point_cloud.append((4.154574 ,3.203756 ,6.741870))
point_cloud.append((1.675448 ,8.652617 ,-0.085022))
point_cloud.append((3.157130 ,8.652617 ,-0.451532))
point_cloud.append((3.157327 ,7.729900 ,-0.445711))
point_cloud.append((1.677998 ,7.729900 ,-0.079780))
point_cloud.append((0.164145 ,8.652617 ,5.157134))
point_cloud.append((-0.352604 ,8.652617 ,3.721138))
point_cloud.append((-0.346842 ,7.729900 ,3.720335))
point_cloud.append((0.169092 ,7.729900 ,5.154052))
point_cloud.append((0.173913 ,6.830529 ,5.151048))
point_cloud.append((-0.341222 ,6.830529 ,3.719552))
point_cloud.append((-0.335544 ,5.922382 ,3.718763))
point_cloud.append((0.178781 ,5.922382 ,5.148015))
point_cloud.append((2.640912 ,5.922382 ,6.813638))
point_cloud.append((2.639909 ,6.830529 ,6.819285))
point_cloud.append((1.231787 ,6.830529 ,6.243816))
point_cloud.append((1.234998 ,5.922382 ,6.239071))
point_cloud.append((2.638916 ,7.729900 ,6.824878))
point_cloud.append((2.637897 ,8.652617 ,6.830617))
point_cloud.append((1.225348 ,8.652617 ,6.253339))
point_cloud.append((1.228609 ,7.729900 ,6.248516))
point_cloud.append((-0.247161 ,8.652617 ,2.199305))
point_cloud.append((0.473244 ,8.652617 ,0.853746))
point_cloud.append((0.477700 ,7.729900 ,0.857494))
point_cloud.append((-0.241562 ,7.729900 ,2.200918))
point_cloud.append((0.482043 ,6.830529 ,0.861148))
point_cloud.append((0.486426 ,5.922382 ,0.864840))
point_cloud.append((-0.230595 ,5.922382 ,2.204078))
point_cloud.append((-0.236103 ,6.830529 ,2.202491))
point_cloud.append((3.157516 ,6.830529 ,-0.440039))
point_cloud.append((3.157709 ,5.922382 ,-0.434308))
point_cloud.append((1.682988 ,5.922382 ,-0.069518))
point_cloud.append((1.680478 ,6.830529 ,-0.074673))
point_cloud.append((-0.358291 ,9.562016 ,3.721928))
point_cloud.append((0.159270 ,9.562016 ,5.160172))
point_cloud.append((2.636892 ,9.562016 ,6.836272))
point_cloud.append((1.222133 ,9.562016 ,6.258091))
point_cloud.append((0.468853 ,9.562016 ,0.850051))
point_cloud.append((-0.252680 ,9.562016 ,2.197715))
point_cloud.append((3.156940 ,9.562016 ,-0.457271))
point_cloud.append((1.672940 ,9.562016 ,-0.090183))
point_cloud.append((6.941701 ,8.652617 ,3.476536))
point_cloud.append((6.935891 ,7.729900 ,3.476122))
point_cloud.append((6.723406 ,7.729900 ,1.967156))
point_cloud.append((6.728880 ,8.652617 ,1.965172))
point_cloud.append((4.658867 ,8.652617 ,-0.184721))
point_cloud.append((5.923620 ,8.652617 ,0.669743))
point_cloud.append((5.919425 ,7.729900 ,0.673786))
point_cloud.append((4.656679 ,7.729900 ,-0.179323))
point_cloud.append((4.652392 ,5.922382 ,-0.168751))
point_cloud.append((4.654546 ,6.830529 ,-0.174065))
point_cloud.append((5.915335 ,6.830529 ,0.677725))
point_cloud.append((5.911205 ,5.922382 ,0.681701))
point_cloud.append((6.930224 ,6.830529 ,3.475718))
point_cloud.append((6.924505 ,5.922382 ,3.475310))
point_cloud.append((6.712684 ,5.922382 ,1.971046))
point_cloud.append((6.718071 ,6.830529 ,1.969091))
point_cloud.append((5.927756 ,9.562016 ,0.665762))
point_cloud.append((4.661024 ,9.562016 ,-0.190042))
point_cloud.append((6.947430 ,9.562016 ,3.476945))
point_cloud.append((6.734274 ,9.562016 ,1.963216))
point_cloud.append((4.161447 ,7.729900 ,6.769588))
point_cloud.append((4.162848 ,8.652617 ,6.775239))
point_cloud.append((6.516480 ,8.652617 ,4.941554))
point_cloud.append((5.536707 ,8.652617 ,6.111668))
point_cloud.append((5.533121 ,7.729900 ,6.107068))
point_cloud.append((6.511345 ,7.729900 ,4.938815))
point_cloud.append((6.501281 ,5.922382 ,4.933448))
point_cloud.append((6.506339 ,6.830529 ,4.936145))
point_cloud.append((5.529633 ,6.830529 ,6.102589))
point_cloud.append((5.526105 ,5.922382 ,6.098064))
point_cloud.append((4.158701 ,5.922382 ,6.758520))
point_cloud.append((4.160082 ,6.830529 ,6.764081))
point_cloud.append((5.540236 ,9.562016 ,6.116198))
point_cloud.append((6.521542 ,9.562016 ,4.944255))
point_cloud.append((4.164228 ,9.562016 ,6.780807))
point_cloud.append((-0.182651 ,10.144874 ,3.697513))
point_cloud.append((0.309623 ,10.144874 ,5.066475))
point_cloud.append((1.321507 ,10.144874 ,6.111202))
point_cloud.append((2.667878 ,10.144874 ,6.661858))
point_cloud.append((0.604496 ,10.144874 ,0.964228))
point_cloud.append((-0.082413 ,10.144874 ,2.246745))
point_cloud.append((3.162846 ,10.144874 ,-0.280110))
point_cloud.append((1.750460 ,10.144874 ,0.069181))
point_cloud.append((5.800086 ,10.144874 ,0.788670))
point_cloud.append((4.594453 ,10.144874 ,-0.025760))
point_cloud.append((6.770698 ,10.144874 ,3.464323))
point_cloud.append((6.567606 ,10.144874 ,2.023682))
point_cloud.append((5.431362 ,10.144874 ,5.976444))
point_cloud.append((6.365062 ,10.144874 ,4.860831))
point_cloud.append((4.121535 ,10.144874 ,6.608676))
point_cloud.append((-0.055685 ,10.328520 ,3.679867))
point_cloud.append((0.418293 ,10.328521 ,4.998750))
point_cloud.append((1.393342 ,10.328520 ,6.005020))
point_cloud.append((2.690276 ,10.328521 ,6.535792))
point_cloud.append((0.702539 ,10.328521 ,1.046765))
point_cloud.append((0.040656 ,10.328521 ,2.282179))
point_cloud.append((3.167110 ,10.328521 ,-0.152051))
point_cloud.append((1.806494 ,10.328521 ,0.184372))
point_cloud.append((5.707799 ,10.328521 ,0.877508))
point_cloud.append((4.546336 ,10.328521 ,0.092994))
point_cloud.append((6.642953 ,10.328521 ,3.455196))
point_cloud.append((6.447130 ,10.328521 ,2.067395))
point_cloud.append((5.352671 ,10.328521 ,5.875429))
point_cloud.append((6.251944 ,10.328520 ,4.800529))
point_cloud.append((4.090671 ,10.328520 ,6.484248))
point_cloud.append((-0.152433 ,10.534639 ,3.693314))
point_cloud.append((0.335487 ,10.534639 ,5.050354))
point_cloud.append((1.338603 ,10.534639 ,6.085933))
point_cloud.append((2.673209 ,10.534638 ,6.631852))
point_cloud.append((0.627828 ,10.534640 ,0.983873))
point_cloud.append((-0.053121 ,10.534639 ,2.255178))
point_cloud.append((3.163859 ,10.534640 ,-0.249630))
point_cloud.append((1.763798 ,10.534640 ,0.096598))
point_cloud.append((5.778119 ,10.534640 ,0.809815))
point_cloud.append((4.583003 ,10.534640 ,0.002506))
point_cloud.append((6.740290 ,10.534639 ,3.462149))
point_cloud.append((6.538933 ,10.534640 ,2.034087))
point_cloud.append((5.412632 ,10.534639 ,5.952401))
point_cloud.append((6.338140 ,10.534639 ,4.846481))
point_cloud.append((4.114188 ,10.534639 ,6.579065))
point_cloud.append((-0.080783 ,10.568887 ,3.683355))
point_cloud.append((0.396815 ,10.568886 ,5.012135))
point_cloud.append((1.379143 ,10.568887 ,6.026009))
point_cloud.append((2.685851 ,10.568885 ,6.560707))
point_cloud.append((0.683162 ,10.568887 ,1.030452))
point_cloud.append((0.016331 ,10.568886 ,2.275175))
point_cloud.append((3.166268 ,10.568887 ,-0.177361))
point_cloud.append((1.795422 ,10.568887 ,0.161605))
point_cloud.append((5.726038 ,10.568887 ,0.859950))
point_cloud.append((4.555847 ,10.568887 ,0.069523))
point_cloud.append((6.668199 ,10.568885 ,3.456999))
point_cloud.append((6.470943 ,10.568887 ,2.058755))
point_cloud.append((5.368223 ,10.568886 ,5.895393))
point_cloud.append((6.274303 ,10.568886 ,4.812450))
point_cloud.append((4.096771 ,10.568887 ,6.508844))
point_cloud.append((0.020051 ,10.354052 ,3.669335))
point_cloud.append((0.483121 ,10.354052 ,4.958346))
point_cloud.append((1.436195 ,10.354052 ,5.941669))
point_cloud.append((2.703640 ,10.354052 ,6.460579))
point_cloud.append((0.761034 ,10.354051 ,1.096006))
point_cloud.append((0.114071 ,10.354051 ,2.303318))
point_cloud.append((3.169657 ,10.354051 ,-0.075649))
point_cloud.append((1.839924 ,10.354052 ,0.253090))
point_cloud.append((5.652745 ,10.354052 ,0.930508))
point_cloud.append((4.517631 ,10.354051 ,0.163843))
point_cloud.append((6.566752 ,10.354049 ,3.449753))
point_cloud.append((6.375257 ,10.354051 ,2.093472))
point_cloud.append((5.305725 ,10.354052 ,5.815170))
point_cloud.append((6.184472 ,10.354049 ,4.764557))
point_cloud.append((4.072262 ,10.354053 ,6.410017))
point_cloud.append((3.280156 ,10.144875 ,3.215654))
point_cloud.append((-0.152433 ,10.534639 ,3.693314))
point_cloud.append((0.335487 ,10.534639 ,5.050354))
point_cloud.append((0.396815 ,10.568886 ,5.012135))
point_cloud.append((-0.080783 ,10.568887 ,3.683355))
point_cloud.append((1.338603 ,10.534639 ,6.085933))
point_cloud.append((2.673209 ,10.534638 ,6.631852))
point_cloud.append((2.685851 ,10.568885 ,6.560707))
point_cloud.append((1.379143 ,10.568887 ,6.026009))
point_cloud.append((0.627828 ,10.534640 ,0.983873))
point_cloud.append((-0.053121 ,10.534639 ,2.255178))
point_cloud.append((0.016331 ,10.568886 ,2.275175))
point_cloud.append((0.683162 ,10.568887 ,1.030452))
point_cloud.append((3.163859 ,10.534640 ,-0.249630))
point_cloud.append((1.763798 ,10.534640 ,0.096598))
point_cloud.append((1.795422 ,10.568887 ,0.161605))
point_cloud.append((3.166268 ,10.568887 ,-0.177361))
point_cloud.append((5.778119 ,10.534640 ,0.809815))
point_cloud.append((4.583003 ,10.534640 ,0.002506))
point_cloud.append((4.555847 ,10.568887 ,0.069523))
point_cloud.append((5.726038 ,10.568887 ,0.859950))
point_cloud.append((6.740290 ,10.534639 ,3.462149))
point_cloud.append((6.538933 ,10.534640 ,2.034087))
point_cloud.append((6.470943 ,10.568887 ,2.058755))
point_cloud.append((6.668199 ,10.568885 ,3.456999))
point_cloud.append((5.412632 ,10.534639 ,5.952401))
point_cloud.append((6.338140 ,10.534639 ,4.846481))
point_cloud.append((6.274303 ,10.568886 ,4.812450))
point_cloud.append((5.368223 ,10.568886 ,5.895393))
point_cloud.append((4.114188 ,10.534639 ,6.579065))
point_cloud.append((4.096771 ,10.568887 ,6.508844))
point_cloud.append((5.425215 ,10.778636 ,5.968491))
point_cloud.append((4.119590 ,10.778636 ,6.600924))
point_cloud.append((2.669658 ,10.778634 ,6.651858))
point_cloud.append((1.326003 ,10.778636 ,6.104593))
point_cloud.append((0.318130 ,10.778635 ,5.061125))
point_cloud.append((-0.174533 ,10.778636 ,3.696447))
point_cloud.append((-0.073003 ,10.778636 ,2.249378))
point_cloud.append((0.610960 ,10.778639 ,0.969788))
point_cloud.append((1.754650 ,10.778638 ,0.077578))
point_cloud.append((3.163043 ,10.778638 ,-0.271164))
point_cloud.append((4.591189 ,10.778638 ,-0.017429))
point_cloud.append((5.793259 ,10.778638 ,0.795109))
point_cloud.append((6.559621 ,10.778639 ,2.026675))
point_cloud.append((6.760949 ,10.778639 ,3.463552))
point_cloud.append((6.357805 ,10.778639 ,4.857034))
point_cloud.append((-0.122750 ,10.565723 ,3.689190))
point_cloud.append((0.360884 ,10.565697 ,5.034526))
point_cloud.append((-0.146971 ,10.877974 ,3.693205))
point_cloud.append((0.342329 ,10.877947 ,5.046560))
point_cloud.append((1.355400 ,10.565675 ,6.061105))
point_cloud.append((2.678445 ,10.565671 ,6.602390))
point_cloud.append((1.341517 ,10.877926 ,6.082109))
point_cloud.append((2.674570 ,10.877921 ,6.624207))
point_cloud.append((0.650744 ,10.565790 ,1.003167))
point_cloud.append((-0.024361 ,10.565759 ,2.263454))
point_cloud.append((0.632303 ,10.878044 ,0.988462))
point_cloud.append((-0.045687 ,10.878010 ,2.257844))
point_cloud.append((3.164854 ,10.565828 ,-0.219705))
point_cloud.append((1.776895 ,10.565816 ,0.123512))
point_cloud.append((3.163850 ,10.878081 ,-0.242380))
point_cloud.append((1.766981 ,10.878069 ,0.103675))
point_cloud.append((5.756552 ,10.565816 ,0.830571))
point_cloud.append((4.571762 ,10.565828 ,0.030258))
point_cloud.append((5.772694 ,10.878069 ,0.815362))
point_cloud.append((4.580513 ,10.878081 ,0.009315))
point_cloud.append((6.710440 ,10.565759 ,3.460014))
point_cloud.append((6.510775 ,10.565790 ,2.044307))
point_cloud.append((6.732387 ,10.878015 ,3.462091))
point_cloud.append((6.533151 ,10.878044 ,2.036854))
point_cloud.append((5.394244 ,10.565697 ,5.928792))
point_cloud.append((6.311694 ,10.565723 ,4.832385))
point_cloud.append((5.407517 ,10.877948 ,5.946482))
point_cloud.append((6.333045 ,10.877978 ,4.844496))
point_cloud.append((4.106971 ,10.565675 ,6.549971))
point_cloud.append((4.112773 ,10.877927 ,6.574473))
point_cloud.append((-0.045007 ,10.585985 ,3.678383))
point_cloud.append((0.427433 ,10.585984 ,4.993052))
point_cloud.append((0.513742 ,10.371150 ,4.939264))
point_cloud.append((0.055827 ,10.371150 ,3.664363))
point_cloud.append((1.399384 ,10.585986 ,5.996089))
point_cloud.append((2.692160 ,10.585984 ,6.525189))
point_cloud.append((2.709950 ,10.371150 ,6.425060))
point_cloud.append((1.456439 ,10.371151 ,5.911749))
point_cloud.append((0.710787 ,10.585986 ,1.053709))
point_cloud.append((0.051007 ,10.585984 ,2.285159))
point_cloud.append((0.148748 ,10.371149 ,2.313302))
point_cloud.append((0.788661 ,10.371150 ,1.119263))
point_cloud.append((3.167469 ,10.585986 ,-0.141278))
point_cloud.append((1.811213 ,10.585986 ,0.194061))
point_cloud.append((1.855712 ,10.371151 ,0.285547))
point_cloud.append((3.170856 ,10.371150 ,-0.039567))
point_cloud.append((5.700035 ,10.585986 ,0.884981))
point_cloud.append((4.542290 ,10.585986 ,0.102984))
point_cloud.append((4.504074 ,10.371150 ,0.197304))
point_cloud.append((5.626741 ,10.371151 ,0.955539))
point_cloud.append((6.632206 ,10.585983 ,3.454428))
point_cloud.append((6.436996 ,10.585986 ,2.071073))
point_cloud.append((6.341310 ,10.371150 ,2.105790))
point_cloud.append((6.530759 ,10.371146 ,3.447181))
point_cloud.append((5.346051 ,10.585984 ,5.866933))
point_cloud.append((6.242430 ,10.585984 ,4.795458))
point_cloud.append((6.152597 ,10.371146 ,4.747566))
point_cloud.append((5.283554 ,10.371149 ,5.786708))
point_cloud.append((4.088075 ,10.585986 ,6.473783))
point_cloud.append((4.063564 ,10.371151 ,6.374955))
point_cloud.append((3.166987 ,0.276333 ,-0.156237))
point_cloud.append((1.804662 ,0.276333 ,0.180641))
point_cloud.append((-0.059886 ,0.276333 ,3.680437))
point_cloud.append((0.414808 ,0.276333 ,5.000927))
point_cloud.append((1.390955 ,0.276333 ,6.008537))
point_cloud.append((2.689559 ,0.276332 ,6.539830))
point_cloud.append((0.699322 ,0.276333 ,1.044038))
point_cloud.append((0.036683 ,0.276333 ,2.281047))
point_cloud.append((6.647072 ,0.276333 ,3.455502))
point_cloud.append((6.451091 ,0.276333 ,2.065943))
point_cloud.append((5.710804 ,0.276333 ,0.874637))
point_cloud.append((4.547896 ,0.276333 ,0.089106))
point_cloud.append((4.091699 ,0.276333 ,6.488372))
point_cloud.append((5.355191 ,0.276333 ,5.878676))
point_cloud.append((6.255693 ,0.276333 ,4.802514))
point_cloud.append((3.169418 ,0.349825 ,-0.083309))
point_cloud.append((1.836574 ,0.349825 ,0.246240))
point_cloud.append((0.012421 ,0.349825 ,3.670386))
point_cloud.append((0.476693 ,0.349824 ,4.962359))
point_cloud.append((1.431866 ,0.349825 ,5.948065))
point_cloud.append((2.702314 ,0.349824 ,6.468037))
point_cloud.append((0.755157 ,0.349825 ,1.091042))
point_cloud.append((0.106768 ,0.349825 ,2.301226))
point_cloud.append((6.574324 ,0.349825 ,3.450304))
point_cloud.append((6.382481 ,0.349825 ,2.090837))
point_cloud.append((5.658248 ,0.349825 ,0.925230))
point_cloud.append((4.520494 ,0.349825 ,0.156734))
point_cloud.append((4.074122 ,0.349825 ,6.417510))
point_cloud.append((5.310378 ,0.349824 ,5.821149))
point_cloud.append((6.191274 ,0.349826 ,4.768171))
point_cloud.append((3.164978 ,0.498595 ,-0.216618))
point_cloud.append((1.778241 ,0.498594 ,0.126331))
point_cloud.append((-0.119752 ,0.498595 ,3.688757))
point_cloud.append((0.363572 ,0.498593 ,5.032858))
point_cloud.append((1.357084 ,0.498595 ,6.058606))
point_cloud.append((2.678999 ,0.498592 ,6.599267))
point_cloud.append((0.653089 ,0.498595 ,1.005119))
point_cloud.append((-0.021339 ,0.498594 ,2.264341))
point_cloud.append((6.707300 ,0.498594 ,3.459805))
point_cloud.append((6.507895 ,0.498595 ,2.045331))
point_cloud.append((5.754317 ,0.498594 ,0.832752))
point_cloud.append((4.570581 ,0.498595 ,0.033112))
point_cloud.append((4.106251 ,0.498595 ,6.547045))
point_cloud.append((5.392293 ,0.498593 ,5.926301))
point_cloud.append((6.309029 ,0.498596 ,4.830945))
point_cloud.append((3.280161 ,0.349825 ,3.215641))
point_cloud.append((3.165553 ,-0.064520 ,-0.209167))
point_cloud.append((1.782575 ,-0.064520 ,0.132760))
point_cloud.append((3.168099 ,0.012503 ,-0.132734))
point_cloud.append((1.816021 ,0.012503 ,0.201511))
point_cloud.append((-0.110170 ,-0.064520 ,3.685747))
point_cloud.append((0.371495 ,-0.064519 ,5.026345))
point_cloud.append((-0.034388 ,0.012503 ,3.675215))
point_cloud.append((0.436353 ,0.012503 ,4.985924))
point_cloud.append((1.362666 ,-0.064521 ,6.049082))
point_cloud.append((2.680876 ,-0.064520 ,6.588700))
point_cloud.append((1.405543 ,0.012502 ,5.985706))
point_cloud.append((2.694246 ,0.012502 ,6.513459))
point_cloud.append((0.660483 ,-0.064520 ,1.009382))
point_cloud.append((-0.012340 ,-0.064520 ,2.265059))
point_cloud.append((0.719004 ,0.012503 ,1.058644))
point_cloud.append((0.061115 ,0.012503 ,2.286209))
point_cloud.append((6.698601 ,-0.064519 ,3.457380))
point_cloud.append((6.499489 ,-0.064520 ,2.046786))
point_cloud.append((6.622357 ,0.012503 ,3.451932))
point_cloud.append((6.427582 ,0.012503 ,2.072876))
point_cloud.append((5.748032 ,-0.064520 ,0.837294))
point_cloud.append((4.567475 ,-0.064520 ,0.039910))
point_cloud.append((5.692952 ,0.012503 ,0.890317))
point_cloud.append((4.538757 ,0.012503 ,0.110787))
point_cloud.append((4.104290 ,-0.064521 ,6.536180))
point_cloud.append((4.085869 ,0.012502 ,6.461914))
point_cloud.append((5.387100 ,-0.064519 ,5.917458))
point_cloud.append((6.301073 ,-0.064520 ,4.824821))
point_cloud.append((5.340132 ,0.012503 ,5.857169))
point_cloud.append((6.233557 ,0.012503 ,4.788829))
point_cloud.append((5.478942 ,10.916224 ,6.037486))
point_cloud.append((4.140416 ,10.916224 ,6.684844))
point_cloud.append((2.654351 ,10.916222 ,6.738008))
point_cloud.append((1.277544 ,10.916224 ,6.176203))
point_cloud.append((0.243923 ,10.916224 ,5.107392))
point_cloud.append((-0.260271 ,10.916224 ,3.708333))
point_cloud.append((-0.156917 ,10.916224 ,2.225251))
point_cloud.append((0.544647 ,10.916226 ,0.913910))
point_cloud.append((1.716496 ,10.916225 ,-0.000751))
point_cloud.append((3.160196 ,10.916225 ,-0.358036))
point_cloud.append((4.623784 ,10.916225 ,-0.098003))
point_cloud.append((5.856059 ,10.916225 ,0.734717))
point_cloud.append((6.641123 ,10.916227 ,1.997058))
point_cloud.append((6.848035 ,10.916227 ,3.469807))
point_cloud.append((6.434197 ,10.916227 ,4.897726))
point_cloud.append((-0.162946 ,10.895081 ,3.695425))
point_cloud.append((0.328535 ,10.895054 ,5.055159))
point_cloud.append((1.332474 ,10.895032 ,6.095473))
point_cloud.append((2.671723 ,10.895026 ,6.640228))
point_cloud.append((0.619969 ,10.895154 ,0.978076))
point_cloud.append((-0.061267 ,10.895118 ,2.253363))
point_cloud.append((3.163317 ,10.895192 ,-0.258505))
point_cloud.append((1.759905 ,10.895180 ,0.089141))
point_cloud.append((5.784344 ,10.895180 ,0.804154))
point_cloud.append((4.586568 ,10.895192 ,-0.005639))
point_cloud.append((6.748557 ,10.895123 ,3.463251))
point_cloud.append((6.548307 ,10.895154 ,2.031352))
point_cloud.append((5.417503 ,10.895054 ,5.959304))
point_cloud.append((6.347278 ,10.895085 ,4.852082))
point_cloud.append((4.116658 ,10.895033 ,6.590134))
point_cloud.append((5.478942 ,10.916224 ,6.037486))
point_cloud.append((4.140416 ,10.916224 ,6.684844))
point_cloud.append((2.654351 ,10.916222 ,6.738008))
point_cloud.append((1.277544 ,10.916224 ,6.176203))
point_cloud.append((0.243923 ,10.916224 ,5.107392))
point_cloud.append((-0.260271 ,10.916224 ,3.708333))
point_cloud.append((-0.156917 ,10.916224 ,2.225251))
point_cloud.append((0.544647 ,10.916226 ,0.913910))
point_cloud.append((1.716496 ,10.916225 ,-0.000751))
point_cloud.append((3.160196 ,10.916225 ,-0.358036))
point_cloud.append((4.623784 ,10.916225 ,-0.098003))
point_cloud.append((5.856059 ,10.916225 ,0.734717))
point_cloud.append((6.641123 ,10.916227 ,1.997058))
point_cloud.append((6.848035 ,10.916227 ,3.469807))
point_cloud.append((6.434197 ,10.916227 ,4.897726))
point_cloud.append((5.478942 ,10.950573 ,6.037486))
point_cloud.append((4.140416 ,10.950571 ,6.684844))
point_cloud.append((2.654351 ,10.950569 ,6.738008))
point_cloud.append((1.277544 ,10.950571 ,6.176203))
point_cloud.append((0.243923 ,10.950571 ,5.107392))
point_cloud.append((-0.260271 ,10.950571 ,3.708333))
point_cloud.append((-0.156917 ,10.950571 ,2.225251))
point_cloud.append((0.544647 ,10.950574 ,0.913910))
point_cloud.append((1.716496 ,10.950573 ,-0.000751))
point_cloud.append((3.160196 ,10.950573 ,-0.358036))
point_cloud.append((4.623784 ,10.950573 ,-0.098003))
point_cloud.append((5.856059 ,10.950573 ,0.734717))
point_cloud.append((6.641123 ,10.950575 ,1.997058))
point_cloud.append((6.848035 ,10.950575 ,3.469807))
point_cloud.append((6.434197 ,10.950575 ,4.897726))
point_cloud.append((-0.162946 ,10.895081 ,3.695425))
point_cloud.append((0.328535 ,10.895054 ,5.055159))
point_cloud.append((1.332474 ,10.895032 ,6.095473))
point_cloud.append((2.671723 ,10.895026 ,6.640228))
point_cloud.append((0.619969 ,10.895154 ,0.978076))
point_cloud.append((-0.061267 ,10.895118 ,2.253363))
point_cloud.append((3.163317 ,10.895192 ,-0.258505))
point_cloud.append((1.759905 ,10.895180 ,0.089141))
point_cloud.append((5.784344 ,10.895180 ,0.804154))
point_cloud.append((4.586568 ,10.895192 ,-0.005639))
point_cloud.append((6.748557 ,10.895123 ,3.463251))
point_cloud.append((6.548307 ,10.895154 ,2.031352))
point_cloud.append((5.417503 ,10.895054 ,5.959305))
point_cloud.append((6.347278 ,10.895085 ,4.852082))
point_cloud.append((4.116658 ,10.895033 ,6.590134))
point_cloud.append((-0.162946 ,10.950529 ,3.695426))
point_cloud.append((0.328536 ,10.950502 ,5.055159))
point_cloud.append((1.329279 ,10.950480 ,6.113470))
point_cloud.append((2.671723 ,10.950475 ,6.640227))
point_cloud.append((0.619969 ,10.950603 ,0.978077))
point_cloud.append((-0.056237 ,10.950245 ,2.249445))
point_cloud.append((3.163317 ,10.950641 ,-0.258505))
point_cloud.append((1.759904 ,10.950628 ,0.089141))
point_cloud.append((5.784344 ,10.950628 ,0.804155))
point_cloud.append((4.586568 ,10.950641 ,-0.005639))
point_cloud.append((6.748556 ,10.950572 ,3.463252))
point_cloud.append((6.548307 ,10.950603 ,2.031353))
point_cloud.append((5.417502 ,10.950502 ,5.959305))
point_cloud.append((6.347277 ,10.950534 ,4.852083))
point_cloud.append((4.116657 ,10.950481 ,6.590134))
point_cloud.append((1.307509 ,10.228682 ,4.120849))
point_cloud.append((1.550650 ,10.353782 ,5.912493))
point_cloud.append((5.468255 ,10.226073 ,3.084603))
point_cloud.append((6.070018 ,10.354319 ,4.793732))
point_cloud.append((3.946349 ,10.307128 ,6.236073))
point_cloud.append((4.209584 ,10.177548 ,3.558779))
point_cloud.append((4.122898 ,10.195040 ,3.177014))
point_cloud.append((4.114052 ,10.196825 ,3.138056))
point_cloud.append((4.095206 ,10.200627 ,3.055061))
point_cloud.append((4.015021 ,10.216707 ,2.704244))
point_cloud.append((4.002913 ,10.218773 ,2.659598))
point_cloud.append((3.857510 ,10.248590 ,2.008252))
point_cloud.append((3.827100 ,10.254726 ,1.874331))
point_cloud.append((3.740414 ,10.272219 ,1.492566))
point_cloud.append((3.653728 ,10.289710 ,1.110800))
point_cloud.append((3.592538 ,10.302057 ,0.841327))
point_cloud.append((4.084571 ,10.170713 ,3.745415))
point_cloud.append((3.997883 ,10.188206 ,3.363649))
point_cloud.append((3.954867 ,10.196886 ,3.174205))
point_cloud.append((3.936020 ,10.200688 ,3.091210))
point_cloud.append((3.851337 ,10.217805 ,2.717611))
point_cloud.append((3.841289 ,10.219804 ,2.674013))
point_cloud.append((3.698325 ,10.248652 ,2.044400))
point_cloud.append((3.667916 ,10.254787 ,1.910481))
point_cloud.append((3.581229 ,10.272279 ,1.528715))
point_cloud.append((3.472117 ,10.294296 ,1.048192))
point_cloud.append((3.385428 ,10.311788 ,0.666425))
point_cloud.append((3.952234 ,10.165357 ,3.899803))
point_cloud.append((3.899262 ,10.172608 ,3.509913))
point_cloud.append((3.840971 ,10.183326 ,3.205605))
point_cloud.append((3.827957 ,10.185640 ,3.134037))
point_cloud.append((3.732705 ,10.204861 ,2.714556))
point_cloud.append((3.611242 ,10.234163 ,2.398087))
point_cloud.append((3.539140 ,10.248712 ,2.080549))
point_cloud.append((3.535991 ,10.246568 ,1.940060))
point_cloud.append((3.465627 ,10.259103 ,1.554361))
point_cloud.append((3.281787 ,10.300641 ,0.947176))
point_cloud.append((3.195100 ,10.318133 ,0.565410))
point_cloud.append((3.813797 ,10.161231 ,4.027319))
point_cloud.append((3.740622 ,10.176162 ,3.701249))
point_cloud.append((3.418299 ,10.241035 ,2.285565))
point_cloud.append((3.379956 ,10.248773 ,2.116697))
point_cloud.append((3.349546 ,10.254909 ,1.982778))
point_cloud.append((3.097685 ,10.305730 ,0.873593))
point_cloud.append((3.010999 ,10.323221 ,0.491827))
point_cloud.append((3.676579 ,10.156859 ,4.160209))
point_cloud.append((3.619913 ,10.173102 ,3.800079))
point_cloud.append((3.241170 ,10.244717 ,2.242692))
point_cloud.append((3.202824 ,10.252454 ,2.073823))
point_cloud.append((3.190361 ,10.254969 ,2.018926))
point_cloud.append((2.923552 ,10.308806 ,0.843904))
point_cloud.append((2.836862 ,10.326299 ,0.462138))
point_cloud.append((3.535631 ,10.153239 ,4.276677))
point_cloud.append((3.456013 ,10.169305 ,3.926042))
point_cloud.append((3.078820 ,10.245416 ,2.264895))
point_cloud.append((3.040475 ,10.253153 ,2.096027))
point_cloud.append((3.031176 ,10.255030 ,2.055075))
point_cloud.append((2.753713 ,10.322001 ,0.833120))
point_cloud.append((2.673606 ,10.338164 ,0.480333))
point_cloud.append((3.358209 ,10.156981 ,4.232506))
point_cloud.append((3.285539 ,10.171644 ,3.912467))
point_cloud.append((2.922801 ,10.244838 ,2.314988))
point_cloud.append((2.884458 ,10.252575 ,2.146121))
point_cloud.append((2.871991 ,10.255090 ,2.091223))
point_cloud.append((2.605181 ,10.308928 ,0.916201))
point_cloud.append((2.518493 ,10.326420 ,0.534435))
point_cloud.append((3.177057 ,10.161473 ,4.171914))
point_cloud.append((3.114479 ,10.174100 ,3.896325))
point_cloud.append((2.781561 ,10.241278 ,2.430159))
point_cloud.append((2.743216 ,10.249015 ,2.261292))
point_cloud.append((2.712806 ,10.255151 ,2.127372))
point_cloud.append((2.460946 ,10.305972 ,1.018188))
point_cloud.append((2.374259 ,10.323463 ,0.636422))
point_cloud.append((2.997124 ,10.165720 ,4.116694))
point_cloud.append((2.876513 ,10.172997 ,3.742164))
point_cloud.append((2.797668 ,10.183722 ,3.442524))
point_cloud.append((2.778494 ,10.186040 ,3.372355))
point_cloud.append((2.683244 ,10.205259 ,2.952874))
point_cloud.append((2.656134 ,10.234527 ,2.614978))
point_cloud.append((2.584032 ,10.249075 ,2.297440))
point_cloud.append((2.526192 ,10.246952 ,2.169371))
point_cloud.append((2.423080 ,10.259500 ,1.791107))
point_cloud.append((2.326677 ,10.301004 ,1.164067))
point_cloud.append((2.239990 ,10.318497 ,0.782301))
point_cloud.append((2.811092 ,10.171198 ,4.034604))
point_cloud.append((2.724405 ,10.188690 ,3.652838))
point_cloud.append((2.681388 ,10.197371 ,3.463393))
point_cloud.append((2.662541 ,10.201173 ,3.380399))
point_cloud.append((2.575857 ,10.218665 ,2.998631))
point_cloud.append((2.567810 ,10.220288 ,2.963202))
point_cloud.append((2.424847 ,10.249136 ,2.333589))
point_cloud.append((2.394438 ,10.255272 ,2.199669))
point_cloud.append((2.307747 ,10.272764 ,1.817903))
point_cloud.append((2.198639 ,10.294781 ,1.337380))
point_cloud.append((2.111953 ,10.312272 ,0.955614))
point_cloud.append((2.617737 ,10.178154 ,3.920265))
point_cloud.append((2.531052 ,10.195645 ,3.538500))
point_cloud.append((2.522205 ,10.197431 ,3.499542))
point_cloud.append((2.503360 ,10.201233 ,3.416548))
point_cloud.append((2.416672 ,10.218725 ,3.034780))
point_cloud.append((2.408628 ,10.220348 ,2.999350))
point_cloud.append((2.265659 ,10.249196 ,2.369737))
point_cloud.append((2.235251 ,10.255332 ,2.235817))
point_cloud.append((2.148564 ,10.272824 ,1.854052))
point_cloud.append((2.061878 ,10.290316 ,1.472285))
point_cloud.append((2.000690 ,10.302663 ,1.202813))
point_cloud.append((4.212547 ,10.466381 ,3.571341))
point_cloud.append((4.125860 ,10.483872 ,3.189575))
point_cloud.append((4.000845 ,10.477038 ,3.376210))
point_cloud.append((4.087533 ,10.459546 ,3.757976))
point_cloud.append((4.117014 ,10.485658 ,3.150617))
point_cloud.append((3.957829 ,10.485718 ,3.186766))
point_cloud.append((4.098168 ,10.489460 ,3.067623))
point_cloud.append((3.938983 ,10.489521 ,3.103771))
point_cloud.append((4.013296 ,10.507075 ,2.696589))
point_cloud.append((3.852298 ,10.507012 ,2.722004))
point_cloud.append((4.003437 ,10.508575 ,2.650425))
point_cloud.append((3.844251 ,10.508636 ,2.686574))
point_cloud.append((3.860472 ,10.537423 ,2.020813))
point_cloud.append((3.701287 ,10.537484 ,2.056961))
point_cloud.append((3.830062 ,10.543559 ,1.886893))
point_cloud.append((3.670878 ,10.543620 ,1.923042))
point_cloud.append((3.743376 ,10.561051 ,1.505127))
point_cloud.append((3.584192 ,10.561111 ,1.541276))
point_cloud.append((3.656691 ,10.578543 ,1.123362))
point_cloud.append((3.475080 ,10.583128 ,1.060753))
point_cloud.append((3.595500 ,10.590890
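# Hedged sketch of one way to consume the list built above: view the appended
# (x, y, z) tuples as an (N, 3) array and take an axis-aligned bounding box.
# NumPy is not imported elsewhere in this listing, so the import is local.
def _point_cloud_extent(points):
    import numpy as np
    arr = np.asarray(points, dtype=float)   # shape (N, 3)
    return arr.min(axis=0), arr.max(axis=0)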
return x
scripted = torch.jit.script(grad_in_loop)
outer = scripted.graph_for(x, y)
loop = outer.findNode("prim::Loop")
loop_block = next(loop.blocks())
param_node = loop_block.paramNode()
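# paramNode() outputs mirror the loop-carried values (output 0 is the
# iteration counter), so output 1 is `x` as seen inside the loop body.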
x_value = list(param_node.outputs())[1]
self.assertTrue(x_value.requires_grad())
def test_tensor_grad(self):
x = torch.randn(3, 4, requires_grad=True)
y = torch.randn(3, 4, requires_grad=False)
def f_requires_grad(x):
return x.requires_grad
self.checkScript(f_requires_grad, (x,))
self.checkScript(f_requires_grad, (y,))
def f_grad(x):
return x.grad
x.sum().backward()
self.checkScript(f_grad, (x,))
self.checkScript(f_grad, (y,))
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, "shape analysis is only enabled in Legacy")
def test_prim_grad_undefined(self):
x = torch.ones(2)
def f_grad(x):
return x.grad
scripted = self.checkScript(f_grad, (x,))
g = scripted.graph_for(x)
prim_grad_node = g.findNode("prim::grad")
self.assertTrue(next(prim_grad_node.outputs()).type().undefined() is None)
def test_tensor_data(self):
x = torch.randn(3, 4, requires_grad=True)
y = torch.randn(4, 5)
def f_data(x):
return x.data
scripted_f_data = torch.jit.script(f_data)
scripted_x = scripted_f_data(x)
self.assertEqual(scripted_x, f_data(x))
self.assertEqual(scripted_x.requires_grad, False)
scripted_y = scripted_f_data(y)
self.assertEqual(scripted_y, f_data(y))
self.assertEqual(scripted_y.requires_grad, False)
def test_tensor_dtype(self):
x_byte = torch.empty(34, 56, 78, dtype=torch.uint8)
x_long = torch.empty(34, 56, 78, dtype=torch.long)
x_float32 = torch.empty(34, 56, 78, dtype=torch.float32)
@torch.jit.script
def byte(x):
return x.dtype == torch.uint8
@torch.jit.script
def long(x):
return x.dtype == torch.long
@torch.jit.script
def float32(x):
return x.dtype == torch.float32
self.assertTrue(byte(x_byte))
self.assertFalse(byte(x_long))
self.assertFalse(byte(x_float32))
self.assertFalse(long(x_byte))
self.assertTrue(long(x_long))
self.assertFalse(long(x_float32))
self.assertFalse(float32(x_byte))
self.assertFalse(float32(x_long))
self.assertTrue(float32(x_float32))
@unittest.skipIf(not RUN_CUDA, "device tests require CUDA")
def test_tensor_device(self):
cpu = torch.empty(34, 56, 78, device='cpu')
gpu = torch.empty(34, 56, 78, device='cuda')
@torch.jit.script
def same_device(x, y):
return x.device == y.device
self.assertTrue(same_device(cpu, cpu))
self.assertTrue(same_device(gpu, gpu))
self.assertFalse(same_device(cpu, gpu))
@unittest.skipIf(not RUN_CUDA, "device tests require CUDA")
def test_tensor_to_device(self):
def to_device(x):
return x.to(device="cuda").to(device=torch.device("cpu"))
self.checkScript(to_device, (torch.ones(3, 4),))
def test_tensor_to_cpu(self):
def to_cpu(x):
return x.cpu()
x = torch.ones(3, 4)
script_fn = torch.jit.script(to_cpu)
self.assertEqual(to_cpu(x).device, script_fn(x).device)
self.checkScript(to_cpu, (x,))
@unittest.skipIf(not RUN_CUDA, "device tests require CUDA")
def test_tensor_to_cuda(self):
def to_cuda(x):
return x.cuda()
x = torch.ones(3, 4)
script_fn = torch.jit.script(to_cuda)
self.assertEqual(to_cuda(x).device, script_fn(x).device)
self.checkScript(to_cuda, (x,))
def test_generic_list_errors(self):
with self.assertRaisesRegex(RuntimeError, "previously matched to type"):
@torch.jit.script
def foo(x):
return [[x]] + [[1]]
def test_script_cu(self):
cu = torch.jit.CompilationUnit('''
def foo(a):
b = a
return b
''')
a = Variable(torch.rand(1))
self.assertEqual(a, cu.foo(a))
# Because the compilation unit ingests Python source as a string,
# escape the backslash to get an escape sequence through (\\n = \n).
def test_string_cu(self):
cu = torch.jit.CompilationUnit('''
def foo(a):
print(a, """a\\n\tb\\n""", 2, "a\
a")
return a
''')
FileCheck().check("aa").check("a\\n\\tb\\n").run(str(cu.foo.graph))
def test_function_compilation_caching(self):
def fun():
return 1 + 2
fun_compiled = torch.jit.script(fun)
# python wrapper around the script function is a different pointer,
# but the underlying script function graph is the same
self.assertIs(fun_compiled.graph, torch.jit.script(fun).graph)
def fun():
return 3 + 4
num_ref_counts = sys.getrefcount(fun)
# caching doesn't get tripped up by same qualname
fun_compiled_2 = torch.jit.script(fun)
self.assertIsNot(fun_compiled, fun_compiled_2)
self.assertEqual(fun_compiled_2(), 7)
# caching doesn't increase the refcount of the function (it holds a weak reference)
self.assertEqual(sys.getrefcount(fun), num_ref_counts)
def test_string_ops(self):
def foo():
a = "a" + "b"
return a + a, "ab" == "b", "ab" != "b", "ab" == "ab", "ab" != "ab"
self.checkScript(foo, ())
def test_string_sorted(self):
def foo(strs: List[str]):
return sorted(strs)
FileCheck() \
.check("graph") \
.check_next("str[] = aten::sorted") \
.check_next("return") \
.run(str(torch.jit.script(foo).graph))
inputs = ["str3", "str2", "str1"]
self.checkScript(foo, (inputs,))
def test_string_sort(self):
def foo(strs: List[str]):
strs.sort()
return strs
inputs = ["str3", "str2", "str1"]
self.checkScript(foo, (inputs,))
def test_tuple_sorted(self):
def foo(tups: List[Tuple[int, int]]):
return sorted(tups)
inputs = [(1, 2), (0, 2), (1, 3)]
self.checkScript(foo, (inputs,))
def test_tuple_sort(self):
def foo(tups: List[Tuple[int, int]]):
tups.sort()
return tups
inputs = [(1, 2), (0, 2), (1, 3)]
self.checkScript(foo, (inputs,))
def test_tuple_sort_reverse(self):
def foo(tups: List[Tuple[int, int]]):
tups.sort(reverse=True)
return tups
inputs = [(1, 2), (0, 2), (1, 3)]
self.checkScript(foo, (inputs,))
def test_tuple_unsortable_element_type(self):
@torch.jit.script
def foo():
tups = [({1: 2}, {2: 3})]
tups.sort()
return tups
with self.assertRaisesRegexWithHighlight(RuntimeError, "are not sortable", "tups.sort"):
foo()
def test_tuple_unsortable_diff_type(self):
@torch.jit.script
def foo(inputs: List[Any]):
inputs.sort()
return inputs
inputs = [(1, 2), ("foo", "bar")]
with self.assertRaisesRegexWithHighlight(RuntimeError, "Only values of same type can be compared", "inputs.sort"):
foo(inputs)
def test_tuple_nested_sort(self):
def foo(inputs: List[Tuple[int, Tuple[int, str]]]):
inputs.sort()
return inputs
inputs = [(1, (2, "foo")), (1, (2, "bar")), (1, (0, "bar"))]
self.checkScript(foo, (inputs,))
def test_tuple_unsortable_nested_diff_type(self):
@torch.jit.script
def foo(inputs: List[Any]):
inputs.sort()
return inputs
inputs = [(1, (2, 3)), (2, ("foo", "bar"))]
with self.assertRaisesRegexWithHighlight(RuntimeError, "Only values of same type can be compared", "inputs.sort"):
foo(inputs)
def test_string_new_line(self):
with self.assertRaisesRegex(RuntimeError, "expected a valid token*"):
torch.jit.CompilationUnit('''
def test_while(a):
print("
a")
return a
''')
def test_string_single_escape(self):
with self.assertRaisesRegex(RuntimeError, "expected a valid token*"):
torch.jit.CompilationUnit('''
def test_while(a):
print("\\")
return a
''')
def test_script_annotation(self):
@torch.jit.script
def foo(a):
return a + a + a
s = Variable(torch.rand(2))
self.assertEqual(s + s + s, foo(s))
def test_torch_pow(self):
def func(a, b):
return pow(a, b)
def func2(a, b, c, d):
return pow(pow(c + a, b), d)
def func3(a : int, b : float):
# type: (int, float) -> float
return pow(a, b)
def func4():
# type: () -> float
return pow(2, -2)
def func5(x, y):
return pow(x.item(), y.item())
def func6(a : int, b : int):
# type: (int, int) -> float
return pow(a, b)
a = torch.rand(1)
b = torch.rand(1)
c = torch.rand(1)
d = torch.rand(1)
self.checkScript(func, (a, b))
self.checkScript(func2, (a, b, c, d))
self.checkScript(func3, (4, -0.5))
self.checkScript(func4, ())
self.checkScript(func6, (2, 4))
inputs = [torch.tensor(2), torch.tensor(-2), torch.tensor(.5), torch.tensor(.2)]
for x in inputs:
for y in inputs:
if x < 0:
continue
else:
self.checkScript(func5, (x, y))
@unittest.skipIf(not RUN_CUDA, "device tests require CUDA")
def test_pow_scalar_backward_cuda(self):
# see that scalar exponent works with cuda base (#19253)
with enable_profiling_mode_for_profiling_tests():
for dtype in [torch.float, torch.double]:
@torch.jit.script
def func(a, b):
# type: (Tensor, float) -> Tensor
return (a * 2) ** b
a = torch.rand(1, requires_grad=True, device='cuda', dtype=dtype)
func(a, 1, profile_and_replay=True).backward()
@torch.jit.script
def func(a, b):
# type: (float, Tensor) -> Tensor
return a ** (b * 2 + 1)
a = torch.rand(1, requires_grad=True, device='cuda', dtype=dtype)
func(2, a, profile_and_replay=True).backward()
def _check_code(self, code_str, fn_name, inputs):
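# Run the same source twice: once as plain Python via exec() and once
# through torch.jit.CompilationUnit, then check both give the same result.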
scope = {}
exec(code_str, globals(), scope)
cu = torch.jit.CompilationUnit(code_str)
self.assertEqual(cu.func(*inputs), scope[fn_name](*inputs))
@unittest.skipIf(not RUN_CUDA, 'no CUDA')
def test_scriptmodule_releases_tensors_cuda(self):
with enable_profiling_mode_for_profiling_tests():
@torch.jit.script
def fn(x, y):
return x.sigmoid() * y.tanh()
def test(backward=False):
x = torch.randn(3, 3, dtype=torch.double, device='cuda', requires_grad=True)
y = torch.randn(3, 3, dtype=torch.double, device='cuda', requires_grad=True)
out = fn(x, y, profile_and_replay=True)
if backward:
out.sum().backward()
with self.assertLeaksNoCudaTensors():
test()
test()
test()
if GRAPH_EXECUTOR != ProfilingMode.SIMPLE:
with self.assertLeaksNoCudaTensors():
test(backward=True)
test(backward=True)
test(backward=True)
def test_index(self):
def consec(size, start=0):
numel = torch.tensor(size).prod().item()
return torch.arange(numel).view(size)
def consec_list(size):
return list(range(size))
def random_string(size):
letters = string.ascii_lowercase
return "".join(random.choice(letters) for i in range(size))
def check_indexing(indexing, tensor):
template = dedent("""
def func(x):
return x{}
""")
self._check_code(template.format(indexing), "func", [tensor])
def check_dynamic_indexing(indexing, tensor, value1, value2):
value1 = torch.tensor(value1)
value2 = torch.tensor(value2)
template = dedent("""
def func(x, value1, value2):
i = int(value1)
j = int(value2)
return x{}
""")
self._check_code(template.format(indexing), "func", [tensor, value1, value2])
# Torchscript assumes type Tensor by default, so we need this explicit
# declaration.
def check_indexing_list_int(indexing, list):
template = dedent("""
def func(x):
# type: (List[int]) -> Any
return x{}
""")
self._check_code(template.format(indexing), "func", [list])
def check_indexing_str(indexing, str):
template = dedent("""
def func(x):
# type: (str) -> Any
return x{}
""")
self._check_code(template.format(indexing), "func", [str])
# basic slices
check_indexing('[0]', consec((3, 3)))
check_indexing('[1]', consec((3, 3), 10))
check_indexing('[2]', consec((3, 3), 19))
check_indexing('[2]', consec((3,)))
check_indexing('[-1]', consec((3, 3), 19))
check_indexing('[0:2]', consec((3, 3, 3)))
check_indexing('[1:-1]', consec((3, 3, 3)))
check_indexing('[-3:-1]', consec((6, 3)))
check_indexing('[1:]', consec((3, 3)))
check_indexing('[:1]', consec((3, 3)))
check_indexing('[:]', consec((3, 2)))
# multi-dim: indexes
check_indexing('[0, 1]', consec((3, 3)))
check_indexing('[0, 1]', consec((3, 3, 2)))
check_indexing('[1, 0, 2]', consec((3, 3, 3)))
check_indexing('[2, -1]', consec((3, 3)))
# multi-dim: mixed slicing and indexing
check_indexing('[0, 1:2]', consec((3, 3)))
check_indexing('[0, :1]', consec((3, 3, 2)))
check_indexing('[1, 2:]', consec((3, 3, 3)))
check_indexing('[-1, 1:, 0]', consec((3, 3, 3, 3)))
check_indexing('[1:, -1, 0]', consec((3, 3, 3, 3)))
check_indexing('[-1, 2:, 1:2]', consec((3, 3, 3, 3)))
check_indexing('[-1, 1:, 0]', consec((3, 3, 3, 3)))
check_indexing('[-1, :, 0, 2]', consec((3, 3, 3, 3)))
# zero-sized slices
check_indexing('[0:0]', consec((2, 2)))
check_indexing('[0:0, 1]', consec((3, 3)))
# trivial expression usage
check_indexing('[1+1]', consec((3, 3)))
check_indexing('[1:(0 + 2)]', consec((3, 3, 3)))
# None for new dimensions
check_indexing('[None, 0]', consec((3, 3)))
check_indexing('[1, None]', consec((3, 3), 10))
check_indexing('[None, None, 2]', consec((3, 3), 19))
check_indexing('[None, 2, None]', consec((3,)))
check_indexing('[0:2, None]', consec((3, 3, 3)))
check_indexing('[None, 1:-1]', consec((3, 3, 3)))
check_indexing('[None, -3:-1, None]', consec((6, 3)))
check_indexing('[-1, None, 2:, None, 1:2]', consec((3, 3, 3, 3)))
check_indexing('[None, -1, None, 2:, None, 1:2, None]', consec((3, 3, 3, 3)))
# dynamic expression usage
check_dynamic_indexing("[i + j]", consec((3, 3)), 0, 1)
check_dynamic_indexing("[i:j, i]", consec((3, 3, 2)), 0, 2)
# positive striding
check_indexing_list_int('[0]', consec_list(6))
check_indexing_list_int('[1]', consec_list(7))
def cluster_change_slider(self, value):
self.cluster_slider_val = value
self.cluster_sld_label.setText( str( value ) )
def palette_change_slider(self, value):
self.palette_slider_val = value
self.palette_sld_label.setText( str( value ) )
def blend_change_slider(self, value):
self.blend_slider_val = value
self.blend_sld_label.setText( str( value ) )
# Function for selecting an input image
def get_image( self ):
img = QFileDialog.getOpenFileName( self, 'Select file' )
if img:
path = img[0]
self.load_image( path )
else:
QMessageBox.warning( self, 'Warning' , 'No file selected.' )
def paste_previous_image( self ):
self.current_image_indx -= 1
if self.current_image_indx == -2:
QMessageBox.warning( self,'Warning','Please select an image first.' )
self.current_image_indx += 1
elif self.current_image_indx == -1:
QMessageBox.warning( self,'Warning','No more previous image.' )
self.current_image_indx += 1
else:
if self.current_image_indx != 0 and self.show_palette == 1:
self.set_image( self.paletteLabel, self.paletteList[self.current_image_indx] )
else: # input image has no palette, so place a blank
self.paletteLabel.setPixmap( QPixmap() )
self.paletteLabel.repaint()
self.set_image( self.imageLabel, self.imageList[self.current_image_indx] )
def paste_next_image( self ):
self.current_image_indx += 1
if self.current_image_indx == 0:
QMessageBox.warning( self,'Warning','Please select an image first.' )
self.current_image_indx -= 1
elif self.current_image_indx == len( self.imageList ):
QMessageBox.warning( self,'Warning','No more next image.' )
self.current_image_indx -= 1
else:
if self.current_image_indx != 0 and self.show_palette == 1:
self.set_image( self.paletteLabel, self.paletteList[self.current_image_indx] )
else: # input image has no palette, so place a blank
self.paletteLabel.setPixmap( QPixmap() )
self.paletteLabel.repaint()
self.set_image( self.imageLabel, self.imageList[self.current_image_indx] )
# Set an image (RGB numpy array) onto the given panel label
def set_image( self, panel, image ):
#Load the image into the label
height, width, dim = image.shape
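# QImage takes the stride in bytes; packed RGB888 rows are 3 bytes per
# pixel, hence the 3 * width argument below.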
qim = QImage( image.data, width, height, 3 * width, QImage.Format_RGB888 )
panel.setPixmap( QPixmap( qim ) )
panel.repaint()
def add_to_imageList( self, image ):
self.imageList.append( np.asarray( image ) )
def add_to_paletteList( self, palette ):
self.paletteList.append( np.asarray( palette ) )
def load_image( self, path ):
print ( "Loading Image." )
self.imageList = [] # initialized back to empty when giving another input image
self.paletteList = [-1 * np.ones( ( 1, 1, 1 ) )]
self.paletteLabel.setPixmap( QPixmap() )
# push input image in the list
self.current_image_indx += 1
self.input_image = cv2.cvtColor( cv2.imread( path ), cv2.COLOR_BGR2RGB )
self.add_to_imageList( self.input_image )
self.imageLabel.setPixmap( QPixmap( path ) )
self.imagePath = path
def show_hide_palette( self ):
#if self.imagePath == "":
# QMessageBox.warning( self, 'Warning', 'Please select an image first.' )
if self.paletteList[-1][0, 0, 0] == -1:
QMessageBox.warning( self, 'Warning', 'You do not have a palette. Please posterize the image first.' )
else:
self.show_palette = 1 - self.show_palette
if self.current_image_indx != 0 and self.show_palette == 1:
self.set_image( self.paletteLabel, self.paletteList[self.current_image_indx] )
else: # input image has no palette, so place a blank
self.paletteLabel.setPixmap( QPixmap() )
def show_hide_input_image( self ):
#if self.imagePath == "":
# QMessageBox.warning( self, 'Warning', 'Please select an image first.' )
if self.posterized_image_wo_smooth[0][0][0] == -1:
QMessageBox.warning( self, 'Warning', 'This is your input image.' )
else:
self.show_input = 1 - self.show_input
if self.show_input == 1:
self.set_image( self.imageLabel, self.imageList[0] )
else:
self.set_image( self.imageLabel, self.imageList[self.current_image_indx] )
# posterization
def posterize( self ):
#if self.imagePath == "":
# QMessageBox.warning( self, 'Warning', 'Please select an image first.' )
#else:
if self.imagePath == "":
img_arr = np.asfarray( PIL.Image.open( self.welcome_img_path ).convert( 'RGB' ) ) / 255.
self.input_image = img_arr
path = self.welcome_img_path
else:
img_arr = np.asfarray( PIL.Image.open( self.imagePath ).convert( 'RGB' ) ) / 255.
path = self.imagePath
height, width, dim = img_arr.shape
length = max( width, height )
self.message = "This image has size " + str( width ) + ' x ' + str( height ) + '.\n\n'
if length >= 1800:
self.message += 'This is a large image and may take more than 8 mins to process.\n' + 'We suggest you posterize a downsized version to select appropriate parameters or vectorize the output.\n\n'
else:
if 500 < length < 600:
self.waitingtime = 2
elif 600 <= length < 1000:
self.waitingtime = 3
elif 1000 <= length:
self.waitingtime = 4
self.message += 'This will take roughly ' + str( self.waitingtime ) + ' minutes to process.\n\n'
reply = QMessageBox.question( self, 'Message', self.message + 'Do you want to proceed and posterize the image?',
QMessageBox.Yes | QMessageBox.No, QMessageBox.No )
if reply == QMessageBox.Yes:
print( "Start posterizing." )
# algorithm starts
start = time.time()
messagebox = TimerMessageBox( 1, self )
messagebox.open()
# if the downsampled version is selected, downsize the input and halve the binary penalty
if self.switch.isChecked():
print( 'Downsampled version selected.' )
img_arr = rescale( img_arr, 0.5, order=0, multichannel=True , anti_aliasing=False )
self.binary_slider_val /= 2
# K-means
img_arr_re = img_arr.reshape( ( -1, 3 ) )
img_arr_cluster = get_kmeans_cluster_image( self.cluster_slider_val, img_arr_re, img_arr.shape[0], img_arr.shape[1] )
# MLO
post_img, final_colors, add_mix_layers, palette = \
posterization( path, img_arr, img_arr_cluster, self.palette_slider_val, self.blend_slider_val, self.binary_slider_val )
if self.switch.isChecked():
### 'Greyscale' might fail in this case since the palette size for greyscale is 2.
new_am = add_mix_layers.reshape( ( post_img.shape[0], post_img.shape[1], self.palette_slider_val ) )
self.weights_per_pixel = rescale( new_am, 2, order=0, multichannel=True, anti_aliasing=False ).reshape( -1, self.palette_slider_val )
else:
self.weights_per_pixel = add_mix_layers # save weight list per pixel
# save palette
# 'ascontiguousarray' to make a C contiguous copy
self.palette = np.ascontiguousarray( np.clip( simplepalettes.palette2swatch( palette ) * 255., 0, 255 ).astype( np.uint8 ).transpose( ( 1, 0, 2 ) ) )
if self.switch.isChecked():
post_img = rescale( post_img, 2, order=0, multichannel=True, anti_aliasing=False )
self.posterized_image_wo_smooth = np.clip( post_img * 255., 0, 255 ).astype( np.uint8 )
self.posterized_image_wo_smooth = cv2.medianBlur( self.posterized_image_wo_smooth, 5 )
else:
self.posterized_image_wo_smooth = np.clip( post_img * 255., 0, 255 ).astype( np.uint8 )
# convert to uint8 format
#self.posterized_image_wo_smooth = np.clip( 0, 255, post_img * 255. ).astype( np.uint8 )
# make a map from unique colors to weights
unique_colors, unique_indices = np.unique( self.posterized_image_wo_smooth.reshape( -1, 3 ), return_index = True, axis = 0 )
color2weights = {}
for col, index in zip( unique_colors, unique_indices ):
weights = self.weights_per_pixel[ index ]
color2weights[ tuple( col ) ] = weights
# post-smoothing
self.posterized_image_w_smooth = post_smoothing( PIL.Image.fromarray( self.posterized_image_wo_smooth, 'RGB' ), self.blur_slider_val, blur_window = self.blur_window_slider_val )
# pass smoothing along to the weights
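# assumption: smoothing only rearranges existing palette colors, so each smoothed pixel's weights can be looked up by its color; pixels with no matching color keep their original weights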
self.weights_per_pixel_smooth = self.weights_per_pixel.copy()
for col, weights in color2weights.items():
#color_mask = ( self.posterized_image_w_smooth.reshape( -1, 3 ) == np.array( col ) [None,:] ).all()
color_mask = np.where( np.all( self.posterized_image_w_smooth.reshape( -1, 3 ) == np.array( col ), axis = 1 ) )[0]
self.weights_per_pixel_smooth[ color_mask ] = weights
self.weights_per_pixel_smooth.shape = self.weights_per_pixel.shape
### setting for recoloring
self.palette_recolor = palette # save for palette recoloring
self.palette_og = self.palette_recolor.copy()
self.set_combo_icon()
end = time.time()
print( "Finished. Total time: ", end - start )
self.add_to_paletteList( self.palette )
self.add_to_imageList( self.posterized_image_w_smooth )
self.set_image( self.imageLabel, self.imageList[-1] )
self.set_image( self.paletteLabel, self.paletteList[-1] )
# update current index position
self.current_image_indx = len( self.imageList ) - 1
else:
pass
# re-smooth the image
def smooth( self ):
#if self.imagePath == "":
# QMessageBox.warning( self,'Warning','Please select an image first!' )
#else:
if self.posterized_image_wo_smooth[0][0][0] == -1:
QMessageBox.warning( self, 'Warning', 'Please posterize your image first.' )
else:
print( "Start smoothing." )
#messagebox = TimerMessageBox( 1, self )
#messagebox.open()
self.posterized_image_w_smooth = post_smoothing( PIL.Image.fromarray( self.posterized_image_wo_smooth, 'RGB' ), self.blur_slider_val, blur_window = self.blur_window_slider_val )
print( "Smoothing Finished." )
self.add_to_paletteList( self.paletteList[-1] )
self.add_to_imageList( self.posterized_image_w_smooth )
self.set_image( self.imageLabel, self.imageList[-1] )
# update current index position
self.current_image_indx = len( self.imageList ) - 1
# function to save current image
def save_current_image( self ):
#if self.imagePath == "":
# QMessageBox.warning( self,'Warning','Please select an image first.' )
#else:
if self.posterized_image_wo_smooth[0][0][0] == -1:
QMessageBox.warning( self, 'Warning', 'Please posterize your image first.' )
else:
reply = QMessageBox.question( self, 'Message', "Are you sure you want to save the current image on this panel?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No )
if reply == QMessageBox.Yes:
image_name = QFileDialog.getSaveFileName( self, 'Save Image' )
if not image_name[0]: # getSaveFileName returns a (path, filter) tuple; an empty path means the dialog was cancelled
return
if image_name[0][-4:] in ['.jpg', '.png']:
path_name = image_name[0]
else:
path_name = image_name[0] + '.png'
Image.fromarray( self.imageList[self.current_image_indx] ).save( path_name )
else:
pass
# function to save current image
def save_current_palette( self ):
#if self.imagePath | |
SecondaryIpv4Address(object):
"""
A VRRP secondary IPv4 address
.. attribute:: ip_address <key>
VRRP Secondary IPv4 address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'ipv4-vrrp-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.ip_address = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.ip_address is None:
raise YPYModelError('Key property ip_address is None')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-vrrp-cfg:secondary-ipv4-address[Cisco-IOS-XR-ipv4-vrrp-cfg:ip-address = ' + str(self.ip_address) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.ip_address is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_vrrp_cfg as meta
return meta._meta_table['Vrrp.Interfaces.Interface.Ipv4.Version3.VirtualRouters.VirtualRouter.SecondaryIpv4Addresses.SecondaryIpv4Address']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-vrrp-cfg:secondary-ipv4-addresses'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.secondary_ipv4_address is not None:
for child_ref in self.secondary_ipv4_address:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_vrrp_cfg as meta
return meta._meta_table['Vrrp.Interfaces.Interface.Ipv4.Version3.VirtualRouters.VirtualRouter.SecondaryIpv4Addresses']['meta_info']
class TrackedObjects(object):
"""
Track an object, reducing priority if it
goes down
.. attribute:: tracked_object
Object to be tracked
**type**\: list of :py:class:`TrackedObject <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_vrrp_cfg.Vrrp.Interfaces.Interface.Ipv4.Version3.VirtualRouters.VirtualRouter.TrackedObjects.TrackedObject>`
"""
_prefix = 'ipv4-vrrp-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.tracked_object = YList()
self.tracked_object.parent = self
self.tracked_object.name = 'tracked_object'
class TrackedObject(object):
"""
Object to be tracked
.. attribute:: object_name <key>
Object to be tracked, interface name for interfaces
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: priority_decrement
Priority decrement
**type**\: int
**range:** 1..254
**mandatory**\: True
"""
_prefix = 'ipv4-vrrp-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.object_name = None
self.priority_decrement = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.object_name is None:
raise YPYModelError('Key property object_name is None')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-vrrp-cfg:tracked-object[Cisco-IOS-XR-ipv4-vrrp-cfg:object-name = ' + str(self.object_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.object_name is not None:
return True
if self.priority_decrement is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_vrrp_cfg as meta
return meta._meta_table['Vrrp.Interfaces.Interface.Ipv4.Version3.VirtualRouters.VirtualRouter.TrackedObjects.TrackedObject']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-vrrp-cfg:tracked-objects'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.tracked_object is not None:
for child_ref in self.tracked_object:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_vrrp_cfg as meta
return meta._meta_table['Vrrp.Interfaces.Interface.Ipv4.Version3.VirtualRouters.VirtualRouter.TrackedObjects']['meta_info']
class Tracks(object):
"""
Track an item, reducing priority if it
goes down
.. attribute:: track
Object to be tracked
**type**\: list of :py:class:`Track <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_vrrp_cfg.Vrrp.Interfaces.Interface.Ipv4.Version3.VirtualRouters.VirtualRouter.Tracks.Track>`
"""
_prefix = 'ipv4-vrrp-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.track = YList()
self.track.parent = self
self.track.name = 'track'
class Track(object):
"""
Object to be tracked
.. attribute:: interface_name <key>
Object to be tracked, interface name for interfaces
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: priority
Priority decrement
**type**\: int
**range:** 1..254
**mandatory**\: True
"""
_prefix = 'ipv4-vrrp-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interface_name = None
self.priority = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.interface_name is None:
raise YPYModelError('Key property interface_name is None')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-vrrp-cfg:track[Cisco-IOS-XR-ipv4-vrrp-cfg:interface-name = ' + str(self.interface_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.interface_name is not None:
return True
if self.priority is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_vrrp_cfg as meta
return meta._meta_table['Vrrp.Interfaces.Interface.Ipv4.Version3.VirtualRouters.VirtualRouter.Tracks.Track']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-vrrp-cfg:tracks'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.track is not None:
for child_ref in self.track:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_vrrp_cfg as meta
return meta._meta_table['Vrrp.Interfaces.Interface.Ipv4.Version3.VirtualRouters.VirtualRouter.Tracks']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.vr_id is None:
raise YPYModelError('Key property vr_id is None')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-vrrp-cfg:virtual-router[Cisco-IOS-XR-ipv4-vrrp-cfg:vr-id = ' + str(self.vr_id) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.vr_id is not None:
return True
if self.accept_mode_disable is not None:
return True
if self.bfd is not None:
return True
if self.preempt is not None:
return True
if self.primary_ipv4_address is not None:
return True
if self.priority is not None:
return True
if self.secondary_ipv4_addresses is not None and self.secondary_ipv4_addresses._has_data():
return True
if self.session_name is not None:
return True
if self.timer is not None and self.timer._has_data():
return True
if self.tracked_objects is not None and self.tracked_objects._has_data():
return True
if self.tracks is not None and self.tracks._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_vrrp_cfg as meta
return meta._meta_table['Vrrp.Interfaces.Interface.Ipv4.Version3.VirtualRouters.VirtualRouter']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-vrrp-cfg:virtual-routers'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.virtual_router is not None:
for child_ref in self.virtual_router:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_vrrp_cfg as meta
return meta._meta_table['Vrrp.Interfaces.Interface.Ipv4.Version3.VirtualRouters']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-vrrp-cfg:version3'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.virtual_routers is not None and self.virtual_routers._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_vrrp_cfg as meta
return meta._meta_table['Vrrp.Interfaces.Interface.Ipv4.Version3']['meta_info']
class SlaveVirtualRouters(object):
"""
The VRRP slave group configuration table
.. attribute:: slave_virtual_router
The VRRP slave being configured
**type**\: list of :py:class:`SlaveVirtualRouter <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_vrrp_cfg.Vrrp.Interfaces.Interface.Ipv4.SlaveVirtualRouters.SlaveVirtualRouter>`
"""
_prefix = 'ipv4-vrrp-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.slave_virtual_router = YList()
self.slave_virtual_router.parent = self
self.slave_virtual_router.name = 'slave_virtual_router'
class SlaveVirtualRouter(object):
"""
The VRRP slave being configured
.. attribute:: slave_virtual_router_id <key>
Virtual Router ID
**type**\: int
**range:** 1..255
.. attribute:: accept_mode_disable
Disable Accept Mode for this virtual IPAddress
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: follow
VRRP Session name for this slave to follow
**type**\: str
.. attribute:: primary_ipv4_address
The Primary VRRP IPv4 address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: secondary_ipv4_addresses
The table of VRRP secondary IPv4 addresses
**type**\: :py:class:`SecondaryIpv4Addresses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_vrrp_cfg.Vrrp.Interfaces.Interface.Ipv4.SlaveVirtualRouters.SlaveVirtualRouter.SecondaryIpv4Addresses>`
"""
_prefix = 'ipv4-vrrp-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.slave_virtual_router_id = None
self.accept_mode_disable = None
self.follow = None
self.primary_ipv4_address = None
self.secondary_ipv4_addresses = Vrrp.Interfaces.Interface.Ipv4.SlaveVirtualRouters.SlaveVirtualRouter.SecondaryIpv4Addresses()
self.secondary_ipv4_addresses.parent = self
class SecondaryIpv4Addresses(object):
"""
The table of VRRP secondary IPv4 addresses
.. attribute:: secondary_ipv4_address
A VRRP secondary IPv4 address
**type**\: list of :py:class:`SecondaryIpv4Address <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_vrrp_cfg.Vrrp.Interfaces.Interface.Ipv4.SlaveVirtualRouters.SlaveVirtualRouter.SecondaryIpv4Addresses.SecondaryIpv4Address>`
"""
_prefix = 'ipv4-vrrp-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.secondary_ipv4_address = YList()
self.secondary_ipv4_address.parent = self
self.secondary_ipv4_address.name = 'secondary_ipv4_address'
class SecondaryIpv4Address(object):
"""
A VRRP secondary IPv4 address
.. attribute:: ip_address <key>
VRRP Secondary IPv4 address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'ipv4-vrrp-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.ip_address = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.ip_address is None:
raise YPYModelError('Key property ip_address | |
e of the accident.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(ExplanationOfBenefitAccident, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ExplanationOfBenefitAccident, self).elementProperties()
js.extend([
("date", "date", fhirdate.FHIRDate, False, None, False),
("locationAddress", "locationAddress", address.Address, False, "location", False),
("locationReference", "locationReference", fhirreference.FHIRReference, False, "location", False),
("type", "type", codeableconcept.CodeableConcept, False, None, False),
])
return js
class ExplanationOfBenefitAddItem(backboneelement.BackboneElement):
"""
I
n
s
u
r
e
r
a
d
d
e
d
l
i
n
e
i
t
e
m
s
.
T
h
e
f
i
r
s
t
-
t
i
e
r
s
e
r
v
i
c
e
a
d
j
u
d
i
c
a
t
i
o
n
s
f
o
r
p
a
y
o
r
a
d
d
e
d
p
r
o
d
u
c
t
o
r
s
e
r
v
i
c
e
l
i
n
e
s
.
"""
resource_type = "ExplanationOfBenefitAddItem"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.adjudication = None
""" Added items adjudication.
List of `ExplanationOfBenefitItemAdjudication` items (represented as `dict` in JSON). """
self.bodySite = None
""" Anatomical location.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.detail = None
""" Insurer added line items.
List of `ExplanationOfBenefitAddItemDetail` items (represented as `dict` in JSON). """
self.detailSequence = None
""" Detail sequence number.
List of `int` items. """
self.factor = None
""" Price scaling factor.
Type `float`. """
self.itemSequence = None
""" Item sequence number.
List of `int` items. """
self.locationAddress = None
""" Place of service or where product was supplied.
Type `Address` (represented as `dict` in JSON). """
self.locationCodeableConcept = None
""" Place of service or where product was supplied.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.locationReference = None
""" Place of service or where product was supplied.
Type `FHIRReference` (represented as `dict` in JSON). """
self.modifier = None
""" Service/Product billing modifiers.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.net = None
""" Total item cost.
Type `Money` (represented as `dict` in JSON). """
self.noteNumber = None
""" Applicable note numbers.
List of `int` items. """
self.productOrService = None
""" Billing, service, product, or drug code.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.programCode = None
""" Program the product or service is provided under.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.provider = None
""" Authorized providers.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.quantity = None
""" Count of products or services.
Type `Quantity` (represented as `dict` in JSON). """
self.servicedDate = None
""" Date or dates of service or product delivery.
Type `FHIRDate` (represented as `str` in JSON). """
self.servicedPeriod = None
""" Date or dates of service or product delivery.
Type `Period` (represented as `dict` in JSON). """
self.subDetailSequence = None
""" Subdetail sequence number.
List of `int` items. """
self.subSite = None
""" Anatomical sub-location.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.unitPrice = None
""" Fee, charge or cost per item.
Type `Money` (represented as `dict` in JSON). """
super(ExplanationOfBenefitAddItem, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ExplanationOfBenefitAddItem, self).elementProperties()
js.extend([
("adjudication", "adjudication", ExplanationOfBenefitItemAdjudication, True, None, False),
("bodySite", "bodySite", codeableconcept.CodeableConcept, False, None, False),
("detail", "detail", ExplanationOfBenefitAddItemDetail, True, None, False),
("detailSequence", "detailSequence", int, True, None, False),
("factor", "factor", float, False, None, False),
("itemSequence", "itemSequence", int, True, None, False),
("locationAddress", "locationAddress", address.Address, False, "location", False),
("locationCodeableConcept", "locationCodeableConcept", codeableconcept.CodeableConcept, False, "location", False),
("locationReference", "locationReference", fhirreference.FHIRReference, False, "location", False),
("modifier", "modifier", codeableconcept.CodeableConcept, True, None, False),
("net", "net", money.Money, False, None, False),
("noteNumber", "noteNumber", int, True, None, False),
("productOrService", "productOrService", codeableconcept.CodeableConcept, False, None, True),
("programCode", "programCode", codeableconcept.CodeableConcept, True, None, False),
("provider", "provider", fhirreference.FHIRReference, True, None, False),
("quantity", "quantity", quantity.Quantity, False, None, False),
("servicedDate", "servicedDate", fhirdate.FHIRDate, False, "serviced", False),
("servicedPeriod", "servicedPeriod", period.Period, False, "serviced", False),
("subDetailSequence", "subDetailSequence", int, True, None, False),
("subSite", "subSite", codeableconcept.CodeableConcept, True, None, False),
("unitPrice", "unitPrice", money.Money, False, None, False),
])
return js
class ExplanationOfBenefitAddItemDetail(backboneelement.BackboneElement):
"""
I
n
s
u
r
e
r
a
d
d
e
d
l
i
n
e
i
t
e
m
s
.
T
h
e
s
e
c
o
n
d
-
t
i
e
r
s
e
r
v
i
c
e
a
d
j
u
d
i
c
a
t
i
o
n
s
f
| |
self.obs_dim = obsDim
high = np.inf*np.ones(self.obs_dim)
low = -high
self.observation_space = spaces.Box(low, high)
#set what this skeleton's init pose should be
#only call this whenever initial base pose _actually chagnes_ (like upon loading)
def setStartingPose(self):
#initPose is base initial pose that gets varied upon reset if randomized - base pose is treated as constant
self.initPose = self._makeInitPoseIndiv()
self.setToInitPose()
#reset this skeleton to its initial base pose - uses preset self.initPose
def setToInitPose(self):
#the private method is called first so that helper bots can IK to an appropriate location based on where ANA's hand is
self._setToInitPose_Priv()
self.skel.set_positions(self.initPose)
#set skel velocities
self.skel.set_velocities(np.zeros(self.skel.dq.shape))
#if monitoring the force/torques at pull application point, reset minmax arrays
if(self.monitorGenForce):
self.totGenFrc = list()
self.minMaxFrcDict = {}
self._resetMinMaxMonitors(self.minMaxFrcDict, self.useForce.shape)
#self._reset_minMaxGenForceVals()
self.postPoseInit()
#will reset passed dictionary of min and max values to the given shape of initial sentinel values
def _resetMinMaxMonitors(self, minMaxDict, sizeShape):
minMaxDict['min'] = np.ones(sizeShape)*1000000000
minMaxDict['max'] = np.ones(sizeShape)*-1000000000
#set initial state externally - call before reset_model is called by training process
#Does not override initPose
def setNewInitState(self, _qpos, _qvel):
#set to false to use specified states
self.randomizeInitState = False
self.initQPos = np.asarray(_qpos, dtype=np.float64)
self.initQVel = np.asarray(_qvel, dtype=np.float64)
self.loadedInitStateSet = True
#set this skeleton's state
def state_vector(self):
return np.concatenate([
self.skel.q,
self.skel.dq
])
#sets skeleton state to be passed position and velocity
def set_state(self, qpos, qvel):
assert qpos.shape == (self.ndofs,) and qvel.shape == (self.ndofs,)
self.skel.set_positions(qpos)
self.skel.set_velocities(qvel)
#sets skeleton state to be passed state vector, split in half (for external use)
def set_state_vector(self, state):
numVals = int(len(state)/2.0)
self.set_state(state[0:numVals], state[numVals:])
#called in do_simulate if perturbation is true - adds external force to passed body nodes
def add_perturbation(self, nodes, frc):
self.skel.bodynodes[nodes].add_ext_force(frc)
#build tau from a, using control clamping, for skel, starting at stIdx and action scale for skeleton
#a is specified from RL policy, and is clamped to be within control_bounds
def setClampedTau(self, a):
self.a = np.copy(a)
#print('pre clamped cntl : {}'.format(a))
#idx 0 is min, idx 1 is max.
clamped_control = np.clip(self.a, self.control_bounds[0],self.control_bounds[1])
#print('clamped cntl : {}'.format(clamped_control))
self.tau = np.zeros(self.ndofs)
self.tau[self.stTauIdx:] = clamped_control * self.action_scale
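#dofs below stTauIdx (typically the unactuated root dofs) receive zero torque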
#print('tau : {}'.format(self.tau))
return self.tau
#send torques to skeleton
def applyTau(self):
if(self.monitorTorques):
self._checkiMinMaxVals(self.tau, self.monTrqDict)
#apply torques every step since they are cleared after world steps
self.skel.set_forces(self.tau)
#record # of tau applications
self.numSimSteps += 1
#any private per-step torque application functionality - this is where the external force is applied to human skeleton during RL training to simulate robot assistant
self.applyTau_priv()
#return limits of Obs values (q, qdot, force)
def getObsLimits(self):
jtlims = {}
jtlims['lowQLims'] = np.zeros(self.ndofs)
jtlims['highQLims'] = np.zeros(self.ndofs)
jtlims['lowDQLims'] = np.zeros(self.ndofs)
jtlims['highDQLims'] = np.zeros(self.ndofs)
dofs = self.skel.dofs
for i in range(len(dofs)):
dof = dofs[i]
if dof.has_position_limit():
jtlims['lowQLims'][i]=dof.position_lower_limit()
jtlims['highQLims'][i]=dof.position_upper_limit()
else:
jtlims['lowQLims'][i]=-3.14
jtlims['highQLims'][i]=3.14
vLL = dof.velocity_lower_limit()
if np.isfinite(vLL):
jtlims['lowDQLims'][i] = vLL
else :
jtlims['lowDQLims'][i] = -self.qDotBnd
vUL = dof.velocity_upper_limit()
if np.isfinite(vUL):
jtlims['highDQLims'][i] = vUL
else :
jtlims['highDQLims'][i] = self.qDotBnd
frcBnds = self.env.frcBnds * self.mg
#limits of the root location dofs are arbitrary except for y, which must be positive and less than ~2 - the observation of rootLocDofs actually holds COM values, so it won't be below ground
jtlims['lowQLims'][self.rootLocDofs[1]] = 0
jtlims['highQLims'][self.rootLocDofs[1]] = 2
jtlims['obs_lowBnds']=self._concat3Aras(jtlims['lowQLims'],jtlims['lowDQLims'], frcBnds[0])
jtlims['obs_highBnds']=self._concat3Aras(jtlims['highQLims'],jtlims['highDQLims'], frcBnds[1])
return jtlims
def _concat3Aras(self, a, b,c):
res1 = np.concatenate([a,b])
res = np.concatenate([res1,c])
return res
#return a random initial state for this skeleton
def getRandomInitState(self, poseDel=None):
if(poseDel is None):
poseDel=self.poseDel
lPoseDel = -1*poseDel
#checkBestState means instead of reusing the initial state to begin a rollout, we use the best state from the last rollout. NOTE need to make sure the traj ball is following its own location, and not coupled to the hand.
if((self.checkBestState) and (self.bestState is not None)):
#set walker to be best state previously seen
self.set_state_vector(self.bestState)
else :
#set walker to be laying on ground
self.setToInitPose()
#perturb init state and statedot
qpos = self.skel.q + self.env.np_random.uniform(low= lPoseDel, high=poseDel, size=self.ndofs)
qvel = self.skel.dq + self.env.np_random.uniform(low= lPoseDel, high=poseDel, size=self.ndofs)
return qpos, qvel
#returns a random observation, based on the full range of possible q and qdot, governed by joint limits and joint vel limits, if any exist
def getRandomObservation(self):
#get all known joint limits
jtlims = self.getObsLimits()
#print('{}'.format(jtlims))
rndQ = self.env.np_random.uniform(low=jtlims['lowQLims'],high=jtlims['highQLims'])
# if not (np.isfinite(jtlims['lowDQLims']).all()) or not (np.isfinite(jtlims['highDQLims']).all()) :
# rndQDot = self.env.np_random.uniform(low=-self.qDotBnd, high= self.qDotBnd, size=self.ndofs)
# else :
rndQDot = self.env.np_random.uniform(low=jtlims['lowDQLims'],high=jtlims['highDQLims'])
#build observation out of these state values and return this observation
return self.getObsFromState(rndQ, rndQDot)
#calculate orientation along certain orientation axes
def procOrient(self, orientAxes):
oVec = np.array(orientAxes)
oVec_W = self.skel.bodynodes[0].to_world(oVec) - self.skel.bodynodes[0].to_world(np.array([0, 0, 0]))
norm = np.linalg.norm(oVec_W)
if(norm == 0):#should never happen, since this is used as a marker of failing, a large value will signal done
return 10
oVec_W /= norm
ang_cos = np.arccos(np.dot(oVec, oVec_W))
return ang_cos
def dispResetDebug(self, notice=''):
print('{} Notice : Setting specified init q/qdot/frc'.format(notice))
print('initQPos : {}'.format(self.initQPos))
print('initQVel : {}'.format(self.initQVel))
print('initFrc : {}'.format(self.desExtFrcVal))
#this will return the force, or force multiplier, currently being used as the final component of the observation
def getObsForce(self):
#this is specified in environment constructor
if self.useMultNotForce :
return self.desExtFrcVal_mults
else :
return self.desExtFrcVal
#return observation given passed state and state dots
#obs is slightly different than pure q/qdot (includes height in world frame), requiring skel to be modified
#restores skel pose when finished - make sure q is correctly configured
def getObsFromState(self, q, qdot):
#save current state so can be restored
oldState = self.state_vector()
#set passed state
self.set_state(np.asarray(q, dtype=np.float64), np.asarray(qdot, dtype=np.float64))
#get obs (observation can be bounded or modified, that's why we use this method) - INCLUDES FORCE VALUE - if using to build new force value, need to replace last 3 elements
obs = self.getObs()
#return to original state
self.set_state_vector(oldState)
return obs
#check passed array of values to find and set min/maxs
def _checkiMinMaxVals(self, vals, minMaxDict):
#print('minmax check performed')
minVals = minMaxDict['min']
maxVals = minMaxDict['max']
#check min and max torques seen
for i in range(len(vals)):
if minVals[i] > vals[i] :
minVals[i] = vals[i]
elif maxVals[i] < vals[i] :
maxVals[i] = vals[i]
#called at beginning of each rollout - resets this model, resetting its state
#this is for RL-controlled models
def reset_model(self, dispDebug=False):
if(self.randomizeInitState):#if random, set random perturbation from initial pose
#sets init pose then perturbs by some random amount
qpos, qvel = self.getRandomInitState()
self.set_state(qpos, qvel)
else:
#reset to be in initial pose
self.setToInitPose()
#resetting to pre-set initial pose
if (self.loadedInitStateSet):
if(dispDebug):
self.dispResetDebug('skelHolder::reset_model')
self.set_state(self.initQPos, self.initQVel)
self.loadedInitStateSet = False
# else:
# print('skelHolder {}::reset_model Warning : init skel state not randomized nor set to precalced random state'.format(self.skel.name))
if(self.monitorTorques):
#self._reset_monTorques()
self.monTrqDict = {}
self._resetMinMaxMonitors(self.monTrqDict, self.ndofs)
#individual reset functionality
self._resetIndiv(dispDebug)
return self.getObs()
#this will yield the reward parabola given min and
#max vals that yield rewards (roots of parabola) and reward for peak of parabola (1/2 between min and max)
def calcRwdRootMethod(self, val, minRootVal, maxRootVal):
#maxRwd is y val for avg of min and max root vals - set to 1, manage magnitude by weight instead
xMax = (minRootVal + maxRootVal)/2.0
mult = 1.0/((xMax-minRootVal) * (xMax - maxRootVal))
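#e.g. with minRootVal=0 and maxRootVal=2: xMax=1, mult=-1, so the reward is -(val)*(val-2), which is 0 at the roots and 1 at the midpoint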
return mult * (val - minRootVal) * (val - maxRootVal)
#handle if sim is broken
def _handleBrokenSim(self, resDict):
print('\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print('{} has broken sim : reason : {}'.format(self.name, resDict['reason']))
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
done = True
rwd = 0
d = {'broke_sim': True, 'raiseVelScore': -10000000,'height_rew':-10000000, 'actionPenalty':-10000000, 'done':True}
self.env.numBrokeSims+=1
#observation of current state
obs = self.getObs()
return obs, rwd, done, d, {}
#functionality after sim step is taken -
#calculate reward, determine if done(temrination conditions) and return observation
#and return informational dictionary
def postStep(self, resDict):
#resDict holds whether the sim was broken or not - if the sim broke we need to bail out early
#resDict holds these values : {'broken':False, 'frame':n_frames, 'stableStates':stblState}
#check first if sim is broken - illegal actions or otherwise exploding
if(resDict['broken'] and (self.name in resDict['skelhndlr'])):
return self._handleBrokenSim(resDict)
debug = False
#not broken
rwd, done, d, | |
import random
import uuid
from copy import deepcopy
from datetime import datetime, timedelta, timezone
from queue import Empty
import requests
import pytest
import time
HOUR_MILLISEC = 1000 * 3600
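# a batch interval long enough that time-based flushing never triggers during a test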
def test_outcomes_processing(relay_with_processing, mini_sentry, outcomes_consumer):
"""
Tests outcomes are sent to the kafka outcome topic
Send one event to a processing Relay and verify that the event is placed on the
kafka outcomes topic and the event has the proper information.
"""
relay = relay_with_processing()
outcomes_consumer = outcomes_consumer()
message_text = "some message {}".format(datetime.now())
event_id = "11122233344455566677788899900011"
start = datetime.utcnow()
relay.send_event(
42,
{
"event_id": event_id,
"message": message_text,
"extra": {"msg_text": message_text},
},
)
outcome = outcomes_consumer.get_outcome()
assert outcome["project_id"] == 42
assert outcome["event_id"] == event_id
assert outcome.get("org_id") is None
assert outcome.get("key_id") is None
assert outcome["outcome"] == 3
assert outcome["reason"] == "project_id"
assert outcome["remote_addr"] == "127.0.0.1"
# deal with the timestamp separately (we can't control it exactly)
timestamp = outcome.get("timestamp")
event_emission = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%fZ")
end = datetime.utcnow()
assert start <= event_emission <= end
def _send_event(relay, project_id=42, event_type="error"):
"""
Send an event to the given project.
If the project doesn't exist, relay should generate INVALID outcome with reason "project_id".
"""
event_id = uuid.uuid1().hex
message_text = "some message {}".format(datetime.now())
event_body = {
"event_id": event_id,
"message": message_text,
"extra": {"msg_text": message_text},
"type": event_type,
}
try:
relay.send_event(project_id=project_id, payload=event_body)
except Exception:
pass
return event_id
@pytest.mark.parametrize("event_type", ["error", "transaction"])
def test_outcomes_non_processing(relay, mini_sentry, event_type):
"""
Test basic outcome functionality.
Send one event that generates an outcome and verify that we get an outcomes batch
with all necessary information set.
"""
config = {"outcomes": {"emit_outcomes": True, "batch_size": 1, "batch_interval": 1}}
relay = relay(mini_sentry, config)
event_id = _send_event(relay, event_type=event_type)
outcomes_batch = mini_sentry.captured_outcomes.get(timeout=0.2)
assert mini_sentry.captured_outcomes.qsize() == 0 # we had only one batch
outcomes = outcomes_batch.get("outcomes")
assert len(outcomes) == 1
outcome = outcomes[0]
del outcome["timestamp"] # 'timestamp': '2020-06-03T16:18:59.259447Z'
expected_outcome = {
"project_id": 42,
"outcome": 3, # invalid
"reason": "project_id", # missing project id
"event_id": event_id,
"remote_addr": "127.0.0.1",
"category": 2 if event_type == "transaction" else 1,
}
assert outcome == expected_outcome
# no events received since all have been for an invalid project id
assert mini_sentry.captured_events.empty()
def test_outcomes_not_sent_when_disabled(relay, mini_sentry):
"""
Test that no outcomes are sent when outcomes are disabled.
Set batching to a very short interval and verify that we don't receive any outcome
when we disable outcomes.
"""
config = {
"outcomes": {"emit_outcomes": False, "batch_size": 1, "batch_interval": 1}
}
relay = relay(mini_sentry, config)
try:
mini_sentry.captured_outcomes.get(timeout=0.2)
assert False # we should not be here (the previous call should have failed)
except Empty:
pass # we do expect not to get anything since we have outcomes disabled
def test_outcomes_non_processing_max_batch_time(relay, mini_sentry):
"""
Test that outcomes are not batched more than max specified time.
Send events at an interval longer than max_batch_time and expect
not to have them batched although we have a very large batch size.
"""
events_to_send = 3
config = {
"outcomes": {
"emit_outcomes": True,
"batch_size": 1000, # a huge batch size
"batch_interval": 1, # very short batch time
}
}
relay = relay(mini_sentry, config)
event_ids = set()
# send the events, sleeping longer than the batch time between each one
for _ in range(events_to_send):
event_id = _send_event(relay)
event_ids.add(event_id)
time.sleep(0.12) # sleep more than the batch time
# we should get one batch per event sent
batches = []
for _ in range(events_to_send):
batch = mini_sentry.captured_outcomes.get(timeout=1)
batches.append(batch)
# verify that the batches contain one outcome each and the event_ids are ok
for batch in batches:
outcomes = batch.get("outcomes")
assert len(outcomes) == 1 # one outcome per batch
assert outcomes[0].get("event_id") in event_ids # a known event id
def test_outcomes_non_processing_batching(relay, mini_sentry):
"""
Test that outcomes are batched according to max size.
Send max_outcome_batch_size events with a very large max_batch_time and expect all
to come in one batch.
"""
batch_size = 3
config = {
"outcomes": {
"emit_outcomes": True,
"batch_size": batch_size,
"batch_interval": HOUR_MILLISEC, # batch every hour
}
}
relay = relay(mini_sentry, config)
event_ids = set()
# send one less events than the batch size (and check we don't send anything)
for _ in range(batch_size - 1):
event_id = _send_event(relay)
event_ids.add(event_id)
# nothing should be sent at this time
try:
mini_sentry.captured_outcomes.get(timeout=0.2)
assert False # the request should time out, there is no outcome coming
except Empty:
pass # yes, we expect to time out since no outcome should have been sent yet
event_id = _send_event(relay)
event_ids.add(event_id)
# now we should be getting a batch
outcomes_batch = mini_sentry.captured_outcomes.get(timeout=0.2)
# we should have received only one outcomes batch (check nothing left)
assert mini_sentry.captured_outcomes.qsize() == 0
outcomes = outcomes_batch.get("outcomes")
assert len(outcomes) == batch_size
received_event_ids = [outcome.get("event_id") for outcome in outcomes]
for event_id in received_event_ids:
assert event_id in event_ids # the outcome is one of those we sent
# no events received since all have been for an invalid project id
assert mini_sentry.captured_events.empty()
def test_outcome_source(relay, mini_sentry):
"""
Test that the source is picked from configuration and passed in outcomes
"""
config = {
"outcomes": {
"emit_outcomes": True,
"batch_size": 1,
"batch_interval": 1,
"source": "my-layer",
}
}
relay = relay(mini_sentry, config)
_send_event(relay)
outcomes_batch = mini_sentry.captured_outcomes.get(timeout=0.2)
assert mini_sentry.captured_outcomes.qsize() == 0 # we had only one batch
outcomes = outcomes_batch.get("outcomes")
assert len(outcomes) == 1
outcome = outcomes[0]
assert outcome.get("source") == "my-layer"
@pytest.mark.parametrize("num_intermediate_relays", [1, 3])
@pytest.mark.parametrize("event_type", ["error", "transaction"])
def test_outcome_forwarding(
relay, relay_with_processing, outcomes_consumer, num_intermediate_relays, event_type
):
"""
Tests that Relay forwards outcomes from a chain of relays
Have a chain of many relays that eventually connect to Sentry
and verify that the outcomes sent by the first (downstream relay)
are properly forwarded up to sentry.
"""
outcomes_consumer = outcomes_consumer(timeout=2)
processing_config = {
"outcomes": {
"emit_outcomes": False, # The default, overridden by processing.enabled: true
"batch_size": 1,
"batch_interval": 1,
"source": "processing-layer",
}
}
# The innermost Relay needs to be in processing mode
upstream = relay_with_processing(processing_config)
intermediate_config = {
"outcomes": {
"emit_outcomes": True,
"batch_size": 1,
"batch_interval": 1,
"source": "intermediate-layer",
}
}
# build a chain of identical relays
for _ in range(num_intermediate_relays):
upstream = relay(upstream, intermediate_config)
# mark the downstream relay so we can identify outcomes originating from it
config_downstream = deepcopy(intermediate_config)
config_downstream["outcomes"]["source"] = "downstream-layer"
downstream_relay = relay(upstream, config_downstream)
event_id = _send_event(downstream_relay, event_type=event_type)
outcome = outcomes_consumer.get_outcome()
expected_outcome = {
"project_id": 42,
"outcome": 3,
"source": "downstream-layer",
"reason": "project_id",
"event_id": event_id,
"remote_addr": "127.0.0.1",
"category": 2 if event_type == "transaction" else 1,
}
outcome.pop("timestamp")
assert outcome == expected_outcome
def test_outcomes_forwarding_rate_limited(
mini_sentry, relay, relay_with_processing, outcomes_consumer
):
"""
Tests that external relays do not emit duplicate outcomes for forwarded messages.
External relays should not produce outcomes for messages already forwarded to the upstream.
In this test, we send two events that should be rate-limited. The first one is dropped in the
upstream (processing) relay, and the second -- in the downstream, because it should cache the
rate-limited response from the upstream.
In total, two outcomes have to be emitted:
- The first one from the upstream relay
- The second one is emitted by the downstream, and then sent to the upstream that writes it
to Kafka.
"""
outcomes_consumer = outcomes_consumer()
processing_config = {
"outcomes": {
"emit_outcomes": True,
"batch_size": 1,
"batch_interval": 1,
"source": "processing-layer",
}
}
# The innermost Relay needs to be in processing mode
upstream = relay_with_processing(processing_config)
config_downstream = {
"outcomes": {
"emit_outcomes": True,
"batch_size": 1,
"batch_interval": 1,
"source": "downstream-layer",
}
}
downstream_relay = relay(upstream, config_downstream)
# Create project config
project_id = 42
category = "error"
project_config = mini_sentry.add_full_project_config(project_id)
project_config["config"]["quotas"] = [
{
"id": "drop-everything",
"categories": [category],
"limit": 0,
"window": 1600,
"reasonCode": "rate_limited",
}
]
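# a zero-limit quota on this category, so every event in it is rate limited immediately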
# Send an event, it should be dropped in the upstream (processing) relay
result = downstream_relay.send_event(project_id, _get_message(category))
event_id = result["id"]
outcome = outcomes_consumer.get_outcome()
outcome.pop("timestamp")
expected_outcome = {
"reason": "rate_limited",
"org_id": 1,
"key_id": 123,
"outcome": 2,
"project_id": 42,
"remote_addr": "127.0.0.1",
"event_id": event_id,
"source": "processing-layer",
"category": 1,
}
assert outcome == expected_outcome
# Send another event, now the downstream should drop it because it'll cache the 429
# response from the previous event, but the outcome should be emitted
with pytest.raises(requests.exceptions.HTTPError, match="429 Client Error"):
downstream_relay.send_event(project_id, _get_message(category))
expected_outcome_from_downstream = deepcopy(expected_outcome)
expected_outcome_from_downstream["source"] = "downstream-layer"
expected_outcome_from_downstream.pop("event_id")
| |
##########################################################################################
#
# Copyright (c) 2017 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
##########################################################################################
import logging
import math
try:
import madgraph
except ImportError:
MADEVENT = True
import internal.misc as misc
import internal.subtraction as sub
from internal import InvalidCmd, MadGraph5Error
else:
MADEVENT= False
import madgraph.various.misc as misc
import madgraph.core.subtraction as sub
from madgraph import InvalidCmd, MadGraph5Error
import copy
from madgraph.integrator.vectors import Vector, LorentzVector
from madgraph.integrator.vectors import LorentzVectorDict, LorentzVectorList
logger = logging.getLogger('madgraph.PhaseSpaceGenerator')
class MappingError(Exception):
"""Exception raised if an exception is triggered in implementation of the currents."""
pass
#=========================================================================================
# Kinematic functions
#=========================================================================================
def Kaellen(*args):
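"""Kaellen (triangle) function: sum_i args[i]**2 - 2*sum_{i<j} args[i]*args[j]."""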
l = len(args)
foo = 0.
for i in range(l):
foo += args[i]**2
for j in range(i+1, l):
foo -= 2*args[i]*args[j]
return foo
#=========================================================================================
# Compute parent & children numbers
#=========================================================================================
def get_structure_numbers(structure, momenta_dict):
"""Return the number of the parent and children of a given structure
according to some momenta dictionary.
"""
legs = structure.get_all_legs()
children = frozenset((leg.n for leg in legs))
if structure.name() == "S":
return None, children, None
else:
parent = momenta_dict.inv[children]
is_legs = tuple(
leg.n for leg in legs
if leg.state == leg.INITIAL )
if not is_legs:
return parent, children, None
is_leg = is_legs[0]
fs_children = frozenset((child for child in children if child != is_leg))
return parent, fs_children, is_leg
#=========================================================================================
# Final-collinear variables
#=========================================================================================
class FinalCollinearVariables(object):
precision_loss_message = "Precision loss detected when computing collinear variables."
@staticmethod
def names(parent, children):
"""Get the names of variables describing particles going unresolved."""
names = ['s' + str(parent), ]
for child in children:
names += ['z' + str(child), 'm2' + str(child), 'kt' + str(child), ]
return names
@staticmethod
def collinear_and_reference(p):
"""Given a momentum, return normalized vectors on the light-cone."""
n = Vector(p.space())
n.normalize()
return LorentzVector([1, ] + list(n)), LorentzVector([1, 0, 0, 1])
@staticmethod
def get(
PS_point, children, na, nb, kinematic_variables,
precision=1e-6 ):
"""Given unmapped momenta and reference vectors, compute the kinematic variables
that describe the internal structure of particles going unresolved.
Children indices should already refer to the position
of momenta within the PS_point (no momentum dictionary used).
Sum rules are checked to assess numerical accuracy.
"""
if len(children) < 2: return
# Compute the sum of momenta
p = LorentzVector()
for i in children:
p += PS_point[i]
# Pre-compute scalar products
nap = na.dot(p)
nbp = nb.dot(p)
nanb = na.dot(nb)
pt = p - (nbp*na + nap * nb) / nanb
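# pt is the transverse component of p in the Sudakov decomposition along the light-cone directions na, nb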
# Initialize variables for sum rules check
zsum = 0
ktsum = LorentzVector()
ktabssum = LorentzVector()
# Compute all kinematic variables
for i in children:
pi = PS_point[i]
napi = na.dot(pi)
nbpi = nb.dot(pi)
zi = nbpi / nbp
kti = pi - (nbpi*na+napi*nb) / nanb - zi*pt
kinematic_variables['z' + str(i)] = zi
kinematic_variables['kt' + str(i)] = kti
kinematic_variables['m2' + str(i)] = pi.square()
zsum += zi
ktsum += kti
for j in range(len(kti)):
ktabssum[j] += abs(kti[j])
# Check numerical accuracy
# TODO Ideally switch to quadruple precision if the check fails
ktsum_abs = abs(ktsum)
ktabssum_abs = abs(ktabssum)
ktsum_ratio = ktsum_abs / ktabssum_abs
if (abs(zsum - 1) > precision) or (ktsum_ratio > precision):
logger.critical(FinalCollinearVariables.precision_loss_message)
logger.critical("The sum of z's is %.16e" % zsum)
logger.critical("The sum of kt's is %s" % str(ktsum))
logger.critical("abs(sum(kt's)) / sum(abs(kt's)) = %s" % ktsum_ratio)
logger.critical("Inputs for CollinearVariables.get():")
logger.critical("na = %s, nb = %s" % (str(na), str(nb)))
for i in children:
logger.critical("child %d: %s" % (i, str(PS_point[i])))
logger.critical("Output of CollinearVariables.get():")
logger.critical(str(kinematic_variables))
return
@staticmethod
def set(
PS_point, parent, children, na, nb, kinematic_variables,
precision=1e-6 ):
"""Given a phase-space point with an off-shell parent and collinear variables,
compute and set the children momenta.
The parent's momentum is removed.
Parent and children are indices that already refer to the position
of momenta within the PS_point (no momentum dictionary used).
Sum rules are checked to assess numerical accuracy.
"""
if len(children) < 2:
for child in children:
if child != parent:
PS_point[child] = PS_point[parent]
del PS_point[parent]
return
# Rename the sum of momenta
p = PS_point[parent]
# Pre-compute scalar products
nap = na.dot(p)
nbp = nb.dot(p)
nanb = na.dot(nb)
pt = p - (nbp*na + nap*nb) / nanb
# Variables for sums
p_sum = LorentzVector()
# Set momenta for all children
for i in children:
zi = kinematic_variables['z' + str(i)]
kti = kinematic_variables['kt' + str(i)]
pi2 = kinematic_variables['m2' + str(i)]
nbpi = zi*nbp
pti = kti + zi*pt
napi = (pi2-pti.square())*nanb/(2*nbpi)
PS_point[i] = (nbpi*na+napi*nb) / nanb + pti
p_sum += PS_point[i]
# Check how well the parent's momentum is reproduced
# TODO Ideally switch to quadruple precision if the check fails
deviation = abs(p - p_sum)
benchmark = abs(p)
if deviation / benchmark > precision:
logger.critical(FinalCollinearVariables.precision_loss_message)
logger.critical("The sum of children momenta is %s" % str(p_sum))
logger.critical("Inputs for FinalCollinearVariables.set():")
logger.critical("total momentum = %s" % str(p))
logger.critical("na = %s, nb = %s" % (str(na), str(nb)))
logger.critical("kinematic variables:")
logger.critical(str(kinematic_variables))
logger.critical("Output of FinalCollinearVariables.set():")
for i in children:
logger.critical("child %d: %s" % (i, str(PS_point[i])))
del PS_point[parent]
return
#=========================================================================================
# Initial-collinear variables
#=========================================================================================
class InitialCollinearVariables(object):
@staticmethod
def names(parent, fs_children, is_child):
"""Get the names of variables describing particles going unresolved."""
names = ['z' + str(parent), ]
for child in fs_children:
names += ['z' + str(child), 'm2' + str(child), 'kt' + str(child), ]
return names
@staticmethod
def collinear_and_reference(p):
"""Given a momentum, return normalized vectors on the light-cone."""
# In this case taking the anti-collinear direction as a reference poses no risks,
# because the direction of the incoming parton is fixed
n = Vector(p.space())
n.normalize()
# For good phase space points, p[0] >= 0, but test PS points might not be valid
if p[0] >= 0:
return LorentzVector([1, ] + list(+n)), LorentzVector([1, ] + list(-n))
else:
return LorentzVector([1, ] + list(-n)), LorentzVector([1, ] + list(+n))
@staticmethod
def get(
PS_point, fs_children, is_child, na, nb, kinematic_variables,
precision=1e-6 ):
"""Given unmapped momenta and reference vectors, compute the kinematic variables
that describe the internal structure of particles going unresolved.
Children indices should already refer to the position
of momenta within the PS_point (no momentum dictionary used).
Sum rules are checked to assess numerical accuracy.
"""
pa = PS_point[is_child]
# Compute the sum of momenta
pA = LorentzVector(pa)
for i in fs_children:
pA -= PS_point[i]
# Pre-compute variables
napA = na.dot(pA)
nbpA = nb.dot(pA)
nanb = na.dot(nb)
nbpa = nb.dot(pa)
zA = nbpA / nbpa
ktA = pA - (nbpA * na + napA * nb) / nanb
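# zA and ktA are the light-cone momentum fraction and transverse component of the off-shell cluster A with respect to the incoming parton a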
# ptA = pA - (nbpA * na + napA * nb) / nanb
# ktA = ptA / zA
# Initialize variables for sum rules check
zsum = 0
ktsum = LorentzVector()
ktabssum = LorentzVector()
# Fill in A data, using child number improperly
kinematic_variables['z' + str(is_child)] = zA
kinematic_variables['kt' + str(is_child)] = ktA
kinematic_variables['s' + str(is_child)] = pA.square()
zsum += zA
ktsum += ktA
for j in range(len(ktA)):
ktabssum[j] += abs(ktA[j])
# Compute all kinematic variables
for i in fs_children:
pi = PS_point[i]
napi = na.dot(pi)
nbpi = nb.dot(pi)
zi = nbpi / nbpa
kti = pi - (nbpi*na+napi*nb) / nanb
# pti = pi - (nbpi*na+napi*nb) / nanb
# kti = pti - zi * ktA
kinematic_variables['z' + str(i)] = zi
kinematic_variables['kt' + str(i)] = kti
kinematic_variables['m2' + str(i)] = pi.square()
zsum += zi
ktsum += kti
for j in range(len(kti)):
ktabssum[j] += abs(kti[j])
# Check numerical accuracy
# TODO Ideally switch to quadruple precision if the check fails
if not fs_children: return
ktsum_abs = abs(ktsum)
ktabssum_abs = abs(ktabssum)
ktsum_ratio = ktsum_abs / ktabssum_abs
if (abs(zsum - 1) > precision) or (ktsum_ratio > precision):
logger.critical(FinalCollinearVariables.precision_loss_message)
logger.critical("The sum of z's is %.16e" % zsum)
logger.critical("The sum of kt's is %s" % str(ktsum))
logger.critical("abs(sum(kt's)) / sum(abs(kt's)) = %s" % ktsum_ratio)
logger.critical("Inputs for InitialCollinearVariables.get():")
logger.critical("na, nb = %s, %s" % (str(na), str(nb)))
for i in fs_children:
logger.critical("fs_child %d: %s" % (i, str(PS_point[i])))
logger.critical("is_child %d: %s" % (is_child, str(PS_point[is_child])))
| |
<filename>scale/storage/test/test_models.py
from __future__ import unicode_literals
import datetime
import os
import django
import django.contrib.gis.geos as geos
from django.db import transaction
from django.test import TestCase, TransactionTestCase
from django.utils.text import get_valid_filename
from django.utils.timezone import utc
from mock import MagicMock, patch
import storage.test.utils as storage_test_utils
from storage.brokers.broker import FileDownload, FileMove, FileUpload
from storage.exceptions import ArchivedWorkspace, DeletedFile, InvalidDataTypeTag
from storage.models import CountryData, PurgeResults, ScaleFile, Workspace
from storage.brokers.exceptions import InvalidBrokerConfiguration
from storage.configuration.json.workspace_config_v6 import WorkspaceConfigurationV6
from trigger.test import utils as trigger_test_utils
class TestScaleFileUpdateUUID(TestCase):
def setUp(self):
django.setup()
def test_none(self):
"""Tests calling update_uuid with no arguments."""
the_file = ScaleFile()
self.assertRaises(ValueError, the_file.update_uuid)
def test_one_string(self):
"""Tests calling update_uuid with a single string."""
the_file = ScaleFile()
the_file.update_uuid('test.txt')
self.assertEqual(len(the_file.uuid), 32)
self.assertEqual(the_file.uuid, 'dd18bf3a8e0a2a3e53e2661c7fb53534')
def test_multi_strings(self):
"""Tests calling update_uuid with multiple strings."""
the_file = ScaleFile()
the_file.update_uuid('test.txt', 'test1', 'test2')
self.assertEqual(len(the_file.uuid), 32)
self.assertEqual(the_file.uuid, '8ff66acfc019330bba973b408c63ad15')
def test_objects(self):
"""Tests calling update_uuid with multiple object types."""
the_file = ScaleFile()
the_file.update_uuid('test.txt', 1, True, {'key': 'value'})
self.assertEqual(len(the_file.uuid), 32)
self.assertEqual(the_file.uuid, 'ee6535359fbe02d50589a823951eb491')
def test_partial(self):
"""Tests calling update_uuid with some ignored None types."""
the_file1 = ScaleFile()
the_file1.update_uuid('test.txt', 'test')
the_file2 = ScaleFile()
the_file2.update_uuid('test.txt', None, 'test', None)
self.assertEqual(the_file1.uuid, the_file2.uuid)
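    # Illustrative summary of the behaviour exercised above (not part of the test
    # suite): ScaleFile.update_uuid() hashes the string form of all non-None
    # arguments into a deterministic 32-character hex digest, e.g.
    # >>> f = ScaleFile()
    # >>> f.update_uuid('test.txt', None, 'test')   # None values are ignored
    # >>> len(f.uuid)
    # 32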
class TestScaleFileAddDataTypeTag(TestCase):
def setUp(self):
django.setup()
def test_valid(self):
"""Tests calling add_data_type_tag() with valid tags"""
the_file = ScaleFile()
the_file.add_data_type_tag('Hello-1')
the_file.add_data_type_tag('foo_BAR')
tags = the_file.get_data_type_tags()
correct_set = set()
correct_set.add('Hello-1')
correct_set.add('foo_BAR')
self.assertSetEqual(tags, correct_set)
def test_same_tag(self):
"""Tests calling add_data_type_tag() with the same tag twice"""
the_file = ScaleFile()
the_file.add_data_type_tag('Hello-1')
the_file.add_data_type_tag('Hello-1')
tags = the_file.get_data_type_tags()
correct_set = set()
correct_set.add('Hello-1')
self.assertSetEqual(tags, correct_set)
class TestScaleFileGetDataTypeTags(TestCase):
def setUp(self):
django.setup()
def test_tags(self):
"""Tests calling get_data_type_tags() with tags"""
the_file = ScaleFile(data_type_tags=['A','B','c'])
tags = the_file.get_data_type_tags()
correct_set = set()
correct_set.add('A')
correct_set.add('B')
correct_set.add('c')
self.assertSetEqual(tags, correct_set)
def test_no_tags(self):
"""Tests calling get_data_type_tags() with no tags"""
the_file = ScaleFile()
tags = the_file.get_data_type_tags()
self.assertSetEqual(tags, set())
class TestScaleFileManagerDeleteFiles(TestCase):
def setUp(self):
django.setup()
def test_success(self):
"""Tests deleting files successfully"""
workspace_1 = storage_test_utils.create_workspace()
workspace_1.delete_files = MagicMock()
file_1 = storage_test_utils.create_file(workspace=workspace_1)
workspace_2 = storage_test_utils.create_workspace()
workspace_2.delete_files = MagicMock()
file_2 = storage_test_utils.create_file(workspace=workspace_2)
files = [file_1, file_2]
ScaleFile.objects.delete_files(files)
workspace_1.delete_files.assert_called_once_with([file_1])
workspace_2.delete_files.assert_called_once_with([file_2])
def test_inactive_workspace(self):
"""Tests calling deleting files from an inactive workspace"""
workspace_1 = storage_test_utils.create_workspace()
workspace_1.download_files = MagicMock()
file_1 = storage_test_utils.create_file(workspace=workspace_1)
workspace_2 = storage_test_utils.create_workspace(is_active=False)
file_2 = storage_test_utils.create_file(workspace=workspace_2)
files = [file_1, file_2]
self.assertRaises(ArchivedWorkspace, ScaleFile.objects.delete_files, files)
def test_deleted_file(self):
"""Tests attempting to delete a file that is already deleted"""
workspace_1 = storage_test_utils.create_workspace()
workspace_1.delete_files = MagicMock()
file_1a = storage_test_utils.create_file(workspace=workspace_1)
file_1b = storage_test_utils.create_file(workspace=workspace_1, is_deleted=True)
workspace_2 = storage_test_utils.create_workspace()
workspace_2.delete_files = MagicMock()
file_2 = storage_test_utils.create_file(workspace=workspace_2)
files = [file_1a, file_1b, file_2]
ScaleFile.objects.delete_files(files)
workspace_1.delete_files.assert_called_once_with([file_1a, file_1b])
workspace_2.delete_files.assert_called_once_with([file_2])
class TestScaleFileManagerDownloadFiles(TestCase):
def setUp(self):
django.setup()
def test_success(self):
"""Tests calling ScaleFileManager.download_files() successfully"""
workspace_1 = storage_test_utils.create_workspace()
file_1 = storage_test_utils.create_file(workspace=workspace_1)
local_path_1 = '/my/local/path/file.txt'
file_2 = storage_test_utils.create_file(workspace=workspace_1)
local_path_2 = '/another/local/path/file.txt'
file_3 = storage_test_utils.create_file(workspace=workspace_1)
local_path_3 = '/another/local/path/file.json'
workspace_1.setup_download_dir = MagicMock()
workspace_1.download_files = MagicMock()
workspace_2 = storage_test_utils.create_workspace()
file_4 = storage_test_utils.create_file(workspace=workspace_2)
local_path_4 = '/my/local/path/4/file.txt'
file_5 = storage_test_utils.create_file(workspace=workspace_2)
local_path_5 = '/another/local/path/5/file.txt'
workspace_2.setup_download_dir = MagicMock()
workspace_2.download_files = MagicMock()
files = [FileDownload(file_1, local_path_1, False), FileDownload(file_2, local_path_2, False),
FileDownload(file_3, local_path_3, False), FileDownload(file_4, local_path_4, False),
FileDownload(file_5, local_path_5, False)]
ScaleFile.objects.download_files(files)
workspace_1.download_files.assert_called_once_with([FileDownload(file_1, local_path_1, False),
FileDownload(file_2, local_path_2, False),
FileDownload(file_3, local_path_3, False)])
workspace_2.download_files.assert_called_once_with([FileDownload(file_4, local_path_4, False),
FileDownload(file_5, local_path_5, False)])
def test_inactive_workspace(self):
"""Tests calling ScaleFileManager.download_files() with an inactive workspace"""
workspace_1 = storage_test_utils.create_workspace()
file_1 = storage_test_utils.create_file(workspace=workspace_1)
local_path_1 = '/my/local/path/file.txt'
file_2 = storage_test_utils.create_file(workspace=workspace_1)
local_path_2 = '/another/local/path/file.txt'
file_3 = storage_test_utils.create_file(workspace=workspace_1)
local_path_3 = '/another/local/path/file.json'
workspace_1.download_files = MagicMock()
workspace_2 = storage_test_utils.create_workspace()
workspace_2.is_active = False
workspace_2.save()
file_4 = storage_test_utils.create_file(workspace=workspace_2)
local_path_4 = '/my/local/path/4/file.txt'
file_5 = storage_test_utils.create_file(workspace=workspace_2)
local_path_5 = '/another/local/path/5/file.txt'
files = [FileDownload(file_1, local_path_1, False), FileDownload(file_2, local_path_2, False),
FileDownload(file_3, local_path_3, False), FileDownload(file_4, local_path_4, False),
FileDownload(file_5, local_path_5, False)]
self.assertRaises(ArchivedWorkspace, ScaleFile.objects.download_files, files)
def test_deleted_file(self):
"""Tests calling ScaleFileManager.download_files() with a deleted file"""
workspace_1 = storage_test_utils.create_workspace()
file_1 = storage_test_utils.create_file(workspace=workspace_1)
local_path_1 = '/my/local/path/file.txt'
file_2 = storage_test_utils.create_file(workspace=workspace_1)
local_path_2 = '/another/local/path/file.txt'
file_2.is_deleted = True
file_2.save()
file_3 = storage_test_utils.create_file(workspace=workspace_1)
local_path_3 = '/another/local/path/file.json'
workspace_1.download_files = MagicMock()
workspace_2 = storage_test_utils.create_workspace()
file_4 = storage_test_utils.create_file(workspace=workspace_2)
local_path_4 = '/my/local/path/4/file.txt'
file_5 = storage_test_utils.create_file(workspace=workspace_2)
local_path_5 = '/another/local/path/5/file.txt'
workspace_2.download_files = MagicMock()
files = [FileDownload(file_1, local_path_1, False), FileDownload(file_2, local_path_2, False),
FileDownload(file_3, local_path_3, False), FileDownload(file_4, local_path_4, False),
FileDownload(file_5, local_path_5, False)]
self.assertRaises(DeletedFile, ScaleFile.objects.download_files, files)
class TestScaleFileManagerMoveFiles(TestCase):
def setUp(self):
django.setup()
def test_success(self):
"""Tests calling ScaleFileManager.move_files() successfully"""
workspace_1 = storage_test_utils.create_workspace()
file_1 = storage_test_utils.create_file(file_name='my_file_1.txt', workspace=workspace_1)
new_workspace_path_1 = os.path.join('my', 'new', 'path', '1', os.path.basename(file_1.file_path))
file_2 = storage_test_utils.create_file(file_name='my_file_2.txt', workspace=workspace_1)
new_workspace_path_2 = os.path.join('my', 'new', 'path', '2', os.path.basename(file_2.file_path))
workspace_1.move_files = MagicMock()
workspace_2 = storage_test_utils.create_workspace()
file_3 = storage_test_utils.create_file(file_name='my_file_3.txt', workspace=workspace_2)
new_workspace_path_3 = os.path.join('my', 'new', 'path', '3', os.path.basename(file_3.file_path))
file_4 = storage_test_utils.create_file(file_name='my_file_4.txt', workspace=workspace_2)
new_workspace_path_4 = os.path.join('my', 'new', 'path', '4', os.path.basename(file_4.file_path))
workspace_2.move_files = MagicMock()
files = [FileMove(file_1, new_workspace_path_1), FileMove(file_2, new_workspace_path_2),
FileMove(file_3, new_workspace_path_3), FileMove(file_4, new_workspace_path_4)]
ScaleFile.objects.move_files(files)
workspace_1.move_files.assert_called_once_with([FileMove(file_1, new_workspace_path_1),
FileMove(file_2, new_workspace_path_2)])
workspace_2.move_files.assert_called_once_with([FileMove(file_3, new_workspace_path_3),
FileMove(file_4, new_workspace_path_4)])
def test_inactive_workspace(self):
"""Tests calling ScaleFileManager.move_files() with an inactive workspace"""
workspace_1 = storage_test_utils.create_workspace()
file_1 = storage_test_utils.create_file(file_name='my_file_1.txt', workspace=workspace_1)
new_workspace_path_1 = os.path.join('my', 'new', 'path', '1', os.path.basename(file_1.file_path))
file_2 = storage_test_utils.create_file(file_name='my_file_2.txt', workspace=workspace_1)
new_workspace_path_2 = os.path.join('my', 'new', 'path', '2', os.path.basename(file_2.file_path))
workspace_1.move_files = MagicMock()
workspace_2 = storage_test_utils.create_workspace()
workspace_2.is_active = False
workspace_2.save()
file_3 = storage_test_utils.create_file(file_name='my_file_3.txt', workspace=workspace_2)
new_workspace_path_3 = os.path.join('my', 'new', 'path', '3', os.path.basename(file_3.file_path))
file_4 = storage_test_utils.create_file(file_name='my_file_4.txt', workspace=workspace_2)
new_workspace_path_4 = os.path.join('my', 'new', 'path', '4', os.path.basename(file_4.file_path))
workspace_2.move_files = MagicMock()
files = [FileMove(file_1, new_workspace_path_1), FileMove(file_2, new_workspace_path_2),
FileMove(file_3, new_workspace_path_3), FileMove(file_4, new_workspace_path_4)]
self.assertRaises(ArchivedWorkspace, ScaleFile.objects.move_files, files)
def test_deleted_file(self):
"""Tests calling ScaleFileManager.move_files() with a deleted file"""
workspace_1 = storage_test_utils.create_workspace()
file_1 = storage_test_utils.create_file(file_name='my_file_1.txt', workspace=workspace_1)
new_workspace_path_1 = os.path.join('my', 'new', 'path', '1', os.path.basename(file_1.file_path))
file_2 = storage_test_utils.create_file(file_name='my_file_2.txt', workspace=workspace_1)
file_2.is_deleted = True
file_2.save()
new_workspace_path_2 = os.path.join('my', 'new', 'path', '2', os.path.basename(file_2.file_path))
workspace_1.move_files = MagicMock()
workspace_2 = storage_test_utils.create_workspace()
workspace_2.is_active = False
workspace_2.save()
file_3 = storage_test_utils.create_file(file_name='my_file_3.txt', workspace=workspace_2)
new_workspace_path_3 = os.path.join('my', 'new', 'path', '3', os.path.basename(file_3.file_path))
file_4 = storage_test_utils.create_file(file_name='my_file_4.txt', workspace=workspace_2)
new_workspace_path_4 = os.path.join('my', 'new', 'path', '4', os.path.basename(file_4.file_path))
workspace_2.move_files = MagicMock()
files = [FileMove(file_1, new_workspace_path_1), FileMove(file_2, new_workspace_path_2),
FileMove(file_3, new_workspace_path_3), FileMove(file_4, new_workspace_path_4)]
self.assertRaises(DeletedFile, ScaleFile.objects.move_files, files)
class TestScaleFileManagerUploadFiles(TestCase):
def setUp(self):
django.setup()
@patch('storage.models.os.path.getsize')
def test_success(self, mock_getsize):
"""Tests calling ScaleFileManager.upload_files() successfully"""
def new_getsize(path):
return 100
mock_getsize.side_effect = new_getsize
workspace = storage_test_utils.create_workspace()
file_1 = ScaleFile()
file_1.set_basic_fields('file.txt', 100, None) # Scale should auto-detect text/plain
remote_path_1 = 'my/remote/path/file.txt'
local_path_1 = 'my/local/path/file.txt'
file_1.file_path = remote_path_1
file_2 = ScaleFile()
file_2.set_basic_fields('file.json', 100, 'application/json')
remote_path_2 = 'my/remote/path/2/file.json'
local_path_2 = 'my/local/path/2/file.json'
file_2.file_path = remote_path_2
workspace.upload_files = MagicMock()
files = [FileUpload(file_1, local_path_1), FileUpload(file_2, local_path_2)]
models = ScaleFile.objects.upload_files(workspace, files)
workspace.upload_files.assert_called_once_with([FileUpload(file_1, local_path_1),
FileUpload(file_2, local_path_2)])
self.assertEqual('file.txt', models[0].file_name)
self.assertEqual(remote_path_1, models[0].file_path)
self.assertEqual('text/plain', models[0].media_type)
self.assertEqual(workspace.id, models[0].workspace_id)
self.assertEqual('file.json', models[1].file_name)
self.assertEqual(remote_path_2, models[1].file_path)
self.assertEqual('application/json', models[1].media_type)
self.assertEqual(workspace.id, models[1].workspace_id)
@patch('storage.models.os.path.getsize')
@patch('storage.models.makedirs')
def test_fails(self, mock_makedirs, mock_getsize):
"""Tests calling ScaleFileManager.upload_files() when Workspace.upload_files() fails"""
def new_getsize(path):
return 100
mock_getsize.side_effect = new_getsize
upload_dir = os.path.join('upload', 'dir')
work_dir = os.path.join('work', 'dir')
workspace = storage_test_utils.create_workspace()
file_1 = ScaleFile()
file_1.media_type = None # Scale should auto-detect text/plain
remote_path_1 = 'my/remote/path/file.txt'
local_path_1 = 'my/local/path/file.txt'
file_2 = ScaleFile()
file_2.media_type = 'application/json'
remote_path_2 = 'my/remote/path/2/file.json'
local_path_2 = 'my/local/path/2/file.json'
workspace.upload_files = MagicMock()
workspace.upload_files.side_effect = Exception
workspace.delete_files = MagicMock()
delete_work_dir = os.path.join(work_dir, 'delete', get_valid_filename(workspace.name))
files = [(file_1, local_path_1, remote_path_1), (file_2, local_path_2, remote_path_2)]
self.assertRaises(Exception, ScaleFile.objects.upload_files, upload_dir, work_dir, workspace, files)
class TestScaleFile(TestCase):
def setUp(self):
django.setup()
def test_url(self):
"""Tests building a URL for a file."""
ws = storage_test_utils.create_workspace(name='test', base_url='http://localhost')
scale_file = storage_test_utils.create_file(file_name='test.txt', workspace=ws)
self.assertEqual(scale_file.url, 'http://localhost/file/path/test.txt')
def test_url_base_url_missing(self):
"""Tests building a URL for a file in a workspace with no configured base URL."""
ws = storage_test_utils.create_workspace(name='test')
scale_file = storage_test_utils.create_file(file_name='test.txt', workspace=ws)
self.assertIsNone(scale_file.url)
def test_url_base_slash(self):
"""Tests building a URL for a file where the workspace base URL has a trailing slash."""
ws = storage_test_utils.create_workspace(name='test', base_url='http://localhost/')
scale_file = storage_test_utils.create_file(file_name='test.txt', workspace=ws)
self.assertEqual(scale_file.url, 'http://localhost/file/path/test.txt')
def test_url_file_slash(self):
"""Tests building a URL for a file where the file path URL has a leading slash."""
ws = storage_test_utils.create_workspace(name='test', base_url='http://localhost')
scale_file = storage_test_utils.create_file(file_name='test.txt', file_path='/file/path/test.txt', workspace=ws)
self.assertEqual(scale_file.url, 'http://localhost/file/path/test.txt')
def test_country_data(self):
"""Tests adding a border and country intersection calculation."""
testborder = geos.Polygon(((0, 0), (0, 10), (10, 10), (10, 0), (0, 0)))
testborder2 = geos.Polygon(((11, 0), (11, 8), (19, 8), (19, 0), (11, 0)))
testborder3 = geos.Polygon(((11, 11), (11, 15), (15, 15), (15, 11), (11, 11)))
testeffective = datetime.datetime(2000, 1, 1, 0, 0, 0, tzinfo=utc)
CountryData.objects.create(name='Test Country', fips='TC', gmi='TCY', iso2='TC', iso3='TCY', iso_num=42,
border=testborder, effective=testeffective)
CountryData.objects.create(name='Test Country 2', fips='TT', gmi='TCT', iso2='TT', iso3='TCT', iso_num=43,
border=testborder2, effective=testeffective)
CountryData.objects.create(name='Test Country 3', fips='TH', gmi='TCH', iso2='TH', iso3='TCH', iso_num=44,
border=testborder3, effective=testeffective)
ws = storage_test_utils.create_workspace(name='test', base_url='http://localhost')
scale_file = storage_test_utils.create_file(file_name='test.txt', workspace=ws)
with transaction.atomic():
scale_file.geometry = geos.Polygon(((5, 5), (5, 10), (12, 10), (12, 5), (5, 5)))
scale_file.set_countries()
scale_file.save()
tmp = [c.iso2 for c in scale_file.countries.all()]
self.assertEqual(len(tmp), 2)
self.assertIn('TC', tmp)
self.assertIn('TT', tmp)
def test_set_deleted(self):
"""Tests marking a file as deleted."""
scale_file = storage_test_utils.create_file()
scale_file.set_deleted()
self.assertTrue(scale_file.is_deleted)
self.assertIsNotNone(scale_file.deleted)
class TestCountryData(TestCase):
def setUp(self):
django.setup()
        self.testborder = geos.Polygon(((0, 0), (0, 10), (10, 10), (0, 10),
# This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 <NAME> and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
"""
Implements support for HDF5 compression filters via the high-level
interface. The following types of filter are available:
"gzip"
Standard DEFLATE-based compression, at integer levels from 0 to 9.
Built-in to all public versions of HDF5. Use this if you want a
decent-to-good ratio, good portability, and don't mind waiting.
"lzf"
Custom compression filter for h5py. This filter is much, much faster
than gzip (roughly 10x in compression vs. gzip level 4, and 3x faster
in decompressing), but at the cost of a worse compression ratio. Use
this if you want cheap compression and portability is not a concern.
"szip"
Access to the HDF5 SZIP encoder. SZIP is a non-mainstream compression
format used in space science on integer and float datasets. SZIP is
subject to license requirements, which means the encoder is not
guaranteed to be always available. However, it is also much faster
than gzip.
The following constants in this module are also useful:
decode
Tuple of available filter names for decoding
encode
Tuple of available filter names for encoding
"""
from collections.abc import Mapping
import operator
import numpy as np
from .compat import filename_encode
from .. import h5z, h5p, h5d, h5f
_COMP_FILTERS = {'gzip': h5z.FILTER_DEFLATE,
'szip': h5z.FILTER_SZIP,
'lzf': h5z.FILTER_LZF,
'shuffle': h5z.FILTER_SHUFFLE,
'fletcher32': h5z.FILTER_FLETCHER32,
'scaleoffset': h5z.FILTER_SCALEOFFSET }
DEFAULT_GZIP = 4
DEFAULT_SZIP = ('nn', 8)
def _gen_filter_tuples():
""" Bootstrap function to figure out what filters are available. """
dec = []
enc = []
for name, code in _COMP_FILTERS.items():
if h5z.filter_avail(code):
info = h5z.get_filter_info(code)
if info & h5z.FILTER_CONFIG_ENCODE_ENABLED:
enc.append(name)
if info & h5z.FILTER_CONFIG_DECODE_ENABLED:
dec.append(name)
return tuple(dec), tuple(enc)
decode, encode = _gen_filter_tuples()
def _external_entry(entry):
""" Check for and return a well-formed entry tuple for
a call to h5p.set_external. """
# We require only an iterable entry but also want to guard against
# raising a confusing exception from unpacking below a str or bytes that
# was mistakenly passed as an entry. We go further than that and accept
# only a tuple, which allows simpler documentation and exception
# messages.
if not isinstance(entry, tuple):
raise TypeError(
"Each external entry must be a tuple of (name, offset, size)")
name, offset, size = entry # raise ValueError without three elements
name = filename_encode(name)
offset = operator.index(offset)
size = operator.index(size)
return (name, offset, size)
def _normalize_external(external):
""" Normalize external into a well-formed list of tuples and return. """
if external is None:
return []
try:
# Accept a solitary name---a str, bytes, or os.PathLike acceptable to
# filename_encode.
return [_external_entry((external, 0, h5f.UNLIMITED))]
except TypeError:
pass
# Check and rebuild each entry to be well-formed.
return [_external_entry(entry) for entry in external]
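# Illustrative behaviour of the normalisation above (byte strings assume the
# default filesystem encoding):
#
#   _normalize_external(None)                  -> []
#   _normalize_external("raw.dat")             -> [(b'raw.dat', 0, h5f.UNLIMITED)]
#   _normalize_external([("raw.dat", 0, 100)]) -> [(b'raw.dat', 0, 100)]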
class FilterRefBase(Mapping):
"""Base class for referring to an HDF5 and describing its options
Your subclass must define filter_id, and may define a filter_options tuple.
"""
filter_id = None
filter_options = ()
# Mapping interface supports using instances as **kwargs for compatibility
# with older versions of h5py
@property
def _kwargs(self):
return {
'compression': self.filter_id,
'compression_opts': self.filter_options
}
def __hash__(self):
return hash((self.filter_id, self.filter_options))
def __eq__(self, other):
return (
isinstance(other, FilterRefBase)
and self.filter_id == other.filter_id
and self.filter_options == other.filter_options
)
def __len__(self):
return len(self._kwargs)
def __iter__(self):
return iter(self._kwargs)
def __getitem__(self, item):
return self._kwargs[item]
class Gzip(FilterRefBase):
filter_id = h5z.FILTER_DEFLATE
def __init__(self, level=DEFAULT_GZIP):
self.filter_options = (level,)
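# Usage sketch (the file handle and dataset names are illustrative): because
# FilterRefBase implements the Mapping interface, an instance can either be
# passed directly as `compression=` (handled in fill_dcpl below) or unpacked
# as keyword arguments:
#
# >>> f.create_dataset("x", shape=(100,), dtype='f4', compression=Gzip(6))
# >>> f.create_dataset("y", shape=(100,), dtype='f4', **Gzip(6))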
def fill_dcpl(plist, shape, dtype, chunks, compression, compression_opts,
shuffle, fletcher32, maxshape, scaleoffset, external,
allow_unknown_filter=False):
""" Generate a dataset creation property list.
Undocumented and subject to change without warning.
"""
if shape is None or shape == ():
shapetype = 'Empty' if shape is None else 'Scalar'
if any((chunks, compression, compression_opts, shuffle, fletcher32,
scaleoffset is not None)):
raise TypeError(
f"{shapetype} datasets don't support chunk/filter options"
)
if maxshape and maxshape != ():
raise TypeError(f"{shapetype} datasets cannot be extended")
return h5p.create(h5p.DATASET_CREATE)
def rq_tuple(tpl, name):
""" Check if chunks/maxshape match dataset rank """
if tpl in (None, True):
return
try:
tpl = tuple(tpl)
except TypeError:
raise TypeError('"%s" argument must be None or a sequence object' % name)
if len(tpl) != len(shape):
raise ValueError('"%s" must have same rank as dataset shape' % name)
rq_tuple(chunks, 'chunks')
rq_tuple(maxshape, 'maxshape')
if compression is not None:
if isinstance(compression, FilterRefBase):
compression_opts = compression.filter_options
compression = compression.filter_id
if compression not in encode and not isinstance(compression, int):
raise ValueError('Compression filter "%s" is unavailable' % compression)
if compression == 'gzip':
if compression_opts is None:
gzip_level = DEFAULT_GZIP
elif compression_opts in range(10):
gzip_level = compression_opts
else:
raise ValueError("GZIP setting must be an integer from 0-9, not %r" % compression_opts)
elif compression == 'lzf':
if compression_opts is not None:
raise ValueError("LZF compression filter accepts no options")
elif compression == 'szip':
if compression_opts is None:
compression_opts = DEFAULT_SZIP
err = "SZIP options must be a 2-tuple ('ec'|'nn', even integer 0-32)"
try:
szmethod, szpix = compression_opts
except TypeError:
raise TypeError(err)
if szmethod not in ('ec', 'nn'):
raise ValueError(err)
if not (0<szpix<=32 and szpix%2 == 0):
raise ValueError(err)
elif compression_opts is not None:
# Can't specify just compression_opts by itself.
raise TypeError("Compression method must be specified")
if scaleoffset is not None:
# scaleoffset must be an integer when it is not None or False,
# except for integral data, for which scaleoffset == True is
# permissible (will use SO_INT_MINBITS_DEFAULT)
if scaleoffset < 0:
raise ValueError('scale factor must be >= 0')
if dtype.kind == 'f':
if scaleoffset is True:
raise ValueError('integer scaleoffset must be provided for '
'floating point types')
elif dtype.kind in ('u', 'i'):
if scaleoffset is True:
scaleoffset = h5z.SO_INT_MINBITS_DEFAULT
else:
raise TypeError('scale/offset filter only supported for integer '
'and floating-point types')
# Scale/offset following fletcher32 in the filter chain will (almost?)
        # always trigger a read error, as most scale/offset settings are
# lossy. Since fletcher32 must come first (see comment below) we
# simply prohibit the combination of fletcher32 and scale/offset.
if fletcher32:
raise ValueError('fletcher32 cannot be used with potentially lossy'
' scale/offset filter')
external = _normalize_external(external)
# End argument validation
if (chunks is True) or \
(chunks is None and any((shuffle, fletcher32, compression, maxshape,
scaleoffset is not None))):
chunks = guess_chunk(shape, maxshape, dtype.itemsize)
if maxshape is True:
maxshape = (None,)*len(shape)
if chunks is not None:
plist.set_chunk(chunks)
plist.set_fill_time(h5d.FILL_TIME_ALLOC) # prevent resize glitch
# scale-offset must come before shuffle and compression
if scaleoffset is not None:
if dtype.kind in ('u', 'i'):
plist.set_scaleoffset(h5z.SO_INT, scaleoffset)
else: # dtype.kind == 'f'
plist.set_scaleoffset(h5z.SO_FLOAT_DSCALE, scaleoffset)
for item in external:
plist.set_external(*item)
if shuffle:
plist.set_shuffle()
if compression == 'gzip':
plist.set_deflate(gzip_level)
elif compression == 'lzf':
plist.set_filter(h5z.FILTER_LZF, h5z.FLAG_OPTIONAL)
elif compression == 'szip':
opts = {'ec': h5z.SZIP_EC_OPTION_MASK, 'nn': h5z.SZIP_NN_OPTION_MASK}
plist.set_szip(opts[szmethod], szpix)
elif isinstance(compression, int):
if not allow_unknown_filter and not h5z.filter_avail(compression):
raise ValueError("Unknown compression filter number: %s" % compression)
plist.set_filter(compression, h5z.FLAG_OPTIONAL, compression_opts)
# `fletcher32` must come after `compression`, otherwise, if `compression`
# is "szip" and the data is 64bit, the fletcher32 checksum will be wrong
# (see GitHub issue #953).
if fletcher32:
plist.set_fletcher32()
return plist
def get_filters(plist):
""" Extract a dictionary of active filters from a DCPL, along with
their settings.
Undocumented and subject to change without warning.
"""
filters = {h5z.FILTER_DEFLATE: 'gzip', h5z.FILTER_SZIP: 'szip',
h5z.FILTER_SHUFFLE: 'shuffle', h5z.FILTER_FLETCHER32: 'fletcher32',
h5z.FILTER_LZF: 'lzf', h5z.FILTER_SCALEOFFSET: 'scaleoffset'}
pipeline = {}
nfilters = plist.get_nfilters()
for i in range(nfilters):
code, _, vals, _ = plist.get_filter(i)
if code == h5z.FILTER_DEFLATE:
vals = vals[0] # gzip level
elif code == h5z.FILTER_SZIP:
mask, pixels = vals[0:2]
if mask & h5z.SZIP_EC_OPTION_MASK:
mask = 'ec'
elif mask & h5z.SZIP_NN_OPTION_MASK:
mask = 'nn'
else:
raise TypeError("Unknown SZIP configuration")
vals = (mask, pixels)
elif code == h5z.FILTER_LZF:
vals = None
else:
if len(vals) == 0:
vals = None
pipeline[filters.get(code, str(code))] = vals
return pipeline
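# Example of the mapping returned above for a dataset created with
# shuffle + gzip level 4 (illustrative):
#
# >>> get_filters(dset.id.get_create_plist())
# {'shuffle': None, 'gzip': 4}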
CHUNK_BASE = 16*1024 # Multiplier by which chunks are adjusted
CHUNK_MIN = 8*1024 # Soft lower limit (8k)
CHUNK_MAX = 1024*1024 # Hard upper limit (1M)
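# Rough sketch of how these constants steer the heuristic below (an assumption
# based on the surrounding code, not a normative description): a target chunk
# byte size is derived from CHUNK_BASE scaled with the total dataset size and
# clipped to [CHUNK_MIN, CHUNK_MAX]; the chunk shape is then repeatedly halved
# along its axes until its byte size fits under that target.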
def guess_chunk(shape, maxshape, typesize):
""" Guess an appropriate chunk layout for a dataset, given its shape and
    the size of each element in bytes.
import torch.optim
import math
import pickle
from pathlib import Path
import utils
import time
import pkbar
from snn_backprop import *
class Trainer():
    def __init__(self, config, data_loaders, logger, gpu=False, task='nmnist'):
        """
        Initialize Trainer class.
        Args
            config : Configuration object that contains all the configurations.
            data_loaders : A list of data loaders containing the train, valid and test loaders.
            logger : A logger that logs the progress of the training.
            gpu : Whether to run the networks on GPU (CUDA).
            task : Which task is being solved by this trainer. ['mnist', 'nmnist']
        """
self.config = config
self.train_loader, self.valid_loader, self.test_loader = data_loaders
if config.multi_model:
self.train_num_data = len(self.train_loader[0].dataset)
self.valid_num_data = len(self.valid_loader[0].dataset)
self.test_num_data = len(self.test_loader.dataset)
else:
self.train_num_data = len(self.train_loader.dataset)
self.valid_num_data = len(self.valid_loader.dataset)
self.test_num_data = len(self.test_loader.dataset)
self.data_loaders = data_loaders
self.logger = logger
self.task = task
self.cuda_enabled = gpu
self.resume = self.logger.resume
return
def make_model(self, model_config):
"""
        Instantiate the SNN model following the configuration and build its optimizer.
        Attributes of the model are set using setattr().
        Args
            model_config : configuration object for the model.
                Ex) model_config['ste_type'] = "exp"
"""
model = ListSNNMulti(model_config)
self.model = model
if model.optim_name == "sgd":
optim = torch.optim.SGD(model.parameters(), lr=model.learning_rate,
momentum=model.momentum, weight_decay=model.weight_decay)
elif model.optim_name == "adam":
optim = torch.optim.Adam(model.parameters(), lr=model.learning_rate,
weight_decay=model.weight_decay)
self.optim = optim
return
def make_teacher(self, model_config):
"""
        Instantiate the teacher SNN model following the configuration.
        Attributes of the model are set using setattr().
        Args
            model_config : configuration object for the teacher model.
                Ex) model_config['ste_type'] = "exp"
"""
teacher = ListSNNMulti_Teacher(model_config)
self.teacher = teacher
# if teacher.optim_name == "sgd":
# optim = torch.optim.SGD(teacher.parameters(), lr=teacher.learning_rate,
# momentum=teacher.momentum, weight_decay=teacher.weight_decay)
# elif teacher.optim_name == "adam":
# optim = torch.optim.Adam(teacher.parameters(), lr=teacher.learning_rate,
# weight_decay=teacher.weight_decay)
# self.optim = optim
print("Built Teacher Model")
return
def make_scheduler(self):
"""
Make learning rate scheduler for each optimizers.
"""
scheduler = torch.optim.lr_scheduler.StepLR(self.optim,
self.config.lr_step_size,
self.config.lr_gamma)
self.scheduler = scheduler
return
def run(self, config):
"""
Train each model in model_list.
Args
config : model configuration.
"""
# Initialize result holder and plotter.
loss_dict = dict()
model = self.model
optim = self.optim
scheduler = self.scheduler
if self.resume:
if config.multi_model:
best_valid_acc = np.array(self.logger.valid_df['acc'].tolist()).max(0)
best_valid_acc_first = np.array(self.logger.valid_df['acc_first'].tolist()).max(0)
else:
best_valid_acc = self.logger.valid_df['acc'].to_numpy().max()
best_valid_acc_first = self.logger.valid_df['acc_first'].to_numpy().max()
current_epoch = self.logger.valid_df['epoch'].to_numpy().max() + 1
else:
if config.multi_model:
best_valid_acc = np.zeros(config.num_models)
best_valid_acc_first = np.zeros(config.num_models)
else:
best_valid_acc = 0
best_valid_acc_first = 0
current_epoch = 0
for epoch in range(current_epoch, current_epoch + config.epoch):
train_loss, train_acc, train_acc_first, kbar = self.run_epoch('train', model, optim, scheduler, epoch)
train_nums_total = np.array(self.total_num_spike_total).sum() / self.train_num_data
train_nums_nec = np.array(self.total_num_spike_nec).sum() / self.train_num_data
scheduler.step()
self.logger.log_train_df(epoch, train_loss, train_acc, train_acc_first, self.total_num_spike_total, self.total_num_spike_nec, self.min_first_stime_min, self.mean_first_stime_mean, model.multi_model)
self.model.clean_state()
valid_loss, valid_acc, valid_acc_first = self.run_epoch('valid', model)
kbar.add(0, values=[("v_loss", valid_loss), ("v_acc", valid_acc), ("v_acc_first", valid_acc_first)])
valid_nums_total = np.array(self.total_num_spike_total).sum() / self.valid_num_data
valid_nums_nec = np.array(self.total_num_spike_nec).sum() / self.valid_num_data
self.logger.log_valid_df(epoch, valid_loss, valid_acc, valid_acc_first, self.total_num_spike_total, self.total_num_spike_nec, self.min_first_stime_min, self.mean_first_stime_mean, model.multi_model)
self.model.clean_state()
if config.multi_model:
for m in (valid_acc.cpu().numpy() > best_valid_acc).nonzero()[0].tolist():
self.save_multi_model('best', m)
best_valid_acc[m] = valid_acc[m]
for m in (valid_acc_first.cpu().numpy() > best_valid_acc_first).nonzero()[0].tolist():
self.save_multi_model('best_first', m)
best_valid_acc_first[m] = valid_acc_first[m]
else:
if valid_acc > best_valid_acc:
self.save_model('best')
best_valid_acc = valid_acc
if valid_acc_first > best_valid_acc_first:
self.save_model('best_first')
best_valid_acc_first = valid_acc_first
self.save_model('last')
self.save_optim('last')
self.model.clean_state()
return
def run_epoch(self, mode, model, optim=None, scheduler=None, epoch=None):
assert mode in ['train', 'valid', 'test']
if mode == 'train':
loader = self.train_loader
num_data = self.train_num_data
if model.multi_model:
target_iter = len(loader[0])
else:
target_iter = len(loader)
kbar = pkbar.Kbar(target=target_iter, epoch=epoch,
num_epochs=self.config.epoch,
width=16, always_stateful=False)
elif mode == 'valid':
loader = self.valid_loader
num_data = self.valid_num_data
elif mode == 'test':
loader = self.test_loader
num_data = self.test_num_data
progress = 0
total_loss = 0
total_correct = 0
total_correct_first = 0
total_distill_loss = 0
# input.shape = [N, time_length, num_in_features]
# target.shape = [N, time_length, num_target_features]
        if model.multi_model and mode != 'test':
if self.config.evaluation_mode:
loader = loader[0]
else:
loader = zip(*loader)
for batch_idx, inp_tar in enumerate(loader):
if model.multi_model:
if mode == 'test' or self.config.evaluation_mode:
input = inp_tar[0].unsqueeze(0).repeat(model.num_models, *[1 for i in range(inp_tar[0].dim())])
target = inp_tar[1].unsqueeze(0).repeat(model.num_models, *[1 for i in range(inp_tar[1].dim())])
else:
input = torch.stack([item[0] for item in inp_tar])
target = torch.stack([item[1] for item in inp_tar])
else:
input = inp_tar[0]
target = inp_tar[1]
if self.cuda_enabled:
input = input.cuda()
target = target.cuda()
if self.task == "mnist":
if model.multi_model:
input = input.reshape(input.shape[0] * input.shape[1], -1)
input = self.float2spikes(input, model.time_length, self.config.max_input_timing,
self.config.min_input_timing, type = 'latency',
stochastic=False, last=False, skip_zero=True)
input = input.reshape(model.num_models, int(input.shape[0] / model.num_models), *input.shape[1:])
else:
input = input.reshape(input.shape[0], -1)
input = self.float2spikes(input, model.time_length, self.config.max_input_timing,
self.config.min_input_timing, type = 'latency',
stochastic=False, last=False, skip_zero=True)
# Run forward pass.
output = model(input)
if model.distill_mode:
with torch.no_grad():
output_teacher = self.teacher(input)
if self.cuda_enabled:
output_teacher = output_teacher.cuda(non_blocking=True)
student_logits = output.sum(1)
teacher_logits = output_teacher.sum(1)
model.calc_distill_loss(student_logits, teacher_logits)
if mode == 'train':
# Backward and update.
optim.zero_grad()
if self.config.target_type == 'latency':
model.backward_custom(target)
else:
assert self.config.target_type == 'count'
target_spike = self.label2spikes(target.reshape(-1))
# model_batch x time x neuron
model.backward_custom(target_spike)
optim.step()
else:
if self.config.target_type == 'latency':
model.calc_loss(target.reshape(-1))
else:
assert self.config.target_type == 'count'
target_spike = self.label2spikes(target.reshape(-1))
# model_batch x time x neuron
model.calc_loss(target_spike)
loss = model.loss
batch_size = target.shape[-1]
total_loss += loss * batch_size
total_distill_loss += model.distill_loss * batch_size
            # Save key values for later analysis. Note: this block assumes distillation
            # is enabled (model.distill_mode), since output_teacher and distill_loss are
            # only defined in that case.
np.save(self.logger.log_dir/f"student_spikes_{self.logger.name_tag}_epoch{epoch}.npy",output.cpu())
np.save(self.logger.log_dir/f"teacher_spikes_{self.logger.name_tag}_epoch{epoch}.npy",output_teacher.cpu())
np.save(self.logger.log_dir/f"distill_loss_{self.logger.name_tag}_epoch{epoch}.npy", total_distill_loss.cpu())
np.save(self.logger.log_dir/f"student_loss_{self.logger.name_tag}_epoch{epoch}.npy", (total_loss - total_distill_loss).cpu())
num_spike_total = model.num_spike_total
num_spike_nec = model.num_spike_nec
first_stime_min = model.first_stime_min
first_stime_mean = model.first_stime_mean
if batch_idx == 0:
self.total_num_spike_total = num_spike_total
self.total_num_spike_nec = num_spike_nec
self.min_first_stime_min = first_stime_min
self.mean_first_stime_mean = first_stime_mean
else:
if model.multi_model:
self.total_num_spike_total = [(np.array(num_spike_total[i]) + np.array(self.total_num_spike_total[i])).tolist() for i in range(len(num_spike_total))]
self.total_num_spike_nec = [(np.array(num_spike_nec[i]) + np.array(self.total_num_spike_nec[i])).tolist() for i in range(len(num_spike_nec))]
self.min_first_stime_min = [min(x, y) for x, y in zip(self.min_first_stime_min, first_stime_min)]
self.mean_first_stime_mean = ((np.array(self.mean_first_stime_mean) * progress + np.array(first_stime_mean) * batch_size) / (progress + batch_size)).tolist()
else:
self.total_num_spike_total = [num_spike_total[i] + self.total_num_spike_total[i] for i in range(len(num_spike_total))]
self.total_num_spike_nec = [num_spike_nec[i] + self.total_num_spike_nec[i] for i in range(len(num_spike_nec))]
self.min_first_stime_min = min(self.min_first_stime_min, first_stime_min)
self.mean_first_stime_mean = (self.mean_first_stime_mean * progress + first_stime_mean * batch_size) / (progress + batch_size)
pred_class = self.spikes2label(output, 'count')
pred_class_first = self.spikes2label(output, 'first')
if model.multi_model:
num_correct = (pred_class.reshape(target.shape) == target).sum(1).float()
num_correct_first = (pred_class_first.reshape(target.shape) == target).sum(1).float()
total_correct += num_correct
total_correct_first += num_correct_first
else:
num_correct = (pred_class == target).sum().float()
num_correct_first = (pred_class_first == target).sum().float()
total_correct += float(num_correct.item())
total_correct_first += float(num_correct_first.item())
current_acc_count = num_correct / batch_size
current_acc_first = num_correct_first / batch_size
progress += batch_size
# if mode == 'train':
# self.logger.log_train(model.multi_model, epoch, progress, loss, num_spike_total, num_spike_nec, first_stime_min, first_stime_mean, num_correct, num_correct_first, batch_size, model.term_length, (batch_idx % self.config.log_interval == 0))
if mode == "train":
kbar.update(batch_idx+1, values=[("loss", loss),
("acc", current_acc_count),
("acc_first", current_acc_first)])
if mode == "train":
return (total_loss / progress), (total_correct / progress), (total_correct_first / progress), kbar
else:
return (total_loss / progress), (total_correct / progress), (total_correct_first / progress)
def load_model(self, param_dict):
self.model.load_state_dict(param_dict)
return
def load_teacher(self, param_dict):
self.teacher.load_state_dict(param_dict)
print("Loaded Teacher Model")
return
def save_model(self, tag):
self.logger.save_model(self.model, tag)
return
def save_multi_model(self, tag, model_id):
self.logger.save_multi_model(self.model, tag, model_id)
return
def load_optim(self, param_dict):
self.optim.load_state_dict(param_dict)
return
def save_optim(self, tag):
self.logger.save_optim(self.optim, tag)
return
def test(self):
test_loss, test_acc_most, test_acc_earliest = self.run_epoch('test', self.model)
print(f"test_loss : {test_loss:.4f} | test_acc_most : {test_acc_most:.4f} | " \
f"test_acc_earliest : {test_acc_earliest}")
return
def spikes2label(self, spmat, decision_type = 'count'):
"""
Args
spmat : [batch x time x feature]
Return
label : [batch]
"""
if decision_type == 'count':
label = spmat.sum(1).max(1).indices
elif decision_type == 'first':
decreasing_output = spmat * torch.arange(spmat.shape[1], 0, -1).view(1, -1, 1)
max_each_neuron = decreasing_output.max(dim=1).values
# batch x feature
label = max_each_neuron.max(dim=1).indices
return label
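    # Worked example for the two decision rules above (shapes follow the docstring;
    # `trainer` is an already-constructed Trainer instance):
    # >>> spmat = torch.tensor([[[0., 1.], [1., 0.], [1., 0.]]])  # 1 batch, 3 steps, 2 neurons
    # >>> trainer.spikes2label(spmat, 'count')   # neuron 0 spikes twice
    # tensor([0])
    # >>> trainer.spikes2label(spmat, 'first')   # neuron 1 spikes earliest
    # tensor([1])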
def label2spikes(self, label):
"""
Generate target spike train based on the class label.
        The target spike counts are encoded at the first time step: the target class
        neuron is assigned max_target_spikes and every other output neuron gets
        min_target_spikes.
Args:
label : target label. shape = [batch]
Return:
spmat : shape = [batch, time, feature]
"""
pos_num = self.model.max_target_spikes
neg_num = self.model.min_target_spikes
T = self.model.time_length
spmat = torch.zeros([label.numel(), T, 10])
spmat[:,0,:] = neg_num
for i in range(label.numel()):
spmat[i, 0, label[i]] = pos_num
return spmat
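    # For example (assuming max_target_spikes=5 and min_target_spikes=1), a label
    # tensor([2]) yields a target train whose first time step reads
    # [1, 1, 5, 1, 1, 1, 1, 1, 1, 1] across the 10 output neurons, with all later
    # time steps left at zero.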
def float2spikes(self, flmat, time_length, num_max_spikes, num_min_spikes, type = 'stretch', stochastic=False, last=False, skip_zero=True):
"""
Args
flmat : float matrix [batch x feature] in [0, 1]
Outputs
spmat : spike matrix [batch x time x feature]
"""
batch_size = flmat.size(0)
if not stochastic:
if type == 'stretch':
flmat_min_to_max = (num_max_spikes - num_min_spikes) * flmat + num_min_spikes
# batch x features
if skip_zero:
flmat_min_to_max[flmat == 0] = 0
import numpy as np
from myutils import *
from easydict import EasyDict as edict
def dcg_at_k(r, k, method=1):
r = np.asfarray(r)[:k]
if r.size:
if method == 0:
return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
elif method == 1:
return np.sum(r / np.log2(np.arange(2, r.size + 2)))
else:
raise ValueError('method must be 0 or 1.')
return 0.
def ndcg_at_k(r, k, method=0):
dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)
if not dcg_max:
return 0.
return dcg_at_k(r, k, method) / dcg_max
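# Quick sanity example for the two functions above:
# >>> ndcg_at_k([1, 0, 1], 3)
# 0.8154648767857287
# (the ideal ordering [1, 1, 0] has DCG 2.0, the actual ordering has DCG
#  1 + 1/log2(3) ~= 1.63, hence NDCG ~= 0.815)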
def measure_rec_quality(path_data):
    # Evaluate only the attributes that have been chosen and are available in the chosen dataset
flags = path_data.sens_attribute_flags
attribute_list = get_attribute_list(path_data.dataset_name, flags)
metrics_names = ["ndcg", "hr", "recall", "precision"]
metrics = edict()
for metric in metrics_names:
metrics[metric] = {"Overall": []}
for values in attribute_list.values():
if len(attribute_list) == 1: break
attribute_to_name = values[1]
for _, name in attribute_to_name.items():
metrics[metric][name] = []
topk_matches = path_data.uid_topk
test_labels = path_data.test_labels
test_user_idxs = list(test_labels.keys())
invalid_users = []
for uid in test_user_idxs:
if uid not in topk_matches: continue
if len(topk_matches[uid]) < 10:
invalid_users.append(uid)
continue
pred_list, rel_set = topk_matches[uid], test_labels[uid]
if len(pred_list) == 0:
continue
k = 0
hit_num = 0.0
hit_list = []
for pid in pred_list:
k += 1
if pid in rel_set:
hit_num += 1
hit_list.append(1)
else:
hit_list.append(0)
ndcg = ndcg_at_k(hit_list, k)
recall = hit_num / len(rel_set)
precision = hit_num / len(pred_list)
hit = 1.0 if hit_num > 0.0 else 0.0
# Based on attribute
for attribute in attribute_list.keys():
if uid not in attribute_list[attribute][0]: continue
attr_value = attribute_list[attribute][0][uid]
if attr_value not in attribute_list[attribute][1]: continue #Few users may have the attribute missing (LASTFM)
attr_name = attribute_list[attribute][1][attr_value]
metrics["ndcg"][attr_name].append(ndcg)
metrics["recall"][attr_name].append(recall)
metrics["precision"][attr_name].append(precision)
metrics["hr"][attr_name].append(hit)
metrics["ndcg"]["Overall"].append(ndcg)
metrics["recall"]["Overall"].append(recall)
metrics["precision"]["Overall"].append(precision)
metrics["hr"]["Overall"].append(hit)
return metrics
def print_rec_metrics(dataset_name, flags, metrics):
attribute_list = get_attribute_list(dataset_name, flags)
print("\n---Recommandation Quality---")
print("Average for the entire user base:", end=" ")
for metric, values in metrics.items():
print("{}: {:.3f}".format(metric, np.array(values["Overall"]).mean()), end=" | ")
print("")
for attribute_category, values in attribute_list.items():
print("\n-Statistic with user grouped by {} attribute".format(attribute_category))
for attribute in values[1].values():
print("{} group".format(attribute), end=" ")
for metric_name, groups_values in metrics.items():
print("{}: {:.3f}".format(metric_name, np.array(groups_values[attribute]).mean()), end=" | ")
print("")
print("\n")
"""
Explanation metrics
"""
def topk_ETV(path_data):
dataset_name = path_data.dataset_name
def simpson_index(topk):
n_path_for_patterns = {k: 0 for k in set(PATH_TYPES[dataset_name])}
N = 0
for path in topk:
path = path
path_type = get_path_type(path)
if path_type == 'self_loop':
path_type = 'described_as'
n_path_for_patterns[path_type] += 1
N += 1
numerator = 0
for path_type, n_path_type_ith in n_path_for_patterns.items():
numerator += n_path_type_ith * (n_path_type_ith - 1)
# N = 0
# for item_path in pred_uv_paths.items():
# N += len(item_path[1])
if N * (N - 1) == 0:
return 0
return 1 - (numerator / (N * (N - 1)))
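    # The nested helper above computes a Simpson-style diversity index,
    # D = 1 - sum_t n_t (n_t - 1) / (N (N - 1)), over the explanation path types
    # of a top-k list. E.g. a top-10 list split 5/5 between two path types gives
    # 1 - (5*4 + 5*4) / (10*9) ~= 0.56.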
ETVs = {}
for uid, topk in path_data.uid_topk.items():
if uid not in path_data.test_labels: continue
ETV = simpson_index([path_data.uid_pid_explanation[uid][pid] for pid in topk])
ETVs[uid] = ETV
return ETVs
def avg_ETV(path_data):
uid_ETVs = topk_ETV(path_data)
    # Evaluate only the attributes that have been chosen and are available in the chosen dataset
flags = path_data.sens_attribute_flags
attribute_list = get_attribute_list(path_data.dataset_name, flags)
avg_groups_ETV = {}
groups_ETV_scores = {}
# Initialize group scores with empty list
for attribute in attribute_list.keys():
for _, attribute_label in attribute_list[attribute][1].items():
groups_ETV_scores[attribute_label] = []
if "Overall" not in groups_ETV_scores:
groups_ETV_scores["Overall"] = []
for uid, ETV in uid_ETVs.items():
for attribute in attribute_list.keys():
if uid not in attribute_list[attribute][0]: continue
attr_value = attribute_list[attribute][0][uid]
if attr_value not in attribute_list[attribute][1]: continue # Few users may have the attribute missing (LASTFM)
attr_name = attribute_list[attribute][1][attr_value]
groups_ETV_scores[attr_name].append(ETV)
groups_ETV_scores["Overall"].append(ETV)
for attribute_label, group_scores in groups_ETV_scores.items():
avg_groups_ETV[attribute_label] = np.array(group_scores).mean()
explanation_type_variety = edict(
avg_groups_ETV=avg_groups_ETV,
groups_ETV_scores=groups_ETV_scores
)
return explanation_type_variety
def avg_LID(path_data):
uid_LIDs = topk_LID(path_data)
    # Evaluate only the attributes that have been chosen and are available in the chosen dataset
flags = path_data.sens_attribute_flags
attribute_list = get_attribute_list(path_data.dataset_name, flags)
avg_groups_LID = {}
groups_LID_scores = {}
# Initialize group scores with empty list
for attribute in attribute_list.keys():
for _, attribute_label in attribute_list[attribute][1].items():
groups_LID_scores[attribute_label] = []
if "Overall" not in groups_LID_scores:
groups_LID_scores["Overall"] = []
for uid, LID in uid_LIDs.items():
for attribute in attribute_list.keys():
if uid not in attribute_list[attribute][0]: continue
attr_value = attribute_list[attribute][0][uid]
if attr_value not in attribute_list[attribute][1]: continue #Few users may have the attribute missing (LASTFM)
attr_name = attribute_list[attribute][1][attr_value]
groups_LID_scores[attr_name].append(LID)
groups_LID_scores["Overall"].append(LID)
for attribute_label, group_scores in groups_LID_scores.items():
avg_groups_LID[attribute_label] = np.array(group_scores).mean()
linked_interaction_diversity_results = edict(
avg_groups_LID=avg_groups_LID,
groups_LID_scores=groups_LID_scores
)
return linked_interaction_diversity_results
def topk_LID(path_data):
LIDs = {}
for uid, topk in path_data.uid_topk.items():
if uid not in path_data.test_labels: continue
unique_linked_interaction = set()
count = 0
for pid in topk:
if pid not in path_data.uid_pid_explanation[uid]:
continue
current_path = path_data.uid_pid_explanation[uid][pid]
li = get_linked_interaction_id(current_path)
if current_path[1][0] == "mention":
li += 10000 #pad in order to not make them overlap, this is a stupid workaround, fix it
unique_linked_interaction.add(li)
if len(topk) == 0 or len(unique_linked_interaction) == 0:
count += 1
LID = len(unique_linked_interaction) / len(topk)
LIDs[uid] = LID
print(count)
return LIDs
def avg_SED(path_data):
uid_SEDs = topk_SED(path_data)
    # Evaluate only the attributes that have been chosen and are available in the chosen dataset
flags = path_data.sens_attribute_flags
attribute_list = get_attribute_list(path_data.dataset_name, flags)
avg_groups_SED = {}
groups_SED_scores = {}
# Initialize group scores with empty list
for attribute in attribute_list.keys():
for _, attribute_label in attribute_list[attribute][1].items():
groups_SED_scores[attribute_label] = []
if "Overall" not in groups_SED_scores:
groups_SED_scores["Overall"] = []
for uid, SED in uid_SEDs.items():
for attribute in attribute_list.keys():
if uid not in attribute_list[attribute][0]: continue
attr_value = attribute_list[attribute][0][uid]
if attr_value not in attribute_list[attribute][1]: continue #Few users may have the attribute missing (LASTFM)
attr_name = attribute_list[attribute][1][attr_value]
groups_SED_scores[attr_name].append(SED)
groups_SED_scores["Overall"].append(SED)
for attribute_label, group_scores in groups_SED_scores.items():
avg_groups_SED[attribute_label] = np.array(group_scores).mean()
shared_entity_diversity_results = edict(
avg_groups_SED=avg_groups_SED,
groups_SED_scores=groups_SED_scores
)
return shared_entity_diversity_results
def topk_SED(path_data):
SEDs = {}
for uid, topk in path_data.uid_topk.items():
if uid not in path_data.test_labels: continue
unique_shared_entities = set()
for pid in topk:
if pid not in path_data.uid_pid_explanation[uid]:
continue
current_path = path_data.uid_pid_explanation[uid][pid]
se = get_shared_entity_id(current_path)
unique_shared_entities.add(se)
if len(topk) > 0:
SED = len(unique_shared_entities) / len(topk)
else:
SED = 1
SEDs[uid] = SED
return SEDs
def topk_ETD(path_data):
ETDs = {}
for uid, topk in path_data.uid_topk.items():
if uid not in path_data.test_labels: continue
unique_path_types = set()
for pid in topk:
if pid not in path_data.uid_pid_explanation[uid]:
continue
current_path = path_data.uid_pid_explanation[uid][pid]
path_type = get_path_type(current_path)
unique_path_types.add(path_type)
ETD = len(unique_path_types) / TOTAL_PATH_TYPES[path_data.dataset_name]
ETDs[uid] = ETD
return ETDs
def get_attribute_list(dataset_name, flags):
attribute_list = {}
for attribute, flag in flags.items():
if flag and DATASET_SENSIBLE_ATTRIBUTE_MATRIX[dataset_name][attribute]:
attribute_list[attribute] = []
for attribute in attribute_list.keys():
if attribute == "Gender":
user2attribute, attribute2name = get_kg_uid_to_gender_map(dataset_name)
elif attribute == "Age":
user2attribute, attribute2name = get_kg_uid_to_age_map(dataset_name)
elif attribute == "Occupation":
user2attribute, attribute2name = get_kg_uid_to_occupation_map(dataset_name)
elif attribute == "Country":
pass #implement country
else:
print("Unknown attribute")
attribute_list[attribute] = [user2attribute, attribute2name]
return attribute_list
def avg_ETD(path_data):
uid_ETDs = topk_ETD(path_data)
    # Evaluate only the attributes that have been chosen and are available in the chosen dataset
flags = path_data.sens_attribute_flags
attribute_list = get_attribute_list(path_data.dataset_name, flags)
avg_groups_ETD = {}
groups_ETD_scores = {}
# Initialize group scores with empty list
for attribute in attribute_list.keys():
for _, attribute_label in attribute_list[attribute][1].items():
groups_ETD_scores[attribute_label] = []
if "Overall" not in groups_ETD_scores:
groups_ETD_scores["Overall"] = []
for uid, ETD in uid_ETDs.items():
for attribute in attribute_list.keys():
if uid not in attribute_list[attribute][0]: continue
attr_value = attribute_list[attribute][0][uid]
if attr_value not in attribute_list[attribute][1]: continue #Few users may have the attribute missing (LASTFM)
attr_name = attribute_list[attribute][1][attr_value]
groups_ETD_scores[attr_name].append(ETD)
groups_ETD_scores["Overall"].append(ETD)
for attribute_label, group_scores in groups_ETD_scores.items():
avg_groups_ETD[attribute_label] = np.array(group_scores).mean()
diversity_results = edict(
avg_groups_ETD=avg_groups_ETD,
groups_ETD_scores=groups_ETD_scores
)
return diversity_results
#Extract the value of LIR for the given user item path from the LIR_matrix
def LIR_single(path_data, path):
uid = int(path[0][-1])
if uid not in path_data.uid_timestamp or uid not in path_data.LIR_matrix or len(path_data.uid_timestamp[uid]) <= 1: return 0. #Should not enter there
predicted_path = path
linked_interaction = int(get_interaction_id(predicted_path))
linked_interaction_type = get_interaction_type(predicted_path)
#Handle the case of Amazon Dataset where a path may have different interaction types
if linked_interaction_type == "mentions":
LIR = path_data.LIR_matrix_words[uid][linked_interaction]
elif linked_interaction_type == "watched" or linked_interaction_type == "listened" or linked_interaction_type == "purchase":
LIR = path_data.LIR_matrix[uid][linked_interaction]
else:
LIR = 0.
return LIR
# Returns a dict mapping each uid to the LIR value computed over that user's top-k recommendations
def topk_LIR(path_data):
LIR_topk = {}
# Precompute user timestamps weigths
LIR_matrix = path_data.LIR_matrix
for uid in path_data.test_labels.keys(): #modified for pgpr labels
LIR_single_topk = []
if uid not in LIR_matrix or uid not in path_data.uid_topk:
continue
        for pid in path_data.uid_topk[uid]:
<filename>tests/test_framework/revault_network.py
import bip32
import logging
import os
import random
from ephemeral_port_reserve import reserve
from nacl.public import PrivateKey as Curve25519Private
from test_framework import serializations
from test_framework.bitcoind import BitcoindRpcProxy
from test_framework.coordinatord import Coordinatord
from test_framework.cosignerd import Cosignerd
from test_framework.miradord import Miradord
from test_framework.revaultd import ManagerRevaultd, StakeholderRevaultd, StkManRevaultd
from test_framework.utils import (
get_descriptors,
get_participants,
finalize_input,
wait_for,
TIMEOUT,
WT_PLUGINS_DIR,
)
class RevaultNetwork:
# FIXME: we use a single bitcoind for all the wallets because it's much
# more efficient. Eventually, we may have to test with separate ones.
def __init__(
self,
root_dir,
bitcoind,
executor,
postgres_user,
postgres_pass,
postgres_host="localhost",
):
self.root_dir = root_dir
self.bitcoind = bitcoind
self.daemons = []
self.executor = executor
self.postgres_user = postgres_user
        self.postgres_pass = postgres_pass
self.postgres_host = postgres_host
self.coordinator_port = reserve()
self.stk_wallets = []
self.stkman_wallets = []
self.man_wallets = []
self.csv = None
self.emergency_address = None
self.bitcoind_proxy = None
def deploy(
self,
n_stakeholders,
n_managers,
n_stkmanagers=0,
csv=None,
managers_threshold=None,
with_cosigs=True,
with_watchtowers=True,
with_cpfp=True,
bitcoind_rpc_mocks=[],
):
"""
Deploy a revault setup with {n_stakeholders} stakeholders, {n_managers}
managers.
"""
        # If not provided, default to the total number of managers (including
        # stakeholder-managers).
        # PS: No, I can't just use managers_threshold=n_managers in the method's signature :(
        if managers_threshold is None:
            managers_threshold = n_managers + n_stkmanagers
assert n_stakeholders + n_stkmanagers >= 2, "Not enough stakeholders"
assert n_managers + n_stkmanagers >= 1, "Not enough managers"
assert managers_threshold <= n_managers + n_stkmanagers, "Invalid threshold"
# Connection info to bitcoind. Change the port depending on whether we are proxying
# the daemons' requests.
bitcoind_cookie = os.path.join(self.bitcoind.bitcoin_dir, "regtest", ".cookie")
if len(bitcoind_rpc_mocks) > 0:
self.bitcoind_proxy = BitcoindRpcProxy(
self.bitcoind.rpcport, bitcoind_cookie, bitcoind_rpc_mocks
)
bitcoind_rpcport = self.bitcoind_proxy.rpcport
else:
bitcoind_rpcport = self.bitcoind.rpcport
(
stkonly_keychains,
stkonly_cosig_keychains,
manonly_keychains,
stkman_stk_keychains,
stkman_cosig_keychains,
stkman_man_keychains,
) = get_participants(n_stakeholders, n_managers, n_stkmanagers, with_cosigs)
stks_keychains = stkonly_keychains + stkman_stk_keychains
cosigs_keychains = stkonly_cosig_keychains + stkman_cosig_keychains
mans_keychains = manonly_keychains + stkman_man_keychains
if csv is None:
# Not more than 6 months
csv = random.randint(1, 26784)
self.csv = csv
man_cpfp_seeds = [os.urandom(32) for _ in range(len(manonly_keychains))]
man_cpfp_privs = [
bip32.BIP32.from_seed(seed, network="test") for seed in man_cpfp_seeds
]
stkman_cpfp_seeds = [os.urandom(32) for _ in range(len(stkman_man_keychains))]
stkman_cpfp_privs = [
bip32.BIP32.from_seed(seed, network="test") for seed in stkman_cpfp_seeds
]
cpfp_xpubs = [c.get_xpub() for c in man_cpfp_privs + stkman_cpfp_privs]
stks_xpubs = [stk.get_xpub() for stk in stks_keychains]
cosigs_keys = [cosig.get_static_key().hex() for cosig in cosigs_keychains]
mans_xpubs = [man.get_xpub() for man in mans_keychains]
(self.deposit_desc, self.unvault_desc, self.cpfp_desc) = get_descriptors(
stks_xpubs, cosigs_keys, mans_xpubs, managers_threshold, cpfp_xpubs, csv
)
# Generate a dummy 2of2 to be used as our Emergency address
desc = "wsh(multi(2,cRE7qAArQYnFQK7S1gXFTArFT4UWvh8J2v2EUajRWXbWFvRzxoeF,\
cTzcgRCmHNqUqZuZgvCPLUDXXrQSoVQpZiXQZWQzsLEytcTr6iXi))"
checksum = self.bitcoind.rpc.getdescriptorinfo(desc)["checksum"]
desc = f"{desc}#{checksum}"
self.emergency_address = self.bitcoind.rpc.deriveaddresses(desc)[0]
desc_import = self.bitcoind.rpc.importdescriptors(
[
{
"desc": desc,
"timestamp": "now",
"label": "revault-emergency",
}
]
)
if not desc_import[0]["success"]:
raise Exception(desc_import)
# FIXME: this is getting dirty.. We should re-centralize information
# about each participant in specified data structures
stkonly_cosigners_ports = []
stkman_cosigners_ports = []
        # The Noise keys are interdependent, so generate everything in advance
# to avoid roundtrips
coordinator_noisepriv = os.urandom(32)
coordinator_noisepub = bytes(
Curve25519Private(coordinator_noisepriv).public_key
)
(stkonly_noiseprivs, stkonly_noisepubs) = ([], [])
(stkonly_wt_noiseprivs, stkonly_wt_noisepubs) = ([], [])
(stkonly_cosig_noiseprivs, stkonly_cosig_noisepubs) = ([], [])
for i in range(len(stkonly_keychains)):
stkonly_noiseprivs.append(os.urandom(32))
stkonly_noisepubs.append(
bytes(Curve25519Private(stkonly_noiseprivs[i]).public_key)
)
if with_cosigs:
stkonly_cosig_noiseprivs.append(os.urandom(32))
stkonly_cosig_noisepubs.append(
bytes(Curve25519Private(stkonly_cosig_noiseprivs[i]).public_key)
)
# Unused yet
stkonly_wt_noiseprivs.append(os.urandom(32))
stkonly_wt_noisepubs.append(
bytes(Curve25519Private(stkonly_wt_noiseprivs[i]).public_key)
)
(stkman_noiseprivs, stkman_noisepubs) = ([], [])
(stkman_wt_noiseprivs, stkman_wt_noisepubs) = ([], [])
(stkman_cosig_noiseprivs, stkman_cosig_noisepubs) = ([], [])
for i in range(len(stkman_stk_keychains)):
stkman_noiseprivs.append(os.urandom(32))
stkman_noisepubs.append(
bytes(Curve25519Private(stkman_noiseprivs[i]).public_key)
)
if with_cosigs:
stkman_cosig_noiseprivs.append(os.urandom(32))
stkman_cosig_noisepubs.append(
bytes(Curve25519Private(stkman_cosig_noiseprivs[i]).public_key)
)
# Unused yet
stkman_wt_noiseprivs.append(os.urandom(32))
stkman_wt_noisepubs.append(
bytes(Curve25519Private(stkman_wt_noiseprivs[i]).public_key)
)
(man_noiseprivs, man_noisepubs) = ([], [])
for i in range(len(manonly_keychains)):
man_noiseprivs.append(os.urandom(32))
man_noisepubs.append(bytes(Curve25519Private(man_noiseprivs[i]).public_key))
logging.debug(
f"Using Noise pubkeys:\n- Stakeholders: {stkonly_noisepubs + stkman_noisepubs}"
f" (of which {len(stkman_noisepubs)} are also managers)"
f"\n- Managers: {man_noisepubs}\n- Watchtowers:"
f"{stkonly_wt_noisepubs + stkman_wt_noisepubs}\n"
)
# Spin up the "Sync Server"
coord_datadir = os.path.join(self.root_dir, "coordinatord")
os.makedirs(coord_datadir, exist_ok=True)
coordinatord = Coordinatord(
coord_datadir,
coordinator_noisepriv,
man_noisepubs + stkman_noisepubs,
stkonly_noisepubs + stkman_noisepubs,
stkonly_wt_noisepubs + stkman_wt_noisepubs,
self.coordinator_port,
bitcoind_rpcport,
bitcoind_cookie,
self.postgres_user,
self.postgres_pass,
self.postgres_host,
)
coordinatord.start()
self.daemons.append(coordinatord)
cosigners_info = []
for (i, noisepub) in enumerate(stkonly_cosig_noisepubs):
stkonly_cosigners_ports.append(reserve())
cosigners_info.append(
{
"host": f"127.0.0.1:{stkonly_cosigners_ports[i]}",
"noise_key": noisepub,
}
)
for (i, noisepub) in enumerate(stkman_cosig_noisepubs):
stkman_cosigners_ports.append(reserve())
cosigners_info.append(
{
"host": f"127.0.0.1:{stkman_cosigners_ports[i]}",
"noise_key": noisepub,
}
)
# Start daemons in parallel, as it takes a few seconds for each
start_jobs = []
# By default the watchtower should not revault anything
default_wt_plugin = {
"path": os.path.join(WT_PLUGINS_DIR, "revault_nothing.py"),
"conf": {},
}
# Spin up the stakeholders wallets and their cosigning servers
for i, stk in enumerate(stkonly_keychains):
if with_watchtowers:
datadir = os.path.join(self.root_dir, f"miradord-{i}")
os.makedirs(datadir)
wt_listen_port = reserve()
miradord = Miradord(
datadir,
str(self.deposit_desc),
str(self.unvault_desc),
str(self.cpfp_desc),
self.emergency_address,
wt_listen_port,
stkonly_wt_noiseprivs[i],
stkonly_noisepubs[i].hex(),
coordinator_noisepub.hex(),
self.coordinator_port,
bitcoind_rpcport,
bitcoind_cookie,
plugins=[default_wt_plugin],
)
start_jobs.append(self.executor.submit(miradord.start))
self.daemons.append(miradord)
datadir = os.path.join(self.root_dir, f"revaultd-stk-{i}")
os.makedirs(datadir, exist_ok=True)
stk_config = {
"keychain": stk,
"watchtowers": [
{
"host": f"127.0.0.1:{wt_listen_port}",
"noise_key": stkonly_wt_noisepubs[i].hex(),
}
]
if with_watchtowers
else [],
"emergency_address": self.emergency_address,
}
revaultd = StakeholderRevaultd(
datadir,
str(self.deposit_desc),
str(self.unvault_desc),
str(self.cpfp_desc),
stkonly_noiseprivs[i],
coordinator_noisepub.hex(),
self.coordinator_port,
bitcoind_rpcport,
bitcoind_cookie,
stk_config,
wt_process=miradord if with_watchtowers else None,
)
start_jobs.append(self.executor.submit(revaultd.start))
self.stk_wallets.append(revaultd)
if with_cosigs:
datadir = os.path.join(self.root_dir, f"cosignerd-stk-{i}")
os.makedirs(datadir, exist_ok=True)
cosignerd = Cosignerd(
datadir,
stkonly_cosig_noiseprivs[i],
stkonly_cosig_keychains[i].get_bitcoin_priv(),
stkonly_cosigners_ports[i],
man_noisepubs + stkman_noisepubs,
)
start_jobs.append(self.executor.submit(cosignerd.start))
self.daemons.append(cosignerd)
# Spin up the stakeholder-managers wallets and their cosigning servers
for i, stkman in enumerate(stkman_stk_keychains):
if with_watchtowers:
datadir = os.path.join(self.root_dir, f"miradord-stkman-{i}")
os.makedirs(datadir)
wt_listen_port = reserve()
miradord = Miradord(
datadir,
str(self.deposit_desc),
str(self.unvault_desc),
str(self.cpfp_desc),
self.emergency_address,
wt_listen_port,
stkman_wt_noiseprivs[i],
stkman_noisepubs[i].hex(),
coordinator_noisepub.hex(),
self.coordinator_port,
bitcoind_rpcport,
bitcoind_cookie,
plugins=[default_wt_plugin],
)
start_jobs.append(self.executor.submit(miradord.start))
self.daemons.append(miradord)
datadir = os.path.join(self.root_dir, f"revaultd-stkman-{i}")
os.makedirs(datadir, exist_ok=True)
stk_config = {
"keychain": stkman,
"watchtowers": [
{
"host": f"127.0.0.1:{wt_listen_port}",
"noise_key": stkman_wt_noisepubs[i].hex(),
}
]
if with_watchtowers
else [],
"emergency_address": self.emergency_address,
}
man_config = {
"keychain": stkman_man_keychains[i],
"cosigners": cosigners_info,
}
revaultd = StkManRevaultd(
datadir,
str(self.deposit_desc),
str(self.unvault_desc),
str(self.cpfp_desc),
stkman_noiseprivs[i],
coordinator_noisepub.hex(),
self.coordinator_port,
bitcoind_rpcport,
bitcoind_cookie,
stk_config,
man_config,
wt_process=miradord if with_watchtowers else None,
cpfp_seed=stkman_cpfp_seeds[i] if with_cpfp else None,
)
start_jobs.append(self.executor.submit(revaultd.start))
self.stkman_wallets.append(revaultd)
if with_cosigs:
datadir = os.path.join(self.root_dir, f"cosignerd-stkman-{i}")
os.makedirs(datadir, exist_ok=True)
cosignerd = Cosignerd(
datadir,
stkman_cosig_noiseprivs[i],
stkman_cosig_keychains[i].get_bitcoin_priv(),
stkman_cosigners_ports[i],
man_noisepubs + stkman_noisepubs,
)
start_jobs.append(self.executor.submit(cosignerd.start))
self.daemons.append(cosignerd)
# Spin up the managers (only) wallets
for i, man in enumerate(manonly_keychains):
datadir = os.path.join(self.root_dir, f"revaultd-man-{i}")
os.makedirs(datadir, exist_ok=True)
man_config = {"keychain": man, "cosigners": cosigners_info}
daemon = ManagerRevaultd(
datadir,
str(self.deposit_desc),
str(self.unvault_desc),
str(self.cpfp_desc),
man_noiseprivs[i],
coordinator_noisepub.hex(),
self.coordinator_port,
bitcoind_rpcport,
bitcoind_cookie,
man_config,
cpfp_seed=man_cpfp_seeds[i] if with_cpfp else None,
)
start_jobs.append(self.executor.submit(daemon.start))
self.man_wallets.append(daemon)
for j in start_jobs:
j.result(TIMEOUT)
self.daemons += self.stk_wallets + self.stkman_wallets + self.man_wallets
def mans(self):
return self.stkman_wallets + self.man_wallets
def stks(self):
return self.stkman_wallets + self.stk_wallets
def participants(self):
return self.stkman_wallets + self.stk_wallets + self.man_wallets
def man(self, n):
"""Get the {n}th manager (including the stakeholder-managers first)"""
mans = self.stkman_wallets + self.man_wallets
return mans[n]
def stk(self, n):
"""Get the {n}th stakeholder (including the stakeholder-managers first)"""
stks = self.stkman_wallets + self.stk_wallets
return stks[n]
def signed_unvault_psbt(self, deposit, derivation_index):
"""Get the fully-signed Unvault transaction for this deposit.
This will raise if we don't have all the signatures.
"""
psbt_str = self.stks()[0].rpc.listpresignedtransactions([deposit])[
"presigned_transactions"
][0]["unvault"]
psbt = serializations.PSBT()
psbt.deserialize(psbt_str)
finalize_input(self.deposit_desc, psbt.inputs[0], derivation_index)
psbt.tx.wit.vtxinwit.append(psbt.inputs[0].final_script_witness)
return psbt.tx.serialize_with_witness().hex()
def signed_cancel_psbt(self, deposit, derivation_index):
"""Get the fully-signed Cancel transaction for this deposit.
This picks the lowest feerate version.
This will raise if we don't have all the signatures.
"""
psbt_str = self.stks()[0].rpc.listpresignedtransactions([deposit])[
"presigned_transactions"
][0]["cancel"][0]
psbt = serializations.PSBT()
psbt.deserialize(psbt_str)
finalize_input(self.unvault_desc, psbt.inputs[0], derivation_index)
psbt.tx.wit.vtxinwit.append(psbt.inputs[0].final_script_witness)
return psbt.tx.serialize_with_witness().hex()
def get_vault(self, address):
"""Get a vault entry by outpoint or by address"""
for v in self.man(0).rpc.listvaults()["vaults"]:
if v["address"] == address:
return v
def fund(self, amount=None):
"""Deposit coins into the architectures, by paying to the deposit
descriptor and getting the tx 6 blocks confirmations."""
assert (
len(self.man_wallets + self.stkman_wallets) > 0
), "You must have deploy()ed first"
man = self.man(0)
if amount is None:
amount = 49.9999
addr = man.rpc.getdepositaddress()["address"]
txid = self.bitcoind.rpc.sendtoaddress(addr, amount)
man.wait_for_log(f"Got a new unconfirmed deposit at {txid}")
self.bitcoind.generate_block(6, wait_for_mempool=txid)
man.wait_for_log(f"Vault at {txid}.* is now confirmed")
vaults = man.rpc.listvaults(["funded"])["vaults"]
for v in vaults:
if v["txid"] == txid:
for w in self.man_wallets + self.stk_wallets:
w.wait_for_deposits([f"{txid}:{v['vout']}"])
return v
raise Exception(f"Vault created by '{txid}' got in logs but not in listvaults?")
def fundmany(self, amounts=[]):
"""Deposit coins into the architectures in a single transaction"""
assert (
len(self.man_wallets + self.stkman_wallets) > 0
), "You must have deploy()ed first"
        assert len(amounts) > 0, "You must provide at least one amount!"
man = self.man(0)
curr_index = 0
vaults = man.rpc.listvaults()["vaults"]
for v in vaults:
if v["derivation_index"] > curr_index:
curr_index = v["derivation_index"]
indexes = list(range(curr_index + 1, curr_index + 1 + len(amounts)))
# Copyright 2015 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import sqlalchemy as sa
from sqlalchemy import event
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
import sys
from oslo_config import cfg
from oslo_log import log as logging
from mistral.db.sqlalchemy import model_base as mb
from mistral.db.sqlalchemy import types as st
from mistral import exceptions as exc
from mistral.services import security
from mistral import utils
# Definition objects.
LOG = logging.getLogger(__name__)
def _get_hash_function_by(column_name):
def calc_hash(context):
val = context.current_parameters[column_name] or {}
if isinstance(val, dict):
# If the value is a dictionary we need to make sure to have
# keys in the same order in a string representation.
hash_base = json.dumps(sorted(val.items()))
else:
hash_base = str(val)
return hashlib.sha256(hash_base.encode('utf-8')).hexdigest()
return calc_hash
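# Illustrative note (not part of the original module): because the items of a dict
# value are sorted before hashing, two dicts with the same contents but a different
# key order get the same default hash. A minimal sketch of the same computation:
#
#     import hashlib, json
#     a = json.dumps(sorted({'x': 1, 'y': 2}.items()))
#     b = json.dumps(sorted({'y': 2, 'x': 1}.items()))
#     assert hashlib.sha256(a.encode('utf-8')).hexdigest() == \
#            hashlib.sha256(b.encode('utf-8')).hexdigest()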
def validate_long_type_length(cls, field_name, value):
"""Makes sure the value does not exceeds the maximum size."""
if value:
# Get the configured limit.
size_limit_kb = cfg.CONF.engine.execution_field_size_limit_kb
# If the size is unlimited.
if size_limit_kb < 0:
return
size_kb = int(sys.getsizeof(str(value)) / 1024)
if size_kb > size_limit_kb:
LOG.error(
"Size limit %dKB exceed for class [%s], "
"field %s of size %dKB.",
size_limit_kb, str(cls), field_name, size_kb
)
raise exc.SizeLimitExceededException(
field_name,
size_kb,
size_limit_kb
)
def register_length_validator(attr_name):
"""Register an event listener on the attribute.
This event listener will validate the size every
time a 'set' occurs.
"""
for cls in utils.iter_subclasses(Execution):
if hasattr(cls, attr_name):
event.listen(
getattr(cls, attr_name),
'set',
lambda t, v, o, i: validate_long_type_length(cls, attr_name, v)
)
class Definition(mb.MistralSecureModelBase):
__abstract__ = True
id = mb.id_column()
name = sa.Column(sa.String(255))
definition = sa.Column(st.MediumText(), nullable=True)
spec = sa.Column(st.JsonMediumDictType())
tags = sa.Column(st.JsonListType())
is_system = sa.Column(sa.Boolean())
# There's no WorkbookExecution so we safely omit "Definition" in the name.
class Workbook(Definition):
"""Contains info about workbook (including definition in Mistral DSL)."""
__tablename__ = 'workbooks_v2'
__table_args__ = (
sa.UniqueConstraint('name', 'project_id'),
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
)
class WorkflowDefinition(Definition):
"""Contains info about workflow (including definition in Mistral DSL)."""
__tablename__ = 'workflow_definitions_v2'
__table_args__ = (
sa.UniqueConstraint('name', 'project_id'),
sa.Index('%s_is_system' % __tablename__, 'is_system'),
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
)
class ActionDefinition(Definition):
"""Contains info about registered Actions."""
__tablename__ = 'action_definitions_v2'
__table_args__ = (
sa.UniqueConstraint('name', 'project_id'),
sa.Index('%s_is_system' % __tablename__, 'is_system'),
sa.Index('%s_action_class' % __tablename__, 'action_class'),
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
)
# Main properties.
description = sa.Column(sa.Text())
input = sa.Column(sa.Text())
# Service properties.
action_class = sa.Column(sa.String(200))
attributes = sa.Column(st.JsonDictType())
# Execution objects.
class Execution(mb.MistralSecureModelBase):
__abstract__ = True
# Common properties.
id = mb.id_column()
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255), nullable=True)
workflow_name = sa.Column(sa.String(255))
workflow_id = sa.Column(sa.String(80))
spec = sa.Column(st.JsonMediumDictType())
state = sa.Column(sa.String(20))
state_info = sa.Column(sa.Text(), nullable=True)
tags = sa.Column(st.JsonListType())
# Internal properties which can be used by engine.
runtime_context = sa.Column(st.JsonLongDictType())
class ActionExecution(Execution):
"""Contains action execution information."""
__tablename__ = 'action_executions_v2'
__table_args__ = (
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
sa.Index('%s_state' % __tablename__, 'state'),
sa.Index('%s_updated_at' % __tablename__, 'updated_at')
)
# Main properties.
accepted = sa.Column(sa.Boolean(), default=False)
input = sa.Column(st.JsonLongDictType(), nullable=True)
output = sa.orm.deferred(sa.Column(st.JsonLongDictType(), nullable=True))
class WorkflowExecution(Execution):
"""Contains workflow execution information."""
__tablename__ = 'workflow_executions_v2'
__table_args__ = (
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
sa.Index('%s_state' % __tablename__, 'state'),
sa.Index('%s_updated_at' % __tablename__, 'updated_at'),
)
# Main properties.
accepted = sa.Column(sa.Boolean(), default=False)
input = sa.Column(st.JsonLongDictType(), nullable=True)
output = sa.orm.deferred(sa.Column(st.JsonLongDictType(), nullable=True))
params = sa.Column(st.JsonLongDictType())
# Initial workflow context containing workflow variables, environment,
# openstack security context etc.
# NOTES:
# * Data stored in this structure should not be copied into inbound
# contexts of tasks. No need to duplicate it.
# * This structure does not contain workflow input.
context = sa.Column(st.JsonLongDictType())
class TaskExecution(Execution):
"""Contains task runtime information."""
__tablename__ = 'task_executions_v2'
__table_args__ = (
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
sa.Index('%s_state' % __tablename__, 'state'),
sa.Index('%s_updated_at' % __tablename__, 'updated_at'),
sa.UniqueConstraint('unique_key')
)
# Main properties.
action_spec = sa.Column(st.JsonLongDictType())
unique_key = sa.Column(sa.String(250), nullable=True)
type = sa.Column(sa.String(10))
# Whether the task is fully processed (publishing and calculating commands
    # after it). This allows workflow controller implementations to be
    # simplified significantly.
processed = sa.Column(sa.BOOLEAN, default=False)
# Data Flow properties.
in_context = sa.Column(st.JsonLongDictType())
published = sa.Column(st.JsonLongDictType())
@property
def executions(self):
return (
self.action_executions
if not self.spec.get('workflow')
else self.workflow_executions
)
for cls in utils.iter_subclasses(Execution):
event.listen(
# Catch and trim Execution.state_info to always fit allocated size.
# Note that the limit is 65500 which is less than 65535 (2^16 -1).
# The reason is that utils.cut() is not exactly accurate in case if
# the value is not a string, but, for example, a dictionary. If we
# limit it exactly to 65535 then once in a while it may go slightly
# beyond the allowed maximum size. It may depend on the order of
# keys in a string representation and other things that are hidden
# inside utils.cut_dict() method.
cls.state_info,
'set',
lambda t, v, o, i: utils.cut(v, 65500),
retval=True
)
# Many-to-one for 'ActionExecution' and 'TaskExecution'.
ActionExecution.task_execution_id = sa.Column(
sa.String(36),
sa.ForeignKey(TaskExecution.id, ondelete='CASCADE'),
nullable=True
)
TaskExecution.action_executions = relationship(
ActionExecution,
backref=backref('task_execution', remote_side=[TaskExecution.id]),
cascade='all, delete-orphan',
foreign_keys=ActionExecution.task_execution_id,
lazy='select'
)
sa.Index(
'%s_task_execution_id' % ActionExecution.__tablename__,
'task_execution_id'
)
# Many-to-one for 'WorkflowExecution' and 'TaskExecution'.
WorkflowExecution.task_execution_id = sa.Column(
sa.String(36),
sa.ForeignKey(TaskExecution.id, ondelete='CASCADE'),
nullable=True
)
TaskExecution.workflow_executions = relationship(
WorkflowExecution,
backref=backref('task_execution', remote_side=[TaskExecution.id]),
cascade='all, delete-orphan',
foreign_keys=WorkflowExecution.task_execution_id,
lazy='select'
)
sa.Index(
'%s_task_execution_id' % WorkflowExecution.__tablename__,
'task_execution_id'
)
# Many-to-one for 'TaskExecution' and 'WorkflowExecution'.
TaskExecution.workflow_execution_id = sa.Column(
sa.String(36),
sa.ForeignKey(WorkflowExecution.id, ondelete='CASCADE')
)
WorkflowExecution.task_executions = relationship(
TaskExecution,
backref=backref('workflow_execution', remote_side=[WorkflowExecution.id]),
cascade='all, delete-orphan',
foreign_keys=TaskExecution.workflow_execution_id,
lazy='select'
)
sa.Index(
'%s_workflow_execution_id' % TaskExecution.__tablename__,
TaskExecution.workflow_execution_id
)
# Other objects.
class DelayedCall(mb.MistralModelBase):
"""Contains info about delayed calls."""
__tablename__ = 'delayed_calls_v2'
id = mb.id_column()
factory_method_path = sa.Column(sa.String(200), nullable=True)
target_method_name = sa.Column(sa.String(80), nullable=False)
method_arguments = sa.Column(st.JsonDictType())
serializers = sa.Column(st.JsonDictType())
key = sa.Column(sa.String(250), nullable=True)
auth_context = sa.Column(st.JsonDictType())
execution_time = sa.Column(sa.DateTime, nullable=False)
processing = sa.Column(sa.Boolean, default=False, nullable=False)
sa.Index(
'%s_execution_time' % DelayedCall.__tablename__,
DelayedCall.execution_time
)
class Environment(mb.MistralSecureModelBase):
"""Contains environment variables for workflow execution."""
__tablename__ = 'environments_v2'
__table_args__ = (
sa.UniqueConstraint('name', 'project_id'),
sa.Index('%s_name' % __tablename__, 'name'),
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
)
# Main properties.
id = mb.id_column()
name = sa.Column(sa.String(200))
description = sa.Column(sa.Text())
variables = sa.Column(st.JsonLongDictType())
class CronTrigger(mb.MistralSecureModelBase):
"""Contains info about cron triggers."""
__tablename__ = 'cron_triggers_v2'
__table_args__ = (
sa.UniqueConstraint('name', 'project_id'),
sa.UniqueConstraint(
'workflow_input_hash', 'workflow_name', 'pattern', 'project_id',
'workflow_params_hash', 'remaining_executions',
'first_execution_time'
),
sa.Index(
'%s_next_execution_time' % __tablename__,
'next_execution_time'
),
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
sa.Index('%s_workflow_name' % __tablename__, 'workflow_name'),
)
id = mb.id_column()
name = sa.Column(sa.String(200))
pattern = sa.Column(
sa.String(100),
nullable=True,
default='0 0 30 2 0' # Set default to 'never'.
)
first_execution_time = sa.Column(sa.DateTime, nullable=True)
next_execution_time = sa.Column(sa.DateTime, nullable=False)
workflow_name = sa.Column(sa.String(255))
remaining_executions = sa.Column(sa.Integer)
workflow_id = sa.Column(
sa.String(36),
sa.ForeignKey(WorkflowDefinition.id)
)
workflow = relationship('WorkflowDefinition', lazy='joined')
workflow_params = sa.Column(st.JsonDictType())
workflow_params_hash = sa.Column(
sa.CHAR(64),
default=_get_hash_function_by('workflow_params')
)
workflow_input = sa.Column(st.JsonDictType())
workflow_input_hash = sa.Column(
sa.CHAR(64),
default=_get_hash_function_by('workflow_input')
)
trust_id = sa.Column(sa.String(80))
def to_dict(self):
d = super(CronTrigger, self).to_dict()
utils.datetime_to_str_in_dict(d, 'first_execution_time')
utils.datetime_to_str_in_dict(d, 'next_execution_time')
return d
# Register all hooks related to secure models.
mb.register_secure_model_hooks()
# TODO(rakhmerov): This is a bad solution. It's hard to find in the code,
# configure flexibly etc. Fix it.
# Register an event listener to verify that the size of all the long columns
# affected by the user does not exceed the configured limit.
for attr_name in ['input', 'output', 'params', 'published']:
register_length_validator(attr_name)
class ResourceMember(mb.MistralModelBase):
"""Contains info about resource members."""
__tablename__ = 'resource_members_v2'
__table_args__ = (
sa.UniqueConstraint(
'resource_id',
'resource_type',
'member_id'
),
)
id = mb.id_column()
resource_id = sa.Column(sa.String(80), nullable=False)
resource_type = sa.Column(
sa.String(50),
nullable=False,
default='workflow'
)
project_id = sa.Column(sa.String(80), default=security.get_project_id)
member_id = sa.Column(sa.String(80), nullable=False)
status = sa.Column(sa.String(20), nullable=False, default="pending")
class EventTrigger(mb.MistralSecureModelBase):
"""Contains info about event triggers."""
__tablename__ = 'event_triggers_v2'
__table_args__ = (
sa.UniqueConstraint('exchange', 'topic', 'event', 'workflow_id',
'project_id'),
sa.Index('%s_project_id_workflow_id' % __tablename__, 'project_id',
'workflow_id'),
)
id = mb.id_column()
name = sa.Column(sa.String(200))
workflow_id = sa.Column(
sa.String(36),
sa.ForeignKey(WorkflowDefinition.id)
)
workflow_params = sa.Column(st.JsonDictType())
workflow_input = sa.Column(st.JsonDictType())
exchange = sa.Column(sa.String(80), nullable=False)
topic = sa.Column(sa.String(80), nullable=False)
event = sa.Column(sa.String(80), nullable=False)
trust_id = sa.Column(sa.String(80))
class NamedLock(mb.MistralModelBase):
"""Contains info about named locks.
Usage of named locks is based on properties of READ COMMITTED
    transactions
import qiskit, time
from numpy import pi
# define pi so that string gates can use pi as an angle, because string gates
# are evaluated with eval. For example, gate = "rz(pi/2, 1)".
name = "IBM"
simulators = simulator, unitary_simulator, state_simulator = (
"qasm_simulator", "unitary_simulator",
"statevector_simulator"
)
quantum_computer = "ibmqx4"
def apply_credentials():
print("\nApplying credentials...\n")
# with open("qSonify/qc/APItoken.txt") as f: APItoken = f.read().strip()
try:
# qiskit.IBMQ.enable_account(APItoken)
qiskit.IBMQ.load_accounts()
print('Available backends:')
print(qiskit.IBMQ.backends())
print(qiskit.Aer.backends())
print("\nCredientials applied\n")
except:
print('Something went wrong.\nDid you enter a correct token?')
#### String algorithm methods ####
# With this, we can write an algorithm as a list with any of the keys in
# GATE_ARGUMENTS. So, for example,
# alg = ["H(0)", "RX(pi/2, 1)", "CX(1, 2)", "u3(pi/2, pi/4, .2, 0)"]
# then apply it to a qiskit.QuantumCircuit and qiskit.QuantumRegister qc and r
# respectively by calling
# apply_string_algorithm(alg, r, qc).
p = lambda x: ("reg[%d]",)*x
a = lambda x: ("%g",)*x
b = lambda x, y: "(" + ", ".join(a(x)+p(y)) + ")"
GATE_PARAMS = { ## The first number is the number of parameters,
## The second number is the number of qubit arguments.
"ccx": (0, 3), "ch": (0, 2), "crz": (1, 2), "cswap": (0, 3), "cu1": (1, 2),
"cu3": (3, 2), "cx": (0, 2), "cx_base": (0, 2), "cy": (0, 2), "cz": (0, 2),
"h": (0, 1), "iden": (0, 1), "rx": (1, 1), "ry": (1, 1), "rz": (1, 1),
"rzz": (1, 2), "s": (0, 1), "sdg": (0, 1), "swap": (0, 2), "t": (0, 1),
"tdg": (0, 1), "u0": (1, 1), "u1": (1, 1), "u2": (2, 1), "u3": (3, 1),
"u_base": (3, 1), "x": (0, 1), "y": (0, 1), "z": (0, 1),
}
GATE_ARGUMENTS = {gate: b(*args) for gate, args in GATE_PARAMS.items()}
GATE_ARGUMENTS["measure"] = "(reg[%d], c_reg[%d])"
def get_gate_info(gate):
"""
gate: str, string gate. ie H(0), or "cx(1, 0)".
returns: tuple, (gate_name (str), gate_args (tuple)).
"""
gate = gate.strip().lower().replace("cnot", "cx")
i = gate.index("(")
gate_name, gate_args = gate[:i], eval(gate[i:])
try: len(gate_args)
except TypeError: gate_args = gate_args,
return gate_name, gate_args
def get_num_qubits(algorithm):
"""
Determine the max qubit value used in the algorithm.
algorithm: iterable, each element must be a string gate, as in
apply_string_gate above.
ie, algorithm = ["h(0)", "cx(0, 1)", "rx(pi/4, 1)",..]
returns: int, max qubit value in algorithm.
"""
n = -1
for gate in algorithm:
gate_name, gate_args = get_gate_info(gate)
if gate_name == "measure": m = gate_args[0]
# elif sum(GATE_PARAMS[gate_name]) == 1: m = gate_args
else: m = max(gate_args[GATE_PARAMS[gate_name][0]:])
n = max(n, m)
return n + 1
def apply_string_gate(gate, reg, cir, c_reg=None):
"""
gate: str, one of the elements in GATE_ARGUMENTS.keys() + a tuple of
arguments. ie, for a rx rotation by pi/2 radians on qubit 0,
gate = "rx(pi/2, 0)".
reg: qiskit.QuantumRegister, register to apply gate to.
cir: qiskit.QuantumCircuit, circuit to add gate to.
c_reg: qiskit.ClassicalRegister, must be supplied if gate is a measurement.
Classical register to measure to.
returns: int, if gate is a measure gate, then return the integer
corresponding to the classical register to measure to,
otherwise returns -1.
"""
gate_name, gate_args = get_gate_info(gate)
# apply gate
eval("cir." + gate_name + GATE_ARGUMENTS[gate_name] % gate_args)
# value of the classical register to measure to
if "measure" in gate: return gate_args[-1]
else: return -1
def apply_string_algorithm(algorithm, reg, cir, c_reg=None):
"""
algorithm: iterable, each element must be a string gate, as in
apply_string_gate above.
ie, algorithm = ["h(0)", "cx(0, 1)", "rx(pi/4, 1)",..]
reg: qiskit.QuantumRegister, register to apply algorithm to.
cir: qiskit.QuantumCircuit, circuit to add gates in algorithm to.
c_reg: qiskit.ClassicalRegister, must be supplied if gate is a measurement.
Classical register to measure to.
returns: int, if the algorithm has any measure gates, then returns the
integer corresponding to the largest index of the classical
register that is measured to, otherwise returns -1.
"""
if not algorithm: return -1
return max(apply_string_gate(gate, reg, cir, c_reg) for gate in algorithm)
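# A minimal usage sketch (not part of the original module; the function name and the
# example algorithm are illustrative only): build a Bell-pair circuit from string gates.
def _example_string_algorithm():
    """Return a 2-qubit circuit built by applying a string algorithm."""
    alg = ["h(0)", "cx(0, 1)", "measure(0, 0)", "measure(1, 1)"]
    n = get_num_qubits(alg)
    reg = qiskit.QuantumRegister(n)
    c_reg = qiskit.ClassicalRegister(n)
    cir = qiskit.QuantumCircuit(reg, c_reg)
    # Each string gate is parsed and applied to the circuit; measure gates need c_reg.
    apply_string_algorithm(alg, reg, cir, c_reg)
    return cir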
def _make_job(qc, backend, num_samples):
"""
Begin the execution of the circuit qc on the backend with shots=num_samples
qc: qiskit.QuantumCircuit or list of circuits, circuits to run.
backend: str, IBM backend to run circuit on. Can be 'ibmqx4', 'ibmqx5',
'local_qasm_simulator', 'local_unitary_simulator', etc.
num_samples: int, number of samples to take from the quantum computer in
in order to determine the probabilities for each state.
returns: qiskit Job object from qiskit.backends.
"""
if backend in simulators: f = qiskit.Aer
else: f = qiskit.IBMQ
try:
return qiskit.execute(qc, backend=f.get_backend(backend),
shots=num_samples, max_credits=3)
except LookupError:
apply_credentials()
return qiskit.execute(qc, backend=f.get_backend(backend),
shots=num_samples, max_credits=3)
class Result(dict):
""" Just a dictionary that automatically gives default values = 0.0 """
def __getitem__(self, key):
""" Return 0.0 if key not in result dictionary """
return self.get(key, 0.0)
def run(algorithm, num_qubits=None, num_samples=8000, backend=simulator):
"""
Create a quantum circuit, run the algorithm, return the resulting
probability distribution.
algorithm: algorithm (list of strings) or list of algorithms,
each string is a gate in GATE_ARGUMENTS.keys() with whatever
arguments required to define the gate.
num_qubits: int, number of qubits to run each algorithm on. Can be None,
in which case the algorithm will be run on the minimum
number of qubits required.
num_samples: int, number of samples to take from the quantum computer in
in order to determine the probabilities for each state.
backend: str, IBM backend to run the algorithm on. If backend is not
a local simulator then credentials must have already
been applied.
returns: dict (common.Result), keys are states, values are probabilities
found to be in that state.
"""
multiple = bool(algorithm and isinstance(algorithm[0], list))
if not multiple: algorithm = [algorithm]
n = len(algorithm)
if num_qubits is None:
num_qubits = max(get_num_qubits(a) for a in algorithm)
q = qiskit.QuantumRegister(num_qubits)
c = [qiskit.ClassicalRegister(num_qubits) for _ in range(n)]
qc = [qiskit.QuantumCircuit(q, c[j]) for j in range(n)]
for j in range(n):
i = apply_string_algorithm(algorithm[j], q, qc[j], c[j])
if i == -1: qc[j].measure(q, c[j])
else: c[j].size = i + 1
job_exp = _make_job(qc, backend, num_samples)
    # Random queue errors sometimes cause the job to never complete.
    # Two things I have encountered: either the connection drops and an
    # error is returned, or the server reports the job as running
    # indefinitely, i.e. it just gets stuck. If either of those things
    # happens, we cancel and reinitialize our job(s) into the queue.
if backend not in simulators:
lapse, interval = 0, 30
done = False
while not done:
str_status = str(job_exp.status())
queue_position = job_exp.queue_position()
error = job_exp.error_message()
print('\nStatus @ %d seconds' % (interval * lapse))
print("queue position =", queue_position)
print(str_status)
done = queue_position is not None and queue_position < 1
if error:
print("\nEncountered an error")
print(error)
print("reentering job into queue\n")
job_exp.cancel()
job_exp = _make_job(qc, backend, num_samples)
lapse = 0
lapse += 1
time.sleep(interval)
res = job_exp.result()
    ## qiskit orders their bits opposite to Cirq and ProjectQ, and in my
## opinion in a much less intuitive way. So I flip the order of the bits
## here.
if multiple:
return [
Result(
{k[::-1]: v/num_samples
for k, v in res.get_counts(cir).items()}
) for cir in qc
]
else:
return Result(
{k[::-1]: v/num_samples for k, v in res.get_counts(qc[0]).items()}
)
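# Rough usage sketch (not from the original source): running a Bell-pair algorithm on
# the local simulator should give a distribution close to {'00': 0.5, '11': 0.5}, up
# to sampling noise:
#
#     probs = run(["h(0)", "cx(0, 1)"], num_samples=2000, backend=simulator)
#     print(probs["00"], probs["11"])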
def algorithm_unitary(algorithm, num_qubits=None):
"""
Find the unitary corresponding to the algorithm.
algorithm: list of strings, each string is a gate in GATE_ARGUMENTS.keys()
with whatever arguments required to define the
gate.
num_qubits: int, number of qubits to run the algorithm on.
returns: np.array, unitary matrix corresponding to the algorithm.
"""
if num_qubits is None: num_qubits = get_num_qubits(algorithm)
if not algorithm: algorithm = ["iden(0)"]
    ## qiskit orders their bits opposite to Cirq and ProjectQ, and in my
## opinion in a much less intuitive way. So I flip the order of the bits
## here.
a = []
for gate in algorithm:
gate_name, gate_args = get_gate_info(gate)
i = GATE_PARAMS[gate_name][0]
| |
or -n flags.', 'the -s and -n flags will be handled internally based on the alignment you input.')
except ValueError, (ErrorTitle, ErrorMessage, ErrorDescription):
self.message(str(ErrorTitle), str(ErrorMessage), str(ErrorDescription))
return
self.raxmlOperations.raxml_species_tree(self.raxmlOperations.inputFilename, rooted=self.raxmlOperations.speciesTreeRooted, outgroup=self.raxmlOperations.speciesTreeOutGroup, customRax=self.raxmlOperations.speciesTreeUseCustomRax, customRaxCommand=self.raxmlOperations.speciesTreeCustomRaxmlCommand)
def requestedFigures(self):
requestedFigures = set()
if self.checkboxAllTrees.isChecked():
requestedFigures.add('Top Topologies Tree Visualization')
if self.checkboxScatterPlot.isChecked():
requestedFigures.add('Windows to Top Topologies Scatter Plot')
if self.checkboxDonutPlot.isChecked():
requestedFigures.add('Top Topology Frequency Donut Plot')
if self.checkboxWindowsToInfSites.isChecked():
requestedFigures.add('Windows to Informative Sites Line Graph')
if self.checkboxHeatMap.isChecked():
requestedFigures.add('Informative Sites Heat Map')
if self.checkboxRobinsonFoulds.isChecked():
requestedFigures.add('Robinson Foulds')
if self.checkboxPGTST.isChecked():
            requestedFigures.add('p(GT | ST)')
return requestedFigures
def generateHeatmap(self):
self.updatedDisplayWindows()
def generateFigures(self):
if self.runComplete:
if self.raxmlInputErrorHandling():
self.figuresToBeRegenerated = self.prevGeneratedFigures.intersection(self.requestedFigures())
if len(self.figuresToBeRegenerated) > 0:
# execute window
regen = self.question("Regenerate Figures?", "You have selected figures which have previously been generated. All selected figures will be generated. Are you sure you want to proceed?")
# if the user selected the 'ok' button
if regen == QtGui.QMessageBox.Yes:
# start raxml operations thread
self.updatedDisplayWindows()
# if raxml hasn't been run before just run it
else:
self.updatedDisplayWindows()
def updatedDisplayWindows(self):
# run commands that are shared by all functions
if self.getNumberChecked() > 0:
num = self.topTopologies
topologies_to_counts, unique_topologies_to_newicks = self.topologyPlotter.topology_counter(rooted=self.rooted, outgroup=self.outgroupComboBox.currentText())
self.numberOfUniqueTopologiesLabel.setText(str(len(topologies_to_counts)))
if num > len(topologies_to_counts):
num = len(topologies_to_counts)
self.topologyPlotter.num = num
list_of_top_counts, labels, sizes = self.topologyPlotter.top_freqs(num, topologies_to_counts)
top_topologies_to_counts = self.topologyPlotter.top_topologies(num, topologies_to_counts)
windows_to_top_topologies, top_topologies_list = self.topologyPlotter.windows_to_newick(top_topologies_to_counts, unique_topologies_to_newicks, rooted=self.rooted, outgroup=self.outgroupComboBox.currentText()) # all trees, scatter, circle, donut
topologies_to_colors, scatter_colors, ylist = self.topologyPlotter.topology_colors(windows_to_top_topologies, top_topologies_list) # scatter, circle, (donut?)
# generate robinson foulds and pgtst graphs
if self.checkboxRobinsonFoulds.isChecked():
                self.prevGeneratedFigures.add('Robinson Foulds')
if self.checkboxWeighted.isChecked():
windows_to_w_rf, windows_to_uw_rf = self.statisticsCalculations.calculate_windows_to_rf(self.speciesTree, self.checkboxWeighted.isChecked())
self.robinsonFouldsWindow = robinsonFouldsWindow.RobinsonFouldsWindow('Weighted Robinson Foulds Distance', windows_to_w_rf, 'Unweighted Robinson Foulds Distance', windows_to_uw_rf)
else:
windows_to_uw_rf = self.statisticsCalculations.calculate_windows_to_rf(self.speciesTree, self.checkboxWeighted.isChecked())
self.robinsonFouldsWindow = robinsonFouldsWindow.RobinsonFouldsWindow('Unweighted Robinson Foulds Distance', windows_to_uw_rf)
if self.checkboxPGTST.isChecked():
self.prevGeneratedFigures.add('p(GT | ST)')
windowsToPGTST = self.statisticsCalculations.calculate_windows_to_p_gtst(self.speciesTree)
self.pgtstWindow = pgtstWindow.PGTSTWindow(windowsToPGTST, "p(gt|st)", xLabel="Windows", yLabel="Probability")
# generate donut plot
if self.checkboxDonutPlot.isChecked():
self.prevGeneratedFigures.add('Top Topology Frequency Donut Plot')
donut_colors = self.topologyPlotter.donut_colors(top_topologies_to_counts, topologies_to_colors) # donut
self.donutPlotWindow = donutPlotWindow.DonutPlotWindow('Frequency of Top Topologies', labels, sizes, donut_colors)
# generate scatter plot
if self.checkboxScatterPlot.isChecked():
self.prevGeneratedFigures.add('Windows to Top Topologies Scatter Plot')
self.scatterPlotWindow = scatterPlotWindow.ScatterPlotWindow('Windows to Top Topologies', windows_to_top_topologies, scatter_colors, ylist)
# generate informative sites heatmap graph
if self.checkboxHeatMap.isChecked():
self.prevGeneratedFigures.add('Informative Sites Heat Map')
sites_to_informative, windows_to_informative_count, windows_to_informative_pct, pct_informative = self.informativeSites.calculate_informativeness('windows', 0, self.heatmapPercentage.text(),alignment=self.inputFileEntry.text())
self.heatMapWindow = heatMapWindow.HeatMapWindow('Heat Map', sites_to_informative)
# generate windows to informative sites line graph
if self.checkboxWindowsToInfSites.isChecked():
self.prevGeneratedFigures.add('Windows to Informative Sites Line Graph')
sites_to_informative, windows_to_informative_count, windows_to_informative_pct, pct_informative = self.informativeSites.calculate_informativeness('windows', self.raxmlOperations.windowOffset)
self.windowsToInfSitesWindow = windowsToInfSitesWindow.WindowsToInfSitesWindow('Windows to Informative Sites', windows_to_informative_pct)
# generate bootstrap graph
if self.checkboxBootstrap.isChecked():
internal_nodes_i, internal_nodes_f = self.bootstrapContraction.internal_nodes_after_contraction(self.confidenceLevel)
self.bootstrapContractionWindow = bootstrapContractionWindow.BootstrapContractionWindow(internal_nodes_i, internal_nodes_f, self.confidenceLevel, xLabel="Window Indices", yLabel="Number of Internal Nodes")
# generate all trees graph
if self.checkboxAllTrees.isChecked():
self.prevGeneratedFigures.add('Top Topologies Tree Visualization')
self.allTreesWindow = allTreesWindow.AllTreesWindow('', topologies_to_colors, topologies_to_counts, rooted=self.checkboxRooted.isChecked(), outGroup=self.outgroupComboBox.currentText())
def raxmlInputErrorHandling(self):
"""
returns true if all tests pass otherwise false
"""
try:
# input alignment for raxml
self.raxmlOperations.inputFilename = self.checkEntryPopulated(self.inputFileEntry, errorTitle='Missing Alignment', errorMessage='Please select an alignment.')
self.raxmlOperations.windowSize = self.checkEntryInRange(self.windowSizeEntry, min=0, inclusive=False, errorTitle='Invalid Window Size', errorMessage='Window size needs to be a positive integer.')
self.raxmlOperations.windowOffset = self.checkEntryInRange(self.windowOffsetEntry, min=0, inclusive=False, errorTitle='Invalid Window Offset', errorMessage='Window offset needs to be a positive integer.')
self.raxmlOperations.outGroup = self.outgroupComboBox.currentText()
self.raxmlOperations.model = self.modelComboBox.currentText()
self.raxmlOperations.isCustomRaxmlCommand = self.checkBoxCustomRaxml.isChecked()
self.raxmlOperations.bootstrap = self.checkboxBootstrap.isChecked()
self.raxmlOperations.rooted = self.checkboxRooted.isChecked()
self.rooted = self.checkboxRooted.isChecked()
            # if user is generating Top Topologies or scatter plot or donut plot or circle graph run error handling on top topologies entry
if self.checkboxAllTrees.isChecked() or self.checkboxScatterPlot.isChecked() or self.checkboxDonutPlot.isChecked():
self.checkEntryPopulated(self.numberOfTopTopologiesEntry, errorTitle='Number of Top Topologies Field is Blank', errorMessage='Please enter a number of top topologies.')
self.topTopologies = self.checkEntryInRange(self.numberOfTopTopologiesEntry, min=0, max=16, inclusive=False, errorTitle='Invalid Number of Top Topologies', errorMessage='Please enter an integer between 0 and 15.')
# bootstrap error handling
self.raxmlOperations.numBootstraps = 0
if self.checkboxBootstrap.isChecked():
self.confidenceLevel = self.checkEntryInRange(self.confidenceLevelEntry, min=0, max=100, errorTitle='Invalid Confidence Level', errorMessage='Please enter an integer between 0 and 100.')
self.raxmlOperations.numBootstraps = self.checkEntryInRange(self.numberOfBootstrapsEntry, min=2, errorTitle='Invalid Number of Bootstraps', errorMessage='Please enter an integer greater than 1.')
# if using custom rax -- make sure that the user doesn't use the -s or -n flags
if self.checkBoxCustomRaxml.isChecked():
self.raxmlOperations.customRaxmlCommand = self.checkEntryPopulated(self.customRaxmlCommandEntry, errorTitle='No RAxML Command', errorMessage='Please enter a custom raxml command or uncheck the box.')
if re.search('([\-][n])|([\-][s])', self.customRaxmlCommandEntry.text()):
raise ValueError, ('Invalid RAxML Command', 'Please do not specify the -s or -n flags.', 'the -s and -n flags will be handled internally based on the alignment you input.')
# species tree error handling
if self.speciesTreeEntry.text() != "" and self.newickFileEntry.text() != "":
raise ValueError, ('Multiple Species Trees', 'You have both selected a species tree file and entered a species tree. Please only do one.', 'Both the "Species Tree File and "Enter Species Tree" fields are populated. Please only use one.')
# if the user selects either statistic plot -- open the inputted newick and read it into memory as a string on a single line
if self.checkboxRobinsonFoulds.isChecked() or self.checkboxPGTST.isChecked():
if self.newickFileEntry.text() != "":
self.newickFileName = self.checkEntryPopulated(self.newickFileEntry, errorTitle='Missing Species Tree', errorMessage='Please select a species tree.', errorDescription='Please select a species tree.')
with open(self.newickFileEntry.text(), 'r') as f:
self.speciesTree = f.read().replace('\n', '')
else:
self.speciesTree = self.checkEntryPopulated(self.speciesTreeEntry, errorTitle='Missing Species Tree', errorMessage='Please select a species tree.', errorDescription='Please select a species tree.')
except ValueError, (ErrorTitle, ErrorMessage, ErrorDescription):
self.message(str(ErrorTitle), str(ErrorMessage), str(ErrorDescription))
return False
return True
def runRAxML(self):
# if all error handling passes run RAxML
if self.raxmlInputErrorHandling():
# if rax has been run previously, ask the user to confirm that they want to rerun
if self.runComplete:
rerunRax = self.question("Rerun RAxML?", "Are you sure you want to rerun RAxML?")
# if the user selected the 'ok' button
if rerunRax == QtGui.QMessageBox.Yes:
# start raxml operations thread
self.raxmlOperations.start()
# if raxml hasn't been run before just run it
else:
# start raxml operations thread
self.raxmlOperations.start()
def raxmlComplete(self):
topologies_to_counts, unique_topologies_to_newicks = self.topologyPlotter.topology_counter(rooted=self.rooted, outgroup=self.outgroupComboBox.currentText())
self.numberOfUniqueTopologiesLabel.setText(str(len(topologies_to_counts)))
self.runBtn.setText("Rerun RAxML")
self.generateFiguresWrapper.setToolTip("")
self.generateFiguresWrapper.setEnabled(True)
self.progressBar.setValue(100)
self.runComplete = True
# **************************** ABSTRACT ****************************#
def message(self, title, description, extraInfo, type='Err'):
"""
        creates and displays a window displaying the message
"""
# create object
errMessage = QtGui.QMessageBox()
# set text
errMessage.setText(title)
errMessage.setInformativeText(description)
errMessage.setDetailedText(extraInfo)
# default pixmap for error
pixmap = QtGui.QPixmap('imgs/warning.png')
# set icon
errMessage.setIconPixmap(pixmap)
# execute window
errMessage.exec_()
def question(self, title, description, type='Question'):
"""
        creates and displays a window displaying the message
"""
# create object
qMessage = QtGui.QMessageBox()
# set text
qMessage.setText(title)
qMessage.setInformativeText(description)
# default pixmap for error
pixmap = QtGui.QPixmap('imgs/warning.png')
# set icon
qMessage.setIconPixmap(pixmap)
qMessage.setStandardButtons(QtGui.QMessageBox.Yes | QtGui.QMessageBox.Cancel)
# execute window
return qMessage.exec_()
def checkEntryPopulated(self, entry, errorTitle='Field Not Populated', errorMessage='Please populate field.', errorDescription=None):
"""
checks if given entry is empty or not.
(i) if entry is populated returns text
(ii) otherwise raises value error
"""
# if user does not provide an error description generate one automatically
if not errorDescription:
errorDescription = 'relevant entry name: ' + str(entry.objectName())
text = str(entry.text())
if text == '':
raise ValueError(errorTitle, errorMessage, errorDescription)
return text
def checkEntryInRange(self, entry, min=(-1.0 * float('inf')), max=float('inf'), inclusive=True, errorTitle='Entry Out Of Range', errorMessage='', errorDescription=None):
"""
checks if value of given entry is in range.
i. if entry is in given range return it
ii. otherwise raises value error
"""
# if user does not provide an error description generate one automatically
if not errorDescription:
errorDescription = 'relevant entry name: ' + str(entry.objectName())
# check to make sure the entry is populated
if entry.text() != '':
val = float(int(float(entry.text())))
else:
raise ValueError, (errorTitle, errorMessage, errorDescription)
# check to make sure value is in range
if inclusive:
if val < min or val > max:
raise ValueError, (errorTitle, errorMessage, errorDescription)
else:
if val <= min or val >= max:
raise ValueError, (errorTitle, errorMessage, errorDescription)
return int(val)
def updateTaxonComboBoxes(self, comboBoxes, textEntry, require4Taxons=False):
"""
input:
i. comboBoxes - a list of comboBox widgets (drop down menus)
ii. textEntry - a text entry widget
iii. errHandling=False - a boolean indicating whether or not to require that there be exactly four taxons in the file in the text entry
gets a list of taxons from the file in textEntry and sets the items in a list of combo boxes to that list of taxons.
"""
try:
if textEntry.text() == "":
return
# get list of taxon names from file
taxonNames = list(self.raxmlOperations.taxon_names_getter(textEntry.text()))
if | |
import numpy as np
import scipy.optimize
from .c_fastpixelint import pixelintegrate
from .integrate import radintpix, azimintpix, radint_nsector
from .. import misc
from ..misc.basicfit import findpeak_asymmetric
from ..misc.errorvalue import ErrorValue
def findbeam_gravity(data, mask):
"""Find beam center with the "gravity" method
Inputs:
data: scattering image
mask: mask matrix
Output:
a vector of length 2 with the x (row) and y (column) coordinates
of the origin, starting from 1
"""
# for each row and column find the center of gravity
data1 = data.copy() # take a copy, because elements will be tampered with
data1[mask == 0] = 0 # set masked elements to zero
# vector of x (row) coordinates
x = np.arange(data1.shape[0])
# vector of y (column) coordinates
y = np.arange(data1.shape[1])
# two column vectors, both containing ones. The length of onex and
# oney corresponds to length of x and y, respectively.
onex = np.ones_like(x)
oney = np.ones_like(y)
# Multiply the matrix with x. Each element of the resulting column
# vector will contain the center of gravity of the corresponding row
# in the matrix, multiplied by the "weight". Thus: nix_i=sum_j( A_ij
# * x_j). If we divide this by spamx_i=sum_j(A_ij), then we get the
# center of gravity. The length of this column vector is len(y).
nix = np.dot(x, data1)
spamx = np.dot(onex, data1)
# indices where both nix and spamx is nonzero.
goodx = ((nix != 0) & (spamx != 0))
# trim y, nix and spamx by goodx, eliminate invalid points.
nix = nix[goodx]
spamx = spamx[goodx]
# now do the same for the column direction.
niy = np.dot(data1, y)
spamy = np.dot(data1, oney)
goody = ((niy != 0) & (spamy != 0))
niy = niy[goody]
spamy = spamy[goody]
# column coordinate of the center in each row will be contained in
# ycent, the row coordinate of the center in each column will be
# in xcent.
ycent = nix / spamx
xcent = niy / spamy
# return the mean values as the centers.
return [xcent.mean(), ycent.mean()]
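# A minimal self-check sketch (not part of the original module; the function name and
# test values are illustrative only): on a synthetic, fully unmasked Gaussian spot the
# gravity method should recover the peak position.
def _example_findbeam_gravity():
    row, col = np.mgrid[0:128, 0:128]
    data = np.exp(-((row - 60.0) ** 2 + (col - 60.0) ** 2) / (2 * 5.0 ** 2))
    mask = np.ones_like(data)  # nonzero means non-masked
    # Both returned coordinates should be close to 60 for this centered peak.
    return findbeam_gravity(data, mask)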
def findbeam_slices(data, orig_initial, mask=None, maxiter=0, epsfcn=0.001,
dmin=0, dmax=np.inf, sector_width=np.pi / 9.0, extent=10, callback=None):
"""Find beam center with the "slices" method
Inputs:
data: scattering matrix
orig_initial: estimated value for x (row) and y (column)
coordinates of the beam center, starting from 1.
mask: mask matrix. If None, nothing will be masked. Otherwise it
should be of the same size as data. Nonzero means non-masked.
maxiter: maximum number of iterations for scipy.optimize.leastsq
epsfcn: input for scipy.optimize.leastsq
dmin: disregard pixels nearer to the origin than this
dmax: disregard pixels farther from the origin than this
sector_width: width of sectors in radians
extent: approximate distance of the current and the real origin in pixels.
Too high a value makes the fitting procedure unstable. Too low a value
does not permit to move away the current origin.
callback: callback function (expects no arguments)
Output:
a vector of length 2 with the x (row) and y (column) coordinates
of the origin.
"""
if mask is None:
mask = np.ones(data.shape)
data = data.astype(np.double)
def targetfunc(orig, data, mask, orig_orig, callback):
# integrate four sectors
I = [None] * 4
p, Ints, A = radint_nsector(data, None, -1, -1, -1, orig[0] + orig_orig[0], orig[1] + orig_orig[1], mask=mask,
phi0=np.pi / 4 - 0.5 * sector_width, dphi=sector_width,
Nsector=4)
minpix = max(max(p.min(0).tolist()), dmin)
maxpix = min(min(p.max(0).tolist()), dmax)
if (maxpix < minpix):
raise ValueError('The four slices do not overlap! Please give a\
better approximation for the origin or use another centering method.')
for i in range(4):
I[i] = Ints[:, i][(p[:, i] >= minpix) & (p[:, i] <= maxpix)]
ret = ((I[0] - I[2]) ** 2 + (I[1] - I[3]) ** 2) / (maxpix - minpix)
if callback is not None:
callback()
return ret
orig = scipy.optimize.leastsq(targetfunc, np.array([extent, extent]),
args=(data, 1 - mask.astype(np.uint8),
np.array(orig_initial) - extent, callback),
maxfev=maxiter, epsfcn=0.01)
return orig[0] + np.array(orig_initial) - extent
def findbeam_azimuthal(data, orig_initial, mask=None, maxiter=100, Ntheta=50,
dmin=0, dmax=np.inf, extent=10, callback=None):
"""Find beam center using azimuthal integration
Inputs:
data: scattering matrix
orig_initial: estimated value for x (row) and y (column)
coordinates of the beam center, starting from 1.
mask: mask matrix. If None, nothing will be masked. Otherwise it
should be of the same size as data. Nonzero means non-masked.
maxiter: maximum number of iterations for scipy.optimize.fmin
Ntheta: the number of theta points for the azimuthal integration
dmin: pixels nearer to the origin than this will be excluded from
the azimuthal integration
dmax: pixels farther from the origin than this will be excluded from
the azimuthal integration
extent: approximate distance of the current and the real origin in pixels.
Too high a value makes the fitting procedure unstable. Too low a value
does not permit to move away the current origin.
callback: callback function (expects no arguments)
Output:
a vector of length 2 with the x and y coordinates of the origin,
starting from 1
"""
if mask is None:
mask = np.ones(data.shape)
data = data.astype(np.double)
def targetfunc(orig, data, mask, orig_orig, callback):
def sinfun(p, x, y):
return (y - np.sin(x + p[1]) * p[0] - p[2]) / np.sqrt(len(x))
t, I, a = azimintpix(data, None, orig[
0] + orig_orig[0], orig[1] + orig_orig[1], mask.astype('uint8'), Ntheta, dmin, dmax)
if len(a) > (a > 0).sum():
raise ValueError('findbeam_azimuthal: non-complete azimuthal average, please consider changing dmin, dmax and/or orig_initial!')
p = ((I.max() - I.min()) / 2.0, t[I == I.max()][0], I.mean())
p = scipy.optimize.leastsq(sinfun, p, (t, I))[0]
# print "findbeam_azimuthal: orig=",orig,"amplitude=",abs(p[0])
if callback is not None:
callback()
return abs(p[0])
orig1 = scipy.optimize.fmin(targetfunc, np.array([extent, extent]),
args=(data, 1 - mask, np.array(orig_initial) - extent,
callback), maxiter=maxiter, disp=0)
return orig1 + np.array(orig_initial) - extent
def findbeam_azimuthal_fold(data, orig_initial, mask=None, maxiter=100,
Ntheta=50, dmin=0, dmax=np.inf, extent=10, callback=None):
"""Find beam center using azimuthal integration and folding
Inputs:
data: scattering matrix
orig_initial: estimated value for x (row) and y (column)
coordinates of the beam center, starting from 1.
mask: mask matrix. If None, nothing will be masked. Otherwise it
should be of the same size as data. Nonzero means non-masked.
maxiter: maximum number of iterations for scipy.optimize.fmin
Ntheta: the number of theta points for the azimuthal integration.
Should be even!
dmin: pixels nearer to the origin than this will be excluded from
the azimuthal integration
dmax: pixels farther from the origin than this will be excluded from
the azimuthal integration
extent: approximate distance of the current and the real origin in pixels.
Too high a value makes the fitting procedure unstable. Too low a value
does not permit to move away the current origin.
callback: callback function (expects no arguments)
Output:
a vector of length 2 with the x and y coordinates of the origin,
starting from 1
"""
if Ntheta % 2:
raise ValueError('Ntheta should be even!')
if mask is None:
mask = np.ones_like(data).astype(np.uint8)
data = data.astype(np.double)
# the function to minimize is the sum of squared difference of two halves of
# the azimuthal integral.
def targetfunc(orig, data, mask, orig_orig, callback):
I = azimintpix(data, None, orig[
0] + orig_orig[0], orig[1] + orig_orig[1], mask, Ntheta, dmin, dmax)[1]
if callback is not None:
callback()
        return np.sum((I[:Ntheta // 2] - I[Ntheta // 2:]) ** 2) / Ntheta
orig1 = scipy.optimize.fmin(targetfunc, np.array([extent, extent]),
args=(data, 1 - mask, np.array(orig_initial) - extent, callback), maxiter=maxiter, disp=0)
return orig1 + np.array(orig_initial) - extent
def findbeam_semitransparent(data, pri, threshold=0.05):
"""Find beam with 2D weighting of semitransparent beamstop area
Inputs:
data: scattering matrix
pri: list of four: [xmin,xmax,ymin,ymax] for the borders of the beam
area under the semitransparent beamstop. X corresponds to the column
index (ie. A[Y,X] is the element of A from the Xth column and the
Yth row). You can get these by zooming on the figure and retrieving
the result of axis() (like in Matlab)
threshold: do not count pixels if their intensity falls below
max_intensity*threshold. max_intensity is the highest count rate
in the current row or column, respectively. Set None to disable
this feature.
Outputs: bcx,bcy
the x and y coordinates | |
# datnum: int = None
# threshold: float = None
# param: str = None
# param_values: np.ndarray = None
# zscores: np.ndarray = None
# accepted_rows: np.ndarray = None
# rejected_rows: np.ndarray = None
# x: np.ndarray = None
# avg_data: np.ndarray = None
# avg_data_std: np.ndarray = None
# avg_fit: DA.FitInfo = None
# original_fit: DA.FitInfo = None
#
#
# def calculate_careful_fit(dat: DatHDF, thresh: Optional[float] = None, param: str = 'theta') -> CarefulFit:
# """
# Fills a CarefulFit dataclass based on looking at individual fits (already calculated in dat.Transition...) to
# determine which rows of data lie within 'thresh' s.d. of the average based on the 'param' fit parameter.
#
# Args:
# dat (DatHDF): Dat to work on
# thresh (Optional[float]): The number of s.d. for the cutoff between accept/reject
# param (str): Which parameter of dat.Transition to base the filtering on
#
# Returns:
# CarefulFit: Dataclass with info including accepted/rejected rows etc... Should be storeable in DatHDF
# """
# if thresh is None:
# thresh = 1
# assert param in ['theta', 'mid', 'const', 'amp', 'g', 'lin']
#
# info = CarefulFit()
# info.param = param
# info.threshold = thresh
# info.datnum = dat.datnum
# info.original_fit = dat.Transition.avg_fit
#
# info.param_values = np.array([getattr(f.best_values, param) for f in dat.Transition.all_fits], dtype=np.float32)
# info.zscores = np.abs(zscore(info.param_values, nan_policy='omit'))
#
# # Accept
# info.accepted_rows = np.where(info.zscores < info.threshold)
# mids = np.array([f.best_values.mid for f in np.array(dat.Transition.all_fits)[info.accepted_rows]],
# dtype=np.float32)
# data = dat.Transition.data[info.accepted_rows]
# info.x = dat.Transition.x
# info.avg_data, info.avg_data_std = CU.mean_data(info.x, data, mids, return_std=True)
# info.avg_fit = DA.FitInfo().from_fit(T.transition_fits(info.x, info.avg_data, auto_bin=True)[0])
#
# # Reject
# info.rejected_rows = np.where(np.logical_or(info.zscores > info.threshold, np.isnan(info.zscores)))
# return info
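#
# Illustrative sketch (kept commented out like the rest of this module): the core of
# the row filtering above is a z-score cut on one fit parameter. A minimal standalone
# version of just that step, assuming a 1D array of per-row parameter values and that
# numpy and scipy are available:
#
#     import numpy as np
#     from scipy.stats import zscore
#
#     def filter_rows_by_zscore(param_values, thresh=1.0):
#         zs = np.abs(zscore(param_values, nan_policy='omit'))
#         accepted = np.where(zs < thresh)
#         rejected = np.where(np.logical_or(zs > thresh, np.isnan(zs)))
#         return accepted, rejected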
#
#
#
#
# import plotly.graph_objects as go
# import plotly.express as px
# from dat_analysis.dat_object.DatHDF import DatHDF
#
#
# class Plots:
# @staticmethod
# def _adjust_lightness(color: str, adj: float = -0.3):
# from dat_analysis.plotting.Mpl.PlotUtil import adjust_lightness
# alpha = 1
# if color[0] == '#':
# c = color
# elif color[:4] == 'rgba':
# full_c = color[5:-1].split(',')
# c = [float(sc) / 255 for sc in full_c[0:3]]
# alpha = full_c[3]
# elif color[:3] == 'rgb':
# c = [float(sc) / 255 for sc in color[4:-1].split(',')]
# else:
# raise ValueError(f'Could not interpret {color} as a color')
#
# c = adjust_lightness(c, adj)
# c = [int(np.floor(sc * 255)) for sc in c]
# full_c = f'rgba({c[0]},{c[1]},{c[2]},{alpha})'
# return full_c
#
# @staticmethod
# def _make_hover_template(base_template='Datnum: %{customdata[0]}<br>', x_label='x=%{x:.2f}', y_label='y=%{y:.2f}',
# additional_template: Optional[str] = None):
# """
# Combines the base and additional_template (if passed) and returns a single template which can be used in plotly
# traces
# Args:
# base_template (str): Template string for plotly hover data... e.g. "%{customdata[i]:.3f}<br>{...}"
# additional_template (Optional[str]): Same as above, but optional
#
# Returns:
# str: Combined template
# """
# hover_template = base_template + x_label + '<br>' + y_label
# if isinstance(additional_template, str):
# hover_template = hover_template + '<br>' + additional_template
# return hover_template
#
# @staticmethod
# def _make_customdata_getter(base_customdata: Union[List[Callable], Callable] = lambda dat: dat.datnum,
# additional_customdata: Optional[Union[List[Callable], Callable]] = None):
# """
# Combines base an additional custom data getters, and returns a full list of customdata getters
# Args:
# base_customdata (Union[List[Callable], Callable]): Callable(s) to get data from DatHDF objects
# additional_customdata (Optional[Union[List[Callable], Callable]]): Same but optional
#
# Returns:
# List[Callable]: Full list of Callables which will get all custom data from DatHDF object
# """
# customdata_getters = CU.ensure_list(base_customdata)
# if callable(additional_customdata): # If only passed in one Callable, then just turn it into a list
# additional_customdata = [additional_customdata]
# if isinstance(additional_customdata, list):
# customdata_getters.extend(additional_customdata)
# return customdata_getters
#
# @staticmethod
# def _get_customdata(customdata_getters: List[Callable],
# dat: Union[List[DatHDF], DatHDF],
# data: Optional[np.ndarray] = None):
# """
# Gets an array of customdata which matches the shape of data
# Args:
# customdata_getters (List[Callable]): List of Callables which can get data from a DatHDF object
# dat: Either single Dat or Dats... If using single Dat, data must be provided too to get the shape
# of customdata right
# data (Optional[np.ndarray]): This required for matching the shape of the data being plotted (customdata has
# to have matching shape)
#
# Returns:
# np.ndarray: Array of customdata which matches shape of data
# """
# if not type(dat) == list:
# assert data is not None
# customdata = np.tile(np.array([f(dat) for f in customdata_getters]),
# (data.shape[-1], 1)) # Same outer dim as data
# else:
# customdata = np.array([[f(d) for f in customdata_getters] for d in dat])
# return customdata
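#
# Illustrative sketch (kept commented out like the rest of this module): how the two
# helpers above are meant to be combined in a plotly trace -- each customdata row
# lines up with one plotted point and the hover template indexes into it. The data
# values here are hypothetical:
#
#     x = [1, 2, 3]
#     y = [0.1, 0.2, 0.3]
#     customdata = [[100, 'a'], [101, 'b'], [102, 'c']]   # e.g. [datnum, label] per point
#     template = 'Datnum: %{customdata[0]}<br>x=%{x:.2f}<br>y=%{y:.2f}<br>label=%{customdata[1]}'
#     fig = go.Figure(go.Scatter(x=x, y=y, customdata=customdata, hovertemplate=template))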
#
# @staticmethod
# def sorted(dats: List[DatHDF], which_sort, which_x, which_y, sort_array=None, sort_tol=None, mode='markers',
# uncertainties='fill', fig=None, get_additional_data=None, additional_hover_template=None,
# legend_label=None):
# """
# Returns a plotly figure with multiple traces. Which_sort determines how data is grouped together.
# Args:
# dats (List[DatHDF]): Dats can be multi part (This will ignore anything but part_1)
# which_sort (str): What to sort by
# which_x (str): What to use on x_axis
# which_y (str): What to use on y_axis
# sort_array (Optional[Union[list, tuple, np.ndarray]]): List of values to sort by
# sort_tol (Optional[float]): How close the value from the dat has to be to the sort_array values
# mode (str): Whether to use markers and/or lines (e.g. 'markers+lines')
# uncertainties (Optional[str]): Whether to add uncertainties to plot. Can be (None, 'fill', 'bars')
# fig (go.Figure): Modify the figure passed in
#
# Returns:
# go.Figure: plotly figure with traces
# """
#
# SORT_KEYS = ('lct', 'lct/0.16', 'lct/0.196', 'any_lct', 'temp', 'field', 'hqpc', 'rcb', 'lcb', 'freq', 'lp*2')
# X_KEYS = list(SORT_KEYS) + ['time', 'time_single_day']
# Y_KEYS = ('fit_ds', 'int_ds', 'dt', 'data_minus_fit', 'dmf', 'data_minus_fit_scaled', 'dmfs', 'amp', 'g')
#
# colors = px.colors.DEFAULT_PLOTLY_COLORS # Have to specify so I can add error fill with the same background color
#
# which_sort, which_x, which_y = which_sort.lower(), which_x.lower(), which_y.lower()
#
# if which_sort not in SORT_KEYS:
# raise ValueError(f'Which_sort must be one of: {SORT_KEYS}')
# if which_x not in X_KEYS:
# raise ValueError(f'Which_x must be one of: {X_KEYS}')
# if which_y not in Y_KEYS:
# raise ValueError(f'Which_y must be one of: {Y_KEYS}')
#
# if which_sort == 'temp':
# name = 'Temp'
# units = 'mK'
# get_val = lambda dat: dat.Logs.temps.mc * 1000
# tol = 10
# elif which_sort == 'field':
# name = 'Field'
# units = 'mT'
# # array = np.linspace(-21, -19, 21)
# get_val = lambda dat: dat.Logs.magy.field
# tol = 0.2
# elif which_sort == 'lct':
# name = 'LCT'
# units = 'mV'
# # array = np.linspace(-460, -380, 5)
# get_val = lambda dat: dat.Logs.fds['LCT']
# tol = 5
# elif which_sort == 'lct/0.16':
# name = 'LCT/0.16'
# units = 'mV'
# # array = np.linspace(-460, -380, 5)
# get_val = lambda dat: dat.Logs.fds['LCT/0.16']
# tol = 1
# elif which_sort == 'lct/0.196':
# name = 'LCT/0.196'
# units = 'mV'
# # array = np.linspace(-460, -380, 5)
# get_val = lambda dat: dat.Logs.fds['LCT/0.196']
# tol = 10
# elif which_sort == 'any_lct':
# from dat_analysis.data_standardize.exp_specific.Sep20 import get_lct_name, get_real_lct
# name = 'LCT real'
# units = 'mV'
# # array = np.linspace(-460, -380, 5)
# get_val = lambda dat: get_real_lct(dat)
# tol = 10
# elif which_sort == 'hqpc':
# name = 'HQPC bias'
# units = 'mV'
# # array = np.linspace(117.5, 122.5, 11)
# get_val = lambda dat: dat.AWG.AWs[0][0][1]
# tol = 0.2
# elif which_sort == 'rcb':
# name = 'RCB'
# units = 'mV'
# get_val = lambda dat: dat.Logs.bds['RCB']
# elif which_sort == 'lcb':
# name = 'LCB (/LCSS)'
# units = 'mV'
# get_val = lambda dat: dat.Logs.fds['LCB']
# elif which_sort == 'freq':
# name = 'Heating Frequency'
# units = 'Hz'
# get_val = lambda dat: dat.AWG.freq
# elif which_sort == 'lp*2':
# name = 'LP*2'
# units = 'mV'
# get_val = lambda dat: dat.Logs.fds['LP*2']
# tol = 1
# else:
# raise ValueError
#
# if sort_tol is not None:
# tol = sort_tol
# if sort_array is not None:
# array = sort_array
# else:
# array = set(CU.my_round(np.array([get_val(dat) for dat in dats]), prec=3, base=tol / 2))
#
# if which_x == 'lct':
# get_x = lambda dat: dat.Logs.fds['LCT']
# x_title = 'LCT /mV'
# elif which_x == 'lct/0.16':
# get_x = lambda dat: dat.Logs.fds['LCT/0.16']
# x_title = 'LCT/0.16 /mV'
# elif which_x == 'lct/0.196':
# get_x = lambda dat: dat.Logs.fds['LCT/0.196']
# x_title = 'LCT/0.196 /mV'
# elif which_x == 'any_lct':
# from dat_analysis.data_standardize.exp_specific.Sep20 import get_lct_name, get_real_lct
# get_x = lambda dat: get_real_lct(dat)
# x_title = 'LCT real /mV'
# elif which_x == 'field':
# get_x = lambda dat: dat.Logs.magy.field
# x_title = 'Field /mT'
# elif which_x == 'temp':
# get_x = lambda dat: dat.Logs.temps.mc * 1000
# x_title = 'MC Temp /mK'
# elif which_x == 'hqpc':
# get_x = lambda dat: dat.AWG.AWs[0][0][1]
# x_title = 'HQPC bias /mV'
# elif which_x == 'time':
# get_x = lambda dat: pd.Timestamp(dat.Logs.time_completed)
# x_title = 'Time'
# elif which_x == 'time_single_day':
# get_x = lambda dat: pd.Timestamp(pd.Timestamp(dat.Logs.time_completed).time().isoformat()) # Has to include date to use time, this just uses todays date
# x_title = 'Time (modulo day)'
# elif which_x == 'rcb':
# get_x = lambda dat: dat.Logs.bds['RCB']
# x_title = 'RCB /mV'
# elif which_x == 'lcb':
# get_x = lambda dat: dat.Logs.fds['LCB']
# x_title = 'LCB /mV'
# elif which_x == 'freq':
# get_x = lambda dat: dat.AWG.freq
# x_title = 'Heating Frequency /Hz'
#         elif which_x ==
""" Utility functions used in osxphotos """
import datetime
import fnmatch
import glob
import hashlib
import importlib
import inspect
import logging
import os
import os.path
import pathlib
import platform
import re
import sqlite3
import subprocess
import sys
import unicodedata
import urllib.parse
from plistlib import load as plistload
from typing import Callable, List, Optional, Tuple, Union
from uuid import UUID
import CoreFoundation
import requests
import shortuuid
from ._constants import UNICODE_FORMAT
__all__ = [
"dd_to_dms_str",
"expand_and_validate_filepath",
"get_last_library_path",
"get_system_library_path",
"hexdigest",
"increment_filename_with_count",
"increment_filename",
"lineno",
"list_directory",
"list_photo_libraries",
"load_function",
"noop",
"normalize_fs_path",
"normalize_unicode",
"pluralize",
"shortuuid_to_uuid",
"uuid_to_shortuuid",
]
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s - %(levelname)s - %(filename)s - %(lineno)d - %(message)s",
)
VERSION_INFO_URL = "https://pypi.org/pypi/osxphotos/json"
def _get_logger():
"""Used only for testing
Returns:
logging.Logger object -- logging.Logger object for osxphotos
"""
return logging.Logger(__name__)
def noop(*args, **kwargs):
"""do nothing (no operation)"""
pass
def lineno(filename):
"""Returns string with filename and current line number in caller as '(filename): line_num'
Will trim filename to just the name, dropping path, if any."""
line = inspect.currentframe().f_back.f_lineno
filename = pathlib.Path(filename).name
return f"{filename}: {line}"
def _get_os_version():
# returns tuple of str containing OS version
# e.g. 10.13.6 = ("10", "13", "6")
version = platform.mac_ver()[0].split(".")
if len(version) == 2:
(ver, major) = version
minor = "0"
elif len(version) == 3:
(ver, major, minor) = version
else:
raise (
ValueError(
f"Could not parse version string: {platform.mac_ver()} {version}"
)
)
return (ver, major, minor)
def _check_file_exists(filename):
"""returns true if file exists and is not a directory
otherwise returns false"""
filename = os.path.abspath(filename)
return os.path.exists(filename) and not os.path.isdir(filename)
def _get_resource_loc(model_id):
"""returns folder_id and file_id needed to find location of edited photo"""
""" and live photos for version <= Photos 4.0 """
# determine folder where Photos stores edited version
# edited images are stored in:
# Photos Library.photoslibrary/resources/media/version/XX/00/fullsizeoutput_Y.jpeg
# where XX and Y are computed based on RKModelResources.modelId
# file_id (Y in above example) is hex representation of model_id without leading 0x
file_id = hex_id = hex(model_id)[2:]
# folder_id (XX) in above example if first two chars of model_id converted to hex
# and left padded with zeros if < 4 digits
folder_id = hex_id.zfill(4)[0:2]
return folder_id, file_id
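# Worked example (illustrative): for model_id = 1234, hex(1234) is '0x4d2', so
# file_id = '4d2' and folder_id = '4d2'.zfill(4)[0:2] = '04'; the edited image would
# then sit under .../resources/media/version/04/00/fullsizeoutput_4d2.jpeg,
# following the path layout described in the comments above.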
def _dd_to_dms(dd):
"""convert lat or lon in decimal degrees (dd) to degrees, minutes, seconds"""
""" return tuple of int(deg), int(min), float(sec) """
dd = float(dd)
negative = dd < 0
dd = abs(dd)
min_, sec_ = divmod(dd * 3600, 60)
deg_, min_ = divmod(min_, 60)
if negative:
if deg_ > 0:
deg_ = deg_ * -1
elif min_ > 0:
min_ = min_ * -1
else:
sec_ = sec_ * -1
return int(deg_), int(min_), sec_
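# Worked example (illustrative): _dd_to_dms(-73.5) returns (-73, 30, 0.0), i.e.
# 73.5 degrees = 73 deg 30 min 0 sec, with the sign carried on the first
# non-zero component as implemented above.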
def dd_to_dms_str(lat, lon):
"""convert latitude, longitude in degrees to degrees, minutes, seconds as string"""
""" lat: latitude in degrees """
""" lon: longitude in degrees """
""" returns: string tuple in format ("51 deg 30' 12.86\" N", "0 deg 7' 54.50\" W") """
""" this is the same format used by exiftool's json format """
# TODO: add this to readme
lat_deg, lat_min, lat_sec = _dd_to_dms(lat)
lon_deg, lon_min, lon_sec = _dd_to_dms(lon)
lat_hemisphere = "N"
if any([lat_deg < 0, lat_min < 0, lat_sec < 0]):
lat_hemisphere = "S"
lon_hemisphere = "E"
if any([lon_deg < 0, lon_min < 0, lon_sec < 0]):
lon_hemisphere = "W"
lat_str = (
f"{abs(lat_deg)} deg {abs(lat_min)}' {abs(lat_sec):.2f}\" {lat_hemisphere}"
)
lon_str = (
f"{abs(lon_deg)} deg {abs(lon_min)}' {abs(lon_sec):.2f}\" {lon_hemisphere}"
)
return lat_str, lon_str
def get_system_library_path():
"""return the path to the system Photos library as string"""
""" only works on MacOS 10.15 """
""" on earlier versions, returns None """
_, major, _ = _get_os_version()
if int(major) < 15:
logging.debug(
f"get_system_library_path not implemented for MacOS < 10.15: you have {major}"
)
return None
plist_file = pathlib.Path(
str(pathlib.Path.home())
+ "/Library/Containers/com.apple.photolibraryd/Data/Library/Preferences/com.apple.photolibraryd.plist"
)
if plist_file.is_file():
with open(plist_file, "rb") as fp:
pl = plistload(fp)
else:
logging.debug(f"could not find plist file: {str(plist_file)}")
return None
return pl.get("SystemLibraryPath")
def get_last_library_path():
"""returns the path to the last opened Photos library
If a library has never been opened, returns None"""
plist_file = pathlib.Path(
str(pathlib.Path.home())
+ "/Library/Containers/com.apple.Photos/Data/Library/Preferences/com.apple.Photos.plist"
)
if plist_file.is_file():
with open(plist_file, "rb") as fp:
pl = plistload(fp)
else:
logging.debug(f"could not find plist file: {str(plist_file)}")
return None
# get the IPXDefaultLibraryURLBookmark from com.apple.Photos.plist
# this is a serialized CFData object
photosurlref = pl.get("IPXDefaultLibraryURLBookmark")
if photosurlref is not None:
# use CFURLCreateByResolvingBookmarkData to de-serialize bookmark data into a CFURLRef
# pylint: disable=no-member
# pylint: disable=undefined-variable
photosurl = CoreFoundation.CFURLCreateByResolvingBookmarkData(
CoreFoundation.kCFAllocatorDefault, photosurlref, 0, None, None, None, None
)
        # the CFURLRef we got is a struct that python treats as an array
# I'd like to pass this to CFURLGetFileSystemRepresentation to get the path but
# CFURLGetFileSystemRepresentation barfs when it gets an array from python instead of expected struct
# first element is the path string in form:
# file:///Users/username/Pictures/Photos%20Library.photoslibrary/
photosurlstr = photosurl[0].absoluteString() if photosurl[0] else None
# now coerce the file URI back into an OS path
# surely there must be a better way
if photosurlstr is not None:
photospath = os.path.normpath(
urllib.parse.unquote(urllib.parse.urlparse(photosurlstr).path)
)
else:
logging.warning(
"Could not extract photos URL String from IPXDefaultLibraryURLBookmark"
)
return None
return photospath
else:
logging.debug("Could not get path to Photos database")
return None
def list_photo_libraries():
"""returns list of Photos libraries found on the system"""
""" on MacOS < 10.15, this may omit some libraries """
# On 10.15, mdfind appears to find all libraries
# On older MacOS versions, mdfind appears to ignore some libraries
# glob to find libraries in ~/Pictures then mdfind to find all the others
# TODO: make this more robust
lib_list = list_directory(
f"{pathlib.Path.home()}/Pictures/", glob="*.photoslibrary"
)
# On older OS, may not get all libraries so make sure we get the last one
if last_lib := get_last_library_path():
lib_list.append(last_lib)
output = subprocess.check_output(
["/usr/bin/mdfind", "-onlyin", "/", "-name", ".photoslibrary"]
).splitlines()
for lib in output:
lib_list.append(lib.decode("utf-8"))
lib_list = sorted(set(lib_list))
return lib_list
def normalize_fs_path(path: str) -> str:
"""Normalize filesystem paths with unicode in them"""
# macOS HFS+ uses NFD, APFS doesn't normalize but stick with NFD
# ref: https://eclecticlight.co/2021/05/08/explainer-unicode-normalization-and-apfs/
return unicodedata.normalize("NFD", path)
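# Illustrative example: the composed form of "é" (U+00E9) and the decomposed form
# ("e" followed by U+0301) are different strings, but normalize to the same NFD path:
#
#     normalize_fs_path("caf\u00e9") == normalize_fs_path("cafe\u0301")   # True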
# def findfiles(pattern, path):
# """Returns list of filenames from path matched by pattern
# shell pattern. Matching is case-insensitive.
# If 'path_' is invalid/doesn't exist, returns []."""
# if not os.path.isdir(path):
# return []
# # paths need to be normalized for unicode as filesystem returns unicode in NFD form
# pattern = normalize_fs_path(pattern)
# rule = re.compile(fnmatch.translate(pattern), re.IGNORECASE)
# files = os.listdir(path)
# return [name for name in files if rule.match(name)]
def list_directory(
directory: Union[str, pathlib.Path],
startswith: Optional[str] = None,
endswith: Optional[str] = None,
contains: Optional[str] = None,
glob: Optional[str] = None,
include_path: bool = False,
case_sensitive: bool = False,
) -> List[Union[str, pathlib.Path]]:
"""List directory contents and return list of files or directories matching search criteria.
Accounts for case-insensitive filesystems, unicode filenames. directory can be a str or a pathlib.Path object.
Args:
directory: directory to search
startswith: string to match at start of filename
endswith: string to match at end of filename
contains: string to match anywhere in filename
glob: shell-style glob pattern to match filename
include_path: if True, return full path to file
case_sensitive: if True, match case-sensitively
Returns: List of files or directories matching search criteria as either str or pathlib.Path objects depending on the input type;
returns empty list if directory is invalid or doesn't exist.
"""
is_pathlib = isinstance(directory, pathlib.Path)
if is_pathlib:
directory = str(directory)
if not os.path.isdir(directory):
return []
startswith = normalize_fs_path(startswith) if startswith else None
endswith = normalize_fs_path(endswith) if endswith else None
contains = normalize_fs_path(contains) if contains else None
glob = normalize_fs_path(glob) if glob else None
files = [normalize_fs_path(f) for f in os.listdir(directory)]
if not case_sensitive:
files_normalized = {f.lower(): f for f in files}
files = [f.lower() for f in files]
startswith = startswith.lower() if startswith else None
endswith = endswith.lower() if endswith else None
contains = contains.lower() if contains else None
glob = glob.lower() if glob else None
else:
files_normalized = {f: f for f in files}
if startswith:
files = [f for f in files if f.startswith(startswith)]
if endswith:
endswith = normalize_fs_path(endswith)
files = [f for f in files if f.endswith(endswith)]
if contains:
contains = normalize_fs_path(contains)
files = [f for f in files if contains in f]
if glob:
glob = normalize_fs_path(glob)
        flags = re.IGNORECASE if not case_sensitive else 0
# -*- encoding: utf-8 -*-
#############################################################################
################## awmstools : Common functions for python ###################
##############################################################################
##
## o author: <NAME> (<EMAIL>)
## o created: 2000-04-08T15:52:17+00:00
## o last changed: $Date: 2009-03-24 02:09:50 $
## o license: see file LICENSE
## o keywords: python, helper functions
## o requires: python >= 2.4
## o TODO:
## - get rid of the bogus test failure (doctest: +ELLIPSIS)
## - streamline or cull baroque stuff like reverse
## - cull more stuff and factor into several files
## - saveVars etc. should have `silent` option or so
## - not all functions are tested rigorously yet
## - write a fast merge (kicked old slow recursive one)
##
## Sorted in inverse order of uselessness :) The stuff under EXPERIMENTAL is
## just that: experimental. Expect it not to work, or to disappear or to be
## incompatibly modified in the future. The rest should be fairly stable.
"""A collection of various convenience functions and classes, small utilities
and 'fixes'.
Some just save a little bit of typing (`Result`), others are things that
seem to have been forgotten in the standard libraries (`slurp`,
`binarySearch`, `replaceStrs`) or that have a strange behavior
(`os.path.splitext`). Apart from several general purpose utilities for
lists (`flatten`) iterables in general (`window`, `unique`, `union`,
`group` etc.) there are also more special purpose utilities such as various
handy functions and classes for writing scripts (`DryRun`), for debugging
(`makePrintReturner`) and for meta-programming (`gensym`).
"""
from __future__ import division
__docformat__ = "restructuredtext en"
__revision__ = "$Id: awmstools.py,v 1.29 2009-03-24 02:09:50 aschmolc Exp $"
__version__ = "0.9"
__author__ = "<NAME> <<EMAIL>>"
__test__ = {} # this is for doctest
import bisect
import codecs
import copy
import cPickle
try: from functools import partial # python < 2.5 compatibility
except ImportError: partial = lambda f, *args, **kwargs: lambda *a, **k: f(*(args + a), **dict(kwargs, **k))
from itertools import *
import inspect
import itertools
import math
import operator
import os
import getpass
import re
import sys
import tempfile
import time
import types
import urllib2
try: from threading import Lock
except ImportError: Lock = lambda: Null
try: any
except NameError: any = lambda xs: bool(some(xs)); all = lambda xs: bool(every(xs))
class _uniqueClass(object):
"""To create a single instance to be used for default values; supports
comparison by-object identity to work around stupid classes that won't allow
comparisons to non-self-instances."""
def __eq__(a,b): return a is b;
def __ne__(a,b): return a is not b
try: # don't redefine on reload
__unique
except NameError:
__unique = _uniqueClass()
if sys.maxint > 1e6*60*60*24*365*100: # see below
# XXX this relies on the GIL & itertools for threadsafety
# but since itertools.count can only count to sys.maxint...
    def Counter(start=0):
        """A threadsafe counter that lets you keep counting
        for at least 100 years at a rate of 1MHz (if `start` = 0).
"""
return itertools.count(start).next
else:
# ... we also need this more generic version
    class Counter(object):
        """A threadsafe counter that lets you keep counting
        for at least 100 years at a rate of 10^6/s (if `start` = 0).
"""
def __init__(self, start=0):
self.lock = Lock()
self.count = start
def __call__(self):
try:
self.lock.acquire()
return self.count
finally:
self.count+=1
self.lock.release()
# don't redefine on reload
try:
_count
except NameError:
_count = Counter()
_gensyms = {}
def gensym(prefix="GSYM"):
    r"""Returns a string that is valid as a unique python variable name. Useful
when creating functions etc. on the fly. Can be used from multiple threads
and is `reload` safe.
"""
return "%s%d" % (prefix, _count())
__test__['gensym'] = r"""
>>> import awmstools
>>> bak = awmstools._count
>>> awmstools._count = Counter()
>>> gensym()
'GSYM0'
>>> gensym()
'GSYM1'
>>> gensym('FOO')
'FOO2'
>>> import awmstools
>>> reload(awmstools) and None
>>> awmstools._count = bak
"""
# FIXME test threadsafety at least superficially!
#_. FIXES
# Fixes for things in python I'd like to behave differently
def rexGroups(rex):
"""Return the named groups in a regular expression (compiled or as string)
    in order of occurrence.
>>> rexGroups(r'(?P<name>\w+) +(?P<surname>\w+)')
('name', 'surname')
"""
if isinstance(rex,basestring): rex = re.compile(rex)
return zip(*sorted([(n,g) for (g,n) in rex.groupindex.items()]))[1]
class IndexMaker(object):
    """Convenience class to make slices etc. that can be used for creating
indices (mainly because using `slice` is a PITA).
Examples:
>>> range(4)[indexme[::-1]] == range(4)[::-1] == [3, 2, 1, 0]
True
>>> indexme[::-1]
slice(None, None, -1)
>>> indexme[0,:]
(0, slice(None, None, None))
"""
def __getitem__(self, a):
return a
indexme = IndexMaker()
# A shortcut for 'infinite' integer e.g. for slicing: ``seq[4:INFI]`` as
# ``seq[4:len(seq)]`` is messy and only works if `seq` isn't an expression
INFI = sys.maxint
# real infinity
INF = 1e999999
class Result(object):
"""Circumvent python's lack of assignment expression (mainly useful for
writing while loops):
>>> import re
>>> s = 'one 2 three 4 five 6'
>>> findNumber = Result(re.compile('\d+').search)
>>> while findNumber(s):
... match = findNumber.result
... print 'found', `match.group(0)`, 'at position', match.start()
... s = s[match.end():]
...
found '2' at position 4
found '4' at position 7
found '6' at position 6
"""
def __init__(self, func):
self.func = func
def __call__(self,*args,**kwargs):
self.result = self.func(*args,**kwargs)
return self.result
class NullType(object):
r"""Similar to `NoneType` with a corresponding singleton instance `Null`
that, unlike `None` accepts any message and returns itself.
Examples:
>>> Null("send", a="message")(*"and one more")[
... "even index and"].what.you.get.still is Null
True
>>> not Null
True
>>> Null['something']
Null
>>> Null.something
Null
>>> Null in Null
False
>>> hasattr(Null, 'something')
True
>>> Null.something = "a value"
>>> Null.something
Null
>>> Null == Null
True
>>> Null == 3
False
"""
def __new__(cls): return Null
def __call__(self, *args, **kwargs): return Null
## def __getstate__(self, *args): return Null
def __getinitargs__(self):
print "__getinitargs__"
return ('foobar',)
def __getattr__(self, attr): return Null
def __getitem__(self, item): return Null
def __setattr__(self, attr, value): pass
def __setitem__(self, item, value): pass
def __len__(self): return 0
def __iter__(self): return iter([])
def __contains__(self, item): return False
def __repr__(self): return "Null"
Null = object.__new__(NullType)
def div(a,b):
    """``div(a,b)`` is like ``a // b`` if ``b`` divides ``a``, otherwise
an `ValueError` is raised.
>>> div(10,2)
5
>>> div(10,3)
Traceback (most recent call last):
...
ValueError: 3 does not divide 10
"""
res, fail = divmod(a,b)
if fail:
raise ValueError("%r does not divide %r" % (b,a))
else:
return res
def ipshuffle(l, random=None):
r"""Shuffle list `l` inplace and return it."""
import random as _random
_random.shuffle(l, random)
return l
__test__['ipshuffle'] = r'''
>>> l = [1,2,3]
>>> ipshuffle(l, lambda :0.3) is l
True
>>> l
[2, 3, 1]
>>> l = [1,2,3]
>>> ipshuffle(l, lambda :0.4) is l
True
>>> l
[3, 1, 2]
'''
def shuffle(seq, random=None):
r"""Return shuffled *copy* of `seq`."""
if isinstance(seq, list):
return ipshuffle(seq[:], random)
elif isString(seq):
# seq[0:0] == "" or u""
        return seq[0:0].join(ipshuffle(list(seq), random))
else:
return type(seq)(ipshuffle(list(seq),random))
__test__['shuffle'] = r'''
>>> l = [1,2,3]
>>> shuffle(l, lambda :0.3)
[2, 3, 1]
>>> l
[1, 2, 3]
>>> shuffle(l, lambda :0.4)
[3, 1, 2]
>>> l
[1, 2, 3]
'''
# s = open(file).read() would be a nice shorthand -- unfortunately it doesn't
# work (because the file is never properly closed, at least not under
# Jython). Thus:
def _normalizeToFile(maybeFile, mode, expand):
if isinstance(maybeFile, int):
return os.fdopen(maybeFile, mode)
elif isString(maybeFile):
if maybeFile.startswith('http://'): #XXX experimental
return urllib2.urlopen(maybeFile)
else:
if expand:
maybeFile = os.path.expandvars(os.path.expanduser(maybeFile))
return open(maybeFile, mode)
else:
return maybeFile
def slurp(file, binary=False, expand=False):
r"""Read in a complete file `file` as a string
Parameters:
- `file`: a file handle or a string (`str` or `unicode`).
- `binary`: whether to read in the file in binary mode (default: False).
"""
mode = "r" + ["b",""][not binary]
file = _normalizeToFile(file, mode=mode, expand=expand)
try: return file.read()
finally: file.close()
# FIXME write proper tests for IO stuff
def withFile(file, func, mode='r', expand=False):
"""Pass `file` to `func` and ensure the file is closed afterwards. If
`file` is a string, open according to `mode`; if `expand` is true also
expand user and vars.
"""
file = _normalizeToFile(file, mode=mode, expand=expand)
try: return func(file)
finally: file.close()
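# Example (illustrative, hypothetical path): read the first line of a file and have it
# closed for us, expanding "~" in the path:
#
#     first_line = withFile('~/notes.txt', lambda f: f.readline(), expand=True)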
def slurpLines(file, expand=False):
r"""Read in a complete file (specified by a file handler or a filename
string/unicode string) as list of lines"""
file = _normalizeToFile(file, "r", expand)
try: return file.readlines()
finally: file.close()
def slurpChompedLines(file, expand=False):
    r"""Return ``file`` as a list of chomped lines. See `slurpLines`."""
f=_normalizeToFile(file, "r", expand)
try: return list(chompLines(f))
finally: f.close()
def strToTempfile(s, suffix=None, prefix=None, dir=None, binary=False):
"""Create a new tempfile, write ``s`` to it and return the filename.
`suffix`, `prefix` and `dir` are like in `tempfile.mkstemp`.
"""
fd, filename = tempfile.mkstemp(**dict((k,v) for (k,v) in
[('suffix',suffix),('prefix',prefix),('dir', dir)]
if v is not None))
spitOut(s, fd, binary)
return filename
def spitOut(s, file, binary=False, expand=False):
r"""Write string `s` into `file` (which can be a string (`str` or
`unicode`) or a `file` instance)."""
mode = "w" + ["b",""][not binary]
file = _normalizeToFile(file, mode=mode, expand=expand)
try: file.write(s)
finally: file.close()
def spitOutLines(lines, file, expand=False):
    r"""Write all the `lines` to `file` (which can be
import csv
import random
from constants import Transport
from utils import sort_by_value
def merge_dicts(base, to_merge, operation):
    """ Merge to_merge dict into base dict, applying an operation when keys are the same"""
for k, v in to_merge.items():
if k in base:
base[k] = operation(base[k], v)
else:
base[k] = v
def unmerge_dicts(base, to_unmerge, operation):
    """ Unmerge to_unmerge dict from base dict by applying an operation on values when keys are the same"""
for k, v in to_unmerge.items():
if k in base:
base[k] = operation(base[k], v)
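# Illustrative example: merging two activity counters with addition keeps existing
# keys and sums the overlapping ones.
#
#     base = {'10:00': 5, '10:01': 3}
#     merge_dicts(base, {'10:01': 2, '10:02': 7}, lambda a, b: a + b)
#     # base == {'10:00': 5, '10:01': 5, '10:02': 7}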
def similarity(base, k, v):
if k in base:
compare_value = base[k]
length = min(len(compare_value), len(v))
count = 0
for i in range(length):
if (not compare_value[i].isalnum() and not v[i].isalnum()
) or compare_value[i].upper() == v[i].upper():
count += 1
else:
return -1
return count / max(len(compare_value), len(v))
return 0
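# Illustrative examples of the scoring above (keys and values are hypothetical):
#
#     similarity({'model': 'iPhone'}, 'model', 'iphone 6')   # 0.75 (6 matching chars / 8)
#     similarity({'model': 'iPhone'}, 'model', 'iPad')       # -1   (mismatch at index 2)
#     similarity({}, 'model', 'iPhone')                      # 0    (key not present)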
class ActivityDataManager(object):
"""Contains only utils classes to manage activity"""
def add_activity(self, time, bytes_count):
time_string = time.replace(microsecond=0).isoformat()
self.activity[time_string] = self.activity.get(time_string,
0) + int(bytes_count)
def merge_activity(self, other_activity):
def sum_fn(v1, v2):
return v1 + v2
merge_dicts(self.activity, other_activity, sum_fn)
class App(object):
"""An App is a container of services. An App corresponds to one device.
It has characteristics for the similarity logic to be applied
An App can be a Web Browser, a mobile App, etc.
"""
def __init__(self):
self.characteristics = {}
self.services = []
# Maps every stream to a service to have fast access
self.stream_to_service = {}
def get_size(self):
return sum(each.get_size() for each in self.services)
def update_app(self, app_args):
"""Add new characteristics and updates them if new characteristic is longer than current"""
for k in app_args:
current_value = self.characteristics.get(k)
new_value = app_args.get(k)
if (not current_value) or (new_value and
len(new_value) > len(current_value)):
self.characteristics[k] = new_value
def process_service_from_new_stream(self, service, time, length,
stream_number):
"""It can create a new service or find an existing one that matches.
It links the stream to the service
If new Service is created, it is added to App services
"""
existing = False
curr_service = service
for each in self.services:
if each.name == service.name:
existing = True
curr_service = each
break
if not existing:
self.services.append(curr_service)
curr_service.add_activity(time, length)
self.stream_to_service[stream_number] = curr_service
return curr_service
def sort_services(self):
s_map = {}
for each_service in self.services:
s_map[each_service] = each_service.get_size()
sort_by_value(self.services, s_map)
class Service(ActivityDataManager):
"""A Service represents a consumption of a Web Service
It can be detected by different methods:
- Url Service: it detects the name of the service based on the url.
For example: www.infobae.com will be 'infobae' service.
    This url can be obtained from the Host header of HTTP requests, or
    from a DNS request whose answered IP is later consumed.
    These services have a 'Generic' type.
    - DB Service: it is detected by the dataset, because of the url it is consuming or
    the IP. These services have a specific type.
    A Service will be:
    - Authorless Service: if the App that triggered it was not detected
    - App Consumed Service: if the App that consumed it was detected (also the device is detected)
    Services are the same if they have the same name
"""
def __init__(self):
self.activity = {}
self.name = None
self.type = None
self.ips = set()
self.hosts = set()
@classmethod
def from_characteristics(cls, characteristics):
service = cls()
service.name = characteristics.get('name') or 'Unknown'
service.type = characteristics.get('type') or 'Unknown'
return service
@classmethod
def from_service(cls, p_service):
service = cls()
service.name = p_service.name
service.type = p_service.type
service.activity = p_service.activity
service.ips = p_service.ips
service.hosts = p_service.hosts
return service
@classmethod
def from_name(cls, name):
service = cls()
service.name = name
service.type = 'Generic'
return service
@classmethod
def from_ip_only(cls, ip):
service = cls()
service.name = 'Unknown (IP {})'.format(ip)
service.type = 'Generic'
return service
def get_size(self):
return sum([v for k, v in self.activity.items()])
class AuthorlessService(Service):
"""An Authorless Service is a Service that has no App (and no Device) associated
    These services originate because of:
    - Encrypted Traffic: TLS traffic or proprietary protocols at TCP level
    - TCP Traffic that is not HTTP
    - UDP packets
- HTTP traffic with no information about the device
"""
def __init__(self):
super().__init__()
        # These services can have multiple streams from different devices
        # that are consuming this service. For example WhatsApp can be
        # used from different devices in the same capture
self.activity_per_stream = {Transport.UDP: {}, Transport.TCP: {}}
def add_activity_to_stream(self, protocol, stream, time, bytes_count):
time_string = time.replace(microsecond=0).isoformat()
if stream not in self.activity_per_stream[protocol]:
self.activity_per_stream[protocol][stream] = {}
self.activity_per_stream[protocol][stream][
time_string] = self.activity_per_stream[protocol][stream].get(
time_string, 0) + int(bytes_count)
def remove_activity_from_stream(self, protocol, stream):
def substract_fn(v1, v2):
return v1 - v2
unmerge_dicts(self.activity,
self.activity_per_stream[protocol][stream], substract_fn)
del self.activity_per_stream[protocol][stream]
def is_empty(self):
"""Return True if it has no more streams left.
        This occurs when all streams could be assigned to an App (and a Device)
"""
return self.activity_per_stream[
Transport.TCP] == {} and self.activity_per_stream[
Transport.UDP] == {}
class Device(ActivityDataManager):
    """A Device is a node of the network that has one unique IP inside the LAN.
    It is a container of Apps.
    It has extra activity (not only the App's Services activity)
    because it can have activity that does not involve Apps or Services, that is,
    activity originating from the OS itself (for example some HTTP message to 'connectivitycheck.android.com')
For example a smartphone, laptop, desktop computer, smartTV, etc.
"""
def __init__(self, inference_engine):
self.apps = []
self.characteristics = {}
self.activity = {}
# List of services that are not associated with App
self.unasigned_services = []
self.stream_to_unasigned_service = {}
# For the app (and later the service) can be obtained with stream number
self.stream_to_app = {}
self.inference_engine = inference_engine
def get_size(self):
return sum(e.get_size()
for e in self.apps) + sum(e.get_size()
for e in self.unasigned_services)
def sort_apps(self):
app_map = {}
for each_app in self.apps:
each_app.sort_services()
app_map[each_app] = each_app.get_size()
sort_by_value(self.apps, app_map)
def sort_unassigned_services(self):
us_map = {}
for each_service in self.unasigned_services:
us_map[each_service] = each_service.get_size()
sort_by_value(self.unasigned_services, us_map)
    def match_score(self, device_args, app_args):
        """Based on the device and app characteristic dictionaries, returns a correspondence score.
        If any characteristic is incompatible (e.g. the device is an iPad and the stored model is iPhone)
        it returns -1
"""
score = 0
for k, v in device_args.items():
sim = similarity(self.characteristics, k, v)
if sim == -1:
return -1
score += sim
for app in self.apps:
for k, v in app_args.items():
sim = similarity(app.characteristics, k, v)
if sim != -1:
score += sim
return score
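    # Illustrative note (hypothetical characteristics): a device already known as
    # {'model': 'iPhone'} scores 1.0 for device_args={'model': 'iphone'} via
    # similarity(), while device_args={'model': 'iPad'} makes the whole score -1,
    # because a single incompatible characteristic short-circuits the match.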
def update(self, device_args, app_args, stream_number):
"""Updates only characteristics of Device and, in some cases, corresponding App"""
if device_args:
self.update_device(device_args)
if app_args:
app = self.update_apps(app_args)
self.stream_to_app[stream_number] = app
def update_apps(self, app_args):
"""Updates an App. This App can be:
- Best matching App
- New App
"""
apps = []
max_score = float('-inf')
for each_app in self.apps:
score = 0
incompatible = False
for k, v in app_args.items():
sim = similarity(each_app.characteristics, k, v)
if sim == -1:
incompatible = True
break
else:
score += sim
if not incompatible:
if score > 0:
if score == max_score:
apps.append(each_app)
elif score > max_score:
max_score, apps = score, [each_app]
app = None
if apps:
app = random.choice(apps)
elif app_args:
app = App()
self.apps.append(app)
app.update_app(app_args)
return app
def update_device(self, device_args):
"""Add new characteristics and updates them if new characteristic is longer than current.
After that, it checks if it can infer new characteristics with the inference_engine
"""
for k in device_args:
current_value = self.characteristics.get(k)
new_value = device_args.get(k)
if (not current_value) or (new_value and
len(new_value) > len(current_value)):
self.characteristics[k] = new_value
inferences = self.inference_engine.analyze_inference(
self.characteristics)
if inferences:
self.characteristics.update(inferences)
def process_unasigned_service_from_new_stream(self, service, time, length,
stream_number):
"""It can create a new service or find an existing one that matches.
It links the stream to the service
If new Service is created, it is added to unasigned services
"""
existing = False
curr_service = service
for each in self.unasigned_services:
if each.name == service.name:
existing = True
curr_service = each
break
if not existing:
self.unasigned_services.append(curr_service)
curr_service.add_activity(time, length)
self.stream_to_unasigned_service[stream_number] = curr_service
return curr_service
def get_service_from_stream(self, stream_number):
"""Can return an App Service or an unasigned Service"""
app = self.stream_to_app.get(stream_number)
if app:
return app.stream_to_service.get(stream_number)
else:
unasigned_service = self.stream_to_unasigned_service.get(
stream_number)
            if not
#! /usr/bin/env python3
# -*- coding:utf-8 -*-
"""
################################################################################
PyEdit :a Python/tkinter text file editor and component.
Uses the Tk text widget, plus GuiMaker menus and toolbar buttons to
implement a full-featured text editor that can be run as a standalone
program, and attached as a component to other GUIs. Also used by
PyMailGUI and PyView to edit mail text and image file notes, and by
PyMailGUI and PyDemos in pop-up mode to display source and text files.
TBD (and suggested exercises):
-could also allow search case choice in GUI (not just config file)
-could use re patterns for searches and greps (see text chapter)
-could experiment with syntax-directed text colorization (see IDLE, others)
-could try to verify app exit for quit() in non-managed windows too?
-could queue each result as found in grep dialog thread to avoid delay
-could use images in toolbar buttons (per examples of this in Chapter 9)
-could scan line to map Tk insert position column to account for tabs on Info
-could experiment with "grep" tbd Unicode issues (see notes in the code);
################################################################################
"""
import os # platform, args, run tools
from tkinter.colorchooser import askcolor
from tkinter.filedialog import Open, SaveAs # standard dialogs
from tkinter.messagebox import showerror, askyesno
from tkinter.simpledialog import askstring, askinteger
from minghu6.gui.guimaker import * # Frame + menu/toolbar builders
# general configurations
try:
import textConfig # startup font and colors
configs = textConfig.__dict__ # work if not on the path or bad
except:
import minghu6.gui.textConfig as textConfig # define in client app directory
configs = textConfig.__dict__
# config={}
helptext = """PyEdit a Python/tkinter text editor
supports:
-grep
-font customization
-background customization
-find/refind
-replace
-goto
-open/save
place all configuration info in textConfig.py
base file in the minghu6 package;
a textConfig.py in the current dir can override the base file
"""
START = '1.0' # index of first char: row=1,col=0
SEL_FIRST = SEL + '.first' # map sel tag to index
SEL_LAST = SEL + '.last' # same as 'sel.last'
FontScale = 0 # use bigger font on Linux
if sys.platform[:3] != 'win': # and other non-Windows boxes
FontScale = 3
################################################################################
# Main class: implements editor GUI, actions
# requires a flavor of GuiMaker to be mixed in by more specific subclasses;
# not a direct subclass of GuiMaker because that class takes multiple forms.
################################################################################
class TextEditor: # mix with menu/toolbar Frame class
startfiledir = '.' # for dialogs
editwindows = [] # for process-wide quit check
# Unicode configurations
# imported in class to allow overrides in subclass or self
if __name__ == '__main__':
pass
else:
pass
ftypes = [('All files', '*'), # for file open dialog
('Text files', '.txt'), # customize in subclass
('Python files', '.py')] # or set in each instance
colors = [{'fg': 'black', 'bg': 'white'}, # color pick list
{'fg': 'yellow', 'bg': 'black'}, # first item is default
{'fg': 'white', 'bg': 'blue'}, # tailor me as desired
{'fg': 'black', 'bg': 'beige'}, # or do PickBg/Fg chooser
{'fg': 'yellow', 'bg': 'purple'},
{'fg': 'black', 'bg': 'brown'},
{'fg': 'lightgreen', 'bg': 'darkgreen'},
{'fg': 'darkblue', 'bg': 'orange'},
{'fg': 'orange', 'bg': 'darkblue'}]
fonts = [('courier', 9 + FontScale, 'normal'), # platform-neutral fonts
('courier', 12 + FontScale, 'normal'), # (family, size, style)
('courier', 10 + FontScale, 'bold'), # or pop up a listbox
('courier', 10 + FontScale, 'italic'), # make bigger on Linux
('times', 10 + FontScale, 'normal'), # use 'bold italic' for 2
('helvetica', 10 + FontScale, 'normal'), # also 'underline', etc.
('ariel', 10 + FontScale, 'normal'),
('system', 10 + FontScale, 'normal'),
('courier', 20 + FontScale, 'normal')]
def __init__(self, loadFirst='', loadEncode=''):
if not isinstance(self, GuiMaker):
raise TypeError('TextEditor needs a GuiMaker mixin')
self.setFileName(None)
self.lastfind = None
self.openDialog = None
self.saveDialog = None
self.knownEncoding = None # 2.1 Unicode: till Open or Save
self.text.focus() # else must click in text
if loadFirst:
self.update() # 2.1: else @ line 2; see book
self.onOpen(loadFirst, loadEncode)
def start(self): # run by GuiMaker.__init__
self.menuBar = [ # configure menu/toolbar
('File', 0, # a GuiMaker menu def tree
[('Open...', 0, self.onOpen), # build in method for self
('Save', 0, self.onSave), # label, shortcut, callback
('Save As...', 5, self.onSaveAs),
('New', 0, self.onNew),
'separator',
('Quit...', 0, self.onQuit)]
),
('Edit', 0,
[('Undo', 0, self.onUndo),
('Redo', 0, self.onRedo),
'separator',
('Cut', 0, self.onCut),
('Copy', 1, self.onCopy),
('Paste', 0, self.onPaste),
'separator',
('Delete', 0, self.onDelete),
('Select All', 0, self.onSelectAll)]
),
('Search', 0,
[('Goto...', 0, self.onGoto),
('Find...', 0, self.onFind),
('Refind', 0, self.onRefind),
('Replace...', 0, self.onChange),
('Grep...', 3, self.onGrep)]
),
('Tools', 0,
[('Pick Font...', 6, self.onPickFont),
('Font List', 0, self.onFontList),
'separator',
('Pick Bg...', 3, self.onPickBg),
('Pick Fg...', 0, self.onPickFg),
('Color List', 0, self.onColorList),
'separator',
('Info...', 0, self.onInfo),
('Clone', 1, self.onClone),
('Run Code', 0, self.onRunCode)]
)]
self.toolBar = [
('Save', self.onSave, {'side': LEFT}),
('Cut', self.onCut, {'side': LEFT}),
('Copy', self.onCopy, {'side': LEFT}),
('Paste', self.onPaste, {'side': LEFT}),
('Find', self.onRefind, {'side': LEFT}),
('Help', self.help, {'side': RIGHT}),
('Quit', self.onQuit, {'side': RIGHT})]
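    # Note on the structures above (illustrative): each menuBar entry is a
    # (label, underline-index, items) tuple, where items is a list of
    # (label, underline-index, callback) tuples or the string 'separator'; in the
    # PP4E-style GuiMaker a nested list in place of a callback typically builds a
    # cascading submenu. Each toolBar entry is (label, callback, pack-options), e.g.:
    #
    #     ('Tools', 0, [('Pick Font...', 6, self.onPickFont), 'separator'])
    #     ('Save', self.onSave, {'side': LEFT})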
def makeWidgets(self): # run by GuiMaker.__init__
name = Label(self, bg='black', fg='white') # add below menu, above tool
name.pack(side=TOP, fill=X) # menu/toolbars are packed
# GuiMaker frame packs itself
vbar = Scrollbar(self)
hbar = Scrollbar(self, orient='horizontal')
text = Text(self, padx=5, wrap='none') # disable line wrapping
text.config(undo=1, autoseparators=1) # 2.0, default is 0, 1
vbar.pack(side=RIGHT, fill=Y)
hbar.pack(side=BOTTOM, fill=X) # pack text last
text.pack(side=TOP, fill=BOTH, expand=YES) # else sbars clipped
text.config(yscrollcommand=vbar.set) # call vbar.set on text move
text.config(xscrollcommand=hbar.set)
vbar.config(command=text.yview) # call text.yview on scroll move
hbar.config(command=text.xview) # or hbar['command']=text.xview
# 2.0: apply user configs or defaults
startfont = configs.get('font', self.fonts[0])
startbg = configs.get('bg', self.colors[0]['bg'])
startfg = configs.get('fg', self.colors[0]['fg'])
text.config(font=startfont, bg=startbg, fg=startfg)
if 'height' in configs: text.config(height=configs['height'])
if 'width' in configs: text.config(width=configs['width'])
self.text = text
self.filelabel = name
############################################################################
# File menu commands
############################################################################
def my_askopenfilename(self): # objects remember last result dir/file
if not self.openDialog:
self.openDialog = Open(initialdir=self.startfiledir,
filetypes=self.ftypes)
return self.openDialog.show()
def my_asksaveasfilename(self): # objects remember last result dir/file
if not self.saveDialog:
self.saveDialog = SaveAs(initialdir=self.startfiledir,
filetypes=self.ftypes)
return self.saveDialog.show()
def onOpen(self, loadFirst='', loadEncode=''):
"""
tests if file is okay ahead of time to try to avoid opens;
we could also load and manually decode bytes to str to avoid
multiple open attempts, but this is unlikely to try all cases;
encoding behavior is configurable in the local textConfig.py:
1) tries known type first if passed in by client (email_self charsets)
2) if opensAskUser True, try user input next (prefill wih defaults)
3) if opensEncoding nonempty, try this encoding next: 'latin-1', etc.
4) tries sys.getdefaultencoding() platform default next
5) uses binary mode bytes and Tk policy as the last resort
"""
if self.text_edit_modified(): # 2.0
if not askyesno('PyEdit', 'Text has changed: discard changes?'):
return
file = loadFirst or self.my_askopenfilename()
if not file:
return
if not os.path.isfile(file):
showerror('PyEdit', 'Could not open file ' + file)
return
# try known encoding if passed and accurate (e.g., email_self)
text = None # empty file = '' = False: test for None!
if loadEncode:
try:
text = open(file, 'r', encoding=loadEncode).read()
self.knownEncoding = loadEncode
except (UnicodeError, LookupError, IOError): # lookup: bad name
pass
# try user input, prefill with next choice as default
if text is None and self.opensAskUser:
self.update() # else dialog doesn't appear in rare cases
askuser = askstring('PyEdit', 'Enter Unicode encoding for open',
initialvalue=(self.opensEncoding or
sys.getdefaultencoding() or ''))
self.text.focus() # else must click
if askuser:
try:
text = open(file, 'r', encoding=askuser).read()
self.knownEncoding = askuser
except (UnicodeError, LookupError, IOError):
pass
# try config file (or before ask user?)
if text is None and self.opensEncoding:
try:
text = open(file, 'r', encoding=self.opensEncoding).read()
self.knownEncoding = self.opensEncoding
except (UnicodeError, LookupError, IOError):
pass
# try platform default (utf-8 on windows; try utf8 always?)
if text is None:
try:
text = open(file, 'r', encoding=sys.getdefaultencoding()).read()
self.knownEncoding = sys.getdefaultencoding()
except (UnicodeError, LookupError, IOError):
pass
# last resort: use binary bytes and rely on Tk to decode
if text is None:
try:
text = open(file, 'rb').read() # bytes for Unicode
text = text.replace(b'\r\n', b'\n') # for display, saves
self.knownEncoding = None
except IOError:
pass
if text is None:
showerror('PyEdit', 'Could not decode and open file ' + file)
else:
self.setAllText(text)
self.setFileName(file)
self.text.edit_reset() # 2.0: clear undo/redo stks
self.text.edit_modified(0) # 2.0: clear modified flag
def onSave(self):
from django.db import models
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.contrib.auth.models import PermissionsMixin
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from django.core.exceptions import ValidationError, NON_FIELD_ERRORS
from phonenumber_field.modelfields import PhoneNumberField
from dirtyfields import DirtyFieldsMixin
from django_flex_user.validators import FlexUserUnicodeUsernameValidator
from django_flex_user.fields import CICharField
# Reference: https://docs.djangoproject.com/en/3.0/topics/auth/customizing/
# Reference: https://simpleisbetterthancomplex.com/tutorial/2016/07/22/how-to-extend-django-user-model.html
class FlexUserManager(BaseUserManager):
"""
Our custom implementation of django.contrib.auth.models.UserManager.
"""
@classmethod
def normalize_email(cls, email):
"""
Normalize email by lowercasing and IDNA encoding its domain part.
:param email:
:return:
"""
if email is None:
return None
try:
email_name, domain_part = email.strip().rsplit('@', 1)
email = email_name + '@' + domain_part.lower().encode('idna').decode('ascii')
except UnicodeError:
pass
except ValueError:
pass
return email
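    # Illustrative examples (addresses are hypothetical): only the domain part is
    # lowercased and IDNA-encoded; the local part is preserved as given.
    #
    #     normalize_email('John.Doe@EXAMPLE.COM')   -> 'John.Doe@example.com'
    #     normalize_email('user@münchen.example')   -> 'user@xn--mnchen-3ya.example'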
def _create_user(self, username=None, email=None, phone=None, password=None, **extra_fields):
user = self.model(username=username, email=email, phone=phone, **extra_fields)
user.set_password(password)
user.full_clean()
user.save(using=self._db)
return user
def create_user(self, username=None, email=None, phone=None, password=None, **extra_fields):
"""
Create a user. You must supply at least one of ``username``, ``email``, or ``phone``.
If ``password`` is None, the user's password will be set using \
:meth:`~django.contrib.auth.models.User.set_unusable_password`.
.. warning::
This method does not run :setting:`AUTH_PASSWORD_VALIDATORS` against ``password``. It's the
caller's responsibility to run password validators before calling this method.
:param username: The username for the user, defaults to None.
:type username: str, optional
:param email: The email address for the user, defaults to None.
:type email: str, optional
:param phone: The phone number for the user, defaults to None.
:type phone: str, optional
:param password: The password for the user, defaults to None.
:type password: str, optional
:param extra_fields: Additional model fields you wish to set for the user.
:type extra_fields: dict, optional
:raises ~django.core.exceptions.ValidationError: If any of the supplied parameters fails model field validation
(e.g. the supplied phone number is already in use by another user, the supplied username is invalid, etc.)
:return: The newly created user.
:rtype: ~django_flex_user.models.user.FlexUser
"""
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
return self._create_user(username, email, phone, password, **extra_fields)
def create_superuser(self, username=None, email=None, phone=None, password=None, **extra_fields):
"""
Create a super user. You must supply at least one of ``username``, ``email``, or ``phone``.
If ``password`` is None, the user's password will be set using \
:meth:`~django.contrib.auth.models.User.set_unusable_password`.
.. warning::
This method does not run :setting:`AUTH_PASSWORD_VALIDATORS` against ``password``. It's the
caller's responsibility to run password validators before calling this method.
:param username: The username for the user, defaults to None.
:type username: str, optional
:param email: The email address for the user, defaults to None.
:type email: str, optional
:param phone: The phone number for the user, defaults to None.
:type phone: str, optional
        :param password: The password for the user, defaults to None.
:type password: str, optional
:param extra_fields: Additional model fields you wish to set for the user.
:type extra_fields: dict, optional
:raises ~django.core.exceptions.ValidationError: If any of the supplied parameters fails model field validation
(e.g. the supplied phone number is already in use by another user, the supplied username is invalid, etc.)
:return: The newly created user.
:rtype: ~django_flex_user.models.user.FlexUser
"""
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(username, email, phone, password, **extra_fields)
def get_by_natural_key(self, username=None, email=None, phone=None):
if username is None and email is None and phone is None:
raise ValueError('You must supply at least one of username, email or phone number')
q = {}
if username is not None:
q.update({'username': username})
if email is not None:
q.update({'email': email})
if phone is not None:
q.update({'phone': phone})
return self.get(**q)
class FlexUser(AbstractBaseUser, PermissionsMixin, DirtyFieldsMixin):
"""
Our implementation django.contrib.auth.models.User.
This user model is designed to give users the flexibility to sign up and sign in using their choice of username,
email address or phone number.
Our implementation is identical to django.contrib.auth.models.User except in the following ways:
username field sets null=True and blank=True.
email field sets null=True and blank = True.
phone field is introduced. It defines unique=True, null=True and blank=True.
first_name and last_name fields are omitted.
For each of username, email and phone we set blank = True to preserve the ordinary functioning of the
admin site. Setting blank = True on model fields results in form fields which have required = False set,
thereby enabling users to supply any subset of username, email and phone when configuring a user on the
    admin site. Furthermore, when null = True and blank = True are set together on model fields, the values of empty
    form fields are conveniently coerced to None. Unfortunately, setting blank = True on model fields has the
    undesirable consequence that empty string values will not be rejected by the clean_fields/full_clean methods. To
remedy this, we reject empty string values for username, email and phone in our clean method (see below).
clean method:
- Ensures that at least one of username, email or phone is defined for the user.
- Ensures that none of username, email and phone are equal to the empty string. We must do this
because we set blank = True for each of these fields (see above).
- Normalizes email in addition to username.
get_username method returns one of username, email, phone or id. This method evaluates each of these
fields in order and returns the first truthy value.
natural_key method returns a tuple of username, email and phone.
We place the following restrictions on username, email and phone:
- It shouldn't be possible to interpret username as an email address or phone number
- It shouldn't be possible to interpret email as a username or phone number
- It shouldn't be possible to interpret phone as a username or email address
These restrictions are enforced by field validators which apply the constraints below:
- username may not begin with "+" or a decimal number, nor may it contain "@"
- email must contain "@"
- phone must contain "+" and may not contain "@"
These constraints make it possible to receive an unspecified user identifier and infer whether it is a username,
email address or phone number.
"""
username_validator = FlexUserUnicodeUsernameValidator()
email = models.EmailField(
_('email address'),
unique=True,
null=True, # new
blank=True, # new
error_messages={
'unique': _("A user with that email address already exists."),
},
)
phone = PhoneNumberField( # new
_('phone number'),
unique=True,
null=True,
blank=True,
error_messages={
'unique': _("A user with that phone number already exists."),
},
)
# username = models.CharField(
# _('username'),
# max_length=150,
# unique=True,
# null=True, # new
# blank=True, # new
# help_text=_('150 characters or fewer. Letters, digits and ./-/_ only.'),
# validators=[username_validator],
# error_messages={
# 'unique': _("A user with that username already exists."),
# },
# )
username = CICharField(
_('username'),
max_length=150,
unique=True,
null=True, # new
blank=True, # new
help_text=_('150 characters or fewer. Letters, digits and ./-/_ only.'),
validators=[username_validator],
error_messages={
'unique': _("A user with that username already exists."),
},
)
is_staff = models.BooleanField(
_('staff status'),
default=False,
help_text=_('Designates whether the user can log into this admin site.'),
)
is_active = models.BooleanField(
_('active'),
default=True,
help_text=_(
'Designates whether this user should be treated as active. '
'Unselect this instead of deleting accounts.'
),
)
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
# We remove these fields from our user model implementation
# first_name = models.CharField(_('first name'), max_length=30, blank=True)
# last_name = models.CharField(_('last name'), max_length=150, blank=True)
EMAIL_FIELD = 'email'
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = []
objects = FlexUserManager()
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
def clean(self):
errors = {}
if self.username is None and self.email is None and self.phone is None:
errors[NON_FIELD_ERRORS] = 'You must supply at least one of {username}, {email} or {phone}.'.format(
username=self._meta.get_field('username').verbose_name,
email=self._meta.get_field('email').verbose_name,
phone=self._meta.get_field('phone').verbose_name
)
# For fields which have blank = False:
# django.db.models.fields.Field.clean first executes django.db.models.fields.Field.validate which raises an
# exception if the field contains a blank value. If an exception is raised, the subsequent call to
# django.db.models.fields.Field.run_validators is not made.
#
# For fields which have blank = True:
# django.db.models.base.Model.clean_fields executes django.db.models.fields.Field.clean for each of its fields.
        # However, it skips this call for fields which contain a blank value and have blank = True set, which is
        # why empty string values for username, email and phone must be rejected explicitly in this clean method.
            raise TypeError('You need to provide a tuple of 2 elements not ' + str(type(value)))
pool, list_input = value
self._delitem(key)
node = ElementTree.SubElement(self.rootnode(self.instance), 'pool')
node.attrib['name'] = key
if pool and pool.uri:
node.attrib['output-uri'] = pool.uri
for inart in list_input:
sub = ElementTree.Element('input')
sub.attrib['uri'] = inart.uri
node.append(sub)
def _delitem(self, key):
for node in self._elems:
if node.attrib['name'] == key:
self.rootnode(self.instance).remove(node)
break
def _parse_element(self, element, **kwargs):
from pyclarity_lims.entities import Artifact
if 'output-uri' in element.attrib:
pool = Artifact(self.instance.lims, uri=element.attrib.get('output-uri'))
else:
pool = None
dict.__setitem__(
self,
element.attrib.get('name'),
(
pool,
tuple(Artifact(self.instance.lims, uri=sub.attrib.get('uri')) for sub in element.findall('input'))
)
)
# List types
class XmlList(XmlMutable, list):
"""Class that behaves like a list and modifies the provided instance as the list gets updated"""
def __init__(self, instance, *args, **kwargs):
XmlMutable.__init__(self, instance=instance)
list.__init__(self, *args, **kwargs)
self._update_elems()
self._prepare_list()
def _prepare_list(self):
for i, elem in enumerate(self._elems):
self._parse_element(elem, lims=self.instance.lims, position=i)
def clear(self):
# python 2.7 does not have a clear function for list
del self[:]
for elem in self._elems:
self.rootnode(self.instance).remove(elem)
self._update_elems()
def __add__(self, other_list):
for item in other_list:
self._additem(item)
self._update_elems()
return list.__add__(self, [self._modify_value_before_insert(v, len(self) + i) for i, v in enumerate(other_list)])
def __iadd__(self, other_list):
for item in other_list:
self._additem(item)
self._update_elems()
return list.__iadd__(self, [self._modify_value_before_insert(v, len(self) + i) for i, v in enumerate(other_list)])
def __setitem__(self, i, item):
if isinstance(i, slice):
new_items = []
slice_range = range(*i.indices(len(self)))
if len(slice_range) != len(item):
raise ValueError('Setting slice and list of different sizes is not supported: %s != %s' % (len(slice_range), len(item)))
for k, v in zip(slice_range, item):
self._setitem(k, v)
new_items.append(self._modify_value_before_insert(v, k))
item = new_items
elif isinstance(i, int):
self._setitem(i, item)
item = self._modify_value_before_insert(item, i)
else:
            raise TypeError('List indices must be integers or slices, not ' + str(type(i)))
self._update_elems()
return list.__setitem__(self, i, item)
def insert(self, i, item):
self._insertitem(i, item)
self._update_elems()
list.insert(self, i, self._modify_value_before_insert(item, i))
# Hack to make sure subsequent elements get updated if they went through _modify_value_before_insert
new_items = []
for p, v in enumerate(self[i + 1:]):
new_items.append(self._modify_value_before_insert(v, i + 1 + p))
list.__setitem__(self, slice(i + 1, len(self), 1), new_items)
def append(self, item):
self._additem(item)
self._update_elems()
return list.append(self, self._modify_value_before_insert(item, len(self)))
def extend(self, iterable):
for v in iterable:
self._additem(v)
self._update_elems()
return list.extend(self, [self._modify_value_before_insert(v, len(self) + i) for i, v in enumerate(iterable)])
def _additem(self, value):
node = self._create_new_node(value)
self.rootnode(self.instance).append(node)
def _insertitem(self, index, value):
node = self._create_new_node(value)
self.rootnode(self.instance).insert(index, node)
def _setitem(self, index, value):
node = self._create_new_node(value)
# Remove the old value in the xml
self._delitem(index)
# Insert it in place
self.rootnode(self.instance).insert(index, node)
def _delitem(self, index):
# Remove the value in the xml and update the cached _elems
self.rootnode(self.instance).remove(self._elems[index])
def _update_elems(self):
raise NotImplementedError
def _parse_element(self, element, **kwargs):
raise NotImplementedError
def _create_new_node(self, value):
raise NotImplementedError
def _modify_value_before_insert(self, value, position):
"""Give subclasses an opportunity to alter the data before inserting. This function is called for each value
being inserted into the list.
"""
return value
class TagXmlList(XmlList, Nestable):
"""Abstract class that creates elements of the list based on the provided tag."""
def __init__(self, instance, tag, nesting=None, *args, **kwargs):
self.tag = tag
Nestable.__init__(self, nesting)
XmlList.__init__(self, instance=instance, *args, **kwargs)
def _update_elems(self):
self._elems = self.rootnode(self.instance).findall(self.tag)
class XmlTextList(TagXmlList):
"""This is a list of strings linked to an element's text.
The list can only contain strings but can be passed any type, which will be converted to strings"""
def _create_new_node(self, value):
node = ElementTree.Element(self.tag)
node.text = str(value)
return node
def _parse_element(self, element, lims, **kwargs):
list.append(self, element.text)
class XmlAttributeList(TagXmlList):
"""This is a list of dicts linked to an element's attributes.
The list can only contain and be provided with dict elements.
The internal dicts are XmlElementAttributeDict objects which can be modified directly to modify the XML"""
def _create_new_node(self, value):
if not isinstance(value, dict):
            raise TypeError('You need to provide a dict not ' + str(type(value)))
node = ElementTree.Element(self.tag)
for k, v in value.items():
node.attrib[k] = v
return node
def _parse_element(self, element, lims, position, **kwargs):
d = XmlElementAttributeDict(self.instance, tag=self.tag, nesting=self.rootkeys, position=position)
list.append(self, d)
def _modify_value_before_insert(self, value, position):
return XmlElementAttributeDict(self.instance, tag=self.tag, nesting=self.rootkeys, position=position)
class XmlActionList(TagXmlList):
def __init__(self, instance, *args, **kwargs):
TagXmlList.__init__(self, instance, tag='next-action', nesting=['next-actions'], *args, **kwargs)
def _create_new_node(self, value):
if not isinstance(value, dict):
            raise TypeError('You need to provide a dict not ' + str(type(value)))
node = ElementTree.Element(self.tag)
for k, v in value.items():
if k in ['artifact', 'step', 'rework-step']:
k = k + '-uri'
v = v.uri
node.attrib[k] = v
return node
def _parse_element(self, element, lims, position, **kwargs):
d = XmlAction(self.instance, tag=self.tag, nesting=self.rootkeys, position=position)
list.append(self, d)
def _modify_value_before_insert(self, value, position):
return XmlAction(self.instance, tag=self.tag, nesting=self.rootkeys, position=position)
class XmlReagentLabelList(XmlAttributeList):
"""List of reagent labels."""
def __init__(self, instance, nesting=None, *args, **kwargs):
XmlAttributeList.__init__(self, instance, tag='reagent-label', nesting=nesting, *args, **kwargs)
def _create_new_node(self, value):
return XmlAttributeList._create_new_node(self, {'name': value})
def _parse_element(self, element, lims, **kwargs):
list.append(self, element.attrib['name'])
def _modify_value_before_insert(self, value, position):
return value
class EntityList(TagXmlList):
"""List of entities. The list can only contain entities of the provided class (klass)"""
def __init__(self, instance, tag, klass, nesting=None, *args, **kwargs):
self.klass = klass
TagXmlList.__init__(self, instance, tag, nesting=nesting, *args, **kwargs)
def _create_new_node(self, value):
if not isinstance(value, self.klass):
raise TypeError('You need to provide an %s not %s' % (self.klass, type(value)))
node = ElementTree.Element(self.tag)
node.attrib['uri'] = value.uri
return node
def _parse_element(self, element, lims, **kwargs):
list.append(self, self.klass(lims, uri=element.attrib['uri']))
class XmlInputOutputMapList(TagXmlList):
"""An instance attribute yielding a list of tuples (input, output)
where each item is a dictionary, representing the input/output
maps of a Process instance.
"""
def __init__(self, instance, *args, **kwargs):
TagXmlList.__init__(self, instance, 'input-output-map', *args, **kwargs)
def _parse_element(self, element, lims, **kwargs):
input_element = self._get_dict(lims, element.find('input'))
output_element = self._get_dict(lims, element.find('output'))
list.append(self, (input_element, output_element))
def _get_dict(self, lims, node):
from pyclarity_lims.entities import Artifact, Process
if node is None:
return None
result = dict()
for key in ['limsid', 'output-type', 'output-generation-type']:
try:
result[key] = node.attrib[key]
except KeyError:
pass
for uri in ['uri', 'post-process-uri']:
try:
result[uri] = Artifact(lims, uri=node.attrib[uri])
except KeyError:
pass
node = node.find('parent-process')
if node is not None:
result['parent-process'] = Process(lims, node.attrib['uri'])
return result
def _set_dict(self, element, value_dict):
for key in ['limsid', 'output-type', 'output-generation-type']:
if key in value_dict:
element.attrib[key] = value_dict[key]
for key in ['uri', 'post-process-uri']:
if key in value_dict:
element.attrib[key] = value_dict[key].uri
if 'parent-process' in value_dict:
node = ElementTree.SubElement(element, 'parent-process')
node.attrib['uri'] = value_dict['parent-process'].uri
    def _create_new_node(self, value):
if not isinstance(value, tuple):
raise TypeError('You need to provide a tuple not %s' % (type(value)))
if len(value) != 2:
raise TypeError('You need to provide a tuple with 2 values, found %s' % len(value))
input_dict, output_dict = value
node = ElementTree.Element(self.tag)
input_element = ElementTree.SubElement(node, 'input')
output_element = ElementTree.SubElement(node, 'output')
self._set_dict(input_element, input_dict)
self._set_dict(output_element, output_dict)
return node
class OutputPlacementList(TagXmlList):
"""This is a list of output placements as found in the StepPlacement. The list contains tuples organised as follows:
(A, (B, C)) where
A is an artifact
B is a container
C is a string specifying the location such as "1:1"
"""
def __init__(self, instance, *args, **kwargs):
TagXmlList.__init__(self, instance, tag='output-placement', nesting=['output-placements'], *args, **kwargs)
def _create_new_node(self, value):
if not isinstance(value, tuple):
raise TypeError('You need to provide a tuple not %s' % (type(value)))
art, location = value
container, position = location
node = ElementTree.Element(self.tag)
node.attrib['uri'] = art.uri
elem = ElementTree.SubElement(node, 'location')
ElementTree.SubElement(elem, 'container', uri=container.uri, limsid=container.id)
v = ElementTree.SubElement(elem, 'value')
v.text = position
return node
def _parse_element(self, element, lims, **kwargs):
from pyclarity_lims.entities import Artifact, Container
input = Artifact(lims, uri=element.attrib['uri'])
loc = element.find('location')
location = (None, None)
        if loc is not None:
location = (
Container(lims, uri=loc.find('container').attrib['uri']),
loc.find('value').text
)
list.append(self, (input, location))
class ExternalidList(XmlList):
def _update_elems(self):
self._elems = self.rootnode(self.instance).findall(nsmap('ri:externalid'))
def _create_new_node(self, value):
if not isinstance(value, tuple):
            raise TypeError('You need to provide a tuple not ' + str(type(value)))
node = ElementTree.Element(nsmap('ri:externalid'))
id, uri = value
node.attrib['id'] = id
node.attrib['uri'] = uri
return node
def _parse_element(self, element, **kwargs):
list.append(self, (element.attrib.get('id'), element.attrib.get('uri')))
class QueuedArtifactList(TagXmlList):
"""This is a list of Artifacts associated with the time they spent in the queue and their location on a plate.
The list contains tuples organised as follows:
(A, B, (C, D)) where
A is an artifact
B is a datetime object,
C is a container
D is a string specifying the location such as "1:1"
"""
def __init__(self, instance, *args, **kwargs):
TagXmlList.__init__(self, instance, tag='artifact', nesting=['artifacts'], *args, **kwargs)
def _parse_element(self, element, lims, **kwargs):
from pyclarity_lims.entities import Artifact, Container
input_art = Artifact(lims, uri=element.attrib['uri'])
loc = element.find('location')
        location = (None, None)
__all__ = [
"bump_version",
"check_version",
"get_version",
"serialize_pep440",
"serialize_pvp",
"serialize_semver",
"Style",
"Vcs",
"Version",
]
import copy
import datetime as dt
import re
import shlex
import shutil
import subprocess
from collections import OrderedDict
from enum import Enum
from functools import total_ordering
from pathlib import Path
from typing import (
Any,
Callable,
List,
Mapping,
NamedTuple,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
)
_VERSION_PATTERN = r"""
(?x) (?# ignore whitespace)
^v((?P<epoch>\d+)!)?(?P<base>\d+(\.\d+)*) (?# v1.2.3 or v1!2000.1.2)
([-._]?((?P<stage>[a-zA-Z]+)[-._]?(?P<revision>\d+)?))? (?# b0)
(\+(?P<tagged_metadata>.+))?$ (?# +linux)
""".strip()
# PEP 440: [N!]N(.N)*[{a|b|rc}N][.postN][.devN][+<local version label>]
_VALID_PEP440 = r"^(\d+!)?\d+(\.\d+)*((a|b|rc)\d+)?(\.post\d+)?(\.dev\d+)?(\+.+)?$"
_VALID_SEMVER = (
r"^\d+\.\d+\.\d+(\-[a-zA-z0-9\-]+(\.[a-zA-z0-9\-]+)*)?(\+[a-zA-z0-9\-]+(\.[a-zA-z0-9\-]+)*)?$"
)
_VALID_PVP = r"^\d+(\.\d+)*(-[a-zA-Z0-9]+)*$"
_T = TypeVar("_T")
class Style(Enum):
Pep440 = "pep440"
SemVer = "semver"
Pvp = "pvp"
class Vcs(Enum):
Any = "any"
Git = "git"
Mercurial = "mercurial"
Darcs = "darcs"
Subversion = "subversion"
Bazaar = "bazaar"
Fossil = "fossil"
def _run_cmd(
command: str,
codes: Sequence[int] = (0,),
where: Path = None,
shell: bool = False,
env: dict = None,
) -> Tuple[int, str]:
result = subprocess.run(
shlex.split(command),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=str(where) if where is not None else None,
shell=shell,
env=env,
)
output = result.stdout.decode().strip()
if codes and result.returncode not in codes:
raise RuntimeError(
"The command '{}' returned code {}. Output:\n{}".format(
command, result.returncode, output
)
)
return (result.returncode, output)
_MatchedVersionPattern = NamedTuple(
"_MatchedVersionPattern",
[
("matched_tag", str),
("base", str),
("stage_revision", Optional[Tuple[str, Optional[int]]]),
("newer_tags", Sequence[str]),
("tagged_metadata", Optional[str]),
("epoch", Optional[int]),
],
)
def _match_version_pattern(
pattern: str, sources: Sequence[str], latest_source: bool
) -> _MatchedVersionPattern:
"""
:return: Tuple of:
* matched tag
* base segment
* tuple of:
* stage
* revision
* any newer unmatched tags
* tagged_metadata matched section
"""
pattern_match = None
base = None
stage_revision = None
newer_unmatched_tags = []
tagged_metadata = None
epoch = None # type: Optional[Union[str, int]]
for source in sources[:1] if latest_source else sources:
pattern_match = re.search(pattern, source)
if pattern_match is None:
newer_unmatched_tags.append(source)
continue
try:
base = pattern_match.group("base")
if base is not None:
break
except IndexError:
raise ValueError(
"Pattern '{}' did not include required capture group 'base'".format(pattern)
)
if pattern_match is None or base is None:
if latest_source:
raise ValueError(
"Pattern '{}' did not match the latest tag '{}' from {}".format(
pattern, sources[0], sources
)
)
else:
raise ValueError("Pattern '{}' did not match any tags from {}".format(pattern, sources))
stage = pattern_match.groupdict().get("stage")
revision = pattern_match.groupdict().get("revision")
tagged_metadata = pattern_match.groupdict().get("tagged_metadata")
epoch = pattern_match.groupdict().get("epoch")
if stage is not None:
try:
stage_revision = (stage, None if revision is None else int(revision))
except ValueError:
raise ValueError("Revision '{}' is not a valid number".format(revision))
if epoch is not None:
try:
epoch = int(epoch)
except ValueError:
raise ValueError("Epoch '{}' is not a valid number".format(epoch))
return _MatchedVersionPattern(
source, base, stage_revision, newer_unmatched_tags, tagged_metadata, epoch
)
def _blank(value: Optional[_T], default: _T) -> _T:
return value if value is not None else default
def _equal_if_set(x: _T, y: Optional[_T], unset: Sequence[Any] = (None,)) -> bool:
if y in unset:
return True
return x == y
def _detect_vcs(expected_vcs: Vcs = None) -> Vcs:
checks = OrderedDict(
[
(Vcs.Git, "git status"),
(Vcs.Mercurial, "hg status"),
(Vcs.Darcs, "darcs log"),
(Vcs.Subversion, "svn log"),
(Vcs.Bazaar, "bzr status"),
(Vcs.Fossil, "fossil status"),
]
)
if expected_vcs:
command = checks[expected_vcs]
program = command.split()[0]
if not shutil.which(program):
raise RuntimeError("Unable to find '{}' program".format(program))
code, _ = _run_cmd(command, codes=[])
if code != 0:
raise RuntimeError(
"This does not appear to be a {} project".format(expected_vcs.value.title())
)
return expected_vcs
else:
for vcs, command in checks.items():
if shutil.which(command.split()[0]):
code, _ = _run_cmd(command, codes=[])
if code == 0:
return vcs
raise RuntimeError("Unable to detect version control system.")
class _GitRefInfo:
def __init__(
self, ref: str, commit: str, creatordate: str, committerdate: str, taggerdate: str
):
self.fullref = ref
self.commit = commit
self.creatordate = self.normalize_git_dt(creatordate)
self.committerdate = self.normalize_git_dt(committerdate)
self.taggerdate = self.normalize_git_dt(taggerdate)
self.tag_topo_lookup = {} # type: Mapping[str, int]
def with_tag_topo_lookup(self, lookup: Mapping[str, int]) -> "_GitRefInfo":
self.tag_topo_lookup = lookup
return self
@staticmethod
def normalize_git_dt(timestamp: str) -> Optional[dt.datetime]:
if timestamp == "":
return None
else:
return _parse_git_timestamp_iso_strict(timestamp)
def __repr__(self):
return (
"_GitRefInfo(ref={!r}, commit={!r}, creatordate={!r},"
" committerdate={!r}, taggerdate={!r})"
).format(
self.fullref, self.commit_offset, self.creatordate, self.committerdate, self.taggerdate
)
def best_date(self) -> Optional[dt.datetime]:
if self.taggerdate is not None:
return self.taggerdate
elif self.committerdate is not None:
return self.committerdate
else:
return self.creatordate
@property
def commit_offset(self) -> int:
try:
return self.tag_topo_lookup[self.fullref]
except KeyError:
raise RuntimeError(
"Unable to determine commit offset for ref {} in data: {}".format(
self.fullref, self.tag_topo_lookup
)
)
@property
def sort_key(self) -> Tuple[int, Optional[dt.datetime]]:
return (-self.commit_offset, self.best_date())
@property
def ref(self) -> str:
return self.fullref.replace("refs/tags/", "")
@staticmethod
def normalize_tag_ref(ref: str) -> str:
if ref.startswith("refs/tags/"):
return ref
else:
return "refs/tags/{}".format(ref)
@staticmethod
def from_git_tag_topo_order() -> Mapping[str, int]:
code, logmsg = _run_cmd(
'git log --simplify-by-decoration --topo-order --decorate=full HEAD "--format=%H%d"'
)
tag_lookup = {}
# Simulate "--decorate-refs=refs/tags/*" for older Git versions:
filtered_lines = [
x for x in logmsg.strip().splitlines(keepends=False) if " (" not in x or "tag: " in x
]
for tag_offset, line in enumerate(filtered_lines):
# lines have the pattern
# <gitsha1> (tag: refs/tags/v1.2.0b1, tag: refs/tags/v1.2.0)
commit, _, tags = line.partition("(")
commit = commit.strip()
if tags:
# remove trailing ')'
tags = tags[:-1]
taglist = [
tag.strip() for tag in tags.split(", ") if tag.strip().startswith("tag: ")
]
taglist = [tag.split()[-1] for tag in taglist]
taglist = [_GitRefInfo.normalize_tag_ref(tag) for tag in taglist]
for tag in taglist:
tag_lookup[tag] = tag_offset
return tag_lookup
@total_ordering
class Version:
def __init__(
self,
base: str,
*,
stage: Tuple[str, Optional[int]] = None,
distance: int = 0,
commit: str = None,
dirty: bool = None,
tagged_metadata: Optional[str] = None,
epoch: int = None
) -> None:
"""
:param base: Release segment, such as 0.1.0.
:param stage: Pair of release stage (e.g., "a", "alpha", "b", "rc")
and an optional revision number.
:param distance: Number of commits since the last tag.
:param commit: Commit hash/identifier.
:param dirty: True if the working directory does not match the commit.
:param epoch: Optional PEP 440 epoch.
"""
#: Release segment.
self.base = base
#: Alphabetical part of prerelease segment.
self.stage = None
#: Numerical part of prerelease segment.
self.revision = None
if stage is not None:
self.stage, self.revision = stage
#: Number of commits since the last tag.
self.distance = distance
#: Commit ID.
self.commit = commit
#: Whether there are uncommitted changes.
self.dirty = dirty
#: Any metadata segment from the tag itself.
self.tagged_metadata = tagged_metadata
#: Optional PEP 440 epoch.
self.epoch = epoch
self._matched_tag = None # type: Optional[str]
self._newer_unmatched_tags = None # type: Optional[Sequence[str]]
def __str__(self) -> str:
return self.serialize()
def __repr__(self) -> str:
return (
"Version(base={!r}, stage={!r}, revision={!r},"
" distance={!r}, commit={!r}, dirty={!r}, tagged_metadata={!r}, epoch={!r})"
).format(
self.base,
self.stage,
self.revision,
self.distance,
self.commit,
self.dirty,
self.tagged_metadata,
self.epoch,
)
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Version):
raise TypeError(
"Cannot compare Version with type {}".format(other.__class__.__qualname__)
)
return (
self.base == other.base
and self.stage == other.stage
and self.revision == other.revision
and self.distance == other.distance
and self.commit == other.commit
and self.dirty == other.dirty
and self.tagged_metadata == other.tagged_metadata
and self.epoch == other.epoch
)
def _matches_partial(self, other: "Version") -> bool:
"""
Compare this version to another version, but ignore None values in the other version.
Distance is also ignored when `other.distance == 0`.
:param other: The version to compare to.
:return: True if this version equals the other version.
"""
return (
_equal_if_set(self.base, other.base)
and _equal_if_set(self.stage, other.stage)
and _equal_if_set(self.revision, other.revision)
and _equal_if_set(self.distance, other.distance, unset=[None, 0])
and _equal_if_set(self.commit, other.commit)
and _equal_if_set(self.dirty, other.dirty)
and _equal_if_set(self.tagged_metadata, other.tagged_metadata)
and _equal_if_set(self.epoch, other.epoch)
)
def __lt__(self, other: Any) -> bool:
if not isinstance(other, Version):
raise TypeError(
"Cannot compare Version with type {}".format(other.__class__.__qualname__)
)
import packaging.version as pv
return (
pv.Version(self.base) < pv.Version(other.base)
and _blank(self.stage, "") < _blank(other.stage, "")
and _blank(self.revision, 0) < _blank(other.revision, 0)
and _blank(self.distance, 0) < _blank(other.distance, 0)
and _blank(self.commit, "") < _blank(other.commit, "")
and bool(self.dirty) < bool(other.dirty)
and _blank(self.tagged_metadata, "") < _blank(other.tagged_metadata, "")
and _blank(self.epoch, 0) < _blank(other.epoch, 0)
)
def serialize(
self,
metadata: bool = None,
dirty: bool = False,
format: Union[str, Callable[["Version"], str]] = None,
style: Style = None,
bump: bool = False,
tagged_metadata: bool = False,
) -> str:
"""
Create a string from the version info.
:param metadata: Metadata (commit ID, dirty flag) is normally included
in the metadata/local version part only if the distance is nonzero.
            Set this to True to always include metadata, or set it to False to always exclude it.
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python implementation of the GRPC RemoteAttestation server."""
from concurrent import futures
import logging
import grpc
# import rpc.remote_pb2
# import rpc.remote_pb2_grpc
from .rpc import remote_pb2
from .rpc import remote_pb2_grpc
from rpc_utils import *
import os
import sys
import traceback
from .core import RemoteAPI as remote_api
from .rabit import RemoteAPI as rabit_remote_api
import ctypes
# c_bst_ulong corresponds to bst_ulong defined in xgboost/c_api.h
c_bst_ulong = ctypes.c_uint64
import threading
import types
_USERS = []
class Command(object):
"""
Commands submitted for execution to remote server
"""
def __init__(self):
self.reset()
def reset(self):
self._func = None
self._params = None
self._ret = None
self._seq_num = None
self._usernames = []
self._signatures = []
self._sig_lengths = []
self._retrieved = []
def submit(self, func, params, username):
if self._func is None:
self._func = func
self._params = params
else:
assert self._func == func
# FIXME: check that all clients have the same sequence number
self._seq_num = params.seq_num
self._usernames.append(username)
self._signatures.append(params.signature)
self._sig_lengths.append(params.sig_len)
def is_ready(self):
for user in _USERS:
if user not in self._usernames:
return False
return True
def invoke(self):
if not globals()["is_orchestrator"]:
# Returns <return_value>, signature, sig_len
self._ret = self._func(self._params, self._usernames, self._signatures, self._sig_lengths)
else: # We're the RPC orchestrator
node_ips = globals()["nodes"]
seq_num = self._seq_num
signers = self._usernames
signatures = self._signatures
sig_lengths = self._sig_lengths
channels = []
for channel_addr in node_ips:
channels.append(grpc.insecure_channel(channel_addr))
# Store futures in a list
# Futures hold the result of asynchronous calls to each gRPC server
futures = []
for channel in channels:
stub = remote_pb2_grpc.RemoteStub(channel)
# Asynchronous calls to start job on each node
if self._func == rabit_remote_api.RabitInit:
response_future = stub.rpc_RabitInit.future(remote_pb2.RabitParams(
seq_num=seq_num,
signers=signers,
signatures=signatures,
sig_lengths=sig_lengths
))
elif self._func == rabit_remote_api.RabitFinalize:
response_future = stub.rpc_RabitFinalize.future(remote_pb2.RabitParams(
seq_num=seq_num,
signers=signers,
signatures=signatures,
sig_lengths=sig_lengths
))
elif self._func == remote_api.XGDMatrixCreateFromEncryptedFile:
dmatrix_attrs = self._params.attrs
response_future = stub.rpc_XGDMatrixCreateFromEncryptedFile.future(remote_pb2.DMatrixAttrsRequest(
attrs=dmatrix_attrs,
seq_num=seq_num,
signers=signers,
signatures=signatures,
sig_lengths=sig_lengths,
))
elif self._func == remote_api.XGBoosterSetParam:
booster_param = self._params.booster_param
response_future = stub.rpc_XGBoosterSetParam.future(remote_pb2.BoosterParamRequest(
booster_param=booster_param,
seq_num=seq_num,
signers=signers,
signatures=signatures,
sig_lengths=sig_lengths
))
elif self._func == remote_api.XGBoosterCreate:
attrs = self._params.attrs
response_future = stub.rpc_XGBoosterCreate.future(remote_pb2.BoosterAttrsRequest(
attrs=attrs,
seq_num=seq_num,
signers=signers,
signatures=signatures,
sig_lengths=sig_lengths
))
elif self._func == remote_api.XGBoosterUpdateOneIter:
booster_update_params = self._params.booster_update_params
response_future = stub.rpc_XGBoosterUpdateOneIter.future(remote_pb2.BoosterUpdateParamsRequest(
booster_update_params = booster_update_params,
seq_num=seq_num,
signers=signers,
signatures=signatures,
sig_lengths=sig_lengths
))
elif self._func == remote_api.XGBoosterSaveModel:
save_model_params = self._params.save_model_params
response_future = stub.rpc_XGBoosterSaveModel.future(remote_pb2.SaveModelParamsRequest(
save_model_params = save_model_params,
seq_num=seq_num,
signers=signers,
signatures=signatures,
sig_lengths=sig_lengths
))
elif self._func == remote_api.XGBoosterLoadModel:
load_model_params = self._params.load_model_params
response_future = stub.rpc_XGBoosterLoadModel.future(remote_pb2.LoadModelParamsRequest(
load_model_params=load_model_params,
seq_num=seq_num,
signers=signers,
signatures=signatures,
sig_lengths=sig_lengths
))
elif self._func == remote_api.XGBoosterDumpModelEx:
dump_model_params = self._params.dump_model_params
response_future = stub.rpc_XGBoosterDumpModelEx.future(remote_pb2.DumpModelParamsRequest(
dump_model_params=dump_model_params,
seq_num=seq_num,
signers=signers,
signatures=signatures,
sig_lengths=sig_lengths
))
elif self._func == remote_api.XGBoosterDumpModelExWithFeatures:
dump_model_with_features_params = self._params.dump_model_with_features_params
response_future = stub.rpc_XGBoosterDumpModelExWithFeatures.future(remote_pb2.DumpModelWithFeaturesParamsRequest(
dump_model_with_features_params=dump_model_with_features_params,
seq_num=seq_num,
signers=signers,
signatures=signatures,
sig_lengths=sig_lengths
))
elif self._func == remote_api.XGBoosterGetModelRaw:
model_raw_params = self._params.model_raw_params
response_future = stub.rpc_XGBoosterGetModelRaw.future(remote_pb2.ModelRawParamsRequest(
model_raw_params=model_raw_params,
seq_num=seq_num,
signers=signers,
signatures=signatures,
sig_lengths=sig_lengths
))
elif self._func == remote_api.XGDMatrixNumRow:
name = self._params.name
response_future = stub.rpc_XGDMatrixNumRow.future(remote_pb2.NumRowRequest(
name=name,
seq_num=seq_num,
signers=signers,
signatures=signatures,
sig_lengths=sig_lengths
))
elif self._func == remote_api.XGDMatrixNumCol:
name = self._params.name
response_future = stub.rpc_XGDMatrixNumCol.future(remote_pb2.NumColRequest(
name=name,
seq_num=seq_num,
signers=signers,
signatures=signatures,
sig_lengths=sig_lengths
))
elif self._func == remote_api.XGBoosterPredict:
predict_params = self._params.predict_params
response_future = stub.rpc_XGBoosterPredict.future(remote_pb2.PredictParamsRequest(
predict_params=predict_params,
seq_num=seq_num,
signers=signers,
signatures=signatures,
sig_lengths=sig_lengths
))
futures.append(response_future)
results = []
for future in futures:
results.append(future.result())
statuses = [result.status.status for result in results]
# Check for error
error = False
exception = None
            if -1 in statuses:
                error = True
                exceptions = [result.status.exception for result in results]
                i = statuses.index(-1)
                exception = exceptions[i]
# Collect all signatures
master_signature = None
master_sig_len = None
if self._func != remote_api.XGBoosterPredict:
sig_protos = []
sig_lens = []
for result in results:
sig_protos.append(result.signature)
sig_lens.append(result.sig_len)
# If we return only one signature, return the signature from the master enclave
master_signature = sig_protos[0]
master_sig_len = sig_lens[0]
# Set return value
if self._func == rabit_remote_api.RabitInit:
if error:
self._ret = remote_pb2.Status(status=-1, exception=exception)
else:
# FIXME: add signatures
self._ret = remote_pb2.Status(status=0)
elif self._func == rabit_remote_api.RabitFinalize:
if error:
self._ret = remote_pb2.Status(status=-1, exception=exception)
else:
# FIXME: add signatures
self._ret = remote_pb2.Status(status=0)
elif self._func == remote_api.XGDMatrixCreateFromEncryptedFile:
if error:
self._ret = (None, None, None, remote_pb2.Status(status=-1, exception=exception))
else:
dmatrix_handles = [result.name for result in results]
if dmatrix_handles.count(dmatrix_handles[0]) == len(dmatrix_handles):
# Every enclave returned the same handle string
self._ret = (dmatrix_handles[0], master_signature, master_sig_len, remote_pb2.Status(status=0))
else:
self._ret = (None, None, None, remote_pb2.Status(status=-1, exception="ERROR: Inconsistent dmatrix handles returned by enclaves in XGDMatrixCreateFromEncryptedFile call"))
elif self._func == remote_api.XGBoosterSetParam:
if error:
self._ret = (None, None, remote_pb2.Status(status=-1, exception=exception))
else:
self._ret = (master_signature, master_sig_len, remote_pb2.Status(status=0))
elif self._func == remote_api.XGBoosterCreate:
if error:
self._ret = (None, None, None, remote_pb2.Status(status=-1, exception=exception))
else:
bst_handles = [result.name for result in results]
if bst_handles.count(bst_handles[0]) == len(bst_handles):
# Every enclave returned the same booster handle string
self._ret = (bst_handles[0], master_signature, master_sig_len, remote_pb2.Status(status=0))
else:
self._ret = (None, None, None, remote_pb2.Status(status=-1, exception="ERROR: Inconsistent booster handles returned by enclaves in XGBoosterCreate call"))
elif self._func == remote_api.XGBoosterUpdateOneIter:
if error:
self._ret = (None, None, remote_pb2.Status(status=-1, exception=exception))
else:
self._ret = (master_signature, master_sig_len, remote_pb2.Status(status=0))
elif self._func == remote_api.XGBoosterSaveModel:
if error:
self._ret = (None, None, remote_pb2.Status(status=-1, exception=exception))
else:
self._ret = (master_signature, master_sig_len, remote_pb2.Status(status=0))
elif self._func == remote_api.XGBoosterLoadModel:
if error:
self._ret = (None, None, remote_pb2.Status(status=-1, exception=exception))
else:
self._ret = (master_signature, master_sig_len, remote_pb2.Status(status=0))
elif self._func == remote_api.XGBoosterDumpModelEx:
if error:
self._ret = (None, None, None, None, remote_pb2.Status(status=-1, exception=exception))
else:
sarrs = [result.sarr for result in results]
lengths = [result.length for result in results]
if lengths.count(lengths[0]) == len(lengths):
# Every enclave returned the same length
# We cannot check if the dumps are the same because they are encrypted
self._ret = (lengths[0], sarrs[0], master_signature, master_sig_len, remote_pb2.Status(status=0))
else:
self._ret = (None, None, None, None, remote_pb2.Status(status=-1, exception="ERROR: Inconsistent results from enclaves in XGBoosterDumpModelEx call"))
elif self._func == remote_api.XGBoosterDumpModelExWithFeatures:
if error:
                    self._ret = (None, None, None, None, remote_pb2.Status(status=-1, exception=exception))
else:
sarrs = [result.sarr for result in results]
lengths = [result.length for result in results]
if lengths.count(lengths[0]) == len(lengths):
# Every enclave returned the same length
# We cannot check if the dumps are the same because they are encrypted
self._ret = (lengths[0], sarrs[0], master_signature, master_sig_len, remote_pb2.Status(status=0))
else:
self._ret = (None, None, None, None, remote_pb2.Status(status=-1, exception="ERROR: Inconsistent results from enclaves in XGBoosterDumpModelExWithFeatures call"))
elif self._func == remote_api.XGBoosterGetModelRaw:
if error:
                    self._ret = (None, None, None, None, remote_pb2.Status(status=-1, exception=exception))
else:
sarrs = [result.sarr for result in results]
lengths = [result.length for result in results]
if lengths.count(lengths[0]) == len(lengths):
# Every enclave returned the same length
# We cannot check if the dumps are the same because they are encrypted
self._ret = (lengths[0], sarrs[0], master_signature, master_sig_len, remote_pb2.Status(status=0))
else:
self._ret = (None, None, None, None, remote_pb2.Status(status=-1, exception="ERROR: Inconsistent results from enclaves in XGBoosterGetModelRaw call"))
elif self._func == remote_api.XGDMatrixNumRow:
if error:
self._ret = (None, None, None, remote_pb2.Status(status=-1, exception=exception))
else:
num_rows = [result.value for result in results]
if num_rows.count(num_rows[0]) == len(num_rows):
# Each enclave agrees on the number of rows in the DMatrix
self._ret = (num_rows[0], master_signature, master_sig_len, remote_pb2.Status(status=0))
else:
self._ret = (None, None, None, remote_pb2.Status(status=-1, exception="ERROR: Inconsistent numbers from enclaves in XGDMatrixNumRow call"))
elif self._func == remote_api.XGDMatrixNumCol:
if error:
self._ret = (None, None, None, remote_pb2.Status(status=-1, exception=exception))
else:
num_cols = [result.value for result in results]
if num_cols.count(num_cols[0]) == len(num_cols):
# Each enclave agrees on the number of columns in the DMatrix
self._ret = (num_cols[0], master_signature, master_sig_len, remote_pb2.Status(status=0))
else:
self._ret = (None, None, None, remote_pb2.Status(status=-1, exception="ERROR: Inconsistent numbers from enclaves in XGDMatrixNumCol call"))
elif self._func == remote_api.XGBoosterPredict:
if error:
self._ret = (None, None, None, None, remote_pb2.Status(status=-1, exception=exception))
else:
enc_preds_ret = []
num_preds_ret = []
sig_protos_ret = []
sig_lens_ret = []
for result in results:
# Collect encrypted predictions
enc_preds_ret.extend(result.predictions)
num_preds_ret.extend(result.num_preds)
# Collect signatures
sig_protos_ret.extend(result.signatures)
sig_lens_ret.extend(result.sig_lens)
if len(enc_preds_ret) == len(num_preds_ret):
self._ret = (enc_preds_ret, num_preds_ret, sig_protos_ret, sig_lens_ret, remote_pb2.Status(status=0))
else:
self._ret = (None, None, None, None, remote_pb2.Status(status=-1, exception="ERROR: Inconsistent results in XGBoosterPredict call"))
else:
raise NotImplementedError
def result(self, username):
self._retrieved.append(username)
| |
'''
Copyright or © or Copr.
This software is a computer program whose purpose is to generate random
test case from a template file describing the data model.
This software is governed by the CeCILL-B license under French law and
abiding by the rules of distribution of free software. You can use,
modify and/ or redistribute the software under the terms of the CeCILL-B
license as circulated by CEA, CNRS and INRIA at the following URL
"http://www.cecill.info".
As a counterpart to the access to the source code and rights to copy,
modify and redistribute granted by the license, users are provided only
with a limited warranty and the software's author, the holder of the
economic rights, and the successive licensors have only limited
liability.
In this respect, the user's attention is drawn to the risks associated
with loading, using, modifying and/or developing or reproducing the
software by the user in light of its specific status of free software,
that may mean that it is complicated to manipulate, and that also
therefore means that it is reserved for developers and experienced
professionals having in-depth computer knowledge. Users are therefore
encouraged to load and test the software's suitability as regards their
requirements in conditions enabling the security of their systems and/or
data to be ensured and, more generally, to use and operate it in the
same conditions as regards security.
The fact that you are presently reading this means that you have had
knowledge of the CeCILL-B license and that you accept its terms.
'''
###############################################################################
# --- Xml module ------------------------------------------------------------ #
###############################################################################
import xml.etree.ElementTree as ET
import Miscellaneous as misc
from Node import Node
from Parameter import Boolean_Parameter, String_Parameter, Integer_Parameter, Real_Parameter
from Constraint import Constraint
def parse_xml(path):
try:
tree = ET.parse(path)
return tree.getroot()
except ET.ParseError:
misc.error("Xml::parse_xml() -> the template \"" + path + "\" does not respect xml format")
return None
def read_template(path):
root_xml = parse_xml(path)
if root_xml is None:
raise ValueError
if root_xml.tag != "root":
misc.error("Xml::read_xml() -> the template root tag must be \"root\"" + path + "\"")
raise ValueError
name = check_attribute(root_xml, "name", True)
nb_instances = Integer_Parameter(name + "_nb_instances", -1, 1, 1, "u", None, None, None, None, 1)
nb_instances.set_value_i(0, 1)
nb_instances.lock_i(0)
root_node = Node(name, 0, None, None, nb_instances)
read_node(root_xml, root_node)
return root_node
def read_node(node_xml, node, d=0):
for child in node_xml:
name = check_attribute(child, "name", True)
if child.tag == "parameter":
node.add_parameter(build_parameter(name, d, child))
elif child.tag == "constraint":
node.add_constraint(build_constraint(name, d, node, child))
elif child.tag == "node":
node.add_child(build_node(name, d+1, node, child))
read_node(child, node.get_child_n(name), d+1)
else:
misc.error("Xml::read_node() -> \"" + child.tag + "\" unknown xml tag")
raise NameError
def build_node(n, d, p, node_xml):
minimum = check_attribute(node_xml, "min")
maximum = check_attribute(node_xml, "max")
nb = check_attribute(node_xml, "nb_instances")
if nb and check_nb_instances(nb):
nb = int(nb)
if minimum is not None or maximum is not None:
misc.error("Xml::build_node() -> \"" + n + "\" min and max should not be specified along with nb_instances attribute")
raise ValueError
node_xml.attrib["min"] = nb
node_xml.attrib["max"] = nb
nb_instances = build_integer_parameter(n + "_nb_instances", d-1, node_xml, 1)
nb_instances.set_value_i(0, nb)
nb_instances.lock_i(0)
elif minimum is not None or maximum is not None:
if minimum is None and maximum is not None:
misc.error("Xml::build_node() -> \"" + n + "\" missing min attribute")
raise ValueError
elif maximum is None and minimum is not None:
misc.error("Xml::build_node() -> \"" + n + "\" missing max attribute")
raise ValueError
nb_instances = build_integer_parameter(n + "_nb_instances", d-1, node_xml, 1)
if nb_instances.m < 0:
misc.error("Xml::build_node() -> \"" + n + "\" min and max attributes must be positive integers")
raise ValueError
else: #not nb_instances and not minimum and not maximum
node_xml.attrib["min"] = "1"
node_xml.attrib["max"] = "1"
nb_instances = build_integer_parameter(n + "_nb_instances", d-1, node_xml, 1)
nb_instances.set_value_i(0, 1)
nb_instances.lock_i(0)
return Node(n, d, p, None, nb_instances)
def build_parameter(n, d, node_xml):
parameter_type = check_attribute(node_xml, "type", True)
nbi = check_attribute(node_xml, "nb_instances")
to_lock = False
if nbi and check_nb_instances(nbi):
nbi = int(nbi)
to_lock = True
else:
nbi = 1
if parameter_type == "boolean":
p = build_boolean_parameter(n, d, node_xml, nbi)
elif parameter_type == "string":
p = build_string_parameter(n, d, node_xml, nbi)
elif parameter_type == "integer":
p = build_integer_parameter(n, d, node_xml, nbi)
elif parameter_type == "real":
p = build_real_parameter(n, d, node_xml, nbi)
else:
misc.error("Xml::build_parameter() -> \"" + parameter_type + "\" unknown parameter type")
raise NameError
if to_lock:
p.lock_nb_instances()
return p
def build_categorical_parameter(node_xml):
values = []
tmp = check_attribute(node_xml, "values", False)
if tmp:
tmp = tmp.split(";")
for v in tmp:
values.append(misc.remove_starting_and_ending_space(v))
else:
values = [True, False]
return values, build_weights(check_attribute(node_xml, "weights", False))
def build_weights(str_weights):
weights = []
if str_weights:
str_weights = str_weights.split(";")
for w in str_weights:
w = misc.remove_starting_and_ending_space(w)
if misc.check_integer(w, True):
w = int(w)
if w >= 0:
weights.append(int(w))
else:
misc.error("Xml::build_weights() -> weight must be positive or null")
if sum(weights) == 0:
misc.error("Xml::build_weights() -> at least one weight must be positive")
raise ValueError
return weights
def build_boolean_parameter(n, d, node_xml, nbi):
values, weights = build_categorical_parameter(node_xml)
if len(values) != 2:
misc.error("Xml::build_boolean_parameter() -> wrong boolean parameter values")
raise ValueError
for i in range(2):
if values[i] in [True, "True", "true", 1]:
values[i] = True
elif values[i] in [False, "False", "false", "0"]:
values[i] = False
else:
misc.error("Xml::build_boolean_parameter() -> wrong boolean parameter values")
raise ValueError
return Boolean_Parameter(n, d, values, weights, nbi)
def build_string_parameter(n, d, node_xml, nbi):
values, weights = build_categorical_parameter(node_xml)
return String_Parameter(n, d, values, weights, nbi)
def build_numerical_parameter(node_xml):
minimum = check_attribute(node_xml, "min", True)
maximum = check_attribute(node_xml, "max", True)
distribution = check_attribute(node_xml, "distribution")
mean = check_attribute(node_xml, "mean", False)
variance = check_attribute(node_xml, "variance", False)
ranges = []
if not distribution:
distribution = "u"
if mean and misc.check_number(mean, True):
mean = float(mean)
else:
mean = None
if variance and misc.check_number(variance, True):
variance = float(variance)
else:
variance = None
    tmp = check_attribute(node_xml, "ranges", False)
if tmp:
tmp = tmp.split(";")
for r in tmp:
r = misc.remove_starting_and_ending_space(r)
r = r[1:-1].split(",")
if len(r) != 2:
misc.error("Xml::build_numerical_parameter() -> invalid ranges")
raise ValueError
for i in range(2):
r[i] = misc.remove_starting_and_ending_space(r[i])
ranges.append((r[0], r[1]))
return minimum, maximum, distribution, mean, variance, ranges, build_weights(check_attribute(node_xml, "weights", False))
def build_integer_parameter(n, d, node_xml, nbi):
minimum, maximum, distribution, mean, variance, str_ranges, weights = build_numerical_parameter(node_xml)
misc.check_integer(minimum, True)
minimum = int(minimum)
misc.check_integer(maximum, True)
maximum = int(maximum)
ranges = []
for r in str_ranges:
if misc.check_integer(r[0], True) and misc.check_integer(r[1], True):
ranges.append((int(r[0]), int(r[1])))
return Integer_Parameter(n, d, minimum, maximum, distribution, mean, variance, ranges, weights, nbi)
def build_real_parameter(n, d, node_xml, nbi):
minimum, maximum, distribution, mean, variance, str_ranges, weights = build_numerical_parameter(node_xml)
misc.check_number(minimum, True)
minimum = float(minimum)
misc.check_number(maximum, True)
maximum = float(maximum)
ranges = []
for r in str_ranges:
if misc.check_number(r[0], True) and misc.check_number(r[1], True):
ranges.append((float(r[0]), float(r[1])))
return Real_Parameter(n, d, minimum, maximum, distribution, mean, variance, ranges, weights, nbi)
def build_constraint(n, d, node, node_xml):
expressions = []
raw_expressions = check_attribute(node_xml, "expressions", True)
raw_expressions = raw_expressions.split(";")
for e in raw_expressions:
expressions.append(misc.remove_starting_and_ending_space(e))
types = []
raw_constraint_types = check_attribute(node_xml, "types", False)
if raw_constraint_types is not None:
raw_constraint_types = raw_constraint_types.split(";")
for c in raw_constraint_types:
c = misc.remove_starting_and_ending_space(c)
if c in ["forall", "exist", "unique"]:
types.append(c)
else:
misc.error("Xml::__build_constraint() -> unknown constraint type \"" + c + "\"")
raise NameError
quantifiers = []
raw_quantifiers = check_attribute(node_xml, "quantifiers", False)
if raw_quantifiers is not None:
raw_quantifiers = raw_quantifiers.split(";")
for l in raw_quantifiers:
l = misc.remove_starting_and_ending_space(l)
if misc.check_letter(l, True):
quantifiers.append(l)
ranges = []
raw_ranges = check_attribute(node_xml, "ranges", False)
if raw_ranges is not None:
raw_ranges = raw_ranges.split(";")
for r in raw_ranges:
r = misc.remove_starting_and_ending_space(r)
if r == "all":
ranges.append(r)
            elif r[0] == "[" and r[-1] == "]":
boundaries = r[1:-1].split(",")
if len(boundaries) != 2:
misc.error("Xml::build_constraint() -> wrong ranges syntax")
raise ValueError
ranges.append((misc.remove_starting_and_ending_space(boundaries[0]), misc.remove_starting_and_ending_space(boundaries[1])))
else:
misc.error("Xml::build_constraint() -> wrong ranges syntax")
raise ValueError
if len(quantifiers) != len(ranges) or len(quantifiers) != len(types):
misc.error("Xml::build_constraint() -> the number of quantifiers must equal the number of ranges and types")
raise ValueError
return Constraint(n, d, node, expressions, types, quantifiers, ranges)
def check_nb_instances(nb):
misc.check_integer(nb, True)
if int(nb) >= 0:
return True
else:
misc.error("Xml::check_nb_instances() -> nb_instances must be a positive integer value")
raise ValueError
def check_attribute(node_xml, att, err=False):
if att in node_xml.attrib:
return node_xml.attrib[att]
else:
if err:
misc.error("Xml::check_attribute() -> \"" + att + "\" attribute is missing")
raise NameError
else:
return None
def write_test_case(root_node, seed, path):
with open(path, "w") as f:
f.write("<?xml version=\"1.0\"?>\n\n")
f.write(write_root_node(root_node, seed))
def write_root_node(root_node, seed):
s = "<root name=\"" + root_node.name + "\">\n"
s +="\t<seed value =\"" + seed + "\"/>\n"
current_container = root_node.get_container_i(0)
s += write_data(current_container)
s += "</root>"
return s
def write_node(node, tab):
s = ""
for i in range(node.nb_instances):
s += tab + "<node name=\"" + node.name + "\" instance=\"" + str(i) + "/" + str(node.nb_instances - 1) + "\">\n"
current_container = node.get_container_i(i)
s += write_data(current_container, tab)
s += tab + "</node>\n"
return s
def write_data(current_container, tab=""):
s = ""
for p in current_container.parameters:
tmp_param = current_container.get_parameter_n(p)
values = ""
for i in range(tmp_param.nb_instances):
values += str(tmp_param.values[i]) + ";"
values = values[:-1]
s += tab + "\t<parameter name=\"" + p + "\" values=\"" + values + "\"/>\n"
for c in current_container.children:
s += write_node(current_container.get_child_n(c), tab + "\t")
return s
def read_test_case(path, root_node):
root_xml = parse_xml(path)
seed = "r"
if root_node.name == root_xml.attrib["name"]:
if root_xml[0].tag == "seed":
if root_xml[0].attrib["value"]:
seed = root_xml[0].attrib["value"]
root_xml.remove(root_xml[0])
else:
misc.error("Xml::read_template() -> seed value is missing")
raise ValueError
else:
misc.error("Xml::read_genotype() -> node name does not match")
raise ValueError
set_element(root_xml, root_node)
return seed
def set_element(node_xml, node, i=0):
for child in node_xml:
name = check_attribute(child, "name", True)
if child.tag == "parameter":
set_parameter(name, child, node, i)
elif child.tag == "node":
set_node(name, child, node, i)
else:
misc.error("Xml::set_element() -> unknown xml tag\"" + child.tag + "\"")
raise NameError
def set_parameter(name, node_xml, node, i):
if name in node.parameters:
param = node.get_parameter_n(name, i)
values = check_attribute(node_xml, "values", True).split(";")
length = len(values)
param.change_nb_instances(length)
for i in range(length):
if not values[i] in ["r", ""]:
param.set_value_i(i, misc.remove_starting_and_ending_space(values[i]))
param.lock_i(i)
else:
misc.error("Xml::set_parameter() -> parameter name \"" + name + "\" does not match")
raise NameError
def set_node(name, node_xml, node, i):
if name in node.children:
elem = node.get_child_n(name, i)
raw_identifier = check_attribute(node_xml, "instance")
if raw_identifier is None:
raw_identifier = "0"
identifier = raw_identifier.split("/")[0]
if misc.check_integer(identifier, True):
identifier = int(identifier)
if "/" in raw_identifier:
max_identifier = raw_identifier.split("/")[1]
if misc.check_integer(max_identifier, True):
max_identifier = int(max_identifier)
if not elem.nb_instances_lock:
elem.change_nb_instances(max_identifier + 1)
elem.lock_nb_instances()
if elem.nb_instances is None or identifier + 1 > elem.nb_instances:
                elem.change_nb_instances(identifier + 1)
# [Truncated data fragment: a dictionary of large complex-valued NumPy arrays in
#  which almost every entry is 0.+0.j, with isolated 1.+0.j and -1.+0.j elements,
#  e.g. a key 7 mapping to such a matrix.]
from time import time
from random import randrange, seed
import numpy as np
#import pandas as pd
import cv2
#import sys
from sklearn.cluster import KMeans
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
#from random import randrange, seed
class Tracktor():
def __init__(self,
id="NO_ID",
colour=None,
block_size=51, offset=20,
min_area=100, max_area=5000,
scaling=1.0
):
try:
# Returns True if OpenCL is present
ocl = cv2.ocl.haveOpenCL()
# Prints whether OpenCL is present
print("OpenCL Supported?: ", end='')
print(ocl)
print()
# Enables use of OpenCL by OpenCV if present
            if ocl:
print('Now enabling OpenCL support')
cv2.ocl.setUseOpenCL(True)
print("Has OpenCL been Enabled?: ", end='')
print(cv2.ocl.useOpenCL())
        except cv2.error as e:
            print('Error:', e)
# colours is a vector of BGR values which are used to identify individuals in the video
# id is spider id and is also used for individual identification
# number of elements in colours should be greater than n_inds (THIS IS NECESSARY FOR VISUALISATION ONLY)
# number of elements in id should be greater than n_inds (THIS IS NECESSARY TO GET INDIVIDUAL-SPECIFIC DATA)
        # Since each Tracktor instance takes care of one individual, n_inds is not needed.
        #self.n_inds = n_inds
self.id = id
if colour is None:
seed(time())
colour = (randrange(0, 255, 1), randrange(0, 255, 1), randrange(0, 255, 1))
self.colour = colour
# this is the block_size and offset used for adaptive thresholding (block_size should always be odd)
# these values are critical for tracking performance
if block_size % 2 != 1:
self.block_size = block_size + 1
else:
self.block_size = block_size
self.offset = offset
# minimum area and maximum area occupied by the animal in number of pixels
# this parameter is used to get rid of other objects in view that might be hard to threshold out but are differently sized
# in this case, the range is wide because males vastly smaller than females
self.min_area = min_area
self.max_area = max_area
self.area = 0
self.clicked = (-1, -1)
# the scaling parameter can be used to speed up tracking if video resolution is too high (use value 0-1)
self.scaling = scaling
# kernel for erosion and dilation
# useful since thin spider limbs are sometimes detected as separate objects
self.kernel = np.ones((5, 5), np.uint8)
# mot determines whether the tracker is being used in noisy conditions to track a single object or for multi-object
# using this will enable k-means clustering to force n_inds number of animals
self.mot = False
#List of data for pandas dataframe
df = []
codec = 'DIVX' # try other codecs if the default doesn't work ('DIVX', 'avc1', 'XVID') note: this list is non-exhaustive
## Video writer class to output video with contour and centroid of tracked object(s)
# make sure the frame size matches size of array 'final'
fourcc = cv2.VideoWriter_fourcc(*codec)
#output_framesize = (int(cap.read()[1].shape[1]*scaling), int(cap.read()[1].shape[0]*scaling))
#out = cv2.VideoWriter(filename = output_vidpath, fourcc = fourcc, fps = 60.0, frameSize = output_framesize, isColor = True)
## Individual location(s) measured in the last and current step
self.meas_last = list(np.zeros((1, 2)))
self.meas_now = list(np.zeros((1, 2)))
#data frame?
self.df = []
def colour_to_thresh(self, frame):
"""
This function retrieves a video frame and preprocesses it for object tracking.
        The code blurs the image to reduce noise, converts it to greyscale and then returns a
thresholded version of the original image.
Parameters
----------
frame: ndarray, shape(n_rows, n_cols, 3)
source image containing all three colour channels
        block_size: int
            width of the kernel used for adaptive thresholding, taken from
            ``self.block_size`` (set in the constructor). Note: block_size must be
            odd; if an even integer is given, the constructor adds 1 to make it odd.
        offset: int
            constant subtracted from the mean value within the block, taken from
            ``self.offset`` (set in the constructor)
Returns
-------
thresh: ndarray, shape(n_rows, n_cols, 1)
binarised(0, 255) image
"""
blur = cv2.blur(frame, (5, 5))
gray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, self.block_size, self.offset)
return thresh
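    # A minimal usage sketch for the thresholding step (illustrative only; the
    # file name and settings below are assumptions, not part of this project):
    #
    #   tracker = Tracktor(id="animal_0", block_size=51, offset=20)
    #   frame = cv2.imread("frame.png")
    #   thresh = tracker.colour_to_thresh(frame)   # uint8 image of 0s and 255s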
def detect_and_draw_contours(self, frame, thresh):
"""
This function detects contours, thresholds them based on area and draws them.
Parameters
----------
frame: ndarray, shape(n_rows, n_cols, 3)
source image containing all three colour channels
thresh: ndarray, shape(n_rows, n_cols, 1)
binarised(0, 255) image
meas_last: array_like, dtype=float
individual's location on previous frame
meas_now: array_like, dtype=float
individual's location on current frame
min_area: int
            minimum area threshold used to detect the object of interest
max_area: int
            maximum area threshold used to detect the object of interest
Returns
-------
final: ndarray, shape(n_rows, n_cols, 3)
final output image composed of the input frame with object contours
and centroids overlaid on it
contours: list
            a list of all detected contours that pass the area based threshold criterion
meas_last: array_like, dtype=float
individual's location on previous frame
meas_now: array_like, dtype=float
individual's location on current frame
"""
# Detect contours and draw them based on specified area thresholds
contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# img = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
# final = frame.copy()
i = 0
self.meas_last = self.meas_now.copy()
#del self.meas_now[:]
#assigning to empty doesn't crash, less efficient but a size of 2 wont make a difference
self.meas_now = []
while i < len(contours):
#if we clicked this frame
if self.clicked != (-1, -1):
#check if the position we clicked is inside of the contour
dist = cv2.pointPolygonTest(contours[i], self.clicked, False)
#if it is not (-1 if not, 1 if it is) we delete the contour
if dist == -1.0:
del contours[i]
continue
#if there exists a last position (x)
elif self.meas_last[0][0]:
#determine the distance from our last point to all contours
dist = cv2.pointPolygonTest(contours[i], (self.meas_last[0][0], self.meas_last[0][1]), True)
#delete all contours that exist outside max_area
max_radius = int(np.sqrt(self.max_area/np.pi))
if abs(dist) > max_radius:
del contours[i]
continue
area = cv2.contourArea(contours[i])
if area < self.min_area or area > self.max_area:
del contours[i]
else:
cv2.drawContours(frame, contours, i, (0, 0, 255), 1)
M = cv2.moments(contours[i])
if M['m00'] != 0:
contour_x = M['m10']/M['m00']
contour_y = M['m01']/M['m00']
else:
contour_x = 0
contour_y = 0
self.meas_now.append([contour_x, contour_y])
i += 1
self.clicked = (-1, -1)
return frame, contours
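    # A short per-frame sketch continuing the example above (the capture source
    # is hypothetical; cv2.VideoCapture usage here is illustrative only):
    #
    #   cap = cv2.VideoCapture("video.avi")
    #   ok, frame = cap.read()
    #   if ok:
    #       thresh = tracker.colour_to_thresh(frame)
    #       frame, contours = tracker.detect_and_draw_contours(frame, thresh)
    #       # tracker.meas_now now holds one [x, y] centroid per kept contour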
def apply_k_means(self, contours):
"""
This function applies the k-means clustering algorithm to separate merged
contours. The algorithm is applied when detected contours are fewer than
expected objects(number of animals) in the scene.
Parameters
----------
contours: list
            a list of all detected contours that pass the area based threshold criterion
n_inds: int
total number of individuals being tracked
meas_now: array_like, dtype=float
individual's location on current frame
Returns
-------
contours: list
            a list of all detected contours that pass the area based threshold criterion
meas_now: array_like, dtype=float
individual's location on current frame
"""
#del self.meas_now[:]
self.meas_now = []
# Clustering contours to separate individuals
myarray = np.vstack(contours)
print(myarray)
myarray = myarray.reshape(myarray.shape[0], myarray.shape[2])
kmeans = KMeans(n_clusters=1, random_state=0, n_init=50).fit(myarray)
        n_centers = len(kmeans.cluster_centers_)
        for i in range(n_centers):
x = int(tuple(kmeans.cluster_centers_[i])[0])
y = int(tuple(kmeans.cluster_centers_[i])[1])
self.meas_now.append([x, y])
return contours
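    # Sketch of the clustering step in isolation (synthetic points; note that
    # n_clusters is hard-coded to 1 above, so only a single centre is produced):
    #
    #   pts = np.array([[0, 0], [1, 1], [10, 10], [11, 9]]).reshape(-1, 1, 2)
    #   flat = pts.reshape(pts.shape[0], pts.shape[2])
    #   centres = KMeans(n_clusters=1, random_state=0).fit(flat).cluster_centers_
    #   # centres ~ [[5.5, 5.0]]; each centre is appended to meas_now as [x, y]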
def hungarian_algorithm(self):
"""
The hungarian algorithm is a combinatorial optimisation algorithm used
to solve assignment problems. Here, we use the algorithm to reduce noise
due to ripples and to maintain individual identity. This is accomplished
by minimising a cost function; in this case, euclidean distances between
points measured in previous and current step. The algorithm here is written
to be flexible as the number of contours detected between successive frames
changes. However, an error will be returned if zero contours are detected.
Parameters
----------
        meas_last: array_like, dtype=float
individual's location on previous frame
meas_now: array_like, dtype=float
individual's location on current frame
Returns
-------
row_ind: array, dtype=int64
            individual identities arranged according to input ``meas_last``
col_ind: array, dtype=int64
individual identities rearranged based on matching locations from
``meas_last`` to ``meas_now`` by minimising the cost function
"""
self.meas_last = np.array(self.meas_last)
self.meas_now = np.array(self.meas_now)
if self.meas_now.shape != self.meas_last.shape:
if self.meas_now.shape[0] < self.meas_last.shape[0]:
while self.meas_now.shape[0] != self.meas_last.shape[0]:
self.meas_last = np.delete(self.meas_last, self.meas_last.shape[0]-1, 0)
else:
result = np.zeros(self.meas_now.shape)
result[:self.meas_last.shape[0], :self.meas_last.shape[1]] = self.meas_last
self.meas_last = result
self.meas_last = list(self.meas_last)
self.meas_now = list(self.meas_now)
cost = cdist(self.meas_last, self.meas_now)
#reduce the length of cost if it gets too long... (takes a long time to process)
        if len(cost) >
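        # Worked example of the matching step this method performs (values are
        # made up; cdist and linear_sum_assignment are the imports at the top
        # of this file):
        #
        #   last = [[0.0, 0.0], [10.0, 10.0]]
        #   now = [[9.6, 10.3], [0.2, -0.1]]
        #   cost = cdist(last, now)                  # pairwise Euclidean distances
        #   row_ind, col_ind = linear_sum_assignment(cost)
        #   # col_ind == [1, 0]: each previous point is paired with its nearest
        #   # detection, preserving identities across frames.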
#!/bin/python2
from __future__ import print_function
from gensim.parsing.preprocessing import strip_non_alphanum, preprocess_string
from gensim.corpora.dictionary import Dictionary
from keras.models import load_model
import numpy as np
import os
import subprocess
try:
input = raw_input
except NameError:
pass
try:
model = load_model('SentimentAnalysis/model_nn.h5')
except IOError:
if 'model_nn.tar.gz' not in os.listdir('SentimentAnalysis'):
raise IOError("Could not find Sentiment Analysis model. Ensure model "\
"is present in: ./SentimentAnalysis")
else:
process = subprocess.Popen("cd SentimentAnalysis/; "\
"tar -zxf model_nn.tar.gz; cd ..",
shell=True, stdout=subprocess.PIPE)
process.wait()
model = load_model('SentimentAnalysis/model_nn.h5')
vocab = Dictionary.load('SentimentAnalysis/vocab_sentiment')
def predict(text):
    # "not" is suffixed with "xxx" before preprocessing so that gensim's
    # stop-word removal does not drop it; the suffix is stripped again afterwards.
    preprocessed = [word[:-3] if word[-3:] == 'xxx' else word for word in
                    preprocess_string(text.lower().replace('not', 'notxxx'))]
txt_list = [(vocab.token2id[word] + 1) for word in preprocessed
if word in vocab.token2id.keys()]
txt_list = [txt_list]
max_tweet_len = 20
if len(txt_list[0]) < max_tweet_len:
for i in range(max_tweet_len - len(txt_list[0])):
txt_list[0].append(0)
elif len(txt_list[0]) > max_tweet_len:
while len(txt_list[-1]) > max_tweet_len:
txt_list.append(txt_list[-1][max_tweet_len:])
txt_list[-2] = txt_list[-2][:max_tweet_len]
prediction = 0
for txt in txt_list:
prediction += model.predict(np.array([txt]), batch_size=1)
prediction /= len(txt_list)
return prediction
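# Illustrative call (not executed here; the returned value is assumed to lie in
# [0, 1], given the thresholds used later in this script):
#
#   score = predict("I had a really great day today")
#   # Texts longer than 20 tokens are split into 20-token chunks and the
#   # model predictions for the chunks are averaged.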
finisher = 'It was really nice talking to you and I hope that now you'\
' feel better after talking to me.\nBest of luck for your future '\
'endeavours. Bye!'
def friends():
response = input('How are your friends meeting up with your expectations?'\
'\n')
if(predict(response) >=0.4):
response = input('Have you broken up with someone recently?\n')
if(predict(response)>=0.4):
print(name + ", don't feel sad. Take your time and heal properly,"\
" look at what's happened, learn from it, and find ways to "\
"build a new and healthy life.\nAll any of us wants is to "\
"be happy. For some, this requires the perfect person to "\
"be our other half, and for others, it means completing "\
"the equation yourself. Either way, to find the right "\
"person, you need to be the right person. And trust that "\
"in the long run, your efforts will lead to your own "\
"personal happy ending.")
print(finisher)
else:
print(name + ", don't worry. You may be at a point where similar "\
"people are not in your life right now. That happens in "\
"life from time to time.\nIt is better to be away from "\
"incompatible people, and those people are attracted to "\
"you when you pretend to be someone you aren't.\nBe as "\
"different as you truly are, get to know yourself at a "\
"deep level, esteem your individuality, interact with "\
"pepole honestly, and eventually the people who appreciate "\
"you will notice and be drawn in.")
print(finisher)
else:
print("Many people tend to expect too much of others, their family, "\
"their friends or even just acquaintances. It's a usual mistake"\
", people don't think exactly the way you do.\nDon't let the "\
"opinions of others make you forget what you deserve. You are "\
"not in this world to live up to the expectations of others, "\
"nor should you feel that others are here to live up to yours."\
"\nThe first step you should take if you want to learn how to "\
"stop expecting too much from people is to simply realize and "\
"accept the fact that nobody is perfect and that everyone "\
"makes mistakes every now and then.")
print(finisher)
def family():
print(name + ", don't take too much stress. All you need to do is adjust "\
"your priorities. Don't take on unnecessary duties and "\
"responsibilities.\nTake advice from people whose opinion you "\
"trust, and get specific advice when issues arise.\nYou should "\
"use stress management techniques and always hope for the best. "\
"These situations arise in everyone's life and what matters the "\
"most is taking the right decision at such moments.")
print(finisher)
def work():
print(name + ", don't take too much stress. I can list some really cool "\
"ways to handle it.\nYou should develop healthy responses which "\
"include doing regular exercise and taking good quality sleep. "\
"You should have clear boundaries between your work or academic "\
"life and home life so you make sure that you don't mix them.\n"\
"Tecniques such as meditation and deep breathing exercises can be "\
"really helping in relieving stress.\n Always take time to "\
"recharge so as to avoid the negative effects of chronic stress "\
"and burnout. We need time to replenish and return to our pre-"\
"stress level of functioning.")
print(finisher)
def sad1():
response = input('I understand. Seems like something\'s bothering you. '\
'Could you further describe it, in short?\n')
if(predict(response)>=0.4):
response = input('It seems like though the issue might be a little '\
'worrisome, it might not actually be very serious. '\
'What are your thoughts on this?\n')
if(predict(response)>=0.5):
response = input('Looks like you agree with me. Wanna sign off?\n')
if(predict(response)>0.55):
print("That's okay. It was nice talking to you. You can chat "\
"with me anytime you want.\nBye " + name + "!")
else:
sad3()
else:
sad3()
else:
sad2()
def sad2():
response = input('Please feel free to share your feelings ' + name +\
', think of me as your friend.\n')
if(predict(response)>=0.3):
        response = input('I see. Among the thoughts occurring in your mind, '\
'which one upsets you the most?\n')
response = input('Why do you think it upsets you?\n')
print("Okay. You just identified what we call an automatic thought. "\
"Everyone has them. They are thoughts that immediately pop to "\
"mind without any effort on your part.\nMost of the time the "\
"thought occurs so quickly you don't notice it, but it has an "\
"impact on your emotions. It's usually the emotion that you "\
"notice, rather than the thought.\nOften these automatic "\
"thoughts are distorted in some way but we usually don't stop "\
"to question the validity of the thought. But today, that's "\
"what we are going to do.")
        response = input('So, ' + name + ', are there signs that the contrary '\
'could be true?\n')
if(predict(response)>=0.4):
print("I'm glad that you realised that the opposite could be "\
"true. The reason these are called 'false beliefs' is "\
"because they are extreme ways of perceiving the world. "\
"They are black or white and ignore the shades of grey in "\
"between.\nNow that you have learned about this cool "\
"technique, you can apply it on most of the problems that "\
"you will face. If you still feel stuck at any point, you "\
"can always chat with me.\nBest of luck for your future "\
"endeavours. Bye!")
else:
sad4()
else:
sad4()
def sad3():
response = input('Feel comfortable. Could you briefly explain about your '\
'day?\n')
response = input('What are the activities that make up your most of the '\
'day?\n')
response = input('It looks like you might be feeling comfortable talking '\
'about yourself. Could you share your feelings?\n')
if(predict(response)>=0.3):
sad2()
else:
sad4()
def sad4():
print("My sympathies. Looks like it might be a point of concern. Don't "\
"worry, that's what I'm here for!")
response_friends = input('How are things going on with your friends?\n')
response_family = input('How is your relationship with your parents?\n')
response_worklife = input('How is your work or academic life going on?\n')
if(predict(response_friends)<=0.3):
friends()
else:
if(predict(response_family)<=0.3):
family()
else:
work()
print('\n\nHello! Thanks for coming here. I am a chatbot. People say that '
'I am a kind and approachable bot.')
name = input('Please tell me your name.\n')
try:
preprocessed = [word for word in preprocess_string(name) if word not in (
'people', 'call', 'friend')][0]
name = [word for word in strip_non_alphanum(name.lower()).split(
) if preprocessed in word][0]
except:
name = name.split()[0]
name = name[0].upper() + name[1:]
print("Hi " + name + "! My name's Brad. Let's start with our session.")
response = input("How are you doing?\n")
if (predict(response) >= 0.55):
response = input('That is good. Are you usually this happy, or are there '\
'some worries that you want to talk about?\n')
if (predict(response)>=0.7):
response = input('You seem to be really content. Wanna sign off?\n')
if(predict(response)>=0.7):
print('Ok, bye ' + name + '!')
else:
response = input('Is there something bothering you? Would you '\
                         'share it with
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to update prebuilt versions of the projects."""
import argparse
import glob
import io
import json
import logging
import os
import platform
import re
import subprocess
import sys
from l2tdevtools import presets
from l2tdevtools import projects
from l2tdevtools import versions
from l2tdevtools.download_helpers import interface
if platform.system() == 'Windows':
import wmi # pylint: disable=import-error
class PackageDownload(object):
"""Information about a package download.
Attributes:
filename (str): name of the package file.
name (str): name of the package.
url (str): download URL of the package file.
version (str): version of the package.
"""
def __init__(self, name, version, filename, url):
"""Initializes a package download.
Args:
name (str): name of the package.
version (str): version of the package.
filename (str): name of the package file.
url (str): download URL of the package file.
"""
super(PackageDownload, self).__init__()
self.filename = filename
self.name = name
self.url = url
self.version = version
class GithubRepoDownloadHelper(interface.DownloadHelper):
"""Helps in downloading from a GitHub repository."""
_GITHUB_REPO_API_URL = (
'https://api.github.com/repos/log2timeline/l2tbinaries')
_GITHUB_REPO_URL = (
'https://github.com/log2timeline/l2tbinaries')
_SUPPORTED_PYTHON_VERSIONS = frozenset([(3, 10)])
def __init__(self, download_url, branch='main'):
"""Initializes a download helper.
Args:
download_url (str): download URL.
branch (Optional[str]): git branch to download from.
"""
super(GithubRepoDownloadHelper, self).__init__(download_url)
self._branch = branch
def _GetMachineTypeSubDirectory(
self, preferred_machine_type=None, preferred_operating_system=None):
"""Retrieves the machine type sub directory.
Args:
      preferred_machine_type (Optional[str]): preferred machine type, where
          None means the current machine type will be auto-detected.
      preferred_operating_system (Optional[str]): preferred operating system,
          where None means the current operating system will be auto-detected.
Returns:
str: machine type sub directory or None.
"""
if preferred_operating_system:
operating_system = preferred_operating_system
else:
operating_system = platform.system()
if preferred_machine_type:
cpu_architecture = preferred_machine_type
else:
cpu_architecture = platform.machine().lower()
sub_directory = None
if operating_system != 'Windows':
logging.error('Operating system: {0:s} not supported.'.format(
operating_system))
return None
if (sys.version_info[0], sys.version_info[1]) not in (
self._SUPPORTED_PYTHON_VERSIONS):
logging.error('Python version: {0:d}.{1:d} not supported.'.format(
sys.version_info[0], sys.version_info[1]))
return None
if cpu_architecture == 'x86':
sub_directory = 'win32'
elif cpu_architecture == 'amd64':
sub_directory = 'win64'
if not sub_directory:
logging.error('CPU architecture: {0:s} not supported.'.format(
cpu_architecture))
return None
return sub_directory
def _GetDownloadURL(
self, preferred_machine_type=None, preferred_operating_system=None,
use_api=False):
"""Retrieves the download URL.
Args:
      preferred_machine_type (Optional[str]): preferred machine type, where
          None means the current machine type will be auto-detected.
      preferred_operating_system (Optional[str]): preferred operating system,
          where None means the current operating system will be auto-detected.
use_api (Optional[bool]): True if the GitHub API should be used to
determine the download URL.
Returns:
str: download URL or None.
"""
sub_directory = self._GetMachineTypeSubDirectory(
preferred_machine_type=preferred_machine_type,
preferred_operating_system=preferred_operating_system)
if not sub_directory:
return None
if use_api:
# TODO: add support for branch.
download_url = '{0:s}/contents/{1:s}'.format(
self._GITHUB_REPO_API_URL, sub_directory)
else:
download_url = '{0:s}/tree/{1:s}/{2:s}'.format(
self._GITHUB_REPO_URL, self._branch, sub_directory)
return download_url
def GetPackageDownloadURLs(
self, preferred_machine_type=None, preferred_operating_system=None,
use_api=False):
"""Retrieves the package download URLs for a given system configuration.
Args:
      preferred_machine_type (Optional[str]): preferred machine type, where
          None means the current machine type will be auto-detected.
      preferred_operating_system (Optional[str]): preferred operating system,
          where None means the current operating system will be auto-detected.
use_api (Optional[bool]): True if the GitHub API should be used to
determine the download URL.
Returns:
list[str]: list of package download URLs or None if no package download
URLs could be determined.
"""
download_url = self._GetDownloadURL(
preferred_machine_type=preferred_machine_type,
preferred_operating_system=preferred_operating_system, use_api=use_api)
if not download_url:
logging.info('Missing download URL.')
return None
page_content = self.DownloadPageContent(download_url)
if not page_content:
return None
# TODO: skip SHA256SUMS
download_urls = []
if use_api:
# The page content consist of JSON data that contains a list of dicts.
# Each dict consists of:
# {
# "name":"PyYAML-3.11.win-amd64-py2.7.msi",
# "path":"win64/PyYAML-3.11.win-amd64-py2.7.msi",
# "sha":"8fca8c1e2549cf54bf993c55930365d01658f418",
# "size":196608,
# "url":"https://api.github.com/...",
# "html_url":"https://github.com/...",
# "git_url":"https://api.github.com/...",
# "download_url":"https://raw.githubusercontent.com/...",
# "type":"file",
# "_links":{
# "self":"https://api.github.com/...",
# "git":"https://api.github.com/...",
# "html":"https://github.com/..."
# }
# }
for directory_entry in json.loads(page_content):
download_url = directory_entry.get('download_url', None)
if download_url:
download_urls.append(download_url)
else:
sub_directory = self._GetMachineTypeSubDirectory(
preferred_machine_type=preferred_machine_type,
preferred_operating_system=preferred_operating_system)
if not sub_directory:
return None
# The format of the download URL is:
# <a class="js-navigation-open link-gray-dark" title="{title}"
# href="{path}">{name}</a>
# Note that:
# * class="js-navigation-open" and class="js-navigation-open " also have
# been seen to be used.
# * an additional data-pjax="{data}" parameter.
expression_string = (
'<a class="js-navigation-open[^"]*" title="[^"]*" '
'(|data-pjax="[^"]*" )href="([^"]*)"')
matches = re.findall(expression_string, page_content)
for _, match in matches:
_, _, filename = match.rpartition('/')
download_url = (
'https://github.com/log2timeline/l2tbinaries/raw/{0:s}/{1:s}/'
'{2:s}').format(self._branch, sub_directory, filename)
download_urls.append(download_url)
return download_urls
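  # Example of the pattern match described above, on a hypothetical directory
  # listing entry (the HTML line below is illustrative only):
  #
  #   line = ('<a class="js-navigation-open link-gray-dark" title="x.msi" '
  #           'href="/log2timeline/l2tbinaries/blob/main/win64/x.msi">x.msi</a>')
  #   re.findall(expression_string, line)
  #   # -> [('', '/log2timeline/l2tbinaries/blob/main/win64/x.msi')]; the href is
  #   # rpartition'd on '/' to recover the file name "x.msi".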
class DependencyUpdater(object):
"""Helps in updating dependencies.
Attributes:
operating_system (str): the operating system on which to update
dependencies and remove previous versions.
"""
_DOWNLOAD_URL = 'https://github.com/log2timeline/l2tbinaries/releases'
_GIT_BRANCH_PER_TRACK = {
'dev': 'dev',
'stable': 'main',
'staging': 'staging',
'testing': 'testing'}
_PKG_NAME_PREFIXES = [
'com.github.dateutil.',
'com.github.dfvfs.',
'com.github.erocarrer.',
'com.github.ForensicArtifacts.',
'com.github.kennethreitz.',
'com.github.google.',
'org.github.ipython.',
'com.github.libyal.',
'com.github.log2timeline.',
'com.github.sleuthkit.',
'com.google.code.p.',
'org.samba.',
'org.pypi.',
'org.python.pypi.',
'net.sourceforge.projects.']
# Some projects have different names than their module names.
_ALTERNATE_NAMES = {
'lz4': 'python-lz4',
'redis': 'redis-py'}
def __init__(
self, download_directory='build', download_only=False,
download_track='stable', exclude_packages=False, force_install=False,
msi_targetdir=None, preferred_machine_type=None,
preferred_operating_system=None, verbose_output=False):
"""Initializes the dependency updater.
Args:
download_directory (Optional[str]): path of the download directory.
download_only (Optional[bool]): True if the dependency packages should
only be downloaded.
download_track (Optional[str]): track to download from.
exclude_packages (Optional[bool]): True if packages should be excluded
instead of included.
force_install (Optional[bool]): True if the installation (update) should
be forced.
msi_targetdir (Optional[str]): MSI TARGETDIR property.
      preferred_machine_type (Optional[str]): preferred machine type, where
          None means the current machine type will be auto-detected.
      preferred_operating_system (Optional[str]): preferred operating system,
          where None means the current operating system will be auto-detected.
      verbose_output (Optional[bool]): True if more verbose output should be
          provided.
"""
branch = self._GIT_BRANCH_PER_TRACK.get(download_track, 'main')
super(DependencyUpdater, self).__init__()
self._download_directory = download_directory
self._download_helper = GithubRepoDownloadHelper(
self._DOWNLOAD_URL, branch=branch)
self._download_only = download_only
self._download_track = download_track
self._exclude_packages = exclude_packages
self._force_install = force_install
self._msi_targetdir = msi_targetdir
self._verbose_output = verbose_output
if preferred_operating_system:
self.operating_system = preferred_operating_system
else:
self.operating_system = platform.system()
if preferred_machine_type:
self._preferred_machine_type = preferred_machine_type.lower()
else:
self._preferred_machine_type = None
def _GetAvailablePackages(self):
"""Determines the packages available for download.
Returns:
list[PackageDownload]: packages available for download.
"""
python_version_indicator = '-py{0:d}.{1:d}'.format(
sys.version_info[0], sys.version_info[1])
# The API is rate limited, so we scrape the web page instead.
package_urls = self._download_helper.GetPackageDownloadURLs(
preferred_machine_type=self._preferred_machine_type,
preferred_operating_system=self.operating_system)
if not package_urls:
logging.error('Unable to determine package download URLs.')
return []
# Use a dictionary so we can more efficiently set a newer version of
# a package that was set previously.
available_packages = {}
package_versions = {}
for package_url in package_urls:
_, _, package_filename = package_url.rpartition('/')
package_filename = package_filename.lower()
if not package_filename.endswith('.msi'):
# Ignore all other file extensions.
continue
# Strip off the trailing part starting with '.win'.
package_name, _, package_version = package_filename.partition('.win')
if ('-py' in package_version and
python_version_indicator not in package_version):
# Ignore packages that are for different versions of Python.
continue
if package_name.startswith('pefile-1.'):
# We need to use the most left '-' character as the separator of the
# name and the version, since version can contain the '-' character.
name, _, version = package_name.partition('-')
else:
# We need to use the most right '-' character as the separator of the
# name and the version, since name can contain the '-' character.
name, _, version = package_name.rpartition('-')
version = version.split('.')
if package_name.startswith('pefile-1.'):
last_part = version.pop()
version.extend(last_part.split('-'))
if name not in package_versions:
compare_result = 1
else:
compare_result = versions.CompareVersions(
version, package_versions[name])
if compare_result > 0:
package_versions[name] = version
package_download = PackageDownload(
name, version, package_filename, package_url)
available_packages[name] = package_download
return available_packages.values()
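  # Worked example of the name/version split above (the file name is
  # hypothetical):
  #
  #   package_filename = 'pyyaml-5.4.1.win-amd64-py3.10.msi'
  #   package_name, _, _ = package_filename.partition('.win')   # 'pyyaml-5.4.1'
  #   name, _, version = package_name.rpartition('-')           # 'pyyaml', '5.4.1'
  #   version = version.split('.')                              # ['5', '4', '1']
  #   # versions.CompareVersions() then keeps only the newest version per name.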
def _GetPackageFilenamesAndVersions(
self, project_definitions, available_packages,
user_defined_package_names):
"""Determines the package filenames and versions.
Args:
      project_definitions (dict[str, ProjectDefinition]): project definitions
per name.
available_packages (list[PackageDownload]): packages available for
download.
user_defined_package_names (list[str]): names of packages that should be
updated if an update is available. These package names are derived
from the user specified names of projects. An empty list represents
all available packages.
Returns:
tuple: containing:
dict[str, str]: filenames per package.
dict[str, str]: versions per package.
"""
project_definition_per_package_name = {}
for project_name, project_definition in project_definitions.items():
package_name = getattr(
project_definition, 'msi_name', None) or project_name
package_name = package_name.lower()
project_definition_per_package_name[package_name] = project_definition
package_filenames = {}
package_versions = {}
for package_download in available_packages:
package_name = package_download.name
package_filename = package_download.filename
package_download_path = os.path.join(
self._download_directory, package_filename)
# Ignore package names if user defined.
if user_defined_package_names:
in_package_names = package_name in user_defined_package_names
alternate_name = self._ALTERNATE_NAMES.get(package_name, None)
if alternate_name:
if ((self._exclude_packages and in_package_names) or
(not self._exclude_packages and not in_package_names)):
in_package_names = alternate_name in user_defined_package_names
if ((self._exclude_packages and in_package_names) or
(not self._exclude_packages and not in_package_names)):
          logging.info('Skipping: {0:s} because it was
def part1(instructions):
regs = {}
lines = instructions.split("\n")
for line in lines:
        # A register may be named "is", which is a Python keyword; rename it so
        # the condition string can be evaluated with eval() below.
        line = line.replace('is', '_is')
row = line.split()
reg_key = row[0]
test_key = row[4]
direction = row[1]
inc = int(row[2])
test = ' '.join(row[4:])
try:
regs[reg_key]
except KeyError:
regs[reg_key] = 0
try:
regs[test_key]
except KeyError:
regs[test_key] = 0
test_result = eval(test, None, regs)
if test_result:
if direction == "inc":
regs[reg_key] += inc
else:
regs[reg_key] -= inc
vals = regs.values()
return max(vals)
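# Worked example for a single instruction, e.g. "b inc 5 if a > 1":
#
#   row = "b inc 5 if a > 1".split()
#   # row[0]='b' (target), row[1]='inc', row[2]='5', row[4:]=['a', '>', '1']
#   # eval("a > 1", None, regs) is False while regs['a'] == 0, so 'b' is unchanged.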
def part2(instructions):
regs = {}
max_val = 0
lines = instructions.split("\n")
for line in lines:
        # Same "is" keyword workaround as in part1().
        line = line.replace('is', '_is')
row = line.split()
reg_key = row[0]
test_key = row[4]
direction = row[1]
inc = int(row[2])
test = ' '.join(row[4:])
try:
regs[reg_key]
except KeyError:
regs[reg_key] = 0
try:
regs[test_key]
except KeyError:
regs[test_key] = 0
test_result = eval(test, None, regs)
if test_result:
if direction == "inc":
regs[reg_key] += inc
else:
regs[reg_key] -= inc
max_val = max(max(regs.values()), max_val)
return max_val
if __name__ == "__main__":
test1_input="""b inc 5 if a > 1
a inc 1 if b < 5
c dec -10 if a >= 1
c inc -20 if c == 10"""
# print('test1', modify_regs(test1_input))
print('test1', part1(test1_input))
real_input="""um inc -671 if lbf != 5
j inc 236 if umr > -6
fk inc -246 if j < 241
uy dec -404 if mmw <= 2
j inc 372 if gk >= -1
uy inc -380 if umr > -4
dy inc 257 if es > -9
es dec 769 if es < 4
t inc -429 if umr >= 0
hg dec 13 if dy < 267
is inc 66 if fk == -246
fk inc -30 if es > -775
ada inc 258 if umr > 3
eri inc -950 if lx > -4
umr dec -789 if x >= -4
um inc -783 if aao > -7
j inc -97 if ada != -1
es inc 406 if fk != -283
lx inc 43 if kg <= 7
f inc 464 if lx <= 44
kg inc 884 if t >= -435
mmw inc 836 if gk > -3
a dec 496 if um > -1447
eri dec -617 if uy == 24
j inc -858 if kg < 886
hg dec -854 if umr == 789
dy dec -246 if f >= 457
lbf inc 122 if a != 10
aao inc -408 if uy > 19
f dec 908 if uy != 18
t dec -775 if j >= -351
t inc -594 if yk <= 3
es inc 28 if gk == 0
es inc -306 if kg >= 894
mmw dec 154 if kg <= 885
dy inc 832 if aao <= -402
lx inc -426 if a >= -8
umr dec -792 if eri > -341
a inc -609 if gk <= -6
j dec -970 if lx > -393
uy dec -241 if yk > 0
yk inc 411 if is > 61
ada dec -253 if is == 66
is dec -486 if aao > -413
yk dec 561 if a == 0
dy inc 976 if um == -1454
dy inc 885 if eri < -331
hg inc -5 if gk <= -9
t dec 717 if f <= -443
mmw inc -293 if lx <= -379
t inc 77 if lx != -383
uy dec -89 if ada <= 258
fk inc -381 if fk < -272
eri dec 711 if mmw < 398
is dec -273 if gk != -3
umr dec 384 if aao != -414
is dec -36 if is != 825
ada dec 422 if es < -326
fk inc 207 if mmw < 389
uy dec -357 if lx == -383
es inc 829 if dy <= 3199
aao dec -173 if gk == 0
x dec 274 if is >= 824
t dec -400 if is <= 833
fk dec -677 if f == -444
x inc -494 if j == 623
t dec -406 if f < -443
gk dec 704 if gk == 0
x inc -637 if x < -758
x dec 194 if gk != -710
um inc 956 if fk > 26
ada inc -527 if aao > -239
j inc -774 if f <= -436
es inc -121 if ada > -689
hg dec -461 if gk < -698
t inc 780 if is < 828
yk inc -858 if es >= 504
dy inc 145 if j > -159
is inc 929 if f != -453
mmw inc 702 if fk > 17
lbf dec 123 if aao >= -240
hg dec -543 if a == 0
kg dec -610 if es > 488
hg inc -726 if hg < 1854
kg dec -410 if j <= -146
dy inc -469 if gk <= -712
a inc 252 if aao > -237
dy dec 168 if uy <= 478
gk inc -530 if a > 244
gk dec -254 if uy == 479
es inc -960 if j > -152
umr dec 561 if hg != 1126
uy inc 420 if j < -148
mmw inc 976 if j < -142
umr dec -852 if gk >= -1238
aao dec 559 if eri >= -1042
gk inc -745 if j <= -142
t dec 183 if hg < 1112
t inc 725 if yk == -150
lbf dec -142 if kg < 1912
mmw dec -908 if is <= 1758
um inc 329 if f == -447
x dec 543 if ada > -704
gk inc 226 if eri <= -1041
es dec -176 if mmw == 2975
ada inc -156 if eri >= -1053
ada inc -523 if t <= 1347
aao inc -717 if x < -2145
gk inc -236 if t >= 1354
lx dec -266 if lx != -389
hg dec -324 if dy >= 3169
f dec 96 if x != -2143
yk inc -270 if um != -1450
aao inc -916 if lbf == 141
es inc -943 if f == -540
a dec 974 if lbf > 131
dy dec -35 if yk == -413
kg inc 112 if eri >= -1053
mmw inc -30 if gk <= -1754
um dec -288 if mmw < 2978
es dec 774 if uy == 900
t dec -60 if x > -2145
j inc 1 if x > -2148
dy inc 222 if es >= -1232
is dec -221 if x != -2142
hg dec -626 if uy > 887
is dec -114 if t <= 1408
j inc -564 if umr == 1488
a inc -29 if f != -540
umr dec 373 if fk != 17
a dec -788 if fk >= 14
ada inc 316 if aao >= -1153
x dec 970 if lbf != 141
x dec -209 if aao >= -1159
uy dec 279 if lx != -117
f dec 517 if hg >= 2065
j dec 566 if a <= 75
x dec -346 if a <= 59
t inc 576 if lx > -118
um dec 785 if es >= -1229
a dec -949 if mmw > 2974
j inc 915 if x <= -1929
hg inc 177 if is < 1873
is inc -678 if f >= -1052
umr dec 254 if lx > -123
x dec 754 if um < -1160
aao dec 977 if uy <= 894
yk inc -157 if aao <= -2124
um inc 631 if is >= 1867
lx inc -99 if ada <= -1058
fk dec -834 if x < -2695
is dec 105 if x != -2680
a inc 462 if umr <= 864
mmw inc 394 if lbf > 150
gk inc -370 if lbf >= 146
is inc 722 if yk > -586
dy inc -882 if gk != -1753
t inc 983 if gk == -1763
fk dec 874 if fk > 14
hg dec -949 if lbf < 142
es dec -592 if uy == 891
dy dec 823 if gk >= -1762
mmw dec 137 if gk >= -1751
hg dec 704 if dy >= 2343
t dec -921 if j != -372
x dec 95 if is >= 2485
dy inc -117 if kg >= 2017
gk dec 551 if um <= -545
ada inc -161 if umr >= 856
mmw inc 633 if umr != 862
fk dec 38 if lbf >= 140
kg dec -954 if x >= -2787
um inc 325 if hg > 2481
um dec -72 if j > -375
umr dec 910 if gk <= -1756
fk dec -628 if j != -356
mmw dec 267 if aao != -2123
eri inc -857 if um != -137
um inc -651 if eri != -1896
j dec -122 if a >= 1473
umr dec 177 if gk
Distance per class per epoch on reconstructed iSEG image",
every=1,
params={
"title": "Hausdorff Distance per class per epoch on reconstructed iSEG image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Hausdorff Distance per class per epoch on reconstructed MRBrainS image",
every=1,
params={
"title": "Hausdorff Distance per class per epoch on reconstructed MRBrainS image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Hausdorff Distance per class per epoch on reconstructed ABIDE image",
every=1,
params={
"title": "Hausdorff Distance per class per epoch on reconstructed ABIDE image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Dice score per class per epoch on reconstructed iSEG image",
every=1,
params={
"title": "Dice score per class per epoch on reconstructed iSEG image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Dice score per class per epoch on reconstructed MRBrainS image", every=1,
params={
"title": "Dice score per class per epoch on reconstructed MRBrainS image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Dice score per class per epoch on reconstructed ABIDE image",
every=1,
params={
"title": "Dice score per class per epoch on reconstructed ABIDE image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Input iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Input iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Input T2 iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Input T2 iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Normalized iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Normalized iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Normalized T2 iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Normalized T2 iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Segmented iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Segmented iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Ground Truth iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Ground Truth iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Initial Noise iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Initial Noise iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Augmented iSEG After Normalization",
PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Augmented iSEG After Normalization"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Augmented Input iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Augmented Input iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Input MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Input MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Input T2 MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Input T2 MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Normalized MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Normalized MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Normalized T2 MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Normalized T2 MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Segmented MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Segmented MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Ground Truth MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Ground Truth MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Initial Noise MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Initial Noise MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Augmented MRBrainS After Normalization",
PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Augmented MRBrainS After Normalization"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Augmented Input MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Augmented Input MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Input ABIDE Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Input ABIDE Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Normalized ABIDE Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Normalized ABIDE Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Ground Truth ABIDE Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Ground Truth ABIDE Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Segmented ABIDE Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Segmented ABIDE Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Conv1 FM", PlotType.IMAGES_PLOT,
params={"nrow": 8, "opts": {"store_history": True,
"title": "Conv1 FM"}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Layer1 FM", PlotType.IMAGES_PLOT,
params={"nrow": 8, "opts": {"store_history": True,
"title": "Layer1 FM"}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Layer2 FM", PlotType.IMAGES_PLOT,
params={"nrow": 12, "opts": {"store_history": True,
"title": "Layer2 FM"}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Layer3 FM", PlotType.IMAGES_PLOT,
params={"nrow": 16, "opts": {"store_history": True,
"title": "Layer3 FM"}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Per-Dataset Histograms", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Images Histograms", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True}}, every=5), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
Checkpoint(save_folder, monitor_fn=lambda model_trainer: model_trainer.valid_loss, delta=0.01,
mode=MonitorMode.MIN), Event.ON_EPOCH_END) \
.with_event_handler(PlotAvgGradientPerLayer(visdom_logger, every=25), Event.ON_TRAIN_BATCH_END)
return trainer
elif self._trainer == TrainerType.LSGAN_Multimodal:
trainer = LSGANMultimodalTrainer(training_config, model_trainers, dataloaders[0], dataloaders[1],
dataloaders[2],
reconstruction_datasets, normalized_reconstructor, input_reconstructor,
segmentation_reconstructor, augmented_input_reconstructor,
gt_reconstructor,
run_config, dataset_configs, save_folder) \
.with_event_handler(PrintTrainingStatus(every=25), Event.ON_BATCH_END) \
.with_event_handler(PrintMonitors(every=25), Event.ON_BATCH_END) \
.with_event_handler(PlotMonitors(visdom_logger), Event.ON_EPOCH_END) \
.with_event_handler(PlotLR(visdom_logger), Event.ON_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Training Generated Batch Process {} Channel 0".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Generated Patches Process {} Channel 0".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Training Generated Batch Process {} Channel 1".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Generated Patches Process {} Channel 1".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Generated Batch Process {} Channel 0".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Generated Patches Process {} Channel 0".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Generated Batch Process {} Channel 1".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Generated Patches Process {} Channel 1".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Generated Batch Process {} Channel 0".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Generated Patches Process {} Channel 0".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Generated Batch Process {} Channel 1".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Generated Patches Process {} Channel 1".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Training Input Batch Process {} Channel 0".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Input Patches Process {} Channel 0".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Training Input Batch Process {} Channel 1".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Input Patches Process {} Channel 1".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Input Batch Process {} Channel 0".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Input Patches Process {} Channel 0".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Input Batch Process {} Channel 1".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Input Patches Process {} Channel 1".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Input Batch Process {} Channel 0".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Input Patches Process {} Channel 0".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Input Batch Process {} Channel 1".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Input Patches Process {} Channel 1".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Training Segmented Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Segmented Patches Process {}".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Segmented Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Segmented Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Test Segmented Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Segmented Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Training Segmentation Ground Truth Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Ground Truth Patches Process {}".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Segmentation Ground Truth Batch Process {}".format(
run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Ground Truth Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Segmentation Ground Truth Batch Process {}".format(
run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Ground Truth Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Training Label Map Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Label Map Patches Process {}".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Label Map Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Label Map Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Label Map Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Label Map Patches | |
{"1": {}, "2": {}, "3": {}, "4": {}},
"bstunGroupEntry": {"2": {}, "3": {}, "4": {}, "5": {}},
"bstunPortEntry": {"1": {}, "2": {}, "3": {}, "4": {}},
"bstunRouteEntry": {
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cAal5VccEntry": {
"1": {},
"10": {},
"11": {},
"12": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cBootpHCCountDropNotServingSubnet": {},
"cBootpHCCountDropUnknownClients": {},
"cBootpHCCountInvalids": {},
"cBootpHCCountReplies": {},
"cBootpHCCountRequests": {},
"cCallHistoryEntry": {
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"16": {},
"17": {},
"18": {},
"19": {},
"2": {},
"20": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cCallHistoryIecEntry": {"2": {}},
"cContextMappingEntry": {"2": {}, "3": {}, "4": {}, "5": {}, "6": {}},
"cContextMappingMIBObjects.2.1.1": {},
"cContextMappingMIBObjects.2.1.2": {},
"cContextMappingMIBObjects.2.1.3": {},
"cDhcpv4HCCountAcks": {},
"cDhcpv4HCCountDeclines": {},
"cDhcpv4HCCountDiscovers": {},
"cDhcpv4HCCountDropNotServingSubnet": {},
"cDhcpv4HCCountDropUnknownClient": {},
"cDhcpv4HCCountForcedRenews": {},
"cDhcpv4HCCountInforms": {},
"cDhcpv4HCCountInvalids": {},
"cDhcpv4HCCountNaks": {},
"cDhcpv4HCCountOffers": {},
"cDhcpv4HCCountReleases": {},
"cDhcpv4HCCountRequests": {},
"cDhcpv4ServerClientAllowedProtocol": {},
"cDhcpv4ServerClientClientId": {},
"cDhcpv4ServerClientDomainName": {},
"cDhcpv4ServerClientHostName": {},
"cDhcpv4ServerClientLeaseType": {},
"cDhcpv4ServerClientPhysicalAddress": {},
"cDhcpv4ServerClientRange": {},
"cDhcpv4ServerClientServedProtocol": {},
"cDhcpv4ServerClientSubnetMask": {},
"cDhcpv4ServerClientTimeRemaining": {},
"cDhcpv4ServerDefaultRouterAddress": {},
"cDhcpv4ServerIfLeaseLimit": {},
"cDhcpv4ServerRangeInUse": {},
"cDhcpv4ServerRangeOutstandingOffers": {},
"cDhcpv4ServerRangeSubnetMask": {},
"cDhcpv4ServerSharedNetFreeAddrHighThreshold": {},
"cDhcpv4ServerSharedNetFreeAddrLowThreshold": {},
"cDhcpv4ServerSharedNetFreeAddresses": {},
"cDhcpv4ServerSharedNetReservedAddresses": {},
"cDhcpv4ServerSharedNetTotalAddresses": {},
"cDhcpv4ServerSubnetEndAddress": {},
"cDhcpv4ServerSubnetFreeAddrHighThreshold": {},
"cDhcpv4ServerSubnetFreeAddrLowThreshold": {},
"cDhcpv4ServerSubnetFreeAddresses": {},
"cDhcpv4ServerSubnetMask": {},
"cDhcpv4ServerSubnetSharedNetworkName": {},
"cDhcpv4ServerSubnetStartAddress": {},
"cDhcpv4SrvSystemDescr": {},
"cDhcpv4SrvSystemObjectID": {},
"cEigrpAcksRcvd": {},
"cEigrpAcksSent": {},
"cEigrpAcksSuppressed": {},
"cEigrpActive": {},
"cEigrpAsRouterId": {},
"cEigrpAsRouterIdType": {},
"cEigrpAuthKeyChain": {},
"cEigrpAuthMode": {},
"cEigrpCRpkts": {},
"cEigrpDestSuccessors": {},
"cEigrpDistance": {},
"cEigrpFdistance": {},
"cEigrpHeadSerial": {},
"cEigrpHelloInterval": {},
"cEigrpHellosRcvd": {},
"cEigrpHellosSent": {},
"cEigrpHoldTime": {},
"cEigrpInputQDrops": {},
"cEigrpInputQHighMark": {},
"cEigrpLastSeq": {},
"cEigrpMFlowTimer": {},
"cEigrpMcastExcepts": {},
"cEigrpMeanSrtt": {},
"cEigrpNbrCount": {},
"cEigrpNextHopAddress": {},
"cEigrpNextHopAddressType": {},
"cEigrpNextHopInterface": {},
"cEigrpNextSerial": {},
"cEigrpOOSrvcd": {},
"cEigrpPacingReliable": {},
"cEigrpPacingUnreliable": {},
"cEigrpPeerAddr": {},
"cEigrpPeerAddrType": {},
"cEigrpPeerCount": {},
"cEigrpPeerIfIndex": {},
"cEigrpPendingRoutes": {},
"cEigrpPktsEnqueued": {},
"cEigrpQueriesRcvd": {},
"cEigrpQueriesSent": {},
"cEigrpRMcasts": {},
"cEigrpRUcasts": {},
"cEigrpRepliesRcvd": {},
"cEigrpRepliesSent": {},
"cEigrpReportDistance": {},
"cEigrpRetrans": {},
"cEigrpRetransSent": {},
"cEigrpRetries": {},
"cEigrpRouteOriginAddr": {},
"cEigrpRouteOriginAddrType": {},
"cEigrpRouteOriginType": {},
"cEigrpRto": {},
"cEigrpSiaQueriesRcvd": {},
"cEigrpSiaQueriesSent": {},
"cEigrpSrtt": {},
"cEigrpStuckInActive": {},
"cEigrpTopoEntry": {"17": {}, "18": {}, "19": {}},
"cEigrpTopoRoutes": {},
"cEigrpUMcasts": {},
"cEigrpUUcasts": {},
"cEigrpUpTime": {},
"cEigrpUpdatesRcvd": {},
"cEigrpUpdatesSent": {},
"cEigrpVersion": {},
"cEigrpVpnName": {},
"cEigrpXmitDummies": {},
"cEigrpXmitNextSerial": {},
"cEigrpXmitPendReplies": {},
"cEigrpXmitReliableQ": {},
"cEigrpXmitUnreliableQ": {},
"cEtherCfmEventCode": {},
"cEtherCfmEventDeleteRow": {},
"cEtherCfmEventDomainName": {},
"cEtherCfmEventLastChange": {},
"cEtherCfmEventLclIfCount": {},
"cEtherCfmEventLclMacAddress": {},
"cEtherCfmEventLclMepCount": {},
"cEtherCfmEventLclMepid": {},
"cEtherCfmEventRmtMacAddress": {},
"cEtherCfmEventRmtMepid": {},
"cEtherCfmEventRmtPortState": {},
"cEtherCfmEventRmtServiceId": {},
"cEtherCfmEventServiceId": {},
"cEtherCfmEventType": {},
"cEtherCfmMaxEventIndex": {},
"cHsrpExtIfEntry": {"1": {}, "2": {}},
"cHsrpExtIfTrackedEntry": {"2": {}, "3": {}},
"cHsrpExtSecAddrEntry": {"2": {}},
"cHsrpGlobalConfig": {"1": {}},
"cHsrpGrpEntry": {
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"16": {},
"17": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cIgmpFilterApplyStatus": {},
"cIgmpFilterEditEndAddress": {},
"cIgmpFilterEditEndAddressType": {},
"cIgmpFilterEditOperation": {},
"cIgmpFilterEditProfileAction": {},
"cIgmpFilterEditProfileIndex": {},
"cIgmpFilterEditSpinLock": {},
"cIgmpFilterEditStartAddress": {},
"cIgmpFilterEditStartAddressType": {},
"cIgmpFilterEnable": {},
"cIgmpFilterEndAddress": {},
"cIgmpFilterEndAddressType": {},
"cIgmpFilterInterfaceProfileIndex": {},
"cIgmpFilterMaxProfiles": {},
"cIgmpFilterProfileAction": {},
"cIpLocalPoolAllocEntry": {"3": {}, "4": {}},
"cIpLocalPoolConfigEntry": {"4": {}, "5": {}, "6": {}, "7": {}, "8": {}},
"cIpLocalPoolGroupContainsEntry": {"2": {}},
"cIpLocalPoolGroupEntry": {"1": {}, "2": {}},
"cIpLocalPoolNotificationsEnable": {},
"cIpLocalPoolStatsEntry": {"1": {}, "2": {}, "3": {}, "4": {}, "5": {}},
"cMTCommonMetricsBitmaps": {},
"cMTCommonMetricsFlowCounter": {},
"cMTCommonMetricsFlowDirection": {},
"cMTCommonMetricsFlowSamplingStartTime": {},
"cMTCommonMetricsIpByteRate": {},
"cMTCommonMetricsIpDscp": {},
"cMTCommonMetricsIpOctets": {},
"cMTCommonMetricsIpPktCount": {},
"cMTCommonMetricsIpPktDropped": {},
"cMTCommonMetricsIpProtocol": {},
"cMTCommonMetricsIpTtl": {},
"cMTCommonMetricsLossMeasurement": {},
"cMTCommonMetricsMediaStopOccurred": {},
"cMTCommonMetricsRouteForward": {},
"cMTFlowSpecifierDestAddr": {},
"cMTFlowSpecifierDestAddrType": {},
"cMTFlowSpecifierDestPort": {},
"cMTFlowSpecifierIpProtocol": {},
"cMTFlowSpecifierMetadataGlobalId": {},
"cMTFlowSpecifierRowStatus": {},
"cMTFlowSpecifierSourceAddr": {},
"cMTFlowSpecifierSourceAddrType": {},
"cMTFlowSpecifierSourcePort": {},
"cMTHopStatsCollectionStatus": {},
"cMTHopStatsEgressInterface": {},
"cMTHopStatsIngressInterface": {},
"cMTHopStatsMaskBitmaps": {},
"cMTHopStatsMediatraceTtl": {},
"cMTHopStatsName": {},
"cMTInitiatorActiveSessions": {},
"cMTInitiatorConfiguredSessions": {},
"cMTInitiatorEnable": {},
"cMTInitiatorInactiveSessions": {},
"cMTInitiatorMaxSessions": {},
"cMTInitiatorPendingSessions": {},
"cMTInitiatorProtocolVersionMajor": {},
"cMTInitiatorProtocolVersionMinor": {},
"cMTInitiatorSoftwareVersionMajor": {},
"cMTInitiatorSoftwareVersionMinor": {},
"cMTInitiatorSourceAddress": {},
"cMTInitiatorSourceAddressType": {},
"cMTInitiatorSourceInterface": {},
"cMTInterfaceBitmaps": {},
"cMTInterfaceInDiscards": {},
"cMTInterfaceInErrors": {},
"cMTInterfaceInOctets": {},
"cMTInterfaceInSpeed": {},
"cMTInterfaceOutDiscards": {},
"cMTInterfaceOutErrors": {},
"cMTInterfaceOutOctets": {},
"cMTInterfaceOutSpeed": {},
"cMTMediaMonitorProfileInterval": {},
"cMTMediaMonitorProfileMetric": {},
"cMTMediaMonitorProfileRowStatus": {},
"cMTMediaMonitorProfileRtpMaxDropout": {},
"cMTMediaMonitorProfileRtpMaxReorder": {},
"cMTMediaMonitorProfileRtpMinimalSequential": {},
"cMTPathHopAddr": {},
"cMTPathHopAddrType": {},
"cMTPathHopAlternate1Addr": {},
"cMTPathHopAlternate1AddrType": {},
"cMTPathHopAlternate2Addr": {},
"cMTPathHopAlternate2AddrType": {},
"cMTPathHopAlternate3Addr": {},
"cMTPathHopAlternate3AddrType": {},
"cMTPathHopType": {},
"cMTPathSpecifierDestAddr": {},
"cMTPathSpecifierDestAddrType": {},
"cMTPathSpecifierDestPort": {},
"cMTPathSpecifierGatewayAddr": {},
"cMTPathSpecifierGatewayAddrType": {},
"cMTPathSpecifierGatewayVlanId": {},
"cMTPathSpecifierIpProtocol": {},
"cMTPathSpecifierMetadataGlobalId": {},
"cMTPathSpecifierProtocolForDiscovery": {},
"cMTPathSpecifierRowStatus": {},
"cMTPathSpecifierSourceAddr": {},
"cMTPathSpecifierSourceAddrType": {},
"cMTPathSpecifierSourcePort": {},
"cMTResponderActiveSessions": {},
"cMTResponderEnable": {},
"cMTResponderMaxSessions": {},
"cMTRtpMetricsBitRate": {},
"cMTRtpMetricsBitmaps": {},
"cMTRtpMetricsExpectedPkts": {},
"cMTRtpMetricsJitter": {},
"cMTRtpMetricsLossPercent": {},
"cMTRtpMetricsLostPktEvents": {},
"cMTRtpMetricsLostPkts": {},
"cMTRtpMetricsOctets": {},
"cMTRtpMetricsPkts": {},
"cMTScheduleEntryAgeout": {},
"cMTScheduleLife": {},
"cMTScheduleRecurring": {},
"cMTScheduleRowStatus": {},
"cMTScheduleStartTime": {},
"cMTSessionFlowSpecifierName": {},
"cMTSessionParamName": {},
"cMTSessionParamsFrequency": {},
"cMTSessionParamsHistoryBuckets": {},
"cMTSessionParamsInactivityTimeout": {},
"cMTSessionParamsResponseTimeout": {},
"cMTSessionParamsRouteChangeReactiontime": {},
"cMTSessionParamsRowStatus": {},
"cMTSessionPathSpecifierName": {},
"cMTSessionProfileName": {},
"cMTSessionRequestStatsBitmaps": {},
"cMTSessionRequestStatsMDAppName": {},
"cMTSessionRequestStatsMDGlobalId": {},
"cMTSessionRequestStatsMDMultiPartySessionId": {},
"cMTSessionRequestStatsNumberOfErrorHops": {},
"cMTSessionRequestStatsNumberOfMediatraceHops": {},
"cMTSessionRequestStatsNumberOfNoDataRecordHops": {},
"cMTSessionRequestStatsNumberOfNonMediatraceHops": {},
"cMTSessionRequestStatsNumberOfValidHops": {},
"cMTSessionRequestStatsRequestStatus": {},
"cMTSessionRequestStatsRequestTimestamp": {},
"cMTSessionRequestStatsRouteIndex": {},
"cMTSessionRequestStatsTracerouteStatus": {},
"cMTSessionRowStatus": {},
"cMTSessionStatusBitmaps": {},
"cMTSessionStatusGlobalSessionId": {},
"cMTSessionStatusOperationState": {},
"cMTSessionStatusOperationTimeToLive": {},
"cMTSessionTraceRouteEnabled": {},
"cMTSystemMetricBitmaps": {},
"cMTSystemMetricCpuFiveMinutesUtilization": {},
"cMTSystemMetricCpuOneMinuteUtilization": {},
"cMTSystemMetricMemoryUtilization": {},
"cMTSystemProfileMetric": {},
"cMTSystemProfileRowStatus": {},
"cMTTcpMetricBitmaps": {},
"cMTTcpMetricConnectRoundTripDelay": {},
"cMTTcpMetricLostEventCount": {},
"cMTTcpMetricMediaByteCount": {},
"cMTTraceRouteHopNumber": {},
"cMTTraceRouteHopRtt": {},
"cPeerSearchType": {},
"cPppoeFwdedSessions": {},
"cPppoePerInterfaceSessionLossPercent": {},
"cPppoePerInterfaceSessionLossThreshold": {},
"cPppoePtaSessions": {},
"cPppoeSystemCurrSessions": {},
"cPppoeSystemExceededSessionErrors": {},
"cPppoeSystemHighWaterSessions": {},
"cPppoeSystemMaxAllowedSessions": {},
"cPppoeSystemPerMACSessionIWFlimit": {},
"cPppoeSystemPerMACSessionlimit": {},
"cPppoeSystemPerMacThrottleRatelimit": {},
"cPppoeSystemPerVCThrottleRatelimit": {},
"cPppoeSystemPerVClimit": {},
"cPppoeSystemPerVLANlimit": {},
"cPppoeSystemPerVLANthrottleRatelimit": {},
"cPppoeSystemSessionLossPercent": {},
"cPppoeSystemSessionLossThreshold": {},
"cPppoeSystemSessionNotifyObjects": {"1": {}, "2": {}, "3": {}, "4": {}, "5": {}},
"cPppoeSystemThresholdSessions": {},
"cPppoeTotalSessions": {},
"cPppoeTransSessions": {},
"cPppoeVcCurrSessions": {},
"cPppoeVcExceededSessionErrors": {},
"cPppoeVcHighWaterSessions": {},
"cPppoeVcMaxAllowedSessions": {},
"cPppoeVcThresholdSessions": {},
"cPtpClockCurrentDSMeanPathDelay": {},
"cPtpClockCurrentDSOffsetFromMaster": {},
"cPtpClockCurrentDSStepsRemoved": {},
"cPtpClockDefaultDSClockIdentity": {},
"cPtpClockDefaultDSPriority1": {},
"cPtpClockDefaultDSPriority2": {},
"cPtpClockDefaultDSQualityAccuracy": {},
"cPtpClockDefaultDSQualityClass": {},
"cPtpClockDefaultDSQualityOffset": {},
"cPtpClockDefaultDSSlaveOnly": {},
"cPtpClockDefaultDSTwoStepFlag": {},
"cPtpClockInput1ppsEnabled": {},
"cPtpClockInput1ppsInterface": {},
"cPtpClockInputFrequencyEnabled": {},
"cPtpClockOutput1ppsEnabled": {},
"cPtpClockOutput1ppsInterface": {},
"cPtpClockOutput1ppsOffsetEnabled": {},
"cPtpClockOutput1ppsOffsetNegative": {},
"cPtpClockOutput1ppsOffsetValue": {},
"cPtpClockParentDSClockPhChRate": {},
"cPtpClockParentDSGMClockIdentity": {},
"cPtpClockParentDSGMClockPriority1": {},
"cPtpClockParentDSGMClockPriority2": {},
"cPtpClockParentDSGMClockQualityAccuracy": {},
"cPtpClockParentDSGMClockQualityClass": {},
"cPtpClockParentDSGMClockQualityOffset": {},
"cPtpClockParentDSOffset": {},
"cPtpClockParentDSParentPortIdentity": {},
"cPtpClockParentDSParentStats": {},
"cPtpClockPortAssociateAddress": {},
"cPtpClockPortAssociateAddressType": {},
"cPtpClockPortAssociateInErrors": {},
"cPtpClockPortAssociateOutErrors": {},
"cPtpClockPortAssociatePacketsReceived": {},
"cPtpClockPortAssociatePacketsSent": {},
"cPtpClockPortCurrentPeerAddress": {},
"cPtpClockPortCurrentPeerAddressType": {},
"cPtpClockPortDSAnnounceRctTimeout": {},
"cPtpClockPortDSAnnouncementInterval": {},
"cPtpClockPortDSDelayMech": {},
"cPtpClockPortDSGrantDuration": {},
"cPtpClockPortDSMinDelayReqInterval": {},
"cPtpClockPortDSName": {},
"cPtpClockPortDSPTPVersion": {},
"cPtpClockPortDSPeerDelayReqInterval": {},
"cPtpClockPortDSPeerMeanPathDelay": {},
"cPtpClockPortDSPortIdentity": {},
"cPtpClockPortDSSyncInterval": {},
"cPtpClockPortName": {},
"cPtpClockPortNumOfAssociatedPorts": {},
"cPtpClockPortRole": {},
"cPtpClockPortRunningEncapsulationType": {},
"cPtpClockPortRunningIPversion": {},
"cPtpClockPortRunningInterfaceIndex": {},
"cPtpClockPortRunningName": {},
"cPtpClockPortRunningPacketsReceived": {},
"cPtpClockPortRunningPacketsSent": {},
"cPtpClockPortRunningRole": {},
"cPtpClockPortRunningRxMode": {},
"cPtpClockPortRunningState": {},
"cPtpClockPortRunningTxMode": {},
"cPtpClockPortSyncOneStep": {},
"cPtpClockPortTransDSFaultyFlag": {},
"cPtpClockPortTransDSPeerMeanPathDelay": {},
"cPtpClockPortTransDSPortIdentity": {},
"cPtpClockPortTransDSlogMinPdelayReqInt": {},
"cPtpClockRunningPacketsReceived": {},
"cPtpClockRunningPacketsSent": {},
"cPtpClockRunningState": {},
"cPtpClockTODEnabled": {},
"cPtpClockTODInterface": {},
"cPtpClockTimePropertiesDSCurrentUTCOffset": {},
"cPtpClockTimePropertiesDSCurrentUTCOffsetValid": {},
"cPtpClockTimePropertiesDSFreqTraceable": {},
"cPtpClockTimePropertiesDSLeap59": {},
"cPtpClockTimePropertiesDSLeap61": {},
"cPtpClockTimePropertiesDSPTPTimescale": {},
"cPtpClockTimePropertiesDSSource": {},
"cPtpClockTimePropertiesDSTimeTraceable": {},
"cPtpClockTransDefaultDSClockIdentity": {},
"cPtpClockTransDefaultDSDelay": {},
"cPtpClockTransDefaultDSNumOfPorts": {},
"cPtpClockTransDefaultDSPrimaryDomain": {},
"cPtpDomainClockPortPhysicalInterfacesTotal": {},
"cPtpDomainClockPortsTotal": {},
"cPtpSystemDomainTotals": {},
"cPtpSystemProfile": {},
"cQIfEntry": {"1": {}, "2": {}, "3": {}},
"cQRotationEntry": {"1": {}},
"cQStatsEntry": {"2": {}, "3": {}, "4": {}},
"cRFCfgAdminAction": {},
"cRFCfgKeepaliveThresh": {},
"cRFCfgKeepaliveThreshMax": {},
"cRFCfgKeepaliveThreshMin": {},
"cRFCfgKeepaliveTimer": {},
"cRFCfgKeepaliveTimerMax": {},
"cRFCfgKeepaliveTimerMin": {},
"cRFCfgMaintenanceMode": {},
"cRFCfgNotifTimer": {},
"cRFCfgNotifTimerMax": {},
"cRFCfgNotifTimerMin": {},
"cRFCfgNotifsEnabled": {},
"cRFCfgRedundancyMode": {},
"cRFCfgRedundancyModeDescr": {},
"cRFCfgRedundancyOperMode": {},
"cRFCfgSplitMode": {},
"cRFHistoryColdStarts": {},
"cRFHistoryCurrActiveUnitId": {},
"cRFHistoryPrevActiveUnitId": {},
"cRFHistoryStandByAvailTime": {},
"cRFHistorySwactTime": {},
"cRFHistorySwitchOverReason": {},
"cRFHistoryTableMaxLength": {},
"cRFStatusDomainInstanceEntry": {"1": {}, "2": {}, "3": {}, "4": {}},
"cRFStatusDuplexMode": {},
"cRFStatusFailoverTime": {},
"cRFStatusIssuFromVersion": {},
"cRFStatusIssuState": {},
"cRFStatusIssuStateRev1": {},
"cRFStatusIssuToVersion": {},
"cRFStatusLastSwactReasonCode": {},
"cRFStatusManualSwactInhibit": {},
"cRFStatusPeerStandByEntryTime": {},
"cRFStatusPeerUnitId": {},
"cRFStatusPeerUnitState": {},
"cRFStatusPrimaryMode": {},
"cRFStatusRFModeCapsModeDescr": {},
"cRFStatusUnitId": {},
"cRFStatusUnitState": {},
"cSipCfgAaa": {"1": {}},
"cSipCfgBase": {
"1": {},
"10": {},
"11": {},
"13": {},
"14": {},
"15": {},
"16": {},
"17": {},
"18": {},
"19": {},
"2": {},
"20": {},
"21": {},
"22": {},
"23": {},
"24": {},
"25": {},
"26": {},
"27": {},
"28": {},
"29": {},
"3": {},
"30": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
},
"cSipCfgBase.12.1.2": {},
"cSipCfgBase.9.1.2": {},
"cSipCfgPeer": {
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cSipCfgPeer.1.1.10": {},
"cSipCfgPeer.1.1.11": {},
"cSipCfgPeer.1.1.12": {},
"cSipCfgPeer.1.1.13": {},
"cSipCfgPeer.1.1.14": {},
"cSipCfgPeer.1.1.15": {},
"cSipCfgPeer.1.1.16": {},
"cSipCfgPeer.1.1.17": {},
"cSipCfgPeer.1.1.18": {},
"cSipCfgPeer.1.1.2": {},
"cSipCfgPeer.1.1.3": {},
"cSipCfgPeer.1.1.4": {},
"cSipCfgPeer.1.1.5": {},
"cSipCfgPeer.1.1.6": {},
"cSipCfgPeer.1.1.7": {},
"cSipCfgPeer.1.1.8": {},
"cSipCfgPeer.1.1.9": {},
"cSipCfgRetry": {
"1": {},
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
| |
If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_mul(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
def scatter_div(self, sparse_delta, use_locking=False, name=None):
"""Divide this variable by `tf.IndexedSlices`.
Args:
sparse_delta: `tf.IndexedSlices` to divide this variable by.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_div(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
def scatter_update(self, sparse_delta, use_locking=False, name=None):
"""Assigns `tf.IndexedSlices` to this variable.
Args:
sparse_delta: `tf.IndexedSlices` to be assigned to this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_update(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
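# Editor's note: a minimal usage sketch for `scatter_update` (illustrative only,
# not from the original source; assumes `import tensorflow as tf` and TF 2.x
# eager execution):
#
#     v = tf.Variable([1., 2., 3., 4.])
#     delta = tf.IndexedSlices(values=tf.constant([9., 8.]),
#                              indices=tf.constant([0, 2]),
#                              dense_shape=tf.constant([4]))
#     v.scatter_update(delta)   # v is now [9., 2., 8., 4.]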
def batch_scatter_update(self, sparse_delta, use_locking=False, name=None):
"""Assigns `tf.IndexedSlices` to this variable batch-wise.
Analogous to `batch_gather`. This assumes that this variable and the
sparse_delta IndexedSlices have a series of leading dimensions that are the
same for all of them, and the updates are performed on the last dimension of
indices. In other words, the dimensions should be the following:
`num_prefix_dims = sparse_delta.indices.ndims - 1`
`batch_dim = num_prefix_dims + 1`
`sparse_delta.updates.shape = sparse_delta.indices.shape + var.shape[
batch_dim:]`
where
`sparse_delta.updates.shape[:num_prefix_dims]`
`== sparse_delta.indices.shape[:num_prefix_dims]`
`== var.shape[:num_prefix_dims]`
And the operation performed can be expressed as:
`var[i_1, ..., i_n,
sparse_delta.indices[i_1, ..., i_n, j]] = sparse_delta.updates[
i_1, ..., i_n, j]`
When sparse_delta.indices is a 1D tensor, this operation is equivalent to
`scatter_update`.
This operation can be avoided by looping over the first `ndims` of the
variable and using `scatter_update` on the subtensors that result from
slicing the first dimension. This is a valid option for `ndims = 1`, but it
is less efficient than this implementation.
Args:
sparse_delta: `tf.IndexedSlices` to be assigned to this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
state_ops.batch_scatter_update(
self,
sparse_delta.indices,
sparse_delta.values,
use_locking=use_locking,
name=name))
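# Editor's note: an illustrative sketch of the batch-wise semantics described
# in the docstring above (not from the original source). For a variable of
# shape [2, 3]:
#
#     var                  = [[1, 2, 3],
#                             [4, 5, 6]]
#     sparse_delta.indices = [[0, 2],
#                             [1, 2]]      # shape [2, 2]
#     sparse_delta.values  = [[10, 30],
#                             [50, 60]]    # shape [2, 2]
#
# `var.batch_scatter_update(sparse_delta)` performs, row by row,
# `var[i, indices[i, j]] = values[i, j]`, which gives
#
#     [[10,  2, 30],
#      [ 4, 50, 60]]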
def scatter_nd_sub(self, indices, updates, name=None):
"""Applies sparse subtraction to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be an integer tensor, containing indices into `ref`.
It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
For example, say we want to subtract 4 scattered elements from a rank-1 tensor
with 8 elements. In Python, that subtraction would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
op = ref.scatter_nd_sub(indices, updates)
with tf.compat.v1.Session() as sess:
print(sess.run(op))
```
The resulting update to ref would look like this:
[1, -9, 3, -6, -6, 6, 7, -4]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
The updated variable.
"""
return self._lazy_read(
gen_state_ops.resource_scatter_nd_sub(
self.handle,
indices,
ops.convert_to_tensor(updates, self.dtype),
name=name))
def scatter_nd_add(self, indices, updates, name=None):
"""Applies sparse addition to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be an integer tensor, containing indices into `ref`.
It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
For example, say we want to add 4 scattered elements to a rank-1 tensor
with 8 elements. In Python, that addition would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
add = ref.scatter_nd_add(indices, updates)
with tf.compat.v1.Session() as sess:
print(sess.run(add))
```
The resulting update to ref would look like this:
[1, 13, 3, 14, 14, 6, 7, 20]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
The updated variable.
"""
return self._lazy_read(
gen_state_ops.resource_scatter_nd_add(
self.handle,
indices,
ops.convert_to_tensor(updates, self.dtype),
name=name))
def scatter_nd_update(self, indices, updates, name=None):
"""Applies sparse assignment to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be an integer tensor, containing indices into `ref`.
It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
For example, say we want to assign 4 scattered elements of a rank-1 tensor
with 8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
op = ref.scatter_nd_update(indices, updates)
with tf.compat.v1.Session() as sess:
print(sess.run(op))
```
The resulting update to ref would look like this:
[1, 11, 3, 10, 9, 6, 7, 12]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
The updated variable.
"""
return self._lazy_read(
gen_state_ops.resource_scatter_nd_update(
self.handle,
indices,
ops.convert_to_tensor(updates, self.dtype),
name=name))
def scatter_nd_max(self, indices, updates, name=None):
"""Updates this variable with the max of `tf.IndexedSlices` and itself.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be an integer tensor, containing indices into `ref`.
It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
The updated variable.
"""
return self._lazy_read(
gen_state_ops.resource_scatter_nd_max(
self.handle,
indices,
ops.convert_to_tensor(updates, self.dtype),
name=name))
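# Editor's note: a small illustrative example of `scatter_nd_max` (not from the
# original source; assumes `import tensorflow as tf` and eager execution):
#
#     v = tf.Variable([1., 2., 3., 4.])
#     v.scatter_nd_max(indices=[[1], [3]], updates=[5., 0.])
#     # v is now [1., 5., 3., 4.] -- each targeted element becomes
#     # max(old_value, update), so v[3] keeps 4. because 4. > 0.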
def scatter_nd_min(self, indices, updates, name=None):
"""Updates this variable with the min of `tf.IndexedSlices` and itself.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be an integer tensor, containing indices into `ref`.
It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices`
+ m.x684 + m.x685 + m.x686 + m.x725 + m.x726 + m.x727 + m.x728
+ m.x729 + m.x730 + m.x731 + m.x809 + m.x810 + m.x811 + m.x812 + m.x813 + m.x814 + m.x815
<= 124)
m.c27 = Constraint(expr= m.x119 + m.x120 + m.x121 + m.x122 + m.x123 + m.x124 + m.x125 + m.x126 + m.x127 + m.x159
+ m.x160 + m.x161 + m.x162 + m.x163 + m.x164 + m.x165 + m.x166 + m.x167 + m.x214 + m.x215
+ m.x216 + m.x217 + m.x218 + m.x219 + m.x220 + m.x221 + m.x222 + m.x326 + m.x327 + m.x328
+ m.x329 + m.x330 + m.x331 + m.x332 + m.x333 + m.x334 + m.x358 + m.x359 + m.x360 + m.x361
+ m.x362 + m.x363 + m.x364 + m.x365 + m.x366 + m.x491 + m.x492 + m.x493 + m.x494 + m.x495
+ m.x496 + m.x497 + m.x498 + m.x499 + m.x554 + m.x555 + m.x556 + m.x557 + m.x558 + m.x559
+ m.x560 + m.x561 + m.x562 + m.x624 + m.x625 + m.x626 + m.x627 + m.x628 + m.x629 + m.x630
+ m.x631 + m.x632 + m.x732 + m.x733 + m.x734 + m.x735 + m.x736 + m.x737 + m.x738 + m.x739
+ m.x740 + m.x774 + m.x775 + m.x776 + m.x777 + m.x778 + m.x779 + m.x780 + m.x781 + m.x782
+ m.x816 + m.x817 + m.x818 + m.x819 + m.x820 + m.x821 + m.x822 + m.x823 + m.x824 + m.x901
+ m.x902 + m.x903 + m.x904 + m.x905 + m.x906 + m.x907 + m.x908 + m.x909 + m.x948 + m.x949
+ m.x950 + m.x951 + m.x952 + m.x953 + m.x954 + m.x955 + m.x956 + m.x1002 + m.x1003 + m.x1004
+ m.x1005 + m.x1006 + m.x1007 + m.x1008 + m.x1009 + m.x1010 <= 179)
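# Editor's note (illustrative, not part of the generated model file): every
# statement of the form
#
#     m.cN = Constraint(expr= m.x1 + m.x2 + ... <= rhs)
#
# attaches one linear inequality constraint named cN to the Pyomo model `m`.
# Auto-generated models like this one are typically handed to a solver
# afterwards, e.g.
#
#     SolverFactory('glpk').solve(m)
#
# where the solver name is an assumption and not taken from this file.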
m.c28 = Constraint(expr= m.x168 + m.x169 + m.x170 + m.x171 + m.x172 + m.x173 + m.x174 + m.x175 + m.x265 + m.x266
+ m.x267 + m.x268 + m.x269 + m.x270 + m.x271 + m.x272 + m.x335 + m.x336 + m.x337 + m.x338
+ m.x339 + m.x340 + m.x341 + m.x342 + m.x367 + m.x368 + m.x369 + m.x370 + m.x371 + m.x372
+ m.x373 + m.x374 + m.x500 + m.x501 + m.x502 + m.x503 + m.x504 + m.x505 + m.x506 + m.x507
+ m.x633 + m.x634 + m.x635 + m.x636 + m.x637 + m.x638 + m.x639 + m.x640 + m.x741 + m.x742
+ m.x743 + m.x744 + m.x745 + m.x746 + m.x747 + m.x748 + m.x783 + m.x784 + m.x785 + m.x786
+ m.x787 + m.x788 + m.x789 + m.x790 <= 55)
m.c29 = Constraint(expr= m.x273 + m.x274 + m.x275 + m.x276 + m.x277 + m.x278 + m.x279 + m.x280 + m.x281 + m.x282
+ m.x375 + m.x376 + m.x377 + m.x378 + m.x379 + m.x380 + m.x381 + m.x382 + m.x383 + m.x384
+ m.x435 + m.x436 + m.x437 + m.x438 + m.x439 + m.x440 + m.x441 + m.x442 + m.x443 + m.x444
+ m.x508 + m.x509 + m.x510 + m.x511 + m.x512 + m.x513 + m.x514 + m.x515 + m.x516 + m.x517
+ m.x563 + m.x564 + m.x565 + m.x566 + m.x567 + m.x568 + m.x569 + m.x570 + m.x571 + m.x572
+ m.x641 + m.x642 + m.x643 + m.x644 + m.x645 + m.x646 + m.x647 + m.x648 + m.x649 + m.x650
+ m.x687 + m.x688 + m.x689 + m.x690 + m.x691 + m.x692 + m.x693 + m.x694 + m.x695 + m.x696
+ m.x825 + m.x826 + m.x827 + m.x828 + m.x829 + m.x830 + m.x831 + m.x832 + m.x833 + m.x834
+ m.x910 + m.x911 + m.x912 + m.x913 + m.x914 + m.x915 + m.x916 + m.x917 + m.x918 + m.x919
+ m.x1034 + m.x1035 + m.x1036 + m.x1037 + m.x1038 + m.x1039 + m.x1040 + m.x1041 + m.x1042
+ m.x1043 <= 170)
m.c30 = Constraint(expr= m.x176 + m.x177 + m.x178 + m.x179 + m.x180 + m.x181 + m.x223 + m.x224 + m.x225 + m.x226
+ m.x227 + m.x228 + m.x283 + m.x284 + m.x285 + m.x286 + m.x287 + m.x288 + m.x385 + m.x386
+ m.x387 + m.x388 + m.x389 + m.x390 + m.x573 + m.x574 + m.x575 + m.x576 + m.x577 + m.x578
+ m.x886 + m.x887 + m.x888 + m.x889 + m.x890 + m.x891 + m.x920 + m.x921 + m.x922 + m.x923
+ m.x924 + m.x925 + m.x957 + m.x958 + m.x959 + m.x960 + m.x961 + m.x962 + m.x1011 + m.x1012
+ m.x1013 + m.x1014 + m.x1015 + m.x1016 <= 139)
m.c31 = Constraint(expr= m.x128 + m.x129 + m.x130 + m.x131 + m.x132 + m.x133 + m.x134 + m.x135 + m.x136 + m.x137
+ m.x138 + m.x139 + m.x229 + m.x230 + m.x231 + m.x232 + m.x233 + m.x234 + m.x235 + m.x236
+ m.x237 + m.x238 + m.x239 + m.x240 + m.x289 + m.x290 + m.x291 + m.x292 + m.x293 + m.x294
+ m.x295 + m.x296 + m.x297 + m.x298 + m.x299 + m.x300 + m.x391 + m.x392 + m.x393 + m.x394
+ m.x395 + m.x396 + m.x397 + m.x398 + m.x399 + m.x400 + m.x401 + m.x402 + m.x445 + m.x446
+ m.x447 + m.x448 + m.x449 + m.x450 + m.x451 + m.x452 + m.x453 + m.x454 + m.x455 + m.x456
+ m.x518 + m.x519 + m.x520 + m.x521 + m.x522 + m.x523 + m.x524 + m.x525 + m.x526 + m.x527
+ m.x528 + m.x529 + m.x579 + m.x580 + m.x581 + m.x582 + m.x583 + m.x584 + m.x585 + m.x586
+ m.x587 + m.x588 + m.x589 + m.x590 + m.x651 + m.x652 + m.x653 + m.x654 + m.x655 + m.x656
+ m.x657 + m.x658 + m.x659 + m.x660 + m.x661 + m.x662 + m.x697 + m.x698 + m.x699 + m.x700
+ m.x701 + m.x702 + m.x703 + m.x704 + m.x705 + m.x706 + m.x707 + m.x708 + m.x835 + m.x836
+ m.x837 + m.x838 + m.x839 + m.x840 + m.x841 + m.x842 + m.x843 + m.x844 + m.x845 + m.x846
+ m.x847 + m.x848 + m.x849 + m.x850 + m.x851 + m.x852 + m.x853 + m.x854 + m.x855 + m.x856
+ m.x857 + m.x858 + m.x926 + m.x927 + m.x928 + m.x929 + m.x930 + m.x931 + m.x932 + m.x933
+ m.x934 + m.x935 + m.x936 + m.x937 + m.x963 + m.x964 + m.x965 + m.x966 + m.x967 + m.x968
+ m.x969 + m.x970 + m.x971 + m.x972 + m.x973 + m.x974 + m.x1044 + m.x1045 + m.x1046 + m.x1047
+ m.x1048 + m.x1049 + m.x1050 + m.x1051 + m.x1052 + m.x1053 + m.x1054 + m.x1055 <= 111)
m.c32 = Constraint(expr= m.x88 + m.x96 + m.x103 + m.x112 + m.x119 + m.x128 + m.x140 + m.x150 + m.x159 + m.x168
+ m.x182 + m.x190 + m.x200 + m.x207 + m.x214 + m.x229 + m.x241 + m.x251 + m.x258 + m.x265
+ m.x273 + m.x289 + m.x301 + m.x309 + m.x319 + m.x326 + m.x335 + m.x343 + m.x351 + m.x358
+ m.x367 + m.x375 + m.x391 + m.x403 + m.x411 + m.x421 + m.x428 + m.x435 + m.x445 + m.x457
+ m.x465 + m.x475 + m.x482 + m.x491 + m.x500 + m.x508 + m.x518 + m.x530 + m.x540 + m.x547
+ m.x554 + m.x563 + m.x579 + m.x591 + m.x601 + m.x608 + m.x617 + m.x624 + m.x633 + m.x641
+ m.x651 + m.x663 + m.x671 + m.x680 + m.x687 + m.x697 + m.x709 + m.x716 + m.x725 + m.x732
+ m.x741 + m.x749 + m.x757 + m.x767 + m.x774 + m.x783 + m.x791 + m.x799 + m.x809 + m.x816
+ m.x825 + m.x835 + m.x847 + m.x859 + m.x867 + m.x877 + m.x892 + m.x901 + m.x910 + m.x926
+ m.x938 + m.x948 + m.x963 + m.x975 + m.x983 + m.x993 + m.x1002 + m.x1017 + m.x1025 + m.x1034
+ m.x1044 + m.x1082 | |
- m.b168 + 2*m.b168*m.b392 + 2*m.b168*m.b396 - 2*m.b168*m.b436
- 2*m.b168*m.b445 + 2*m.b168*m.b490 - 3*m.b490 + 2*m.b168*m.b494 - m.b494 - 2*m.b168*m.b529
- 2*m.b168*m.b547 + 2*m.b169*m.b327 - 8*m.b169 + 2*m.b169*m.b335 + 2*m.b169*m.b339 + 2*m.b169
*m.b366 + 2*m.b169*m.b374 + 2*m.b169*m.b378 - 2*m.b169*m.b433 + 2*m.b169*m.b440 - 2*m.b169*
m.b441 + 2*m.b169*m.b444 + 2*m.b169*m.b454 + 2*m.b169*m.b458 - 2*m.b169*m.b525 + 2*m.b169*
m.b534 + 2*m.b170*m.b417 + 2*m.b170 + 2*m.b170*m.b425 + 2*m.b170*m.b429 - 2*m.b170*m.b444 - 2*
m.b170*m.b446 - 2*m.b170*m.b528 - 2*m.b170*m.b530 - 2*m.b170*m.b548 + 2*m.b172*m.b451 + 2*
m.b172*m.b464 - 2*m.b172*m.b503 - 2*m.b172*m.b505 - 2*m.b173*m.b503 + m.b173 - 2*m.b176*m.b501
+ m.b176 + 2*m.b177*m.b422 - 2*m.b177*m.b506 + 2*m.b178*m.b352 + m.b178 - 2*m.b178*m.b499 - 2
*m.b178*m.b505 + 2*m.b179*m.b476 + m.b179 - 2*m.b179*m.b500 - 2*m.b179*m.b501 + 2*m.b180*
m.b300 + 2*m.b180*m.b352 - 2*m.b180*m.b500 - 2*m.b180*m.b504 + 2*m.b181*m.b371 - 4*m.b181 + 2*
m.b181*m.b406 + 2*m.b181*m.b422 + 2*m.b181*m.b451 + 2*m.b182*m.b389 - m.b182 + 2*m.b182*m.b487
- 2*m.b182*m.b505 + 2*m.b183*m.b332 - 3*m.b183 + 2*m.b183*m.b371 + 2*m.b183*m.b437 + 2*m.b183
*m.b451 - 2*m.b183*m.b501 + 2*m.b184*m.b422 + m.b184 - 2*m.b184*m.b504 - 2*m.b184*m.b506 - 2*
m.b186*m.b451 + m.b186 - 2*m.b186*m.b457 - 2*m.b186*m.b464 - 2*m.b186*m.b470 + 2*m.b186*m.b503
+ 2*m.b186*m.b505 + 2*m.b186*m.b544 - 2*m.b189*m.b455 - 2*m.b189*m.b468 + 3*m.b468 + 2*m.b189
*m.b533 + 2*m.b189*m.b535 - 3*m.b535 + 2*m.b190*m.b418 - 2*m.b418 + 2*m.b190*m.b419 - 3*m.b419
+ 2*m.b190*m.b428 + 2*m.b190*m.b430 - 3*m.b430 - 2*m.b190*m.b460 - 2*m.b190*m.b473 - 2*m.b190
*m.b545 - 2*m.b190*m.b550 + 2*m.b191*m.b348 - m.b191 + 2*m.b348 + 2*m.b191*m.b349 - 2*m.b349
+ 2*m.b191*m.b358 + 2*m.b191*m.b360 - m.b360 - 2*m.b191*m.b453 - 2*m.b191*m.b459 - 2*m.b459
- 2*m.b191*m.b466 - 2*m.b191*m.b472 + 2*m.b472 + 2*m.b191*m.b520 + 2*m.b191*m.b522 - 2*m.b191
*m.b544 - 2*m.b192*m.b449 - 2*m.b192*m.b454 - 2*m.b192*m.b455 - 2*m.b192*m.b462 - 2*m.b192*
m.b467 - 2*m.b192*m.b468 + 2*m.b192*m.b482 + 2*m.b192*m.b484 + 2*m.b192*m.b527 + 2*m.b192*
m.b529 + 2*m.b192*m.b533 + 2*m.b192*m.b535 + 2*m.b193*m.b292 - 6*m.b193 + 2*m.b193*m.b294 + 2*
m.b193*m.b312 + 2*m.b193*m.b316 + 2*m.b193*m.b348 + 2*m.b193*m.b349 + 2*m.b193*m.b358 + 2*
m.b193*m.b360 - 2*m.b193*m.b454 - 2*m.b193*m.b458 - 2*m.b193*m.b467 - 2*m.b193*m.b471 + 2*
m.b193*m.b527 + 2*m.b193*m.b529 - 2*m.b193*m.b543 + 2*m.b193*m.b547 + 2*m.b194*m.b367 - 15*
m.b194 + 2*m.b194*m.b368 + 2*m.b194*m.b377 + 2*m.b194*m.b379 + 2*m.b194*m.b402 + 2*m.b194*
m.b403 - m.b403 + 2*m.b194*m.b412 + 2*m.b194*m.b414 - m.b414 + 2*m.b194*m.b418 + 2*m.b194*
m.b419 + 2*m.b194*m.b428 + 2*m.b194*m.b430 + 2*m.b194*m.b448 - 2*m.b448 + 2*m.b194*m.b457 + 2*
m.b194*m.b459 + 2*m.b195*m.b385 - m.b195 + 2*m.b195*m.b386 + 2*m.b195*m.b395 + 2*m.b195*m.b397
- 2*m.b195*m.b450 + m.b450 - 2*m.b195*m.b459 - 2*m.b195*m.b463 + m.b463 - 2*m.b195*m.b472 + 2
*m.b195*m.b493 + 2*m.b195*m.b495 - 2*m.b495 - 2*m.b195*m.b544 + 2*m.b196*m.b328 - 15*m.b196 +
2*m.b196*m.b329 + 2*m.b196*m.b338 + 2*m.b196*m.b340 + 2*m.b196*m.b367 + 2*m.b196*m.b368 + 2*
m.b196*m.b377 + 2*m.b196*m.b379 + 2*m.b196*m.b433 + 2*m.b196*m.b434 + 2*m.b196*m.b443 + 2*
m.b196*m.b445 + 2*m.b196*m.b448 - 2*m.b196*m.b455 + 2*m.b196*m.b457 + 2*m.b196*m.b459 - 2*
m.b196*m.b468 + 2*m.b196*m.b533 + 2*m.b196*m.b535 + 2*m.b197*m.b418 + 2*m.b197 + 2*m.b197*
m.b419 + 2*m.b197*m.b428 + 2*m.b197*m.b430 - 2*m.b197*m.b458 - 2*m.b197*m.b460 - 2*m.b197*
m.b471 - 2*m.b197*m.b473 - 2*m.b197*m.b543 - 2*m.b197*m.b545 + 2*m.b197*m.b547 - 2*m.b197*
m.b550 - 2*m.b201*m.b501 + 2*m.b201*m.b533 + 2*m.b202*m.b422 + 2*m.b202*m.b428 - 2*m.b202*
m.b506 - 2*m.b202*m.b545 + 2*m.b203*m.b352 + 2*m.b203*m.b358 - 2*m.b203*m.b499 - 2*m.b203*
m.b505 + 2*m.b203*m.b520 - 2*m.b203*m.b544 + 2*m.b204*m.b476 - 2*m.b204 + 2*m.b204*m.b482 - 2*
m.b204*m.b500 - 2*m.b204*m.b501 + 2*m.b204*m.b527 + 2*m.b204*m.b533 + 2*m.b205*m.b300 - 2*
m.b205 + 2*m.b205*m.b312 + 2*m.b205*m.b352 + 2*m.b205*m.b358 - 2*m.b205*m.b500 - 2*m.b205*
m.b504 + 2*m.b205*m.b527 - 2*m.b205*m.b543 + 2*m.b206*m.b371 - 8*m.b206 + 2*m.b206*m.b377 + 2*
m.b206*m.b406 + 2*m.b206*m.b412 + 2*m.b206*m.b422 + 2*m.b206*m.b428 + 2*m.b206*m.b451 + 2*
m.b206*m.b457 + 2*m.b207*m.b389 - 2*m.b207 + 2*m.b207*m.b395 + 2*m.b207*m.b487 + 2*m.b207*
m.b493 - 2*m.b207*m.b505 - 2*m.b207*m.b544 + 2*m.b208*m.b332 - 8*m.b208 + 2*m.b208*m.b338 + 2*
m.b208*m.b371 + 2*m.b208*m.b377 + 2*m.b208*m.b437 + 2*m.b208*m.b443 + 2*m.b208*m.b451 + 2*
m.b208*m.b457 - 2*m.b208*m.b501 + 2*m.b208*m.b533 + 2*m.b209*m.b422 + 2*m.b209 + 2*m.b209*
m.b428 - 2*m.b209*m.b504 - 2*m.b209*m.b506 - 2*m.b209*m.b543 - 2*m.b209*m.b545 + 2*m.b232*
m.b426 - 3*m.b426 - 2*m.b232*m.b536 + 2*m.b233*m.b356 - m.b233 + 2*m.b233*m.b518 - 2*m.b233*
m.b535 + 2*m.b234*m.b480 - 2*m.b234 + 2*m.b234*m.b525 + 2*m.b235*m.b308 - 2*m.b235 + 2*m.b235*
m.b356 + 2*m.b235*m.b525 - 2*m.b235*m.b534 + 2*m.b236*m.b375 - 4*m.b236 + 2*m.b236*m.b410 -
m.b410 + 2*m.b236*m.b426 + 2*m.b236*m.b455 + 2*m.b237*m.b393 - m.b237 + 2*m.b237*m.b491 -
m.b491 - 2*m.b237*m.b535 + 2*m.b238*m.b336 - 4*m.b238 + 2*m.b238*m.b375 + 2*m.b238*m.b441 + 2*
m.b238*m.b455 + 2*m.b239*m.b426 + m.b239 - 2*m.b239*m.b534 - 2*m.b239*m.b536 + 2*m.b241*m.b346
- 2*m.b241 + 2*m.b346 + 2*m.b241*m.b361 - 2*m.b241*m.b424 - 2*m.b241*m.b430 + 2*m.b241*m.b523
+ 2*m.b241*m.b550 - 2*m.b242*m.b420 - 2*m.b242*m.b425 - 2*m.b242*m.b426 + 2*m.b242*m.b485 + 2
*m.b242*m.b530 + 2*m.b242*m.b536 + 2*m.b243*m.b288 - 4*m.b243 + 2*m.b243*m.b318 + 2*m.b243*
m.b346 + 2*m.b243*m.b361 - 2*m.b243*m.b425 - 2*m.b243*m.b429 + 2*m.b243*m.b530 + 2*m.b243*
m.b548 + 2*m.b244*m.b365 - 5*m.b244 + 2*m.b244*m.b380 + 2*m.b244*m.b400 + 2*m.b244*m.b415 - 2*
m.b244*m.b418 + 2*m.b244*m.b431 + 2*m.b244*m.b460 + 2*m.b245*m.b383 - 2*m.b245 + 2*m.b245*
m.b398 - 2*m.b245*m.b421 + m.b421 - 2*m.b245*m.b430 + 2*m.b245*m.b496 + 2*m.b245*m.b550 + 2*
m.b246*m.b326 - 4*m.b246 + 2*m.b246*m.b341 + 2*m.b246*m.b365 + 2*m.b246*m.b380 - 2*m.b246*
m.b417 - 2*m.b246*m.b418 - 2*m.b246*m.b426 + 2*m.b246*m.b446 + 2*m.b246*m.b460 + 2*m.b246*
m.b536 - 2*m.b247*m.b429 + 2*m.b247*m.b548 - 2*m.b249*m.b350 + m.b249 - 2*m.b249*m.b355 - 2*
m.b249*m.b356 + 2*m.b249*m.b478 + 2*m.b249*m.b484 - 2*m.b249*m.b517 - 2*m.b249*m.b518 + 2*
m.b249*m.b529 + 2*m.b249*m.b535 + 2*m.b250*m.b280 - 3*m.b250 + 2*m.b250*m.b304 + 2*m.b250*
m.b316 + 2*m.b250*m.b354 - 2*m.b250*m.b355 - 2*m.b250*m.b359 + 2*m.b250*m.b360 - 2*m.b250*
m.b517 - 2*m.b250*m.b521 + 2*m.b250*m.b529 + 2*m.b250*m.b547 - 2*m.b251*m.b343 - 4*m.b251 - 2*
m.b251*m.b345 + 2*m.b345 - 2*m.b251*m.b346 - 2*m.b251*m.b348 + 2*m.b251*m.b373 + 2*m.b251*
m.b379 + 2*m.b251*m.b408 + 2*m.b251*m.b414 + 2*m.b251*m.b424 + 2*m.b251*m.b430 + 2*m.b251*
m.b453 + 2*m.b251*m.b459 - 2*m.b252*m.b344 - 2*m.b252*m.b351 + 2*m.b351 - 2*m.b252*m.b360 + 2*
m.b252*m.b391 + 2*m.b252*m.b397 + 2*m.b252*m.b489 + 2*m.b252*m.b495 - 2*m.b252*m.b522 + 2*
m.b253*m.b322 - 5*m.b253 + 2*m.b253*m.b334 + 2*m.b253*m.b340 - 2*m.b253*m.b343 - 2*m.b253*
m.b347 - 2*m.b253*m.b348 - 2*m.b253*m.b356 + 2*m.b253*m.b373 + 2*m.b253*m.b379 + 2*m.b253*
m.b439 + 2*m.b253*m.b445 + 2*m.b253*m.b453 + 2*m.b253*m.b459 - 2*m.b253*m.b518 + 2*m.b253*
m.b535 - 2*m.b254*m.b346 + 3*m.b254 - 2*m.b254*m.b359 - 2*m.b254*m.b361 + 2*m.b254*m.b424 + 2*
m.b254*m.b430 - 2*m.b254*m.b521 - 2*m.b254*m.b523 + 2*m.b254*m.b547 - 2*m.b254*m.b550 + 2*
m.b256*m.b296 - 3*m.b256 + 2*m.b256*m.b306 + 2*m.b256*m.b308 + 2*m.b256*m.b350 + 2*m.b256*
m.b355 + 2*m.b256*m.b356 - 2*m.b256*m.b479 - 2*m.b256*m.b483 + 2*m.b256*m.b525 - 2*m.b256*
m.b528 - 2*m.b256*m.b534 + 2*m.b257*m.b369 - 12*m.b257 + 2*m.b257*m.b374 + 2*m.b257*m.b375 + 2
*m.b257*m.b404 + 2*m.b257*m.b409 + 2*m.b257*m.b410 + 2*m.b257*m.b420 + 2*m.b257*m.b425 + 2*
m.b257*m.b426 + 2*m.b257*m.b449 + 2*m.b257*m.b454 + 2*m.b257*m.b455 + 2*m.b258*m.b387 - m.b258
+ 2*m.b258*m.b392 + 2*m.b258*m.b393 - 2*m.b258*m.b475 - 2*m.b258*m.b484 + 2*m.b258*m.b490 + 2
*m.b258*m.b491 - 2*m.b258*m.b529 - 2*m.b258*m.b535 + 2*m.b259*m.b330 - 10*m.b259 + 2*m.b259*
m.b335 + 2*m.b259*m.b336 + 2*m.b259*m.b369 + 2*m.b259*m.b374 + 2*m.b259*m.b375 + 2*m.b259*
m.b435 + 2*m.b259*m.b440 + 2*m.b259*m.b441 + 2*m.b259*m.b449 + 2*m.b259*m.b454 + 2*m.b259*
m.b455 - 2*m.b259*m.b480 - 2*m.b259*m.b525 + 2*m.b260*m.b420 + 3*m.b260 + 2*m.b260*m.b425 + 2*
m.b260*m.b426 - 2*m.b260*m.b483 - 2*m.b260*m.b485 - 2*m.b260*m.b528 - 2*m.b260*m.b530 - 2*
m.b260*m.b534 - 2*m.b260*m.b536 - 2*m.b262*m.b282 - 2*m.b262*m.b286 - 2*m.b262*m.b288 - 2*
m.b262*m.b292 - 2*m.b262*m.b343 - 2*m.b262*m.b345 - 2*m.b262*m.b346 - 2*m.b262*m.b348 + 2*
m.b262*m.b374 + 2*m.b262*m.b378 + 2*m.b262*m.b409 + 2*m.b262*m.b413 + 2*m.b262*m.b425 + 2*
m.b262*m.b429 + 2*m.b262*m.b454 + 2*m.b262*m.b458 - 2*m.b263*m.b284 + 4*m.b263 - 2*m.b263*
m.b298 - 2*m.b263*m.b316 - 2*m.b263*m.b344 - 2*m.b263*m.b351 - 2*m.b263*m.b360 + 2*m.b263*
m.b392 + 2*m.b263*m.b396 + 2*m.b263*m.b490 + 2*m.b263*m.b494 - 2*m.b263*m.b529 - 2*m.b263*
m.b547 - 2*m.b264*m.b277 - 2*m.b264*m.b282 - 2*m.b264*m.b290 - 2*m.b264*m.b292 - 2*m.b264*
m.b308 + 2*m.b264*m.b322 + 2*m.b264*m.b335 + 2*m.b264*m.b339 - 2*m.b264*m.b343 - 2*m.b264*
m.b347 - 2*m.b264*m.b348 - 2*m.b264*m.b356 + 2*m.b264*m.b374 + 2*m.b264*m.b378 + 2*m.b264*
m.b440 + 2*m.b264*m.b444 + 2*m.b264*m.b454 + 2*m.b264*m.b458 - 2*m.b264*m.b525 + 2*m.b264*
m.b534 - 2*m.b265*m.b288 + 7*m.b265 - 2*m.b265*m.b314 - 2*m.b265*m.b318 - 2*m.b265*m.b346 - 2*
m.b265*m.b359 - 2*m.b265*m.b361 + 2*m.b265*m.b425 + 2*m.b265*m.b429 - 2*m.b265*m.b528 - 2*
m.b265*m.b530 - 2*m.b265*m.b548 - 2*m.b267*m.b363 + 6*m.b267 - 2*m.b267*m.b370 - 2*m.b267*
m.b379 + 2*m.b267*m.b382 + 2*m.b267*m.b383 + 2*m.b267*m.b385 - 2*m.b267*m.b405 + m.b405 - 2*
m.b267*m.b414 - 2*m.b267*m.b421 - 2*m.b267*m.b430 - 2*m.b267*m.b450 - 2*m.b267*m.b459 + 2*
m.b268*m.b323 + 2*m.b268 + 2*m.b268*m.b325 + 2*m.b268*m.b326 + 2*m.b268*m.b328 + 2*m.b268*
m.b364 + 2*m.b268*m.b365 - 2*m.b268*m.b366 - 2*m.b268*m.b375 - 2*m.b268*m.b401 - 2*m.b268*
m.b402 - 2*m.b268*m.b410 - 2*m.b268*m.b417 - 2*m.b268*m.b418 - 2*m.b268*m.b426 + 2*m.b268*
m.b433 - 2*m.b268*m.b455 - 2*m.b269*m.b365 + 9*m.b269 - 2*m.b269*m.b378 - 2*m.b269*m.b380 - 2*
m.b269*m.b400 | |
# sdk/machinelearning/azure-mgmt-guestconfig/azure/mgmt/guestconfig/models/_models_py3.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._guest_configuration_client_enums import *
class AssignmentInfo(msrest.serialization.Model):
"""Information about the guest configuration assignment.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Name of the guest configuration assignment.
:vartype name: str
:param configuration: Information about the configuration.
:type configuration: ~azure.mgmt.guestconfig.models.ConfigurationInfo
"""
_validation = {
'name': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'ConfigurationInfo'},
}
def __init__(
self,
*,
configuration: Optional["ConfigurationInfo"] = None,
**kwargs
):
super(AssignmentInfo, self).__init__(**kwargs)
self.name = None
self.configuration = configuration
class AssignmentReport(msrest.serialization.Model):
"""AssignmentReport.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: ARM resource id of the report for the guest configuration assignment.
:vartype id: str
:ivar report_id: GUID that identifies the guest configuration assignment report under a
subscription, resource group.
:vartype report_id: str
:param assignment: Configuration details of the guest configuration assignment.
:type assignment: ~azure.mgmt.guestconfig.models.AssignmentInfo
:param vm: Information about the VM.
:type vm: ~azure.mgmt.guestconfig.models.VMInfo
:ivar start_time: Start date and time of the guest configuration assignment compliance status
check.
:vartype start_time: ~datetime.datetime
:ivar end_time: End date and time of the guest configuration assignment compliance status
check.
:vartype end_time: ~datetime.datetime
:ivar compliance_status: A value indicating compliance status of the machine for the assigned
guest configuration. Possible values include: "Compliant", "NonCompliant", "Pending".
:vartype compliance_status: str or ~azure.mgmt.guestconfig.models.ComplianceStatus
:ivar operation_type: Type of report, Consistency or Initial. Possible values include:
"Consistency", "Initial".
:vartype operation_type: str or ~azure.mgmt.guestconfig.models.Type
:param resources: The list of resources for which guest configuration assignment compliance is
checked.
:type resources: list[~azure.mgmt.guestconfig.models.AssignmentReportResource]
"""
_validation = {
'id': {'readonly': True},
'report_id': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'compliance_status': {'readonly': True},
'operation_type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'report_id': {'key': 'reportId', 'type': 'str'},
'assignment': {'key': 'assignment', 'type': 'AssignmentInfo'},
'vm': {'key': 'vm', 'type': 'VMInfo'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'compliance_status': {'key': 'complianceStatus', 'type': 'str'},
'operation_type': {'key': 'operationType', 'type': 'str'},
'resources': {'key': 'resources', 'type': '[AssignmentReportResource]'},
}
def __init__(
self,
*,
assignment: Optional["AssignmentInfo"] = None,
vm: Optional["VMInfo"] = None,
resources: Optional[List["AssignmentReportResource"]] = None,
**kwargs
):
super(AssignmentReport, self).__init__(**kwargs)
self.id = None
self.report_id = None
self.assignment = assignment
self.vm = vm
self.start_time = None
self.end_time = None
self.compliance_status = None
self.operation_type = None
self.resources = resources
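# Editor's note (illustrative sketch, not part of the generated SDK code): in
# msrest models, `_attribute_map` maps each Python attribute to its wire name
# and type for (de)serialization, while `_validation` marks server-populated
# members as read-only. A caller would therefore only pass the writable fields,
# for example:
#
#     report = AssignmentReport(
#         assignment=AssignmentInfo(configuration=ConfigurationInfo()),
#         vm=VMInfo(),
#     )
#
# Read-only members such as `report_id`, `start_time`, and `compliance_status`
# stay `None` until the service returns them in a response.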
class AssignmentReportDetails(msrest.serialization.Model):
"""Details of the guest configuration assignment report.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar compliance_status: A value indicating compliance status of the machine for the assigned
guest configuration. Possible values include: "Compliant", "NonCompliant", "Pending".
:vartype compliance_status: str or ~azure.mgmt.guestconfig.models.ComplianceStatus
:ivar start_time: Start date and time of the guest configuration assignment compliance status
check.
:vartype start_time: ~datetime.datetime
:ivar end_time: End date and time of the guest configuration assignment compliance status
check.
:vartype end_time: ~datetime.datetime
:ivar job_id: GUID of the report.
:vartype job_id: str
:ivar operation_type: Type of report, Consistency or Initial. Possible values include:
"Consistency", "Initial".
:vartype operation_type: str or ~azure.mgmt.guestconfig.models.Type
:param resources: The list of resources for which guest configuration assignment compliance is
checked.
:type resources: list[~azure.mgmt.guestconfig.models.AssignmentReportResource]
"""
_validation = {
'compliance_status': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'job_id': {'readonly': True},
'operation_type': {'readonly': True},
}
_attribute_map = {
'compliance_status': {'key': 'complianceStatus', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'job_id': {'key': 'jobId', 'type': 'str'},
'operation_type': {'key': 'operationType', 'type': 'str'},
'resources': {'key': 'resources', 'type': '[AssignmentReportResource]'},
}
def __init__(
self,
*,
resources: Optional[List["AssignmentReportResource"]] = None,
**kwargs
):
super(AssignmentReportDetails, self).__init__(**kwargs)
self.compliance_status = None
self.start_time = None
self.end_time = None
self.job_id = None
self.operation_type = None
self.resources = resources
class AssignmentReportResource(msrest.serialization.Model):
"""The guest configuration assignment resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar compliance_status: A value indicating compliance status of the machine for the assigned
guest configuration. Possible values include: "Compliant", "NonCompliant", "Pending".
:vartype compliance_status: str or ~azure.mgmt.guestconfig.models.ComplianceStatus
:ivar resource_id: Name of the guest configuration assignment resource setting.
:vartype resource_id: str
:param reasons: Compliance reason and reason code for a resource.
:type reasons: list[~azure.mgmt.guestconfig.models.AssignmentReportResourceComplianceReason]
:ivar properties: Properties of a guest configuration assignment resource.
:vartype properties: any
"""
_validation = {
'compliance_status': {'readonly': True},
'resource_id': {'readonly': True},
'properties': {'readonly': True},
}
_attribute_map = {
'compliance_status': {'key': 'complianceStatus', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'reasons': {'key': 'reasons', 'type': '[AssignmentReportResourceComplianceReason]'},
'properties': {'key': 'properties', 'type': 'object'},
}
def __init__(
self,
*,
reasons: Optional[List["AssignmentReportResourceComplianceReason"]] = None,
**kwargs
):
super(AssignmentReportResource, self).__init__(**kwargs)
self.compliance_status = None
self.resource_id = None
self.reasons = reasons
self.properties = None
class AssignmentReportResourceComplianceReason(msrest.serialization.Model):
"""Reason and code for the compliance of the guest configuration assignment resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar phrase: Reason for the compliance of the guest configuration assignment resource.
:vartype phrase: str
:ivar code: Code for the compliance of the guest configuration assignment resource.
:vartype code: str
"""
_validation = {
'phrase': {'readonly': True},
'code': {'readonly': True},
}
_attribute_map = {
'phrase': {'key': 'phrase', 'type': 'str'},
'code': {'key': 'code', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AssignmentReportResourceComplianceReason, self).__init__(**kwargs)
self.phrase = None
self.code = None
class ConfigurationInfo(msrest.serialization.Model):
"""Information about the configuration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Name of the configuration.
:vartype name: str
:ivar version: Version of the configuration.
:vartype version: str
"""
_validation = {
'name': {'readonly': True},
'version': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ConfigurationInfo, self).__init__(**kwargs)
self.name = None
self.version = None
class ConfigurationParameter(msrest.serialization.Model):
"""Represents a configuration parameter.
:param name: Name of the configuration parameter.
:type name: str
:param value: Value of the configuration parameter.
:type value: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
value: Optional[str] = None,
**kwargs
):
super(ConfigurationParameter, self).__init__(**kwargs)
self.name = name
self.value = value
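# A minimal usage sketch (illustrative, not part of the generated client): ConfigurationParameter
# is a plain msrest model, so it can be constructed with keyword arguments and turned into a
# dict via the standard msrest.serialization.Model API; the parameter name below is hypothetical.
def _example_configuration_parameter():
    param = ConfigurationParameter(name='ensureIISInstalled', value='true')
    # as_dict() maps attributes through _attribute_map, yielding {'name': ..., 'value': ...}
    return param.as_dict()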
class ConfigurationSetting(msrest.serialization.Model):
"""Configuration setting of LCM (Local Configuration Manager).
    :param configuration_mode: Specifies how the LCM (Local Configuration Manager) actually applies
the configuration to the target nodes. Possible values are ApplyOnly, ApplyAndMonitor, and
ApplyAndAutoCorrect. Possible values include: "ApplyOnly", "ApplyAndMonitor",
"ApplyAndAutoCorrect".
:type configuration_mode: str or ~azure.mgmt.guestconfig.models.ConfigurationMode
:param allow_module_overwrite: If true - new configurations downloaded from the pull service
are allowed to overwrite the old ones on the target node. Otherwise, false.
:type allow_module_overwrite: bool
:param action_after_reboot: Specifies what happens after a reboot during the application of a
configuration. The possible values are ContinueConfiguration and StopConfiguration. Possible
values include: "ContinueConfiguration", "StopConfiguration".
:type action_after_reboot: str or ~azure.mgmt.guestconfig.models.ActionAfterReboot
:param refresh_frequency_mins: The time interval, in minutes, at which the LCM checks a pull
service to get updated configurations. This value is ignored if the LCM is not configured in
pull mode. The default value is 30.
:type refresh_frequency_mins: float
:param reboot_if_needed: Set this to true to automatically reboot the node after a
configuration that requires reboot is applied. Otherwise, you will have to manually reboot the
node for any configuration that requires it. The default value is false. To use this setting
when a reboot condition is enacted by something other than DSC (such as Windows Installer),
combine this setting with the xPendingReboot module.
:type reboot_if_needed: bool
:param configuration_mode_frequency_mins: How often, in minutes, the current configuration is
checked and applied. This property is ignored if the ConfigurationMode property is set to
ApplyOnly. The default value is 15.
:type configuration_mode_frequency_mins: float
"""
_attribute_map = {
'configuration_mode': {'key': 'configurationMode', 'type': 'str'},
'allow_module_overwrite': {'key': 'allowModuleOverwrite', 'type': 'bool'},
'action_after_reboot': {'key': 'actionAfterReboot', 'type': 'str'},
        'refresh_frequency_mins': {'key': 'refreshFrequencyMins', 'type': 'float'},
        'reboot_if_needed': {'key': 'rebootIfNeeded', 'type': 'bool'},
        'configuration_mode_frequency_mins': {'key': 'configurationModeFrequencyMins', 'type': 'float'},
    }
    def __init__(
        self,
        *,
        configuration_mode: Optional[str] = None,
        allow_module_overwrite: Optional[bool] = None,
        action_after_reboot: Optional[str] = None,
        refresh_frequency_mins: Optional[float] = 30,
        reboot_if_needed: Optional[bool] = False,
        configuration_mode_frequency_mins: Optional[float] = 15,
        **kwargs
    ):
        super(ConfigurationSetting, self).__init__(**kwargs)
        self.configuration_mode = configuration_mode
        self.allow_module_overwrite = allow_module_overwrite
        self.action_after_reboot = action_after_reboot
        self.refresh_frequency_mins = refresh_frequency_mins
        self.reboot_if_needed = reboot_if_needed
        self.configuration_mode_frequency_mins = configuration_mode_frequency_mins
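# Illustrative sketch (hypothetical values): building an LCM configuration setting. Per the
# docstring above, the enum-like fields also accept plain strings, and the frequency fields
# default to 30 and 15 minutes respectively.
def _example_configuration_setting():
    setting = ConfigurationSetting(
        configuration_mode='ApplyAndMonitor',
        reboot_if_needed=True,
        refresh_frequency_mins=60,
        configuration_mode_frequency_mins=15,
    )
    return setting.as_dict()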
#
# * The source code in this file is based on the source code of CuPy.
#
# # NLCPy License #
#
# Copyright (c) 2020-2021 NEC Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither NEC Corporation nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# # CuPy License #
#
# Copyright (c) 2015 Preferred Infrastructure, Inc.
# Copyright (c) 2015 Preferred Networks, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import unittest
import pytest
import numpy
import nlcpy
from nlcpy import testing
class TestDims(unittest.TestCase):
@testing.with_requires('numpy>=1.10')
@testing.for_all_dtypes()
@testing.numpy_nlcpy_array_equal()
def test_broadcast_to(self, xp, dtype):
# Note that broadcast_to is only supported on numpy>=1.10
a = testing.shaped_arange((3, 1, 4), xp, dtype)
b = xp.broadcast_to(a, (2, 3, 3, 4))
return b
@testing.with_requires('numpy>=1.10')
@testing.for_all_dtypes()
@testing.numpy_nlcpy_raises()
def test_broadcast_to_fail(self, xp, dtype):
# Note that broadcast_to is only supported on numpy>=1.10
a = testing.shaped_arange((3, 1, 4), xp, dtype)
xp.broadcast_to(a, (1, 3, 4))
@testing.with_requires('numpy>=1.10')
@testing.for_all_dtypes()
@testing.numpy_nlcpy_raises()
def test_broadcast_to_short_shape(self, xp, dtype):
# Note that broadcast_to is only supported on numpy>=1.10
a = testing.shaped_arange((1, 3, 4), xp, dtype)
xp.broadcast_to(a, (3, 4))
@testing.for_all_dtypes()
@testing.numpy_nlcpy_array_equal()
def test_broadcast_to_numpy19(self, xp, dtype):
# Note that broadcast_to is only supported on numpy>=1.10
a = testing.shaped_arange((3, 1, 4), xp, dtype)
if xp is nlcpy:
b = xp.broadcast_to(a, (2, 3, 3, 4))
else:
dummy = xp.empty((2, 3, 3, 4))
b, _ = xp.broadcast_arrays(a, dummy)
return b
@testing.for_all_dtypes()
def test_broadcast_to_fail_numpy19(self, dtype):
# Note that broadcast_to is only supported on numpy>=1.10
a = testing.shaped_arange((3, 1, 4), nlcpy, dtype)
with self.assertRaises(ValueError):
nlcpy.broadcast_to(a, (1, 3, 4))
@testing.for_all_dtypes()
def test_broadcast_to_short_shape_numpy19(self, dtype):
# Note that broadcast_to is only supported on numpy>=1.10
a = testing.shaped_arange((1, 3, 4), nlcpy, dtype)
with self.assertRaises(ValueError):
nlcpy.broadcast_to(a, (3, 4))
@testing.numpy_nlcpy_array_equal()
def test_expand_dims0(self, xp):
a = testing.shaped_arange((2, 3), xp)
return xp.expand_dims(a, 0)
@testing.numpy_nlcpy_array_equal()
def test_expand_dims1(self, xp):
a = testing.shaped_arange((2, 3), xp)
return xp.expand_dims(a, 1)
@testing.numpy_nlcpy_array_equal()
def test_expand_dims2(self, xp):
a = testing.shaped_arange((2, 3), xp)
return xp.expand_dims(a, 2)
@testing.numpy_nlcpy_array_equal()
def test_expand_dims_negative1(self, xp):
a = testing.shaped_arange((2, 3), xp)
return xp.expand_dims(a, -2)
@testing.numpy_nlcpy_raises()
def test_expand_dims_negative2(self, xp):
a = testing.shaped_arange((2, 3), xp)
return xp.expand_dims(a, -4)
@testing.numpy_nlcpy_array_equal()
def test_expand_dims_tuple_axis(self, xp):
a = testing.shaped_arange((2, 2, 2), xp)
return [xp.expand_dims(a, axis) for axis in [
(0, 1, 2),
(0, -1, -2),
(0, 3, 5),
(0, -3, -5),
(),
(1,),
]]
def test_expand_dims_out_of_range(self):
for xp in (numpy, nlcpy):
a = testing.shaped_arange((2, 2, 2), xp)
for axis in [(1, -6), (1, 5)]:
with pytest.raises(numpy.AxisError):
xp.expand_dims(a, axis)
def test_expand_dims_repeated_axis(self):
for xp in (numpy, nlcpy):
a = testing.shaped_arange((2, 2, 2), xp)
with pytest.raises(ValueError):
xp.expand_dims(a, (1, 1))
@testing.numpy_nlcpy_array_equal()
def test_squeeze1(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
return a.squeeze()
@testing.numpy_nlcpy_array_equal()
def test_squeeze2(self, xp):
a = testing.shaped_arange((2, 3, 4), xp)
return xp.squeeze(a)
@testing.numpy_nlcpy_array_equal()
def test_squeeze_int_axis1(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
return a.squeeze(axis=2)
@testing.numpy_nlcpy_array_equal()
def test_squeeze_int_axis2(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
return xp.squeeze(a, axis=-3)
@testing.with_requires('numpy>=1.13')
@testing.numpy_nlcpy_raises()
def test_squeeze_int_axis_failure1(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
xp.squeeze(a, axis=-9)
def test_squeeze_int_axis_failure2(self):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), nlcpy)
with self.assertRaises(nlcpy.core.error._AxisError):
nlcpy.squeeze(a, axis=-9)
@testing.numpy_nlcpy_array_equal()
def test_squeeze_tuple_axis1(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
return xp.squeeze(a, axis=(2, 4))
@testing.numpy_nlcpy_array_equal()
def test_squeeze_tuple_axis2(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
return xp.squeeze(a, axis=(-4, -3))
@testing.numpy_nlcpy_array_equal()
def test_squeeze_tuple_axis3(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
return xp.squeeze(a, axis=(4, 2))
@testing.numpy_nlcpy_array_equal()
def test_squeeze_tuple_axis4(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
return xp.squeeze(a, axis=())
@testing.with_requires('numpy>=1.13')
@testing.numpy_nlcpy_raises()
def test_squeeze_tuple_axis_failure1(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
xp.squeeze(a, axis=(-9,))
@testing.numpy_nlcpy_raises()
def test_squeeze_tuple_axis_failure2(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
xp.squeeze(a, axis=(2, 2))
def test_squeeze_tuple_axis_failure3(self):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), nlcpy)
with self.assertRaises(nlcpy.core.error._AxisError):
nlcpy.squeeze(a, axis=(-9,))
@testing.numpy_nlcpy_array_equal()
def test_squeeze_scalar1(self, xp):
a = testing.shaped_arange((), xp)
return xp.squeeze(a, axis=0)
@testing.numpy_nlcpy_array_equal()
def test_squeeze_scalar2(self, xp):
a = testing.shaped_arange((), xp)
return xp.squeeze(a, axis=-1)
@testing.with_requires('numpy>=1.13')
@testing.numpy_nlcpy_raises()
def test_squeeze_scalar_failure1(self, xp):
a = testing.shaped_arange((), xp)
xp.squeeze(a, axis=-2)
@testing.with_requires('numpy>=1.13')
@testing.numpy_nlcpy_raises()
def test_squeeze_scalar_failure2(self, xp):
a = testing.shaped_arange((), xp)
xp.squeeze(a, axis=1)
def test_squeeze_scalar_failure3(self):
a = testing.shaped_arange((), nlcpy)
with self.assertRaises(nlcpy.core.error._AxisError):
nlcpy.squeeze(a, axis=-2)
def test_squeeze_scalar_failure4(self):
a = testing.shaped_arange((), nlcpy)
with self.assertRaises(nlcpy.core.error._AxisError):
nlcpy.squeeze(a, axis=1)
@testing.numpy_nlcpy_raises()
def test_squeeze_failure(self, xp):
a = testing.shaped_arange((2, 1, 3, 4), xp)
xp.squeeze(a, axis=2)
@testing.numpy_nlcpy_array_equal()
def test_external_squeeze(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
return xp.squeeze(a)
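# A plain-NumPy sketch (independent of the test harness) of the broadcasting rule the
# broadcast_to tests in TestDims above exercise: shapes are aligned from the trailing axes,
# size-1 axes are stretched, missing leading axes are added, and a non-unit axis never shrinks.
def _broadcast_rule_example():
    a = numpy.arange(12).reshape(3, 1, 4)       # shape (3, 1, 4)
    b = numpy.broadcast_to(a, (2, 3, 3, 4))     # ok: size-1 axis stretched, leading axis added
    try:
        numpy.broadcast_to(a, (1, 3, 4))        # fails: leading axis 3 cannot shrink to 1
    except ValueError:
        pass
    return b.shape                              # (2, 3, 3, 4)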
@testing.parameterize(
{'shapes': [(), ()]},
{'shapes': [(0,), (0,)]},
{'shapes': [(1,), (1,)]},
{'shapes': [(2,), (2,)]},
{'shapes': [(0,), (1,)]},
{'shapes': [(2, 3), (1, 3)]},
{'shapes': [(2, 1, 3, 4), (3, 1, 4)]},
{'shapes': [(4, 3, 2, 3), (2, 3)]},
{'shapes': [(2, 0, 1, 1, 3), (2, 1, 0, 0, 3)]},
{'shapes': [(0, 1, 1, 3), (2, 1, 0, 0, 3)]},
{'shapes': [(0, 1, 1, 0, 3), (5, 2, 0, 1, 0, 0, 3), (2, 1, 0, 0, 0, 3)]},
)
class TestBroadcastArrays(unittest.TestCase):
@testing.for_all_dtypes()
@testing.for_orders('CF')
@testing.numpy_nlcpy_array_equal()
def test_broadcast_arrays(self, xp, dtype, order):
arrays = [testing.shaped_arange(s, xp, dtype, order) for s in self.shapes]
return xp.broadcast_arrays(*arrays)
@testing.numpy_nlcpy_array_equal()
def test_broadcast_arrays_with_list_input(self, xp):
arrays = [testing.shaped_arange(s, xp).tolist() for s in self.shapes]
return xp.broadcast_arrays(*arrays)
@testing.parameterize(
{'shapes': [(3,), (2,)]},
{'shapes': [(3, 2), (2, 3)]},
{'shapes': [(3, 2), (3, 4)]},
{'shapes': [(0, ), (2, )]},
)
class TestBroadcastArraysInvalidShape(unittest.TestCase):
@testing.numpy_nlcpy_raises()
def test_broadcast_arrays_invalid_shape(self, xp):
arrays = [testing.shaped_arange(s, xp) for s in self.shapes]
xp.broadcast_arrays(*arrays)
class TestBroadcastArraysFailure(unittest.TestCase):
def test_broadcast_arrays_subok(self):
try:
nlcpy.broadcast_arrays(nlcpy.empty([1, 3]), nlcpy.empty([2, 1]), subok=True)
except NotImplementedError:
return
raise Exception
class TestAtLeast(unittest.TestCase):
def check_atleast(self, func, xp):
a = testing.shaped_arange((), xp, 'i')
b = testing.shaped_arange((2,), xp, 'f')
c = testing.shaped_arange((3, 4), xp, 'd')
d = testing.shaped_arange((4, 2, 3), xp, 'F', order='F')
e = 1
f = xp.float32(1)
return func(a, b, c, d, e, f)
@testing.numpy_nlcpy_array_equal()
def test_atleast_1d(self, xp):
return self.check_atleast(xp.atleast_1d, xp)
@testing.numpy_nlcpy_array_equal()
def test_atleast_1d2(self, xp):
a = testing.shaped_arange((4, 2, 3), xp)
return xp.atleast_1d(a)
@testing.numpy_nlcpy_array_equal()
def test_atleast_2d(self, xp):
return self.check_atleast(xp.atleast_2d, xp)
@testing.numpy_nlcpy_array_equal()
def test_atleast_2d2(self, xp):
a = testing.shaped_arange((4, 2, 3), | |
<filename>deepppl/translation/stan2ir.py
"""
Copyright 2018 IBM Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import ast
from ..parser.stanListener import stanListener
import astor
import astpretty
import torch
import ipdb
if __name__ is not None and "." in __name__:
from .ir import *
from .ir2python import *
else:
assert False
def gatherChildrenIRList(ctx):
irs = []
if ctx.children is not None:
for child in ctx.children:
if hasattr(child, 'ir') and child.ir is not None:
irs += child.ir
return irs
def gatherChildrenIR(ctx):
irs = []
if ctx.children is not None:
for child in ctx.children:
if hasattr(child, 'ir') and child.ir is not None:
irs.append(child.ir)
return irs
def is_active(f):
return f is not None and f() is not None
def idxFromExprList(exprList):
if len(exprList) == 1:
return exprList[0]
else:
return Tuple(
exprs = exprList)
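# Illustrative sketch (the node class below is a stand-in, not a real ANTLR context): the
# helpers above only rely on a `.children` list whose items may carry an `ir` attribute set by
# earlier exit* callbacks, and idxFromExprList wraps multiple index expressions into a Tuple.
def _gather_children_example():
    class _FakeCtx:
        def __init__(self, ir=None, children=None):
            self.ir = ir
            self.children = children
    leaf_a = _FakeCtx(ir=Constant(value=1))
    leaf_b = _FakeCtx(ir=Variable(id='x'))
    parent = _FakeCtx(children=[leaf_a, _FakeCtx(), leaf_b])
    return gatherChildrenIR(parent), idxFromExprList([leaf_a.ir, leaf_b.ir])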
class StanToIR(stanListener):
def __init__(self):
self.networks = None
self._to_model = []
def exitVariableDecl(self, ctx):
vid = ctx.IDENTIFIER().getText()
        type_ = ctx.type_().ir
        dims = None
if ctx.arrayDims() is not None and type_.dim is not None:
# Avi: to check
dims = Tuple(exprs = [ctx.arrayDims().ir, type_.dim])
elif ctx.arrayDims() is not None:
dims = ctx.arrayDims().ir
elif type_.dim is not None:
dims = type_.dim
init = ctx.expression().ir if is_active(ctx.expression) else None
ctx.ir = VariableDecl(
id = vid,
dim = dims,
type_ = type_,
init = init)
def exitType_(self, ctx):
ptype = ctx.primitiveType()
if ctx.primitiveType() is not None:
type_ = ctx.primitiveType().getText()
elif ctx.vectorType() is not None:
# TODO: differentiate row_vector
# type_ = ctx.vectorType().getText()
type_ = 'vector'
elif ctx.matrixType() is not None:
#type_ = ctx.matrixType().getText()
type_ = 'matrix'
else:
assert False, f"unknown type: {ptype.getText()}"
constraints = ctx.typeConstraints().ir if ctx.typeConstraints() else None
is_array = ctx.isArray is not None
dims = ctx.arrayDims().ir if ctx.arrayDims() is not None else None
ctx.ir = Type_(type_ = type_, constraints = constraints, is_array = is_array, dim = dims)
def exitTypeConstraints(self, ctx):
constraints_list = ctx.typeConstraintList()
if constraints_list:
ctx.ir = [x.ir for x in constraints_list.typeConstraint()]
def exitTypeConstraint(self, ctx):
id_ = ctx.IDENTIFIER()
if id_.getText() == 'lower':
sort = 'lower'
elif id_.getText() == 'upper':
sort = 'upper'
else:
assert False, f'unknown constraint: {id_.getText()}'
constant = ctx.atom().ir
constraint = Constraint(sort = sort, value = constant)
ctx.ir = constraint
def exitInferredArrayShape(self, ctx):
ctx.ir = AnonymousShapeProperty()
def exitArrayDim(self, ctx):
if is_active(ctx.expression):
ctx.ir = ctx.expression().ir
elif is_active(ctx.inferredArrayShape):
ctx.ir = ctx.inferredArrayShape().ir
def exitInferredTensorShape(self, ctx):
ctx.ir = AnonymousShapeProperty()
def exitArrayDimCommaList(self, ctx):
ctx.ir = gatherChildrenIR(ctx)
def exitArrayDims(self, ctx):
cl = ctx.arrayDimCommaList()
elements = cl.ir
if len(elements) == 1:
ctx.ir = elements[0]
else:
ctx.ir = Tuple(exprs = elements)
def exitParameterDecl(self, ctx):
if is_active(ctx.variableDecl):
ctx.ir = ctx.variableDecl().ir
else: # Could be more defensive
pass
def exitParameterDeclsOpt(self, ctx):
ctx.ir = gatherChildrenIR(ctx)
def exitVariableDeclsOpt(self, ctx):
ctx.ir = gatherChildrenIR(ctx)
# Vector, matrix and array expressions (section 4.2)
def exitConstant(self, ctx):
if ctx.IntegerLiteral() is not None:
f = int
elif ctx.RealLiteral() is not None:
f = float
else:
assert False, "Unknonwn literal"
ctx.ir = Constant(value = f(ctx.getText()))
def exitVariable(self, ctx):
ctx.ir = Variable(id = ctx.getText())
def exitIndexExpression(self, ctx):
if is_active(ctx.expressionCommaListOpt):
ctx.ir = ctx.expressionCommaListOpt().ir
else:
assert False, "Unknown index expression:{}.".format(ctx.getText())
def exitAtom(self, ctx):
if is_active(ctx.constant):
ctx.ir = ctx.constant().ir
elif is_active(ctx.variable):
ctx.ir = ctx.variable().ir
elif is_active(ctx.expression):
ctx.ir = ctx.expression().ir
elif is_active(ctx.atom) and is_active(ctx.indexExpression):
name = ctx.atom().ir
index = ctx.indexExpression().ir
ctx.ir = Subscript(id = name, index = index)
elif is_active(ctx.netLValue):
ctx.ir = ctx.netLValue().ir
elif is_active(ctx.variableProperty):
ctx.ir = ctx.variableProperty().ir
else:
assert False, "Not yet implemented atom: {}".format(ctx.getText())
def exitExpression(self, ctx):
if is_active(ctx.atom):
ctx.ir = ctx.atom().ir
return
if is_active(ctx.callExpr):
ctx.ir = ctx.callExpr().ir
return
if ctx.TRANSPOSE_OP() is not None:
assert False, "Not yet implemented"
elif ctx.e1 is not None and ctx.e2 is not None:
self._exitBinaryExpression(ctx)
elif ctx.e1 is not None:
if is_active(ctx.PLUS_OP):
op = UPlus()
elif is_active(ctx.MINUS_OP):
op = UMinus()
elif is_active(ctx.NOT_OP):
op = UNot()
else:
assert False, f'Unknown operator: {ctx.getText()}'
ctx.ir = UnaryOperator(
value = ctx.e1.ir,
op = op)
else:
text = ctx.getText()
assert False, "Not yet implemented: {}".format(text)
def _exitBinaryExpression(self, ctx):
left = ctx.e1.ir
right = ctx.e2.ir
if is_active(ctx.LEFT_DIV_OP):
assert False, "Not yet implemented"
mapping = {
ctx.PLUS_OP : Plus,
ctx.MINUS_OP : Minus,
ctx.POW_OP : Pow,
ctx.OR_OP : Or,
ctx.AND_OP : And,
ctx.GT_OP : GT,
ctx.LT_OP : LT,
ctx.GE_OP : GE,
ctx.LE_OP : LE,
ctx.EQ_OP : EQ,
ctx.DOT_DIV_OP : DotDiv,
ctx.DIV_OP : Div,
ctx.DOT_MULT_OP : DotMult,
ctx.MULT_OP : Mult}
op = None
for src in mapping:
if is_active(src):
op = mapping[src]()
break
if op is not None:
ctx.ir = BinaryOperator(left = left,
right = right,
op = op)
elif ctx.e3 is not None:
false = ctx.e3.ir
ctx.ir = ConditionalStmt(test = left,
true = right,
false = false)
else:
text = ctx.getText()
assert False, "Not yet implemented: {}".format(text)
def exitExpressionCommaList(self, ctx):
        ## TODO: check whether we want to build a list of statements
## or a List node
ctx.ir = gatherChildrenIR(ctx)
def exitExpressionCommaListOpt(self, ctx):
ir = gatherChildrenIRList(ctx)
if len(ir) == 1:
ctx.ir = ir[0]
else:
ctx.ir = Tuple(exprs = ir)
# Statements (section 5)
# Assignment (section 5.1)
def exitLvalue(self, ctx):
id = Variable(ctx.IDENTIFIER().getText())
if ctx.expressionCommaList() is not None:
idx = idxFromExprList(ctx.expressionCommaList().ir)
ctx.ir = Subscript(id = id, index = idx)
else:
ctx.ir = id
def exitAssignStmt(self, ctx):
lvalue = ctx.lvalue().ir
expr = ctx.expression().ir
if ctx.op is not None:
op = None
if ctx.PLUS_EQ() is not None:
op = Plus()
if ctx.MINUS_EQ() is not None:
op = Minus()
if ctx.MULT_EQ() is not None:
op = Mult()
if ctx.DOT_MULT_EQ() is not None:
op = DotMult()
if ctx.DIV_EQ() is not None:
op = Div()
if ctx.DOT_DIV_EQ() is not None:
op = DotDiv()
if op:
expr = BinaryOperator(left = lvalue, op = op, right = expr)
ctx.ir = AssignStmt(
target = lvalue,
value = expr)
# Sampling (section 5.3)
def exitLvalueSampling(self, ctx):
if is_active(ctx.lvalue):
ctx.ir = ctx.lvalue().ir
elif is_active(ctx.expression):
ctx.ir = ctx.expression().ir
elif is_active(ctx.netLValue):
ctx.ir = ctx.netLValue().ir
else:
assert False
def exitNetParam(self, ctx):
ids = [ctx.IDENTIFIER().getText()]
if ctx.netParam():
ir = ctx.netParam()[0].ir
ids.extend(ir)
ctx.ir = ids
def exitNetworksBlock(self, ctx):
ops = ctx.netVariableDeclsOpt()
decls = [x.ir for x in ops.netVariableDecl()]
nets = NetworksBlock(decls = decls)
self.networks = nets
ctx.ir = nets
def exitNetClass(self, ctx):
ctx.ir = ctx.getText()
exitNetName = exitNetClass
def exitNetVariableDecl(self, ctx):
netCls = ctx.netClass().ir
name = ctx.netName().ir
parameters = []
ctx.ir = NetDeclaration(name = name, cls = netCls, \
params = parameters)
def exitNetParamDecl(self, ctx):
netName = ctx.netName().getText()
if self.networks is not None:
nets = [x for x in self.networks.decls if x.name == netName]
if len(nets) == 1:
nets[0].params.append(ctx.netParam().ir)
elif len(nets) > 1:
raise AlreadyDeclaredException(netName)
else:
raise UndeclaredNetworkException(netName)
else:
raise UndeclaredNetworkException(netName)
def exitNetLValue(self, ctx):
name = ctx.netName().getText()
ids = ctx.netParam().ir
ctx.ir = NetVariable(name = name, ids = ids)
def exitVariableProperty(self, ctx):
property = ctx.IDENTIFIER().getText()
if is_active(ctx.netLValue):
var = ctx.netLValue().ir
cls = NetVariableProperty
elif is_active(ctx.variable):
var = ctx.variable().ir
cls = VariableProperty
else:
assert False, "Not yet implemented."
ctx.ir = cls(var = var, prop= property)
def exitSamplingStmt(self, ctx):
lvalue = ctx.lvalueSampling().ir
if ctx.PLUS_EQ() is not None:
assert False, 'Not yet implemented'
elif ctx.truncation() is not None:
assert False, 'Not yet implemented'
else:
id = ctx.IDENTIFIER()[0].getText()
exprList = ctx.expressionCommaList().ir
ctx.ir = SamplingStmt(target = lvalue,
id = id,
args = exprList)
# For loops (section 5.4)
def exitForStmt(self, ctx):
id = ctx.IDENTIFIER().getText()
body = ctx.statement().ir if hasattr(ctx.statement(), 'ir') else None
atom = ctx.atom()
from_ = atom[0].ir
to_ = atom[1].ir if len(atom) > 1 else None
ctx.ir = ForStmt(id = id,
from_ = from_,
to_ = to_,
body = body)
# Conditional statements (section 5.5)
def exitConditionalStmt(self, ctx):
test = ctx.expression().ir
false = ctx.s2.ir if ctx.s2 is not None else None
ctx.ir = ConditionalStmt(
test = test,
true = ctx.s1.ir,
| |
import copy
from random import shuffle
import numpy as np
import json
import os
import time
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier, PassiveAggressiveClassifier, Perceptron, SGDRegressor, PassiveAggressiveRegressor
from sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB
from sklearn.preprocessing import MinMaxScaler, StandardScaler, MaxAbsScaler
# NOTE: adjust this when switching between the different networks
ADAP_SIZE = 216
# ADAP_SIZE = 4096
CAP_VERIFICATION_TIME = 570 * 1000
scalers = [MinMaxScaler, StandardScaler, MaxAbsScaler]
penalty_sgd = ['l1', 'l2', 'elasticnet', 'none']
loss_sgd_regr = ['squared_loss', 'huber', 'epsilon_insensitive', 'squared_epsilon_insensitive']
loss_sgd_class = ['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron'] + loss_sgd_regr
loss_pa = ['hinge', 'squared_hinge']
penalty_per = ['l1', 'l2', 'elasticnet', None]
# Classifiers in format (model, [loss functions], [penalties], [scalers])
classifiers = [
(PassiveAggressiveClassifier, loss_pa, [None], scalers),
(SGDClassifier, loss_sgd_class, penalty_sgd, scalers),
(Perceptron, [None], penalty_per, scalers),
(GaussianNB, [None], [None], scalers),
]
regressors = [
(SGDRegressor, ['squared_loss', 'huber', 'epsilon_insensitive', 'squared_epsilon_insensitive'], penalty_per[:3], scalers),
(PassiveAggressiveRegressor, ['epsilon_insensitive', 'squared_epsilon_insensitive'], [None], scalers)
]
def getSimpleModelName(originalName):
if originalName == "PassiveAggressiveClassifier":
return "PA-C"
elif originalName == "PassiveAggressiveRegressor":
return "PA-R"
elif originalName == "SGDClassifier":
return "SGD-C"
elif originalName == "SGDRegressor":
return "SGD-R"
else:
return originalName
data = json.load(open(os.path.join('machine_learner','collected_data','dataset_selected_features.json')))
features_global = data['features']
targets_pl_class_global = data['target_classification_packetloss']
targets_la_class_global = data['target_classification_latency']
targets_pl_regr_global = data['target_regression_packetloss']
targets_la_regr_global = data['target_regression_latency']
targets_ec_regr_global = data['target_regression_energyconsumption']
verification_times_global = data['verification_times']
del data
class ModelEncapsClass:
def __init__(self, model, loss, penalty):
self.loss = loss
self.penalty = penalty
self.model = model
if loss == None and penalty == None:
self.single_model = model()
elif loss == None and penalty != None:
self.single_model = model(penalty=penalty)
elif loss != None and penalty == None:
self.single_model = model(loss=loss)
else:
self.single_model = model(loss=loss, penalty=penalty)
def getName(self):
'''
Returns a tuple with 3 entries containing the model, loss and penalty names
'''
return (getSimpleModelName(self.model.__name__) , \
self.loss.replace('_', '' ) if self.loss != None else 'None', \
self.penalty if self.penalty != None else 'None')
def predict(self, features):
'''
Proxy method
'''
return self.single_model.predict(features).tolist()
def partial_fit(self, features, targets_pl, targets_la):
'''
Proxy method
'''
comb_targets = [targets_pl[i] + (2*targets_la[i]) for i in range(len(targets_pl))]
self.single_model.partial_fit(features, comb_targets, classes=np.array([0,1,2,3]))
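# The single classifier collapses the two goals into one of four classes: the packet-loss label
# occupies the low bit and the latency label the high bit, so class 3 means both goals are
# predicted to be satisfied (cf. the thresholds in ModelEncapsRegr.predict below).
def _combined_class(target_pl, target_la):
    # target_pl and target_la are 0/1 labels, 1 meaning the corresponding goal is met
    return target_pl + 2 * target_la  # 0: neither, 1: packet loss only, 2: latency only, 3: both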
class ModelEncapsRegr:
def __init__(self, models, losses, penalties):
self.loss_pl, self.loss_la = losses
self.penalty_pl, self.penalty_la = penalties
self.model_pl, self.model_la = models
self.models = []
for i in range(2):
if losses[i] == None and penalties[i] == None:
m = models[i]()
elif losses[i] == None and penalties[i] != None:
m = models[i](penalty=penalties[i])
elif losses[i] != None and penalties[i] == None:
m = models[i](loss=losses[i])
else:
m = models[i](loss=losses[i], penalty=penalties[i])
self.models.append(m)
def getName(self):
'''
Returns a tuple with 3 entries containing the model, loss and penalty names
'''
return (getSimpleModelName(self.model_pl.__name__) + '.' + getSimpleModelName(self.model_la.__name__), \
(self.loss_pl.replace('_', '' ) if self.loss_pl != None else 'None') + ('.' + self.loss_la.replace('_', '' ) if self.loss_la != None else 'None'), \
(self.penalty_pl if self.penalty_pl != None else 'None') + '.' + (self.penalty_la if self.penalty_la != None else 'None'))
def predict(self, features_pl, features_la):
pred_pl = self.models[0].predict(features_pl)
pred_la = self.models[1].predict(features_la)
class_pl = [1 if i < 10 else 0 for i in pred_pl]
class_la = [1 if i < 5 else 0 for i in pred_la]
return [class_pl[i] + (2 * class_la[i]) for i in range(len(features_pl))]
def partial_fit(self, features_pl, features_la, targets_pl, targets_la):
self.models[0].partial_fit(features_pl, targets_pl)
self.models[1].partial_fit(features_la, targets_la)
def simulateModelWithTimes(model, scaler, loss, penalty, amtTrainingCycles = 30, explorationPercentage = 0.05):
isClassifier = model in [i[0] for i in classifiers]
start = time.perf_counter()
data = []
cycles = int(len(targets_pl_class_global) / ADAP_SIZE)
if isClassifier:
model = ModelEncapsClass(model, loss, penalty)
else:
model = ModelEncapsRegr([model]*2, [loss]*2, [penalty]*2)
model_name, loss_name, pen_name = model.getName()
# FIXME training cycles and exploration percentage temporarily stored in the model name
model_name += f'-{amtTrainingCycles}-{explorationPercentage}'
name = f'{model_name}_{loss_name}_{pen_name}_{scaler.__name__ if scaler != None else "None"}'
outputPath = os.path.join('machine_learner', 'collected_data', 'target', name + '.json')
if scaler != None:
scaler = scaler()
# Index used to keep track of the last configuration that was learned in a single cycle
learning_index = 0
# Simulate the classifier over all the cycles
for i in range(cycles):
# Extract the features and targets for the different goals in this cycle
features = getCopyFeatures(i*ADAP_SIZE,(i+1)*ADAP_SIZE)
targets_pl_class = targets_pl_class_global[i*ADAP_SIZE:(i+1)*ADAP_SIZE]
targets_la_class = targets_la_class_global[i*ADAP_SIZE:(i+1)*ADAP_SIZE]
targets_pl_regr = targets_pl_regr_global[i*ADAP_SIZE:(i+1)*ADAP_SIZE]
targets_la_regr = targets_la_regr_global[i*ADAP_SIZE:(i+1)*ADAP_SIZE]
targets_ec_regr = targets_ec_regr_global[i*ADAP_SIZE:(i+1)*ADAP_SIZE]
verification_times = verification_times_global[i*ADAP_SIZE:(i+1)*ADAP_SIZE]
# The predictions by the classifier (or regressor)
classBefore = []
classAfter = []
verifiedOptions = [0 for i in range(ADAP_SIZE)]
learning_time = 0
verification_time_total = sum(verification_times)
verification_time_with_learning = 0
# Differentiate between training and testing cycles
if i < amtTrainingCycles:
# Limited training since we have a cap on verification times
beginIndex = learning_index
endIndex, verification_time_with_learning = \
determineLearningSamples(verification_times, beginIndex, CAP_VERIFICATION_TIME)
if beginIndex < endIndex:
features_learning = features[beginIndex:endIndex]
if isClassifier:
targets_learning_pl = targets_pl_class[beginIndex:endIndex]
targets_learning_la = targets_la_class[beginIndex:endIndex]
else:
targets_learning_pl = targets_pl_regr[beginIndex:endIndex]
targets_learning_la = targets_la_regr[beginIndex:endIndex]
for opIndex in range(beginIndex, endIndex):
verifiedOptions[opIndex] = 1
else:
features_learning = features[beginIndex:] + features[:endIndex]
if isClassifier:
targets_learning_pl = targets_pl_class[beginIndex:] + targets_pl_class[:endIndex]
targets_learning_la = targets_la_class[beginIndex:] + targets_la_class[:endIndex]
else:
targets_learning_pl = targets_pl_regr[beginIndex:] + targets_pl_regr[:endIndex]
targets_learning_la = targets_la_regr[beginIndex:] + targets_la_regr[:endIndex]
for opIndex in range(beginIndex, ADAP_SIZE):
verifiedOptions[opIndex] = 1
for opIndex in range(endIndex):
verifiedOptions[opIndex] = 1
learning_index = endIndex
# Actual training cycles
startLearningTime = time.perf_counter()
if scaler != None:
scaler.partial_fit(features_learning)
features_learning = scaler.transform(features_learning)
classBefore = [-1 for i in range(ADAP_SIZE)]
if isClassifier:
model.partial_fit(features_learning, targets_learning_pl, targets_learning_la)
else:
model.partial_fit(features_learning, features_learning, targets_learning_pl, targets_learning_la)
learning_time = (time.perf_counter() - startLearningTime) * 1000
classAfter = [-1 for i in range(ADAP_SIZE)]
else:
# Testing cycle
startLearningTime = time.perf_counter()
if scaler != None:
features = scaler.transform(features)
if isClassifier:
classBefore = model.predict(features)
else:
classBefore = model.predict(features, features)
meanTime = time.perf_counter() - startLearningTime
# Determine the class(es) of predictions that should be used for online learning
if 3 in classBefore:
# If the options which are predicted to be of class 3 go over the verification time cap,
# take a random sample of all the options with prediction 3.
# Otherwise, take a percentage of options of class 1 and 2 as well (exploration)
indices3 = [i for i in range(ADAP_SIZE) if classBefore[i] in [3]]
verifTime3 = sum(np.array(verification_times)[indices3])
if verifTime3 > CAP_VERIFICATION_TIME:
indices = sampleOfOptions(verification_times, indices3, CAP_VERIFICATION_TIME)
else:
indices12 = [i for i in range(ADAP_SIZE) if classBefore[i] in [1,2]]
sample = sampleOfOptions(verification_times, indices12, CAP_VERIFICATION_TIME-verifTime3, explorationPercentage)
indices = indices3 + sample
elif (1 in classBefore) or (2 in classBefore):
indices12 = [i for i in range(ADAP_SIZE) if classBefore[i] in [1,2]]
indices = sampleOfOptions(verification_times, indices12, CAP_VERIFICATION_TIME)
else:
indices0123 = [i for i in range(ADAP_SIZE) if classBefore[i] in [0,1,2,3]]
indices = sampleOfOptions(verification_times, indices0123, CAP_VERIFICATION_TIME)
startLearningTime = time.perf_counter()
# Collect the samples for online learning
if isClassifier:
model.partial_fit(np.array(features)[indices].tolist(), \
np.array(targets_pl_class)[indices].tolist(), \
np.array(targets_la_class)[indices].tolist())
else:
model.partial_fit(np.array(features)[indices].tolist(), \
np.array(features)[indices].tolist(), \
np.array(targets_pl_regr)[indices].tolist(), \
np.array(targets_la_regr)[indices].tolist())
learning_time = (time.perf_counter() - startLearningTime + meanTime) * 1000
if isClassifier:
classAfter = model.predict(features)
else:
classAfter = model.predict(features, features)
for index in indices:
verifiedOptions[index] = 1
# Only count the verification times of the configurations that were actually considered
verification_time_with_learning = sum(np.array(verification_times)[indices].tolist())
data.append({
'cycle' : i+1,
'learningTime' : learning_time,
'verificationTimeWithLearning' : verification_time_with_learning,
'verificationTimeWithoutLearning': verification_time_total,
'training' : 'true' if i < amtTrainingCycles else 'false',
'adaptationOptions': {
'packetLoss' : targets_pl_regr,
'energyConsumption' : targets_ec_regr,
'latency' : targets_la_regr,
'classificationBefore' : classBefore,
'classificationAfter' : classAfter,
'regressionPLBefore' : [-1] * ADAP_SIZE,
'regressionPLAfter' : [-1] * ADAP_SIZE,
'regressionLABefore' : [-1] * ADAP_SIZE,
'regressionLAAfter' : [-1] * ADAP_SIZE,
'verifiedConfigurations' : verifiedOptions
}
})
with open(outputPath, 'w') as f:
json.dump(data, f, indent=1)
end = time.perf_counter()
print(f'{end-start:.2f} seconds:\t{name}')
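# Illustrative invocation (assumes the dataset JSON loaded above is present and the output
# directory machine_learner/collected_data/target exists): run one classifier configuration.
def _run_single_configuration():
    simulateModelWithTimes(SGDClassifier, MinMaxScaler, 'hinge', 'l2',
                           amtTrainingCycles=30, explorationPercentage=0.05)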
def determineLearningSamples(verifTimes, beginIndex, cappedTime):
# Determines the resulting index, when starting from beginIndex, for all the options
# which can be verified in time.
# @param verifTimes: the verification times of all the options
# @param beginIndex: the beginning index from which the options are considered (inclusive)
# @param cappedTime: the limit on the amount of time for verification
#
# @returns endIndex, time: the ending index (exclusive), and the total verification times for the options in between
endIndex = beginIndex
time = 0
while True:
time += verifTimes[endIndex]
endIndex = (endIndex+1) % len(verifTimes)
if (time + verifTimes[endIndex] > cappedTime) or (endIndex == beginIndex):
break
return endIndex, time
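# Worked example of the wrap-around contract documented above: starting at index 2 with a cap
# of 600, only the option costing 300 at index 2 fits, so the call returns (3, 300), where 3 is
# the exclusive end index and 300 is the accumulated verification time.
def _determine_learning_samples_example():
    verif_times = [100, 200, 300, 400]
    return determineLearningSamples(verif_times, beginIndex=2, cappedTime=600)  # -> (3, 300)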
def sampleOfOptions(verifTimes, indices, cappedTime, explorationPercentage = 1):
# Returns a list of indices of adaptation options within the bound time and exploration constraint
if len(indices) == 0:
return []
totalVerifTime |