########################################
#
# These functions edit sessions
#
########################################
from datetime import date, timedelta, datetime
from skipole import FailPage, GoTo, ValidateError, ServerError
from .. import sun, database_ops
def fill_edit_sessions(skicall):
"""Populates the edit sessions page, this is the page shown when an administrator chooses
sessions from the setup page"""
call_data = skicall.call_data
page_data = skicall.page_data
sessions = database_ops.get_sessions()
if sessions is None:
raise FailPage("Database Error")
if sessions:
# sessions are enabled
page_data['enabletext', 'para_text'] = "Sessions Enabled."
page_data['enabletext', 'widget_class'] = "w3-section w3-green"
page_data['enable', 'button_text'] = "Disable Sessions"
page_data['enable', 'link_ident'] = "disable_sessions"
else:
page_data['enabletext', 'para_text'] = "Sessions Disabled."
page_data['enabletext', 'widget_class'] = "w3-section w3-red"
page_data['enable', 'button_text'] = "Enable Sessions"
page_data['enable', 'link_ident'] = "enable_sessions"
today = datetime.utcnow().date()
now = datetime.utcnow()
rise_hour, rise_min = sun.sunrisetoday()
if now.hour < rise_hour:
# show timeslots from yesterday
fromdate = today - timedelta(days=1)
else:
fromdate = today
nextday = fromdate + timedelta(days=1)
postday = nextday + timedelta(days=1)
page_data['today', 'button_text'] = fromdate.strftime("%A %B %d, %Y")
page_data['today', 'get_field1'] = fromdate.isoformat()
page_data['tomorrow', 'button_text'] = nextday.strftime("%A %B %d, %Y")
page_data['tomorrow', 'get_field1'] = nextday.isoformat()
page_data['dayafter', 'button_text'] = postday.strftime("%A %B %d, %Y")
page_data['dayafter', 'get_field1'] = postday.isoformat()
def enable_sessions(skicall):
"Enable sessions"
if not database_ops.set_sessions(True):
raise FailPage("Database Error")
def disable_sessions(skicall):
"Disable sessions"
if not database_ops.set_sessions(False):
raise FailPage("Database Error")
def _get_startday(startday_string, call_data):
"""Given a start day string, return date object startday
and also put firstday, today, lastday into call_data"""
    # get today
    # get lastday, the latest date for which slots can be administered
    # (currently today plus six days)
    # and firstday, the earliest date for which slots can be administered
    # (currently yesterday)
if 'today' in call_data:
today = call_data['today']
else:
today = datetime.utcnow().date()
call_data['today'] = today
if 'firstday' in call_data:
firstday = call_data['firstday']
else:
firstday = today - timedelta(days=1)
call_data['firstday'] = firstday
if 'lastday' in call_data:
lastday = call_data['lastday']
else:
lastday = today + timedelta(days=6)
call_data['lastday'] = lastday
try:
startcomponents = [int(i) for i in startday_string.split('-')]
    except Exception:
raise FailPage("Invalid date")
if len(startcomponents) != 3:
raise FailPage("Invalid date")
year,month,day = startcomponents
if not ((year == today.year) or (year == today.year + 1) or (year == today.year - 1)):
raise FailPage("Invalid date")
try:
startday = date(year, month, day)
    except Exception:
raise FailPage("Invalid date")
if startday > lastday:
raise FailPage("Cannot set sessions that far ahead")
if startday < firstday:
raise FailPage("Cannot set sessions for the past")
return startday
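    # Hedged usage sketch (dates are illustrative, not taken from the original
    # source): with today = 2024-03-10 the admin window runs from
    # firstday = 2024-03-09 to lastday = 2024-03-16, so
    # _get_startday("2024-03-12", call_data) returns date(2024, 3, 12), while
    # "2024-03-20" raises FailPage("Cannot set sessions that far ahead").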
def _get_slot(string_sequence, call_data):
"Given a string sequence, return the slot"
try:
sequence = int(string_sequence)
    except Exception:
raise FailPage("Invalid data")
if sequence < 2 or sequence > 21:
raise FailPage("Invalid data")
slot = sun.Slot(call_data['startday'], sequence)
if slot.in_daylight():
raise FailPage("Invalid data")
return slot
def list_slots(skicall):
"Lists slots for the night, for admin users"
call_data = skicall.call_data
page_data = skicall.page_data
# Get the day being edited, either from get_fields
# or previously calculated and set into call_data['startday']
if 'startday' in call_data:
startday = call_data['startday']
else:
# from get fields
if call_data['today', 'get_field1']:
startday = _get_startday(call_data['today', 'get_field1'], call_data)
elif call_data['tomorrow', 'get_field1']:
startday = _get_startday(call_data['tomorrow', 'get_field1'], call_data)
elif call_data['dayafter', 'get_field1']:
startday = _get_startday(call_data['dayafter', 'get_field1'], call_data)
else:
raise FailPage("Invalid date")
call_data['set_values']["session_date_ident"] = startday.isoformat().replace('-','_')
endday = startday + timedelta(days=1)
now = datetime.utcnow()
now_15 = now + timedelta(minutes=15)
    page_data['evenning', 'para_text'] = "Evening of " + startday.strftime("%A %B %d, %Y")
page_data['morning', 'para_text'] = "Morning of " + endday.strftime("%A %B %d, %Y")
slots = sun.night_slots(startday)
contents = []
# contents: col 0 is the text to place in the first column,
# col 1, 2, 3, 4 are the get field contents of links 1,2,3 and 4
# col 5 - True if the first button and link is to be shown, False if not
# col 6 - True if the second button and link is to be shown, False if not
# col 7 - True if the third button and link is to be shown, False if not
# col 8 - True if the fourth button and link is to be shown, False if not
# col0_classes is a list of classes for the text column
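    # Illustrative example of one row (values hypothetical, not from the original
    # source): a free slot with sequence 5 that can still be booked or disabled
    # would append
    #   [str(slot), "5", "5", "5", "5", False, False, True, True]
    # and add 'w3-green' to col0_classes; a booked slot would instead set col 5
    # (the first button) True and use 'w3-red'.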
col0_classes = []
con = database_ops.open_database()
try:
sessions_enabled = database_ops.get_sessions(con)
for slot in slots:
but1 = False
but2 = False
but3 = False
but4 = False
column_text = str(slot)
status, user_id = database_ops.get_slot_status(slot, con)
            if now_15 > slot.endtime:
                # the slot has passed; now_15 is tested rather than now, so a slot
                # with 15 minutes or less to go is treated as past and can no
                # longer be booked, enabled or disabled
col0_classes.append('w3-grey')
column_text = column_text + " Past"
elif not status:
if sessions_enabled:
                    # session is available and can be booked or disabled
col0_classes.append('w3-green')
but3 = True
but4 = True
else:
# session disabled
col0_classes.append('w3-grey')
column_text = column_text + " Disabled"
elif status == 1:
# session booked
col0_classes.append('w3-red')
# can be freed
but1 = True
# get the username
user = database_ops.get_user_from_id(user_id, con)
if user is not None:
username, role, email, member = user
column_text = column_text + " Booked by: " + username
if member:
if role == 'MEMBER':
column_text = column_text + " Member: " + member
elif role == 'GUEST':
column_text = column_text + " Guest: " + member
elif role == 'ADMIN':
column_text = column_text + " Admin: " + member
else:
# session disabled
col0_classes.append('w3-grey')
column_text = column_text + " Disabled"
if sessions_enabled:
# can enable it
but2 = True
str_seq = str(slot.sequence)
row = [column_text,
str_seq,
str_seq,
str_seq,
str_seq,
but1,
but2,
but3,
but4 ]
contents.append(row)
finally:
database_ops.close_database(con)
page_data['slots', 'contents'] = contents
page_data['slots', 'col0_classes'] = col0_classes
# previous and next buttons
if startday <= call_data['firstday']:
page_data['previous', 'show'] = False
if startday >= call_data['lastday']:
page_data['next', 'show'] = False
def next_day(skicall):
"Choose the next day to edit"
call_data = skicall.call_data
page_data = skicall.page_data
# Get the day, sent by ident_data
if call_data['stored_values']['session_date']:
startday_string = call_data['stored_values']['session_date'].replace('_','-')
else:
raise FailPage("Invalid date")
new_startday = _get_startday(startday_string, call_data) + timedelta(days=1)
if new_startday > call_data['lastday']:
raise FailPage("Cannot set sessions that far ahead")
call_data["startday"] = new_startday
# and list the slots
list_slots(skicall)
def prev_day(skicall):
"Choose the previous day to edit"
call_data = skicall.call_data
page_data = skicall.page_data
# Get the day, sent by ident_data
if call_data['stored_values']['session_date']:
startday_string = call_data['stored_values']['session_date'].replace('_','-')
else:
raise FailPage("Invalid date")
new_startday = _get_startday(startday_string, call_data) - timedelta(days=1)
if new_startday < call_data['firstday']:
raise FailPage("Cannot set sessions for the past")
call_data["startday"] = new_startday
# and list the slots
list_slots(skicall)
def disable_slot(skicall):
"Disable a slot"
call_data = skicall.call_data
page_data = skicall.page_data
# Get the day, sent by ident_data
if call_data['stored_values']['session_date']:
startday_string = call_data['stored_values']['session_date'].replace('_','-')
else:
raise FailPage("Invalid date")
call_data["startday"] = _get_startday(startday_string, call_data)
slot = _get_slot(call_data['slots','btn_col3'], call_data)
sessions = database_ops.get_sessions()
if sessions is None:
raise FailPage("Database Error")
if not sessions:
raise FailPage("Cannot disable the session")
database_ops.disable_slot(slot)
# and list the slots
list_slots(skicall)
def enable_slot(skicall):
"Enables a slot"
call_data = skicall.call_data
page_data = skicall.page_data
# Get the day, sent by ident_data
if call_data['stored_values']['session_date']:
startday_string = call_data['stored_values']['session_date'].replace('_','-')
else:
raise FailPage("Invalid date")
call_data["startday"] = _get_startday(startday_string, call_data)
slot = _get_slot(call_data['slots','btn_col2'], call_data)
con = database_ops.open_database()
try:
sessions = database_ops.get_sessions(con)
if sessions is None:
raise FailPage("Database Error")
if not sessions:
raise FailPage("Cannot enable the session")
slot_status_id = database_ops.get_slot_status(slot, con)
if slot_status_id is None:
raise FailPage("Unable to get slot info from database")
slot_status, slot_user_id = slot_status_id
if (slot_status != 2):
raise FailPage("Slot must be disabled to enable it.")
# Delete the slot, which enables it
result = database_ops.delete_slot(slot, con)
if result:
con.commit()
finally:
database_ops.close_database(con)
# and list the slots
list_slots(skicall)
def confirm_free_slot(skicall):
"Fills in confirm button get field"
call_data = skicall.call_data
page_data = skicall.page_data
# Get the day, sent by ident_data
if call_data['stored_values']['session_date']:
startday_string = call_data['stored_values']['session_date'].replace('_','-')
else:
raise FailPage("Invalid date")
call_data["startday"] = _get_startday(startday_string, call_data)
slot = _get_slot(call_data['slots','btn_col1'], call_data)
con = database_ops.open_database()
try:
slot_status_id = database_ops.get_slot_status(slot, con)
if slot_status_id is None:
raise FailPage("Unable to get slot info from database")
slot_status, slot_user_id = slot_status_id
if (slot_status != 1):
raise FailPage("Slot must be booked to free it.")
# get username, membership number and email address
user = database_ops.get_user_from_id(slot_user_id, con)
if user is None:
raise FailPage("Unable to get user info from database")
        username, role, email,
'''
Created on Jan 11, 2012
@author: <NAME>, 2nd Look Consulting
http://www.2ndlookconsulting.com/
Copyright (c) 2013, <NAME>, Argonne National Laboratory
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
Neither the name of the Argonne National Laboratory nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
'''
from __future__ import division
import numpy as np
import os
import sys
sys.path.append('./')
sys.path.append('file_io')
import h5py
import maps_hdf5
import logging
def comp0(entryname, h51, maps_group_id1, h52, maps_group_id2):
this_xrfdata1, valid_read = h51.read_hdf5_core(maps_group_id1, entryname)
this_xrfdata2, valid_read = h52.read_hdf5_core(maps_group_id2, entryname)
if not np.allclose(this_xrfdata1, this_xrfdata2, atol=np.finfo(float).eps):
print entryname, ' differs. RMS='
for i in range(this_xrfdata1.shape[0]):
print '\t', i, np.sqrt(np.mean(( this_xrfdata1[i,:] - this_xrfdata2[i,:])**2))
#else: print entryname, ' is the same.'
def comp1(entryname, h51, maps_group_id1, h52, maps_group_id2):
this_xrfdata1, valid_read = h51.read_hdf5_core(maps_group_id1, entryname)
this_xrfdata2, valid_read = h52.read_hdf5_core(maps_group_id2, entryname)
this_xrfdata1 = np.nan_to_num(this_xrfdata1)
this_xrfdata2 = np.nan_to_num(this_xrfdata2)
if not np.allclose(this_xrfdata1, this_xrfdata2, atol=np.finfo(float).eps):
print entryname, ' differ.'
for i in range(this_xrfdata1.shape[0]):
if np.sum(np.abs(this_xrfdata1[i,:,:])) > 0:
print '\t', i, 'RMS= ', np.sqrt(np.mean(( this_xrfdata1[i,:,:] - this_xrfdata2[i,:,:])**2)), ',', 100*np.sum(np.abs(this_xrfdata1[i,:,:] - this_xrfdata2[i,:,:]))/np.sum(np.abs(this_xrfdata1[i,:,:])), '%'
else:
print '\t', i, 'RMS= ', np.sqrt(np.mean(( this_xrfdata1[i,:,:] - this_xrfdata2[i,:,:])**2))
#else: print entryname, 'are the same.'
def comp2(entryname, h51, maps_group_id1, h52, maps_group_id2):
this_xrfdata1, valid_read = h51.read_hdf5_core(maps_group_id1, entryname)
this_xrfdata2, valid_read = h52.read_hdf5_core(maps_group_id2, entryname)
this_xrfdata1 = np.nan_to_num(this_xrfdata1)
this_xrfdata2 = np.nan_to_num(this_xrfdata2)
if this_xrfdata1.shape == this_xrfdata2.shape:
if not np.allclose(this_xrfdata1[:,:,:], this_xrfdata2[:,:,:], atol=np.finfo(float).eps):
print entryname, ' differs.'
for i in range(this_xrfdata1.shape[0]):
if np.sum(np.abs(this_xrfdata1[i,:,:])) > np.finfo(float).eps:
print '\t', i, 'RMS= ', np.sqrt(np.mean(( this_xrfdata1[i,:,:] - this_xrfdata2[i,:,:])**2)), ',', 100*np.sum(np.abs(this_xrfdata1[i,:,:] - this_xrfdata2[i,:,:]))/np.sum(np.abs(this_xrfdata1[i,:,:])), '%'
else:
print '\t', i, 'RMS= ', np.sqrt(np.mean(( this_xrfdata1[i,:,:] - this_xrfdata2[i,:,:])**2))
#else: print entryname, ' is the same.'
else:
if len(this_xrfdata1.shape) == len(this_xrfdata2.shape):
min_range = min(this_xrfdata1.shape[0], this_xrfdata2.shape[0])
if not np.allclose(this_xrfdata1[0:min_range,:,:], this_xrfdata2[0:min_range,:,:], atol=np.finfo(float).eps):
print entryname, ' differs.'
for i in range(min_range):
if np.sum(np.abs(this_xrfdata1[i,:,:])) > np.finfo(float).eps:
print '\t', i, 'RMS= ', np.sqrt(np.mean(( this_xrfdata1[i,:,:] - this_xrfdata2[i,:,:])**2)), ',', 100*np.sum(np.abs(this_xrfdata1[i,:,:] - this_xrfdata2[i,:,:]))/np.sum(np.abs(this_xrfdata1[i,:,:])), '%'
else:
print '\t', i, 'RMS= ', np.sqrt(np.mean(( this_xrfdata1[i,:,:] - this_xrfdata2[i,:,:])**2))
#else: print entryname, ' is the same.'
else:
print entryname, ' have different shapes.'
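# Note restating the comparisons above: comp0, comp1 and comp2 all test
# np.allclose at machine epsilon (np.finfo(float).eps) and, on a mismatch, print
# a per-slice RMS, sqrt(mean((A_i - B_i)**2)); comp1 and comp2 first apply
# np.nan_to_num and additionally report the relative difference
# 100 * sum(|A_i - B_i|) / sum(|A_i|) percent where the denominator is non-zero.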
""" ------------------------------------------------------------------------------------------------"""
def main(file1, file2):
logger = logging.getLogger('compare')
fHandler = logging.FileHandler('compare')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s | %(levelname)s | PID[%(process)d] | %(funcName)s(): %(message)s')
fHandler.setFormatter(formatter)
logger.addHandler(fHandler)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s | %(levelname)s | PID[%(process)d] | %(funcName)s(): %(message)s')
ch.setFormatter(formatter)
ch.setLevel(logging.WARNING)
logger.addHandler(ch)
verbose = 1
    #remove quotation marks
    file1 = file1.strip('"')
if "'" in file1: file1 = file1[1:-1]
file1 = os.path.normpath(file1)
if verbose: print 'file1 =', file1
if not os.path.isfile(file1):
print 'Error - File', file1, ' does not exist. Please specify working directory.'
return
    #remove quotation marks
    file2 = file2.strip('"')
if "'" in file2: file2 = file2[1:-1]
file2 = os.path.normpath(file2)
if verbose: print 'file2 =', file2
if not os.path.isfile(file2):
print 'Error - File', file2, ' does not exist. Please specify working directory.'
return
f1 = h5py.File(file1, 'r')
if 'MAPS' not in f1:
        print 'Error: HDF5 file', file1, 'does not contain the required MAPS group. Aborting.'
return
maps_group_id1 = f1['MAPS']
l1 = list(maps_group_id1)
f2 = h5py.File(file2, 'r')
if 'MAPS' not in f2:
        print 'Error: HDF5 file', file2, 'does not contain the required MAPS group. Aborting.'
return
maps_group_id2 = f2['MAPS']
l2 = list(maps_group_id2)
s1 = set(l1)
s2 = set(l2)
    if len(s1.difference(s2)):
        print '\nElements in ', os.path.basename(file1), ' that are not in ', os.path.basename(file2), ':'
        for i in s1.difference(s2): print i
    if len(s2.difference(s1)):
        print '\nElements in ', os.path.basename(file2), ' that are not in ', os.path.basename(file1), ':'
        for i in s2.difference(s1): print i
#else:
# print 'Files have the same groups.'
#print '\nCompare HDF5 fields in the files.'
h51 = maps_hdf5.h5(logger)
h52 = maps_hdf5.h5(logger)
'''
entryname = 'mca_arr'
this_xrfdata1, valid_read = h51.read_hdf5_core(maps_group_id1, entryname)
this_xrfdata2, valid_read = h52.read_hdf5_core(maps_group_id2, entryname)
if not np.allclose(this_xrfdata1, this_xrfdata2, atol=1.0e-6):
print entryname, ' differs.'
else: print entryname, ' is the same.'
'''
comp0('us_amp', h51, maps_group_id1, h52, maps_group_id2)
comp0('ds_amp', h51, maps_group_id1, h52, maps_group_id2)
comp0('energy', h51, maps_group_id1, h52, maps_group_id2)
comp0('energy_calib', h51, maps_group_id1, h52, maps_group_id2)
comp0('int_spec', h51, maps_group_id1, h52, maps_group_id2)
comp0('max_chan_spec', h51, maps_group_id1, h52, maps_group_id2)
'''
if verbose == 2:
import matplotlib.pyplot as plt
for i in range(5):
plt.plot(this_xrfdata1[i,:])
plt.plot(this_xrfdata2[i,:])
plt.show()
'''
comp1('scalers', h51, maps_group_id1, h52, maps_group_id2)
comp2('XRF_roi', h51, maps_group_id1, h52, maps_group_id2)
comp2('XRF_fits', h51, maps_group_id1, h52, maps_group_id2)
comp2('XRF_roi_plus', h51, maps_group_id1, h52, maps_group_id2)
comp0('x_axis', h51, maps_group_id1, h52, maps_group_id2)
comp0('y_axis', h51, maps_group_id1, h52, maps_group_id2)
comp1('XRF_roi_quant', h51, maps_group_id1, h52, maps_group_id2)
comp1('XRF_roi_plus_quant', h51, maps_group_id1, h52, maps_group_id2)
comp1('XRF_fits_quant', h51, maps_group_id1, h52, maps_group_id2)
entryname = 'channel_names'
this_xrfdata1, valid_read = h51.read_hdf5_core(maps_group_id1, entryname)
this_xrfdata2, valid_read = h52.read_hdf5_core(maps_group_id2, entryname)
if len(this_xrfdata1) == len(this_xrfdata2):
same = 1
for i in range(len(this_xrfdata1)):
if this_xrfdata1[i] != this_xrfdata2[i]: same = 0
if same == 0:
print entryname,' differ.'
for i in range(len(this_xrfdata1)):
print '\t', i, this_xrfdata1[i], this_xrfdata2[i]
#else: print entryname, 'are the same.'
else:
print entryname,' differ:'
print this_xrfdata1
print this_xrfdata2
print '\n'
entryname = 'channel_units'
this_xrfdata1, valid_read = h51.read_hdf5_core(maps_group_id1, entryname)
this_xrfdata2, valid_read = h52.read_hdf5_core(maps_group_id2, entryname)
if len(this_xrfdata1) != len(this_xrfdata2):
print entryname,' differ:'
print this_xrfdata1
print this_xrfdata2
print '\n'
entryname = 'scaler_names'
this_xrfdata1, valid_read = h51.read_hdf5_core(maps_group_id1, entryname)
this_xrfdata2, valid_read = h52.read_hdf5_core(maps_group_id2, entryname)
if len(this_xrfdata1) == len(this_xrfdata2):
same = 1
for i in range(len(this_xrfdata1)):
if this_xrfdata1[i] != this_xrfdata2[i]: same = 0
if same == 0:
print entryname,' differ.'
for i in range(len(this_xrfdata1)):
print '\t', i, this_xrfdata1[i], this_xrfdata2[i]
#else: print entryname, 'are the same.'
else: print entryname,' differ.'
entryname = 'scaler_units'
this_xrfdata1, valid_read = h51.read_hdf5_core(maps_group_id1, entryname)
this_xrfdata2, valid_read = h52.read_hdf5_core(maps_group_id2, entryname)
if len(this_xrfdata1) == len(this_xrfdata2):
same = 1
for i in range(len(this_xrfdata1)):
if this_xrfdata1[i] != this_xrfdata2[i]: same = 0
if same == 0:
print entryname,' differs.'
for i in range(len(this_xrfdata1)):
print '\t', i, this_xrfdata1[i], this_xrfdata2[i]
#else: print entryname, 'are the same.'
else: print entryname,' differ.'
comp0('add_long', h51, maps_group_id1, h52, maps_group_id2)
comp0('add_float', h51, maps_group_id1, h52, maps_group_id2)
entryname = 'add_string'
this_xrfdata1, valid_read = h51.read_hdf5_core(maps_group_id1, entryname)
this_xrfdata2, valid_read = h52.read_hdf5_core(maps_group_id2, entryname)
if len(this_xrfdata1) == len(this_xrfdata2):
same = 1
for i in range(len(this_xrfdata1)):
if this_xrfdata1[i].strip() != this_xrfdata2[i].strip():
same = 0
if same == 0:
print entryname,' differs.'
#else: print entryname, 'are the same.'
else: print entryname,' differ.'
# #Extra_strings are the same if extra_pvs are the same
# entryname = 'extra_strings'
# this_xrfdata1, valid_read = h51.read_hdf5_core(maps_group_id1, entryname)
# this_xrfdata2, valid_read = h52.read_hdf5_core(maps_group_id2, entryname)
# if len(this_xrfdata1) == len(this_xrfdata2):
# same = 1
# for i in range(len(this_xrfdata1)):
# if this_xrfdata1[i] != this_xrfdata2[i]:
# same = 0
# print this_xrfdata1[i],this_xrfdata2[i]
# if same == 0:
# print entryname,' differ.'
# else: print entryname, 'are the same.'
# else: print entryname,' differ.'
entryname = 'extra_pvs'
this_xrfdata1, valid_read = h51.read_hdf5_core(maps_group_id1, entryname)
this_xrfdata2, valid_read = h52.read_hdf5_core(maps_group_id2, entryname)
l1 = []
l2 = []
    if type(this_xrfdata1) == int:
        l1 += [this_xrfdata1]
    else:
        for i in range(this_xrfdata1.shape[1]): l1.append(this_xrfdata1[0,i])
    if type(this_xrfdata2) == int:
        l2 += [this_xrfdata2]
    else:
        for i in range(this_xrfdata2.shape[1]): l2.append(this_xrfdata2[0,i])
s1 = set(l1)
s2 = set(l2)
if len(s1.difference(s2)):
string_array.num_total_chars,
builder.extract_value(ptr, 0), length, kind, is_ascii, ind
])
return context.get_dummy_value()
return types.void(str_arr_t, ind_t, ptr_t, len_t), codegen
def lower_is_na(context, builder, bull_bitmap, ind):
fnty = lir.FunctionType(lir.IntType(1),
[lir.IntType(8).as_pointer(),
lir.IntType(64)])
fn_getitem = builder.module.get_or_insert_function(fnty,
name="is_na")
return builder.call(fn_getitem, [bull_bitmap,
ind])
@intrinsic
def _memcpy(typingctx, dest_t, src_t, count_t, item_size_t=None):
def codegen(context, builder, sig, args):
dst, src, count, itemsize = args
# buff_arr = context.make_array(sig.args[0])(context, builder, buff_arr)
# ptr = builder.gep(buff_arr.data, [ind])
cgutils.raw_memcpy(builder, dst, src, count, itemsize)
return context.get_dummy_value()
return types.void(types.voidptr, types.voidptr, types.intp, types.intp), codegen
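# Hedged note restating the intrinsic above: _memcpy(dst, src, count, itemsize)
# copies count * itemsize bytes between raw pointers via cgutils.raw_memcpy; it
# is used, for example, by the commented-out getitem implementation further below.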
# TODO: use overload for all getitem cases (currently implemented via lower_builtin)
@overload(operator.getitem)
def str_arr_getitem_int(A, arg):
if (A != string_array_type):
return None
if isinstance(arg, types.Integer):
def str_arr_getitem_by_integer_impl(A, arg):
if arg < 0 or arg >= len(A):
raise IndexError("StringArray getitem with index out of bounds")
start_offset = getitem_str_offset(A, arg)
end_offset = getitem_str_offset(A, arg + 1)
length = end_offset - start_offset
ptr = get_data_ptr_ind(A, start_offset)
ret = decode_utf8(ptr, length)
# ret = numba.cpython.unicode._empty_string(kind, length)
# _memcpy(ret._data, ptr, length, 1)
return ret
return str_arr_getitem_by_integer_impl
elif (isinstance(arg, types.Array) and isinstance(arg.dtype, (types.Boolean, types.Integer))):
def str_arr_getitem_by_array_impl(A, arg):
if len(A) != len(arg):
raise IndexError("Mismatch of boolean index and indexed array sizes")
idxs = np.arange(len(A))
taken_idxs = idxs[arg]
result_size = len(taken_idxs)
total_chars = 0
for i in prange(result_size):
total_chars += len(A[taken_idxs[i]])
ret = pre_alloc_string_array(result_size, total_chars)
for i in prange(result_size):
ret[i] = A[taken_idxs[i]]
if str_arr_is_na(A, taken_idxs[i]):
str_arr_set_na(ret, i)
return ret
return str_arr_getitem_by_array_impl
return None
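# Hedged usage sketch (names are illustrative; assumes A is an array of this
# module's string_array_type) of the two getitem paths overloaded above, inside
# an @numba.njit-compiled function:
#     s = A[3]                          # integer index -> unicode string
#     mask = np.zeros(len(A), np.bool_)
#     mask[0] = True
#     sub = A[mask]                     # boolean-array index -> new string array
#                                       # (NA flags are carried over)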
@intrinsic
def decode_utf8(typingctx, ptr_t, len_t=None):
def codegen(context, builder, sig, args):
ptr, length = args
# create str and call decode with internal pointers
uni_str = cgutils.create_struct_proxy(string_type)(context, builder)
fnty = lir.FunctionType(lir.VoidType(), [lir.IntType(8).as_pointer(),
lir.IntType(64),
lir.IntType(32).as_pointer(),
lir.IntType(32).as_pointer(),
lir.IntType(64).as_pointer(),
uni_str.meminfo.type.as_pointer()])
fn_decode = builder.module.get_or_insert_function(
fnty, name="decode_utf8")
builder.call(fn_decode, [ptr, length,
uni_str._get_ptr_by_name('kind'),
uni_str._get_ptr_by_name('is_ascii'),
uni_str._get_ptr_by_name('length'),
uni_str._get_ptr_by_name('meminfo')])
uni_str.hash = context.get_constant(_Py_hash_t, -1)
uni_str.data = context.nrt.meminfo_data(builder, uni_str.meminfo)
# Set parent to NULL
uni_str.parent = cgutils.get_null_value(uni_str.parent.type)
return uni_str._getvalue()
return string_type(types.voidptr, types.intp), codegen
# @lower_builtin(operator.getitem, StringArrayType, types.Integer)
# @lower_builtin(operator.getitem, StringArrayType, types.IntegerLiteral)
# def lower_string_arr_getitem(context, builder, sig, args):
# # TODO: support multibyte unicode
# # TODO: support Null
# kind = numba.cpython.unicode.PY_UNICODE_1BYTE_KIND
# def str_arr_getitem_impl(A, i):
# start_offset = getitem_str_offset(A, i)
# end_offset = getitem_str_offset(A, i + 1)
# length = end_offset - start_offset
# ret = numba.cpython.unicode._empty_string(kind, length)
# ptr = get_data_ptr_ind(A, start_offset)
# _memcpy(ret._data, ptr, length, 1)
# return ret
# res = context.compile_internal(builder, str_arr_getitem_impl, sig, args)
# return res
# typ = sig.args[0]
# ind = args[1]
# string_array = context.make_helper(builder, typ, args[0])
# # check for NA
# # i/8, XXX: lshr since always positive
# #byte_ind = builder.lshr(ind, lir.Constant(lir.IntType(64), 3))
# #bit_ind = builder.srem
# # cgutils.printf(builder, "calling bitmap\n")
# # with cgutils.if_unlikely(builder, lower_is_na(context, builder, string_array.null_bitmap, ind)):
# # cgutils.printf(builder, "is_na %d \n", ind)
# # cgutils.printf(builder, "calling bitmap done\n")
# fnty = lir.FunctionType(lir.IntType(8).as_pointer(),
# [lir.IntType(32).as_pointer(),
# lir.IntType(8).as_pointer(),
# lir.IntType(64)])
# fn_getitem = builder.module.get_or_insert_function(fnty,
# name="getitem_string_array_std")
# return builder.call(fn_getitem, [string_array.offsets,
# string_array.data, args[1]])
@lower_builtin(operator.getitem, StringArrayType, types.SliceType)
def lower_string_arr_getitem_slice(context, builder, sig, args):
def str_arr_slice_impl(str_arr, idx):
n = len(str_arr)
slice_idx = numba.cpython.unicode._normalize_slice(idx, n)
span = numba.cpython.unicode._slice_span(slice_idx)
if slice_idx.step == 1:
start_offset = getitem_str_offset(str_arr, slice_idx.start)
end_offset = getitem_str_offset(str_arr, slice_idx.stop)
n_chars = end_offset - start_offset
new_arr = pre_alloc_string_array(span, np.int64(n_chars))
# TODO: more efficient copy
for i in range(span):
new_arr[i] = str_arr[slice_idx.start + i]
return new_arr
else: # TODO: test
# get number of chars
n_chars = 0
for i in range(slice_idx.start, slice_idx.stop, slice_idx.step):
_str = str_arr[i]
n_chars += get_utf8_size(_str)
new_arr = pre_alloc_string_array(span, np.int64(n_chars))
# TODO: more efficient copy
for i in range(span):
new_arr[i] = str_arr[slice_idx.start + i * slice_idx.step]
return new_arr
res = context.compile_internal(builder, str_arr_slice_impl, sig, args)
return res
@numba.njit(no_cpython_wrapper=True)
def str_arr_item_to_numeric(out_arr, out_ind, str_arr, ind):
return _str_arr_item_to_numeric(sdc.hiframes.split_impl.get_c_arr_ptr(
out_arr.ctypes, out_ind), str_arr, ind, out_arr.dtype)
@intrinsic
def _str_arr_item_to_numeric(typingctx, out_ptr_t, str_arr_t, ind_t,
out_dtype_t=None):
assert str_arr_t == string_array_type
assert ind_t == types.int64
def codegen(context, builder, sig, args):
# TODO: return tuple with value and error and avoid array arg?
out_ptr, arr, ind, _dtype = args
string_array = context.make_helper(builder, string_array_type, arr)
fnty = lir.FunctionType(
lir.IntType(32),
[out_ptr.type,
lir.IntType(32).as_pointer(),
lir.IntType(8).as_pointer(),
lir.IntType(64)])
fname = 'str_arr_to_int64'
if sig.args[3].dtype == types.float64:
fname = 'str_arr_to_float64'
else:
assert sig.args[3].dtype == types.int64
fn_to_numeric = builder.module.get_or_insert_function(fnty, fname)
return builder.call(
fn_to_numeric,
[out_ptr, string_array.offsets, string_array.data, ind])
return types.int32(
out_ptr_t, string_array_type, types.int64, out_dtype_t), codegen
# TODO: support array of strings
# @typeof_impl.register(np.ndarray)
# def typeof_np_string(val, c):
# arr_typ = numba.core.typing.typeof._typeof_ndarray(val, c)
# # match string dtype
# if isinstance(arr_typ.dtype, (types.UnicodeCharSeq, types.CharSeq)):
# return string_array_type
# return arr_typ
@unbox(StringArrayType)
def unbox_str_series(typ, val, c):
"""
    Unbox a Pandas String Series. We just redirect to the StringArray implementation.
"""
dtype = StringArrayPayloadType()
payload = cgutils.create_struct_proxy(dtype)(c.context, c.builder)
string_array = c.context.make_helper(c.builder, typ)
# function signature of string_array_from_sequence
# we use void* instead of PyObject*
fnty = lir.FunctionType(lir.VoidType(),
[lir.IntType(8).as_pointer(),
lir.IntType(64).as_pointer(),
lir.IntType(32).as_pointer().as_pointer(),
lir.IntType(8).as_pointer().as_pointer(),
lir.IntType(8).as_pointer().as_pointer(),
])
fn = c.builder.module.get_or_insert_function(fnty, name="string_array_from_sequence")
c.builder.call(fn, [val,
string_array._get_ptr_by_name('num_items'),
payload._get_ptr_by_name('offsets'),
payload._get_ptr_by_name('data'),
payload._get_ptr_by_name('null_bitmap'),
])
# the raw data is now copied to payload
# The native representation is a proxy to the payload, we need to
# get a proxy and attach the payload and meminfo
meminfo, meminfo_data_ptr = construct_string_array(c.context, c.builder)
c.builder.store(payload._getvalue(), meminfo_data_ptr)
string_array.meminfo = meminfo
string_array.offsets = payload.offsets
string_array.data = payload.data
string_array.null_bitmap = payload.null_bitmap
string_array.num_total_chars = c.builder.zext(c.builder.load(
c.builder.gep(string_array.offsets, [string_array.num_items])), lir.IntType(64))
# FIXME how to check that the returned size is > 0?
is_error = cgutils.is_not_null(c.builder, c.pyapi.err_occurred())
return NativeValue(string_array._getvalue(), is_error=is_error)
# zero = context.get_constant(types.intp, 0)
# cond = builder.icmp_signed('>=', size, zero)
# with cgutils.if_unlikely(builder, cond):
# http://llvmlite.readthedocs.io/en/latest/user-guide/ir/ir-builder.html#comparisons
# *** glob support *****
@infer_global(glob)
class GlobInfer(AbstractTemplate):
def generic(self, args, kws):
if not kws and len(args) == 1 and args[0] == string_type:
return signature(string_array_type, *args)
@lower_builtin(glob, string_type)
def lower_glob(context, builder, sig, args):
path = args[0]
uni_str = cgutils.create_struct_proxy(string_type)(
context, builder, value=path)
path = uni_str.data
typ = sig.return_type
dtype = StringArrayPayloadType()
meminfo, meminfo_data_ptr = construct_string_array(context, builder)
string_array = context.make_helper(builder, typ)
str_arr_payload = cgutils.create_struct_proxy(dtype)(context, builder)
# call glob in C
fnty = lir.FunctionType(lir.VoidType(),
[lir.IntType(32).as_pointer().as_pointer(),
lir.IntType(8).as_pointer().as_pointer(),
lir.IntType(8).as_pointer().as_pointer(),
lir.IntType(64).as_pointer(),
lir.IntType(8).as_pointer()])
fn = builder.module.get_or_insert_function(fnty, name="c_glob")
builder.call(fn, [str_arr_payload._get_ptr_by_name('offsets'),
str_arr_payload._get_ptr_by_name('data'),
str_arr_payload._get_ptr_by_name('null_bitmap'),
string_array._get_ptr_by_name('num_items'),
path])
builder.store(str_arr_payload._getvalue(), meminfo_data_ptr)
string_array.meminfo = meminfo
string_array.offsets = str_arr_payload.offsets
string_array.data = str_arr_payload.data
string_array.null_bitmap = str_arr_payload.null_bitmap
string_array.num_total_chars = builder.zext(builder.load(
builder.gep(string_array.offsets, [string_array.num_items])), lir.IntType(64))
# cgutils.printf(builder, "n %d\n", string_array.num_items)
ret = string_array._getvalue()
# context.nrt.decref(builder, ty, ret)
return impl_ret_new_ref(context, builder, typ, ret)
@numba.njit(no_cpython_wrapper=True)
def append_string_array_to(result, pos, A):
    # precondition: result is allocated with enough space to hold A
i, j = 0, pos
    for s in A:
        result[j] = s
        if str_arr_is_na(A, i):
            sdc.str_arr_ext.str_arr_set_na(result, j)
        i += 1
        j += 1
return i
@numba.njit(no_cpython_wrapper=True)
def create_str_arr_from_list(str_list):
n = len(str_list)
data_total_chars = 0
for i in numba.prange(n):
data_total_chars += get_utf8_size(str_list[i])
str_arr = pre_alloc_string_array(n, data_total_chars)
cp_str_list_to_array(str_arr, str_list)
return str_arr
@numba.njit(no_cpython_wrapper=True)
def str_arr_set_na_by_mask(str_arr, nan_mask):
    # precondition: (1) str_arr and nan_mask have the same size
    #               (2) elements for which na bits are set all have zero length
for i in numba.prange(len(str_arr)):
if nan_mask[i]:
str_arr_set_na(str_arr, i)
return str_arr
@overload(operator.add)
def sdc_str_arr_operator_add(self, other):
self_is_str_arr = self == string_array_type
other_is_str_arr = other == string_array_type
operands_are_str_arr = self_is_str_arr and other_is_str_arr
if not (operands_are_str_arr
or (self_is_str_arr and isinstance(other, types.UnicodeType))
or (isinstance(self, types.UnicodeType) and other_is_str_arr)):
return None
if operands_are_str_arr:
def _sdc_str_arr_operator_add_impl(self, other):
size_self, size_other = len(self), len(other)
if size_self != size_other:
raise ValueError("Mismatch of String Arrays sizes in operator.add")
res_total_chars = 0
for i in numba.prange(size_self):
if not str_arr_is_na(self, i) and not str_arr_is_na(other, i):
res_total_chars += (get_utf8_size(self[i]) + get_utf8_size(other[i]))
res_arr = pre_alloc_string_array(size_self, res_total_chars)
for i in numba.prange(size_self):
if not (str_arr_is_na(self, i) or str_arr_is_na(other, i)):
res_arr[i] = self[i] + other[i]
else:
res_arr[i] = ''
str_arr_set_na(res_arr, i)
return res_arr
elif self_is_str_arr:
def _sdc_str_arr_operator_add_impl(self, other):
res_size = len(self)
res_total_chars = 0
for i in numba.prange(res_size):
if not str_arr_is_na(self, i):
res_total_chars += get_utf8_size(self[i]) + get_utf8_size(other)
res_arr = pre_alloc_string_array(res_size, res_total_chars)
for i in numba.prange(res_size):
if not str_arr_is_na(self, i):
res_arr[i] = self[i] + other
else:
res_arr[i] = ''
str_arr_set_na(res_arr, i)
return res_arr
elif other_is_str_arr:
def _sdc_str_arr_operator_add_impl(self, other):
res_size = len(other)
res_total_chars = 0
for i in numba.prange(res_size):
if not str_arr_is_na(other, i):
res_total_chars += get_utf8_size(other[i]) + get_utf8_size(self)
res_arr = pre_alloc_string_array(res_size, res_total_chars)
for i in numba.prange(res_size):
if not str_arr_is_na(other, i):
res_arr[i] = self + other[i]
else:
res_arr[i] = ''
str_arr_set_na(res_arr, i)
return res_arr
else:
return None
return _sdc_str_arr_operator_add_impl
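# Hedged usage sketch of the operator.add overload above (inside an
# @numba.njit-compiled function, with A and B string arrays of equal length):
#     C = A + B        # element-wise concatenation
#     D = A + 'suffix' # broadcast a unicode scalar; 'prefix' + A also works
# Positions that are NA in an array operand come out as NA in the result.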
@overload(operator.mul)
def sdc_str_arr_operator_mul(self, other):
self_is_str_arr = self == string_array_type
other_is_str_arr = other == string_array_type
if not ((self_is_str_arr and check_is_array_of_dtype(other, types.Integer)
or self_is_str_arr and isinstance(other, types.Integer)
or other_is_str_arr and check_is_array_of_dtype(self, types.Integer)
or other_is_str_arr and isinstance(self, types.Integer))):
return None
one_operand_is_scalar = isinstance(self, types.Integer) or isinstance(other, types.Integer)
def _sdc_str_arr_operator_mul_impl(self, other):
        _self, _other = (self, other) if
+ m.b431 <= 1)
m.c4622 = Constraint(expr= m.b326 - m.b342 + m.b432 <= 1)
m.c4623 = Constraint(expr= m.b327 - m.b328 + m.b433 <= 1)
m.c4624 = Constraint(expr= m.b327 - m.b329 + m.b434 <= 1)
m.c4625 = Constraint(expr= m.b327 - m.b330 + m.b435 <= 1)
m.c4626 = Constraint(expr= m.b327 - m.b331 + m.b436 <= 1)
m.c4627 = Constraint(expr= m.b327 - m.b332 + m.b437 <= 1)
m.c4628 = Constraint(expr= m.b327 - m.b333 + m.b438 <= 1)
m.c4629 = Constraint(expr= m.b327 - m.b334 + m.b439 <= 1)
m.c4630 = Constraint(expr= m.b327 - m.b335 + m.b440 <= 1)
m.c4631 = Constraint(expr= m.b327 - m.b336 + m.b441 <= 1)
m.c4632 = Constraint(expr= m.b327 - m.b337 + m.b442 <= 1)
m.c4633 = Constraint(expr= m.b327 - m.b338 + m.b443 <= 1)
m.c4634 = Constraint(expr= m.b327 - m.b339 + m.b444 <= 1)
m.c4635 = Constraint(expr= m.b327 - m.b340 + m.b445 <= 1)
m.c4636 = Constraint(expr= m.b327 - m.b341 + m.b446 <= 1)
m.c4637 = Constraint(expr= m.b327 - m.b342 + m.b447 <= 1)
m.c4638 = Constraint(expr= m.b328 - m.b329 + m.b448 <= 1)
m.c4639 = Constraint(expr= m.b328 - m.b330 + m.b449 <= 1)
m.c4640 = Constraint(expr= m.b328 - m.b331 + m.b450 <= 1)
m.c4641 = Constraint(expr= m.b328 - m.b332 + m.b451 <= 1)
m.c4642 = Constraint(expr= m.b328 - m.b333 + m.b452 <= 1)
m.c4643 = Constraint(expr= m.b328 - m.b334 + m.b453 <= 1)
m.c4644 = Constraint(expr= m.b328 - m.b335 + m.b454 <= 1)
m.c4645 = Constraint(expr= m.b328 - m.b336 + m.b455 <= 1)
m.c4646 = Constraint(expr= m.b328 - m.b337 + m.b456 <= 1)
m.c4647 = Constraint(expr= m.b328 - m.b338 + m.b457 <= 1)
m.c4648 = Constraint(expr= m.b328 - m.b339 + m.b458 <= 1)
m.c4649 = Constraint(expr= m.b328 - m.b340 + m.b459 <= 1)
m.c4650 = Constraint(expr= m.b328 - m.b341 + m.b460 <= 1)
m.c4651 = Constraint(expr= m.b328 - m.b342 + m.b461 <= 1)
m.c4652 = Constraint(expr= m.b329 - m.b330 + m.b462 <= 1)
m.c4653 = Constraint(expr= m.b329 - m.b331 + m.b463 <= 1)
m.c4654 = Constraint(expr= m.b329 - m.b332 + m.b464 <= 1)
m.c4655 = Constraint(expr= m.b329 - m.b333 + m.b465 <= 1)
m.c4656 = Constraint(expr= m.b329 - m.b334 + m.b466 <= 1)
m.c4657 = Constraint(expr= m.b329 - m.b335 + m.b467 <= 1)
m.c4658 = Constraint(expr= m.b329 - m.b336 + m.b468 <= 1)
m.c4659 = Constraint(expr= m.b329 - m.b337 + m.b469 <= 1)
m.c4660 = Constraint(expr= m.b329 - m.b338 + m.b470 <= 1)
m.c4661 = Constraint(expr= m.b329 - m.b339 + m.b471 <= 1)
m.c4662 = Constraint(expr= m.b329 - m.b340 + m.b472 <= 1)
m.c4663 = Constraint(expr= m.b329 - m.b341 + m.b473 <= 1)
m.c4664 = Constraint(expr= m.b329 - m.b342 + m.b474 <= 1)
m.c4665 = Constraint(expr= m.b330 - m.b331 + m.b475 <= 1)
m.c4666 = Constraint(expr= m.b330 - m.b332 + m.b476 <= 1)
m.c4667 = Constraint(expr= m.b330 - m.b333 + m.b477 <= 1)
m.c4668 = Constraint(expr= m.b330 - m.b334 + m.b478 <= 1)
m.c4669 = Constraint(expr= m.b330 - m.b335 + m.b479 <= 1)
m.c4670 = Constraint(expr= m.b330 - m.b336 + m.b480 <= 1)
m.c4671 = Constraint(expr= m.b330 - m.b337 + m.b481 <= 1)
m.c4672 = Constraint(expr= m.b330 - m.b338 + m.b482 <= 1)
m.c4673 = Constraint(expr= m.b330 - m.b339 + m.b483 <= 1)
m.c4674 = Constraint(expr= m.b330 - m.b340 + m.b484 <= 1)
m.c4675 = Constraint(expr= m.b330 - m.b341 + m.b485 <= 1)
m.c4676 = Constraint(expr= m.b330 - m.b342 + m.b486 <= 1)
m.c4677 = Constraint(expr= m.b331 - m.b332 + m.b487 <= 1)
m.c4678 = Constraint(expr= m.b331 - m.b333 + m.b488 <= 1)
m.c4679 = Constraint(expr= m.b331 - m.b334 + m.b489 <= 1)
m.c4680 = Constraint(expr= m.b331 - m.b335 + m.b490 <= 1)
m.c4681 = Constraint(expr= m.b331 - m.b336 + m.b491 <= 1)
m.c4682 = Constraint(expr= m.b331 - m.b337 + m.b492 <= 1)
m.c4683 = Constraint(expr= m.b331 - m.b338 + m.b493 <= 1)
m.c4684 = Constraint(expr= m.b331 - m.b339 + m.b494 <= 1)
m.c4685 = Constraint(expr= m.b331 - m.b340 + m.b495 <= 1)
m.c4686 = Constraint(expr= m.b331 - m.b341 + m.b496 <= 1)
m.c4687 = Constraint(expr= m.b331 - m.b342 + m.b497 <= 1)
m.c4688 = Constraint(expr= m.b332 - m.b333 + m.b498 <= 1)
m.c4689 = Constraint(expr= m.b332 - m.b334 + m.b499 <= 1)
m.c4690 = Constraint(expr= m.b332 - m.b335 + m.b500 <= 1)
m.c4691 = Constraint(expr= m.b332 - m.b336 + m.b501 <= 1)
m.c4692 = Constraint(expr= m.b332 - m.b337 + m.b502 <= 1)
m.c4693 = Constraint(expr= m.b332 - m.b338 + m.b503 <= 1)
m.c4694 = Constraint(expr= m.b332 - m.b339 + m.b504 <= 1)
m.c4695 = Constraint(expr= m.b332 - m.b340 + m.b505 <= 1)
m.c4696 = Constraint(expr= m.b332 - m.b341 + m.b506 <= 1)
m.c4697 = Constraint(expr= m.b332 - m.b342 + m.b507 <= 1)
m.c4698 = Constraint(expr= m.b333 - m.b334 + m.b508 <= 1)
m.c4699 = Constraint(expr= m.b333 - m.b335 + m.b509 <= 1)
m.c4700 = Constraint(expr= m.b333 - m.b336 + m.b510 <= 1)
m.c4701 = Constraint(expr= m.b333 - m.b337 + m.b511 <= 1)
m.c4702 = Constraint(expr= m.b333 - m.b338 + m.b512 <= 1)
m.c4703 = Constraint(expr= m.b333 - m.b339 + m.b513 <= 1)
m.c4704 = Constraint(expr= m.b333 - m.b340 + m.b514 <= 1)
m.c4705 = Constraint(expr= m.b333 - m.b341 + m.b515 <= 1)
m.c4706 = Constraint(expr= m.b333 - m.b342 + m.b516 <= 1)
m.c4707 = Constraint(expr= m.b334 - m.b335 + m.b517 <= 1)
m.c4708 = Constraint(expr= m.b334 - m.b336 + m.b518 <= 1)
m.c4709 = Constraint(expr= m.b334 - m.b337 + m.b519 <= 1)
m.c4710 = Constraint(expr= m.b334 - m.b338 + m.b520 <= 1)
m.c4711 = Constraint(expr= m.b334 - m.b339 + m.b521 <= 1)
m.c4712 = Constraint(expr= m.b334 - m.b340 + m.b522 <= 1)
m.c4713 = Constraint(expr= m.b334 - m.b341 + m.b523 <= 1)
m.c4714 = Constraint(expr= m.b334 - m.b342 + m.b524 <= 1)
m.c4715 = Constraint(expr= m.b335 - m.b336 + m.b525 <= 1)
m.c4716 = Constraint(expr= m.b335 - m.b337 + m.b526 <= 1)
m.c4717 = Constraint(expr= m.b335 - m.b338 + m.b527 <= 1)
m.c4718 = Constraint(expr= m.b335 - m.b339 + m.b528 <= 1)
m.c4719 = Constraint(expr= m.b335 - m.b340 + m.b529 <= 1)
m.c4720 = Constraint(expr= m.b335 - m.b341 + m.b530 <= 1)
m.c4721 = Constraint(expr= m.b335 - m.b342 + m.b531 <= 1)
m.c4722 = Constraint(expr= m.b336 - m.b337 + m.b532 <= 1)
m.c4723 = Constraint(expr= m.b336 - m.b338 + m.b533 <= 1)
m.c4724 = Constraint(expr= m.b336 - m.b339 + m.b534 <= 1)
m.c4725 = Constraint(expr= m.b336 - m.b340 + m.b535 <= 1)
m.c4726 = Constraint(expr= m.b336 - m.b341 + m.b536 <= 1)
m.c4727 = Constraint(expr= m.b336 - m.b342 + m.b537 <= 1)
m.c4728 = Constraint(expr= m.b337 - m.b338 + m.b538 <= 1)
m.c4729 = Constraint(expr= m.b337 - m.b339 + m.b539 <= 1)
m.c4730 = Constraint(expr= m.b337 - m.b340 + m.b540 <= 1)
m.c4731 = Constraint(expr= m.b337 - m.b341 + m.b541 <= 1)
m.c4732 = Constraint(expr= m.b337 - m.b342 + m.b542 <= 1)
m.c4733 = Constraint(expr= m.b338 - m.b339 + m.b543 <= 1)
m.c4734 = Constraint(expr= m.b338 - m.b340 + m.b544 <= 1)
m.c4735 = Constraint(expr= m.b338 - m.b341 + m.b545 <= 1)
m.c4736 = Constraint(expr= m.b338 - m.b342 + m.b546 <= 1)
m.c4737 = Constraint(expr= m.b339 - m.b340 + m.b547 <= 1)
m.c4738 = Constraint(expr= m.b339 - m.b341 + m.b548 <= 1)
m.c4739 = Constraint(expr= m.b339 - m.b342 + m.b549 <= 1)
m.c4740 = Constraint(expr= m.b340 - m.b341 + m.b550 <= 1)
m.c4741 = Constraint(expr= m.b340 - m.b342 + m.b551 <= 1)
m.c4742 = Constraint(expr= m.b341 - m.b342 + m.b552 <= 1)
m.c4743 = Constraint(expr= m.b343 - m.b344 + m.b363 <= 1)
m.c4744 = Constraint(expr= m.b343 - m.b345 + m.b364 <= 1)
m.c4745 = Constraint(expr= m.b343 - m.b346 + m.b365 <= 1)
m.c4746 = Constraint(expr= m.b343 - m.b347 + m.b366 <= 1)
m.c4747 = Constraint(expr= m.b343 - m.b348 + m.b367 <= 1)
m.c4748 = Constraint(expr= m.b343 - m.b349 + m.b368 <= 1)
m.c4749 = Constraint(expr= m.b343 - m.b350 + m.b369 <= 1)
m.c4750 = Constraint(expr= m.b343 - m.b351 + m.b370 <= 1)
m.c4751 = Constraint(expr= m.b343 - m.b352 + m.b371 <= 1)
m.c4752 = Constraint(expr= m.b343 - m.b353 + m.b372 <= 1)
m.c4753 = Constraint(expr= m.b343 - m.b354 + m.b373 <= 1)
m.c4754 = Constraint(expr= m.b343 - m.b355 + m.b374 <= 1)
m.c4755 = Constraint(expr= m.b343 - m.b356 + m.b375 <= 1)
m.c4756 = Constraint(expr= m.b343 - m.b357 + m.b376 <= 1)
m.c4757 = Constraint(expr= m.b343 - m.b358 + m.b377 <= 1)
m.c4758 = Constraint(expr= m.b343 - m.b359 + m.b378 <= 1)
m.c4759 = Constraint(expr= m.b343 - m.b360 + m.b379 <= 1)
m.c4760 = Constraint(expr= m.b343 - m.b361 + m.b380 <= 1)
m.c4761 = Constraint(expr= m.b343 - m.b362 + m.b381 <= 1)
m.c4762 = Constraint(expr= m.b344 - m.b345 + m.b382 <= 1)
m.c4763 = Constraint(expr= m.b344 - m.b346 + m.b383
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 25 15:52:05 2017
@author: Xian_Work
"""
param_path="../Parameters/params_ui.json"
execfile("prelim.py")
from model_plotting import norm, compute_dist, gen_model_target, sim_plot, make_base_plot, mk_mix_agent, gen_evolve_share_series
#For writing output
import xlsxwriter
import pathlib2 as pathlib
import plotnine as p9
#Make paths for output if they don't already exist
dirnames = ['../out/1b1k', '../out/OOS', '../out/het_delta',
'../out/1b2k/GOF_plots', '../out/2b2k/GOF_plots']
for dirname in dirnames:
pathlib.Path(dirname).mkdir(parents=True, exist_ok=True)
####################################
#Model Target and Base Plots
####################################
#Data Series to plot against
data_tminus5 = norm(param.JPMC_cons_moments, param.plt_norm_index)
data_tminus5_search = param.JPMC_search_moments
###Use vcv weights###
cons_se_vcv_tminus5 =param.JPMC_cons_SE
search_se_vcv = param.JPMC_search_SE
### Make base plot with data series ###
base_tminus5 = make_base_plot(0, param.c_moments_len,
param.moments_len_diff, param.s_moments_len,
data_tminus5, data_tminus5_search,
cons_se_vcv_tminus5, search_se_vcv)
##Default param dicts##
pd_base = {"a0": param.a0_data, "T_series":T_series, "T_solve":param.TT,
"e":param.e_extend,
"beta_var":param.beta, "beta_hyp": param.beta_hyp, "a_size": param.a_size,
"rho":param.rho, "verbose":False, "L_":param.L,
"constrained":param.constrained, "Pi_":np.zeros((param.TT+1,param.a_size+1,9,9)),
"z_vals" : param.z_vals, "R" : param.R, "Rbor" : param.R,
"phi": param.phi, "k":param.k, "spline_k":param.spline_k, "solve_V": True,
"solve_search": True}
for t in range(param.TT+1):
for a_index in range(param.a_size+1):
pd_base['Pi_'][t][a_index] = param.Pi
pd_base['T_series']=len(pd_base['e'])-1
het_base = copy.deepcopy(pd_base)
### Estimated parameters for main and secondary models
models_params ={}
for path in ["../Parameters/model_params_main.json",
"../Parameters/model_params_sec.json",
"../Parameters/model_params_robust_gamma.json"]:
f= open(path)
models_params.update(json.load(f))
#########################
#Plotting helper functions
############################
rep_agents_log = []
het_agents_log = []
text_stats = []
def plot_rep(param_dict, label,
cons_filename, cons_plt_title,
search_filename, search_plt_title,
cons_ylim=(0.65, 1.03), search_ylim=(-0.02, 0.35),
cons_legend_loc=(0.27,0.22), search_legend_loc = (0.27,0.22),
GOF=False, show_data_CI=False,
save_stats=False, stats_name=None):
"""Helper function for plotting our basic plot"""
opt_plots=copy.deepcopy(base_tminus5)
opt_plots.add_agent(label,
pd_base['e'],
param.c_plt_start_index,
param.s_plt_start_index,
param.plt_norm_index,
*[(1,param_dict)], verbose=True)
opt_plots.plot(cons_filename, cons_plt_title,
search_filename, search_plt_title,
cons_legend_loc = cons_legend_loc,
search_legend_loc = search_legend_loc,
cons_t0=-5, tminus5=True, show_data_CI = show_data_CI,
cons_ylim=cons_ylim, search_ylim=search_ylim,
GOF=GOF,)
#save results and parameters
if save_stats==True:
rep_out=copy.deepcopy(opt_plots.agents[0])
rep_pd=param_dict
out_rep={'name':stats_name, 'cons_GOF':rep_out['cons_dist'],
'search_GOF':rep_out['search_dist'],
'k':rep_pd['k'], 'xi':rep_pd['phi'],
'delta':rep_pd['beta_var'], 'beta':rep_pd['beta_hyp'],
'L_':rep_pd['L_'], 'gamma':rep_pd['rho']}
rep_agents_log.append(out_rep)
def het_agent_plots(het_agent, main_label, type_labels,
file_prefix, out_subdir='/',
cons_plt_title = None, search_plt_title = None,
cons_comp_title = None, search_comp_title = None,
shares_title = None, show_data_CI=False,
cons_ylim = (0.65, 1.03), search_ylim=(-0.02, 0.35),
comps_cons_ylim= (0.47, 1.05), comps_search_ylim = (0, 0.8),
cons_legend_loc=(0.27,0.22), search_legend_loc=(0.27,0.22),
cons_legend_loc_comps =(0.27,0.22),
search_legend_loc_comps =(0.27,0.8),
GOF=False,
save_stats=False, stats_name=None,
save_plots= True):
'''
    Creates average consumption/search, consumption/search by type, and shares
    plots for a heterogeneous agent. type_labels is a dictionary of index:'label'
    pairs used to label each type in order.
    See the documentation of the 'plot' function in model_plotting.py for the
    different options.
'''
#Filenames and plot titles
filename_cons = out_subdir + file_prefix + '_cons'
filename_search = out_subdir + file_prefix + '_search'
filename_cons_comps = out_subdir + file_prefix + '_cons_components'
filename_search_comps = out_subdir + file_prefix + '_search_components'
filename_shares = out_subdir + file_prefix + '_shares'
if save_plots==False:
filename_cons = None
filename_search = None
filename_cons_comps = None
filename_search_comps = None
filename_shares = None
    if cons_plt_title is None:
        cons_plt_title = 'Spending in Data and Model ' + file_prefix
    if search_plt_title is None:
        search_plt_title = 'Job Search in Data and Model ' + file_prefix
    if cons_comp_title is None:
        cons_comp_title = 'Spending by type in Model ' + file_prefix
    if search_comp_title is None:
        search_comp_title = 'Job Search by type in Model ' + file_prefix
    if shares_title is None:
        shares_title = 'Shares in Model ' + file_prefix
#Main consumption and search plot
opt_plots =copy.deepcopy(base_tminus5)
opt_plots.add_agent(main_label, het_base['e'],
param.c_plt_start_index,
param.s_plt_start_index,
param.plt_norm_index,
*het_agent, verbose=True)
opt_plots.plot(filename_cons, cons_plt_title,
filename_search , search_plt_title,
cons_legend_loc = cons_legend_loc,
search_legend_loc=search_legend_loc,
cons_t0=-5, tminus5=True, show_data_CI = show_data_CI,
cons_ylim=cons_ylim, search_ylim=search_ylim,
GOF=GOF)
#Components
comp_dict = {}
for i in range(len(het_agent)):
comp_dict.update({type_labels[i]:het_agent[i][1]})
comp_plot=copy.deepcopy(base_tminus5)
for key,value in comp_dict.iteritems():
comp_plot.add_agent(key, het_base['e'],
param.c_plt_start_index,
param.s_plt_start_index,
param.plt_norm_index,
*[(1,value)])
comp_plot.plot(filename_cons_comps, cons_comp_title,
filename_search_comps , search_comp_title,
cons_ylim=comps_cons_ylim, search_ylim=comps_search_ylim,
cons_legend_loc=cons_legend_loc_comps,
search_legend_loc=search_legend_loc_comps,
cons_t0=-5, GOF=False, tminus5=True, show_data=False)
#Shares plot
fit_out = opt_plots.agents[0]
shares_plot=copy.deepcopy(base_tminus5)
shares_dict ={}
for i in range(len(het_agent)):
shares_dict.update({i:type_labels[i]})
shares=fit_out['share_ind']
for i in range(len(het_agent)):
share=shares[i]
shares_plot.add_series(shares_dict[i], share, fit_out['search'])
shares_plot.plot(filename_shares, shares_title,
None , "Job Search",
cons_legend_loc = search_legend_loc_comps,
search_legend_loc = search_legend_loc_comps,
cons_t0=-5,
cons_ylim=(0.00,1.06), show_data=False, GOF=False, cons_ylab=' ')
#save results and parameters
if save_stats==True:
for i in range(len(het_agent)):
type_out = {'name':stats_name, 'type':type_labels[i],
'init_share':het_agent[i][0]}
type_out.update({'cons_GOF':fit_out['cons_dist'],
'search_GOF':fit_out['search_dist'],})
type_pd = het_agent[i][1]
type_out.update({'k':type_pd['k'], 'xi':type_pd['phi'],
'delta':type_pd['beta_var'], 'beta':type_pd['beta_hyp'],
'L_':type_pd['L_'], 'gamma':type_pd['rho']})
het_agents_log.append(type_out)
return fit_out
################################################################################
#################### Models in Table 4 #########################################
################################################################################
##################################
###Representative agent
##################################
pd_rep=copy.deepcopy(pd_base)
est_params_1b1k = models_params['est_params_1b1k']
pd_rep.update(est_params_1b1k)
plot_rep(pd_rep, 'Model: Representative Agent',
'../out/1b1k/rep_cons', 'Spending in Data and Representative Agent Model',
'../out/1b1k/rep_search', 'Job Search in Data and Representative Agent Model')
plot_rep(pd_rep, 'Model: Representative Agent',
'../out/1b1k/rep_cons_GOF', 'Spending in Data and Representative Agent Model',
'../out/1b1k/rep_search_GOF', 'Job Search in Data and Representative Agent Model',
GOF=True, save_stats=True,
cons_legend_loc = (0.33, 0.22), search_legend_loc = (0.33, 0.22))
for name in ['1', '4', '10']:
pd_temp = copy.deepcopy(pd_rep)
pd_temp.update(models_params['est_params_1b1k_fix_gamma_' + name])
plot_rep(pd_temp, 'Model: Representative Agent' + ', gamma = ' + name,
'../out/1b1k/rep_robust_gamma_' + name, 'Spending in Data and Representative Agent Model',
'../out/1b1k/rep_robust_gamma_' + name + '_search', 'Job Search in Data and Representative Agent Model',
GOF=True, save_stats=True, stats_name = '1b1k, gamma =' +name,
cons_legend_loc = (0.38, 0.22), search_legend_loc = (0.38, 0.22))
#############################
#### 2 types of k only #####
#############################
#From optimizer
est_params_1b2k = models_params['est_params_1b2k']
#Set up agent
het_1b2k = copy.deepcopy(pd_rep)
het_1b2k.update({'beta_var':est_params_1b2k['beta_var'],'beta_hyp':est_params_1b2k['beta_hyp'],
'L_':est_params_1b2k['L_'], 'constrained':True, 'phi':est_params_1b2k['phi'] })
#weights
weights_1b2k = (est_params_1b2k['w_lo_k'], 1- est_params_1b2k['w_lo_k'])
params_1b2k = ('k', )
vals_1b2k = ((est_params_1b2k['k0'], ),
(est_params_1b2k['k1'], ),)
het_1b2k_agent = mk_mix_agent(het_1b2k, params_1b2k, vals_1b2k, weights_1b2k)
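# Hedged note (inferred from how agents are passed around in this script, e.g.
# *[(1, param_dict)] in plot_rep): mk_mix_agent presumably returns a sequence of
# (weight, param_dict) pairs, here one type with k = k0 at weight w_lo_k and one
# with k = k1 at weight 1 - w_lo_k, which is later unpacked as *het_1b2k_agent.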
het_agent_labels = {0:'Low Search Cost', 1:'High Search Cost'}
het_agent_plots(het_1b2k_agent, 'Model: Standard',
het_agent_labels, '1b2k', out_subdir='1b2k/',
cons_plt_title = "Spending in Data and Standard Model",
search_plt_title = "Job Search in Data and Standard Model",
cons_comp_title = "Spending by type, Standard Model",
search_comp_title = "Job Search by type, Standard Model",
shares_title = "Shares, Standard Model", show_data_CI = True,
cons_legend_loc = (0.23, 0.22), search_legend_loc = (0.23, 0.22))
het_agent_plots(het_1b2k_agent, 'Model: Baseline',
het_agent_labels, '1b2k', out_subdir='1b2k/GOF_plots/',
cons_plt_title = "Spending in Data and Standard Model",
search_plt_title = "Job Search in Data and Standard Model",
cons_comp_title = "Spending by type, Standard Model",
search_comp_title = "Job Search by type, Standard Model",
shares_title = "Shares, Standard Model",
GOF=True, save_stats=True, show_data_CI = True,
stats_name = '2_k_types' )
#############################################
#### 2 types for beta and 2 types for k #####
#############################################
robustness_2b2k=[('est_params_2b2k', '2b2k', "Heterogeneous Beta"),
('est_params_2b2k_fix_xi', '2b2k_fix_xi', "Heterogeneous Beta, xi=1.0"),
('est_params_2b2k_fix_b1', '2b2k_fix_b1', "Heterogeneous Beta, B_hi=1.0"),]
#To plot both models together
plot_2b2k_both = copy.deepcopy(base_tminus5)
#Plot each model separately
for model in robustness_2b2k:
est_params_2b2k = models_params[model[0]]
if model[0] == 'est_params_2b2k':
show_data_CI_2b2k = True
else:
show_data_CI_2b2k = False
het_2b2k = copy.deepcopy(pd_rep)
het_2b2k.update({'beta_var':est_params_2b2k['beta_var'], 'L_':est_params_2b2k['L_'],
'constrained':True, 'phi':est_params_2b2k['phi']})
k0 = est_params_2b2k['k0']
k1 = est_params_2b2k['k1']
b0 = est_params_2b2k['b0']
b1 = est_params_2b2k['b1']
#weights
w_lo_k = est_params_2b2k['w_lo_k']
w_hi_k = 1 - w_lo_k
w_lo_beta = est_params_2b2k['w_lo_beta']
w_hi_beta = 1 - w_lo_beta
w_b0_k0 = w_lo_k * w_lo_beta
w_b1_k0 = w_lo_k * w_hi_beta
w_b0_k1 = w_hi_k * w_lo_beta
w_b1_k1 = w_hi_k * w_hi_beta
#weights
weights_2b2k = (w_b0_k0, w_b1_k0, w_b0_k1, w_b1_k1)
params_2b2k = ('beta_hyp', 'k' )
vals_2b2k = ((b0, k0),
(b1, k0 ),
(b0, k1),
(b1, k1 ))
het_2b2k_agent = mk_mix_agent(het_2b2k, params_2b2k, vals_2b2k, weights_2b2k)
het_agent_labels = {0:'Hyperbolic, Low Search Cost', 1:'Exponential, Low Search Cost',
2:'Hyperbolic, High Search Cost',3:'Exponential, High Search Cost'}
het_agent_plots(het_2b2k_agent, 'Model: Heterogeneity in Beta',
het_agent_labels, model[1], out_subdir='2b2k/',
cons_plt_title = "Spending in Data and Heterogeneous Beta Model",
search_plt_title = "Job Search in Data and Heterogeneous Beta Model",
cons_comp_title = "Spending by type, Heterogeneous Beta Model",
search_comp_title = "Job Search by type, Heterogeneous Beta Model",
shares_title = "Shares, Heterogeneous Beta Model",
show_data_CI = show_data_CI_2b2k,
cons_legend_loc = (0.29, 0.22), search_legend_loc = (0.29, 0.22),
cons_legend_loc_comps = (0.29, 0.25), search_legend_loc_comps = (0.29, 0.7), )
het_agent_plots(het_2b2k_agent, 'Model: Heterogeneity in Beta',
het_agent_labels, model[1], out_subdir='2b2k/GOF_plots/',
cons_plt_title = "Spending in Data and Heterogeneous Beta Model",
search_plt_title = "Job Search in Data and Heterogeneous Beta Model",
cons_comp_title = "Spending by type, Heterogeneous Beta Model",
search_comp_title = "Job Search by type, Heterogeneous Beta Model",
shares_title = "Shares, Heterogeneous Beta Model",
GOF=True, save_stats=True, stats_name = model[1],
show_data_CI = show_data_CI_2b2k,
cons_legend_loc_comps = (0.29, 0.25), search_legend_loc_comps = (0.29, 0.7),
cons_legend_loc = (0.32, 0.22), search_legend_loc = (0.32, 0.22))
#Combined plot with 2b2k and 2b2k, fixed xi=1.0 models
if model[1] == '2b2k' or model[1] == '2b2k_fix_xi':
plot_2b2k_both.add_agent('Model: ' + model[2], het_base['e'],
param.c_plt_start_index,
param.s_plt_start_index,
param.plt_norm_index,
*het_2b2k_agent)
###Stat for text
if model[1] == '2b2k' :
agent_series = gen_evolve_share_series(het_base['e'],
param.c_plt_start_index,
param.s_plt_start_index,
base_tminus5.periods,
param.plt_norm_index,
*het_2b2k_agent, verbose=True)
shares_b0k0 = list(agent_series['share_ind'][0])
shares_b0k1 = list(agent_series['share_ind'][2])
m5_share_myopic = shares_b0k0[10] + shares_b0k1[10]
text_stats.append(('''By month 5 - the last month of UI benefits - the
myopic types are XXX percent of the population''',
np.round(100*m5_share_myopic,decimals=0)))
plot_2b2k_both.plot('/2b2k/2b2k_with_fixed_xi_cons', "Spending in Data and Heterogeneous Beta Models",
'/2b2k/2b2k_with_fixed_xi_search', "Job Search in Data and Heterogeneous Beta Models",
cons_legend_loc =(0.34,0.22), search_legend_loc =(0.34,0.22),
cons_ylim=(0.65,1.03), GOF=True)
#######################################
#############Florida OOS############
#######################################
###generate the standard agents
FL_2k=copy.deepcopy(het_1b2k)
FL_2k['z_vals']=np.array(param.z_vals_FL)
FL_2k_agent = mk_mix_agent(FL_2k, params_1b2k, vals_1b2k, weights_1b2k)
###generate the spender-saver agents
est_params_2b2k = models_params['est_params_2b2k']
FL_2b2k = copy.deepcopy(pd_rep)
FL_2b2k['z_vals']=np.array(param.z_vals_FL)
FL_2b2k.update({'beta_var':est_params_2b2k['beta_var'], 'L_':est_params_2b2k['L_'],
'constrained':True, | |
client extensions to add to the Order. Do not set,
modify, or delete clientExtensions if your account is associated with MT4.
take_profit_on_fill: :class:`~async_v20.TakeProfitDetails`
TakeProfitDetails specifies the details of a Take Profit Order to be created on behalf of
a client. This may happen when an Order
is filled that opens a Trade requiring a Take Profit, or when a Trade's dependent Take Profit Order is
modified directly through the Trade.
stop_loss_on_fill: :class:`~async_v20.StopLossDetails`
StopLossDetails specifies the details of a Stop Loss Order to be created on behalf of a
client. This may happen when an Order
is filled that opens a Trade requiring a Stop Loss, or when a Trade's dependent Stop Loss Order is modified
directly through the Trade.
trailing_stop_loss_on_fill: :class:`~async_v20.TrailingStopLossDetails`
TrailingStopLossDetails specifies the details of a Trailing Stop Loss Order to be
created on behalf of a client. This may happen when an Order is
filled that opens a Trade requiring a Trailing Stop Loss, or when a Trade's dependent Trailing Stop Loss
Order is modified directly through the Trade.
trade_client_extensions: :class:`~async_v20.ClientExtensions`
Client Extensions to add to the Trade created when the Order is filled (if such a
Trade is created). Do not set, modify, or delete tradeClientExtensions if your account is associated with
MT4.
"""
def __init__(self, instrument: InstrumentName, units: DecimalNumber, price: PriceValue,
price_bound: PriceValue = sentinel,
time_in_force: TimeInForce = 'GTC', gtd_time: DateTime = sentinel,
position_fill: OrderPositionFill = 'DEFAULT', trigger_condition: OrderTriggerCondition = 'DEFAULT',
client_extensions: ClientExtensions = sentinel, take_profit_on_fill: TakeProfitDetails = sentinel,
stop_loss_on_fill: StopLossDetails = sentinel,
trailing_stop_loss_on_fill: TrailingStopLossDetails = sentinel,
trade_client_extensions: ClientExtensions = sentinel):
Model.__init__(**locals())
class StopOrderRequest(OrderRequest, type=OrderType('STOP')):
"""A StopOrderRequest specifies the parameters that may be set when creating a
Stop Order.
Attributes:
instrument: :class:`~async_v20.InstrumentName`
The Stop Order's Instrument.
units: :class:`~async_v20.DecimalNumber`
            The quantity requested to be filled by the Stop Order. A positive number of units
results in a long Order, and a negative number of units results in a short Order.
price: :class:`~async_v20.PriceValue`
The price threshold specified for the Stop Order. The Stop Order will only be
filled by a market price that is equal to or worse than this price.
price_bound: :class:`~async_v20.PriceValue`
The worst market price that may be used to fill this Stop Order. If the market gaps and
crosses through both the price and the priceBound, the Stop Order will be cancelled instead of being filled.
time_in_force: :class:`~async_v20.TimeInForce`
The time-in-force requested for the Stop Order.
gtd_time: :class:`~async_v20.DateTime`
The date/time when the Stop Order will
be cancelled if its timeInForce is "GTD".
position_fill: :class:`~async_v20.OrderPositionFill`
Specification of how Positions in the Account
are modified when the Order is filled.
trigger_condition: :class:`~async_v20.OrderTriggerCondition`
Specification of what component of a price should be used
for comparison when determining if the Order should be filled.
client_extensions: :class:`~async_v20.ClientExtensions`
The client extensions to add to the Order. Do not set,
modify, or delete clientExtensions if your account is associated with MT4.
take_profit_on_fill: :class:`~async_v20.TakeProfitDetails`
TakeProfitDetails specifies the details of a Take Profit Order to be created on behalf of
a client. This may happen when an Order
is filled that opens a Trade requiring a Take Profit, or when a Trade's dependent Take Profit Order is
modified directly through the Trade.
stop_loss_on_fill: :class:`~async_v20.StopLossDetails`
StopLossDetails specifies the details of a Stop Loss Order to be created on behalf of a
client. This may happen when an Order
is filled that opens a Trade requiring a Stop Loss, or when a Trade's dependent Stop Loss Order is modified
directly through the Trade.
trailing_stop_loss_on_fill: :class:`~async_v20.TrailingStopLossDetails`
TrailingStopLossDetails specifies the details of a Trailing Stop Loss Order to be
created on behalf of a client. This may happen when an Order is
filled that opens a Trade requiring a Trailing Stop Loss, or when a Trade's dependent Trailing Stop Loss
Order is modified directly through the Trade.
trade_client_extensions: :class:`~async_v20.ClientExtensions`
Client Extensions to add to the Trade created when the Order is filled (if such a
Trade is created). Do not set, modify, or delete tradeClientExtensions if your account is associated with
MT4.
"""
def __init__(self, instrument: InstrumentName, units: DecimalNumber, price: PriceValue,
price_bound: PriceValue = sentinel, time_in_force: TimeInForce = 'GTC', gtd_time: DateTime = sentinel,
position_fill: OrderPositionFill = 'DEFAULT', trigger_condition: OrderTriggerCondition = 'DEFAULT',
client_extensions: ClientExtensions = sentinel, take_profit_on_fill: TakeProfitDetails = sentinel,
stop_loss_on_fill: StopLossDetails = sentinel,
trailing_stop_loss_on_fill: TrailingStopLossDetails = sentinel,
trade_client_extensions: ClientExtensions = sentinel):
Model.__init__(**locals())
class Account(AccountSummary):
"""The full details of a client's Account. This includes full open Trade, open
Position and pending Order representation.
Attributes:
id: :class:`~async_v20.AccountID`
The Account's identifier
alias: :class:`str`
Client-assigned alias for the Account. Only provided
if the Account has an alias set
currency: :class:`~async_v20.Currency`
The home currency of the Account
balance: :class:`~async_v20.AccountUnits`
The current balance of the Account. Represented in the Account's home currency.
created_by_user_id: :class:`int`
ID of the user that created the Account.
created_time: :class:`~async_v20.DateTime`
The date/time when the Account was created.
pl: :class:`~async_v20.AccountUnits`
The total profit/loss realized over the lifetime of
the Account. Represented in the Account's home currency.
resettable_pl: :class:`~async_v20.AccountUnits`
The total realized profit/loss for the Account since it was
last reset by the client. Represented in the Account's home currency.
resettabled_pl_time: :class:`~async_v20.DateTime`
The date/time that the Account's resettablePL was last reset.
commission: :class:`~async_v20.AccountUnits`
The total amount of commission paid over the lifetime
of the Account. Represented in the Account's home currency.
margin_rate: :class:`~async_v20.DecimalNumber`
Client-provided margin rate override for the Account. The effective margin rate of the Account
is the lesser of this value and
the OANDA margin rate for the Account's division. This value is only provided if a margin rate override
exists for the Account.
margin_call_enter_time: :class:`~async_v20.DateTime`
The date/time when the Account entered a margin call state.
Only provided if the Account is in a margin call.
margin_call_extension_count: :class:`int`
The number of times that the Account's current margin call was extended.
last_margin_call_extension_time: :class:`~async_v20.DateTime`
The date/time of the Account's last margin call extension.
open_trade_count: :class:`int`
The number of Trades currently open in the Account.
open_position_count: :class:`int`
The number of Positions currently open in the Account.
pending_order_count: :class:`int`
The number of Orders currently pending in the Account.
hedging_enabled: :class:`bool`
Flag indicating that the Account has hedging enabled.
unrealized_pl: :class:`~async_v20.AccountUnits`
The total unrealized profit/loss for all Trades currently open
in the Account. Represented in the Account's home currency.
nav: :class:`~async_v20.AccountUnits`
The net asset value of the Account. Equal to
Account balance + unrealizedPL. Represented in the Account's home currency.
margin_used: :class:`~async_v20.AccountUnits`
Margin currently used for the Account.
Represented in the Account's home currency.
margin_available: :class:`~async_v20.AccountUnits`
Margin available for Account. Represented in the Account's home currency.
position_value: :class:`~async_v20.AccountUnits`
The value of the Account's open
positions represented in the Account's home currency.
margin_closeout_unrealized_pl: :class:`~async_v20.AccountUnits`
The Account's margin closeout unrealized PL.
margin_closeout_nav: :class:`~async_v20.AccountUnits`
The Account's margin closeout NAV.
margin_closeout_margin_used: :class:`~async_v20.AccountUnits`
The Account's margin closeout margin used.
margin_closeout_percent: :class:`~async_v20.DecimalNumber`
The Account's margin closeout percentage. When this value is 1.0
or above the Account is in a margin closeout situation.
margin_closeout_position_value: :class:`~async_v20.DecimalNumber`
The value of the Account's open positions as used
for margin closeout calculations represented in the Account's home currency.
withdrawal_limit: :class:`~async_v20.AccountUnits`
The current WithdrawalLimit for the account which will be zero or
a positive value indicating how much can be withdrawn from the account.
margin_call_margin_used: :class:`~async_v20.AccountUnits`
The Account's margin call margin used.
margin_call_percent: :class:`~async_v20.DecimalNumber`
The Account's margin call percentage. When this value is 1.0
or above the Account is in a margin call situation.
last_transaction_id: :class:`~async_v20.TransactionID`
The ID of the last Transaction created for the Account.
trades: ( :class:`~async_v20.TradeSummary`, ...)
The details of the Trades currently open in the Account.
positions: ( :class:`~async_v20.Position`, ...)
The details all Account Positions.
orders: ( :class:`~async_v20.Order`, ...)
The details of the Orders currently pending in the Account.
dividend: :class:`~async_v20.DecimalNumber`
Dividend
dividendAdjustment: :class:`~async_v20.DecimalNumber`
Undocumented
"""
def __init__(self, id: AccountID = sentinel, alias: str = sentinel, currency: Currency = sentinel,
balance: AccountUnits = sentinel,
created_by_user_id: int = sentinel, created_time: DateTime = sentinel, pl: AccountUnits = sentinel,
| |
refreshing the WSDL file terminated with error message')
self.log('Changing wsdl to wsdl which can\'t be validated')
warning, error, console = edit_wsdl(self, wsdl_error_url)
expected_error_msg = WSDL_EDIT_ERROR_VALIDATION_FAILED.format(wsdl_error_url)
        self.log('SERVICE_10 3a.1 System displays the error message "{0}" / '
                 'SERVICE_09 5a.1 System displays the error message "{0}"'.format(expected_error_msg))
self.is_equal(expected_error_msg, error)
self.is_not_none(console, msg='Set invalid WSDL: no console output shown for WSDL {0} : {1}'
.format(wsdl_error_url, console))
expected_log_msg = EDIT_WSDL_FAILED
self.log('SERVICE_09 5a.2 System logs the event "{0}" to the audit log'.format(expected_log_msg))
logs_found = log_checker.check_log(expected_log_msg, from_line=current_log_lines + 1)
self.is_true(logs_found)
self.log(
'SERVICE_09 trying to set WSDL URL that gives a validator warning: {0}'.format(wsdl_warning_url))
warning, error, console = edit_wsdl(self, wsdl_warning_url)
self.is_none(error,
msg='Set WSDL with validator warnings: got error for WSDL {0}'.format(wsdl_warning_url))
self.is_not_none(warning, msg='Set WSDL with validator warnings: no warning shown for WSDL {0} : {1}'
.format(wsdl_warning_url, warning))
self.is_none(console, msg='Set WSDL with validator warnings: got console output for WSDL {0} : {1}'
.format(wsdl_warning_url, console))
self.log('Warning message: {0}'.format(warning))
self.log('SERVICE_09 5. Canceling wsdl with warning adding')
self.wait_until_visible(type=By.XPATH, element=popups.WARNING_POPUP_CANCEL_XPATH).click()
self.wait_jquery()
self.log('SERVICE_09 5. Adding same wsdl again, this time confirming')
edit_wsdl(self, wsdl_warning_url)
self.wait_until_visible(type=By.XPATH, element=popups.WARNING_POPUP_CONTINUE_XPATH).click()
self.wait_jquery()
self.wait_until_visible(type=By.XPATH, element=popups.WARNING_POPUP_CONTINUE_XPATH).click()
self.wait_jquery()
wsdl_index = clients_table_vm.find_wsdl_by_name(self, wsdl_warning_url)
wsdl_row = clients_table_vm.client_services_popup_get_wsdl(self, wsdl_index=wsdl_index)
open_services_element = wsdl_row.find_element_by_css_selector(
popups.CLIENT_DETAILS_POPUP_WSDL_CLOSED_SERVICE_CSS)
open_services_element.click()
self.wait_jquery()
self.log('Check if wsdl services got refreshed, it should not contain any services')
        try:
            self.by_css('.service')
            found_service = True
        except Exception:
            found_service = False
        self.is_true(not found_service,
                     msg='WSDL services list should be empty after a failed refresh')
# UC SERVICE_09 - trying to update WSDL that gives a validator warning
self.log('SERVICE_09 Edit the Address of a WSDL')
# UC SERVICE_09 1. Select to edit the address of a WSDL
self.log('SERVICE_09 1. Select to edit the address of a WSDL')
self.click(wsdl_row)
edit_wsdl_button.click()
current_log_lines = log_checker.get_line_count()
# UC SERVICE_09 2. Insert new URL
self.log('SERVICE_09 2. Insert new URL')
edit_wsdl(self, wsdl_correct_url)
# UC SERVICE_09 3-5. Parse, verify and refresh WSDL
self.log('SERVICE_09 3-5. Parse, verify and refresh WSDL')
self.wait_until_visible(type=By.XPATH, element=popups.WARNING_POPUP_CONTINUE_XPATH).click()
self.wait_jquery()
expected_log_msg = EDIT_WSDL
self.log('SERVICE_09 6. System logs the event "{0}" to the audit log'.format(expected_log_msg))
logs_found = log_checker.check_log(expected_log_msg, from_line=current_log_lines + 1)
self.is_true(logs_found)
# UC SERVICE_19 Edit the Address of a Service
self.log('SERVICE_19 Edit the Address of a Service')
wsdl_index = clients_table_vm.find_wsdl_by_name(self, wsdl_correct_url)
self.log('Check wsdl services parameters')
# Find the service under the specified WSDL in service list (and expand the WSDL services list if not open yet)
if check_edit_errors:
service_row = clients_table_vm.client_services_popup_find_service(self, wsdl_index=wsdl_index,
service_name=service_2_name)
check_wsdl_service_parameters(self, service_row, service_2_name, service_2_url)
# UC SERVICE_19 1. Select to edit the address of a service.
self.log('SERVICE_19 1. Select to edit the address of a service.')
service_row = clients_table_vm.client_services_popup_find_service(self, wsdl_index=wsdl_index,
service_name=service_name)
check_wsdl_service_parameters(self, service_row, service_name, service_url)
# Click on the service row to select it
self.click(service_row)
# Click the "Edit" button to open "Edit Service Parameters" popup
edit_wsdl_button.click()
# Wait until "Edit Service Parameters" popup opens
self.wait_until_visible(type=By.XPATH, element=popups.EDIT_SERVICE_POPUP_XPATH)
# Find the "Service URL" and "Timeout" inputs. Get the service URL and timeout as we need them later.
service_url_input = self.by_id(popups.EDIT_SERVICE_POPUP_URL_ID)
service_url_input_value = service_url_input.get_attribute('value')
service_timeout = self.by_id(popups.EDIT_SERVICE_POPUP_TIMEOUT_ID).get_attribute('value')
self.log('Replace url with https version')
wsdl_correct_url_https = wsdl_correct_url.replace('http:', 'https:')
self.input(service_url_input, wsdl_correct_url_https)
self.log('SERVICE_19 5. System sets the TLS certification verification to "true" when url starts with https')
service_tls_checkbox = self.by_xpath(popups.EDIT_SERVICE_POPUP_TLS_ENABLED_XPATH)
self.is_equal('true', service_tls_checkbox.get_attribute('checked'))
self.log('Click OK')
self.by_xpath(popups.EDIT_SERVICE_POPUP_OK_BTN_XPATH).click()
self.wait_jquery()
self.log('Click on edit button again')
edit_wsdl_button.click()
self.wait_until_visible(type=By.XPATH, element=popups.EDIT_SERVICE_POPUP_XPATH)
self.log('Replace url with http version')
self.input(service_url_input, wsdl_correct_url)
self.log('SERVICE_19 5a. System sets the TLS certification verification to "false" when url starts with http')
self.is_false(self.by_id(popups.EDIT_SERVICE_POPUP_TLS_ID).is_enabled())
# Check service timeout value
self.is_equal(con1=ss_system_parameters.SERVICE_TIMEOUT_VALUE, con2=service_timeout,
msg='Service timeout not {0}'.format(service_timeout))
modified_service_url = service_url_input_value
if check_parameter_errors:
        # Append the URL parameter db=CLIENT_CODE to the url.
        # The service may already have some parameters, so check whether a
        # question mark is already present.
        if '?' in modified_service_url:
            # We already have parameters, append to the list
            modified_service_url += '&'
        else:
            # No parameters, start the parameter string with a question mark
            modified_service_url += '?'
modified_service_url += urllib.urlencode({service_url_additional_parameter: client['code']})
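        # e.g. http://example.com/testService?db=CLIENT123 -- host, path and client code
        # here are illustrative; the parameter name and value come from the test configuration.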
current_log_lines = log_checker.get_line_count()
self.log('SERVICE_19 4a. Invalid URL is inserted')
warning, error = edit_service(self, service_invalid_url, service_timeout)
expected_error_msg = SERVICE_EDIT_INVALID_URL.format(service_invalid_url)
self.log('SERVICE_19 4a.1 System displays the error message "{0}"'.format(expected_error_msg))
self.is_equal(expected_error_msg, error)
self.is_none(warning,
msg='Set invalid service URL: got warning for URL {0} : {1}'
.format(modified_service_url, warning))
expected_log_msg = EDIT_SERVICE_PARAMS_FAILED
self.log('SERVICE_19 4a.2. System logs the event "{0}"'.format(expected_log_msg))
logs_found = log_checker.check_log(expected_log_msg,
from_line=current_log_lines + 1)
self.is_true(logs_found)
self.log('Close error messages')
messages.close_error_messages(self)
self.log('SERVICE_21 4b. The inserted timeout value is not a positive integer')
for timeout in service_invalid_timeouts:
current_log_lines = log_checker.get_line_count()
self.log('Trying to set timeout to {0}'.format(timeout))
warning, error = edit_service(self, modified_service_url, timeout)
expected_error_msg = SERVICE_EDIT_INVALID_TIMEOUT.format(timeout)
self.log('SERVICE_21 4b.1 System displays the error message "{0}"'.format(expected_error_msg))
self.is_equal(expected_error_msg, error)
self.is_none(warning,
msg='Set invalid service URL: got warning for timeout {0} : {1}'
.format(timeout, warning))
expected_log_msg = EDIT_SERVICE_PARAMS_FAILED
self.log('SERVICE_21 4b.2 System logs the event "{0}"'.format(expected_log_msg))
logs_found = log_checker.check_log(expected_log_msg, from_line=current_log_lines + 1)
self.is_true(logs_found)
self.log('Close error messages if present')
messages.close_error_messages(self)
self.log('SERVICE_21 4a. edit the timeout value to infinity {0})'.format(service_infinite_timeout))
warning, error = edit_service(self, modified_service_url, service_infinite_timeout)
self.is_none(error, msg='Set infinite service timeout: got error for timeout {0}'.format(
service_infinite_timeout))
expected_warning_message = messages.SERVICE_EDIT_INFINITE_TIMEOUT_WARNING.format(service_infinite_timeout)
self.log('SERVICE_21 4a.1 System displays a warning message "{0}"'.format(expected_warning_message))
self.is_equal(expected_warning_message, warning)
self.log('Close error messages if present')
messages.close_error_messages(self)
self.log('SERVICE_21 4a.2a. Set infinite service timeout confirmation is canceled')
self.wait_until_visible(type=By.XPATH, element=popups.WARNING_POPUP_CANCEL_XPATH).click()
self.wait_jquery()
# Try to set modified service URL and original service timeout. Should get no errors or warnings.
self.log('Trying to set service timeout {1}, URL {0}'.format(modified_service_url, service_timeout))
warning, error = edit_service(self, modified_service_url, service_timeout)
self.is_none(error,
msg='Edit service: got error for timeout {1}, URL {0}'
.format(modified_service_url, service_timeout))
self.is_none(warning,
msg='Edit service: got warning for timeout {2}, URL {0} : {1}'
.format(modified_service_url, warning, service_timeout))
# If any error messages are shown, close them.
messages.close_error_messages(self)
return configure_service
def check_wsdl_service_parameters(self, service_row, service_name, service_url):
"""
Checks if service parameters match the WSDL file provided ones
:param self: main instance
:param service_row: service row selenium element
:param service_name: service name with version
:param service_url: service url
:return:
"""
service_cols = service_row.find_elements_by_tag_name('td')
self.is_equal(service_cols[1].text[:-4], service_name, msg='Expecting WSDL service name "{0}", got "{1}"'.
format(service_name, service_cols[1].text[:-4]))
self.is_equal(service_cols[2].text, service_name[:-3],
msg='Expecting WSDL service version "{0}", got "{1}"'.format(service_name[:-3], service_cols[2].text))
self.is_equal(service_cols[3].text, service_url,
msg='Expecting WSDL service URL "{0}", got "{1}"'.format(service_url, service_cols[3].text))
def test_enable_service(case, client=None, client_name=None, client_id=None, wsdl_index=None, wsdl_url=None):
'''
MainController test function. Enables a service.
:return:
'''
self = case
client_id = xroad.get_xroad_subsystem(client)
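    # The actual steps are wrapped in a closure and returned, so the test runner can
    # execute them later as a single test step.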
def enable_service():
"""
:param self: MainController class object
:return: None
        """
# UC SERVICE_12 Enable a WSDL
self.log('SERVICE_12 Enable a WSDL')
# UC SERVICE_12 1. Select to enable a WSDL
self.log('SERVICE_12 1. Select to enable a WSDL')
# Open client popup using shortcut button to open it directly at Services tab.
clients_table_vm.open_client_popup_services(self, client_name=client_name, client_id=client_id)
# Find the table that lists all WSDL files and services
services_table = self.by_id(popups.CLIENT_DETAILS_POPUP_SERVICES_TABLE_ID)
# Wait until that table is visible (opened in a popup)
self.wait_until_visible(services_table)
# Find the service under the specified WSDL in service list (and expand the WSDL services list if not open yet)
clients_table_vm.client_services_popup_select_wsdl(self, wsdl_index=wsdl_index, wsdl_url=wsdl_url)
# Find and click the "Enable" button to enable the WSDL.
self.by_id(popups.CLIENT_DETAILS_POPUP_ENABLE_WSDL_BTN_ID).click()
# Wait until ajax query finishes
self.wait_jquery()
        # Check if WSDL is really enabled - find the WSDL row by index and verify below that it is not marked as disabled
if wsdl_url is not None:
wsdl_enabled_index = clients_table_vm.find_wsdl_by_name(self, wsdl_url)
else:
wsdl_enabled_index = wsdl_index
if wsdl_enabled_index is None:
raise RuntimeError('WSDL index not found for {0}'.format(wsdl_url))
# UC SERVICE_12 2. Check if WSDL is enabled
self.log('SERVICE_12 2. Check if WSDL is enabled')
# Find the WSDL row and check if it has class 'disabled'. If it does, it is not enabled. If not, everything worked.
wsdl_row = clients_table_vm.client_services_popup_get_wsdl(self, wsdl_index=wsdl_enabled_index)
wsdl_is_enabled = 'disabled' not in self.get_classes(wsdl_row)
# Assertion if wsdl is enabled
self.is_true(wsdl_is_enabled,
msg='SERVICE_12 2. WSDL {0} ({1}) is not enabled'.format(wsdl_enabled_index, wsdl_row.text))
return enable_service
def test_delete_service(case, client=None, client_name=None, client_id=None, wsdl_index=None, wsdl_url=None,
try_cancel=True, log_checker=None):
'''
MainController test function. Deletes a service from security server.
:param case: TestCase object
:param client_name: string | None - name of the client whose ACL we modify
:param client_id: string | None - XRoad ID of the client whose ACL we modify
:param wsdl_index: int | None - index (zero-based) for WSDL we select from the list
:param wsdl_url: str | None - URL for WSDL we select from the list
:param client | None - client which service will be deleted
:param try_cancel | | |
to solve i think, 152 has high occlusion and 4 objects
skip_list = ['kitchen_4/000006.left.jpg', 'kitchen_4/000014.left.jpg', 'kitchen_4/000169.left.jpg', 'kitchen_4/000177.left.jpg']
# 120 has some bug
# for img_i in range(0,100):
# for img_i in range(100,150):
# for img_i in range(155,177):
#for img_i in list(range(0,100)) + list(range(100,120)) + list(range(155,177)):
# for img_i in [138,142,153,163, 166, 349]:
# for img_i in [0]:
for img_i in range(0,1):
# Get Image
image_name = 'kitchen_4/00{}.left.jpg'.format(str(img_i).zfill(4))
if image_name in skip_list:
continue
# image_data, annotations = fat_image.get_random_image(name='{}_16k/kitchen_4/000005.left.jpg'.format(category_name))
image_data, annotations = fat_image.get_random_image(
name=image_name, required_objects=required_objects
)
# Skip if required image or image name is not in dataset
if image_data is None or annotations is None:
continue
# Do an image only if it has filter object, but still do all objects in scene
if filter_objects is not None:
found_filter_object = False
for anno in annotations:
if fat_image.category_id_to_names[anno['category_id']]['name'] in filter_objects:
found_filter_object = True
if found_filter_object == False:
continue
# print(found_filter_object)
# continue
# TODO
# restrict segmentation - done
# move in x,y in hypothesis - this will help in cases where pose needs to be moved up and down in camera
# try all possible combinations of rotation and viewpoint, increase topk number
# reduce viewpoint number in training
# icp only on pixels of that object
# ratios in losses
# reduce confidence
        # try with discretization in ssd paper - done
# in 11,12,21 pose is right but not at right distance from camera - source cost was not getting included
# lower epsilon - done
# use in plane from perch
# train for more iterations
# try without normalize - done - not good
# try with lazy - done
# why is centroid in 2d - calculate centroid of mask. check if centroid of generated poses is actually at object center, 11, 12 - done
# use more rotations for non symmetric objects + 3cm for big and 2 cm for small
        # try without depth translation because mean of rendered and observed should match
        # the centroid alignment doesn't work if the object is not fully inside the camera - kitchen 150s
# issue when objects are too close together
# Visualize ground truth in ros
# yaw_only_objects, max_min_dict_gt, transformed_annotations = fat_image.visualize_pose_ros(
# image_data, annotations, frame='camera', camera_optical_frame=False, num_publish=1, write_poses=False, ros_publish=True
# )
# Run model to get multiple poses for each object
labels, model_annotations, model_poses_file, predicted_mask_path, top_model_annotations = \
fat_image.visualize_model_output(image_data, use_thresh=True, use_centroid=False, print_poses=True)
if True:
# Convert model output poses to table frame and save them to file so that they can be read by perch
_, max_min_dict, _ = fat_image.visualize_pose_ros(
# image_data, model_annotations, frame='table', camera_optical_frame=False, num_publish=1, write_poses=True, ros_publish=False
image_data, model_annotations, frame='camera', camera_optical_frame=False, num_publish=1, write_poses=True, ros_publish=False,
)
# Run perch/ICP on written poses
run_perch = True
if run_perch:
perch_annotations, stats = fat_image.visualize_perch_output(
image_data, model_annotations, max_min_dict, frame='camera',
# use_external_render=0, required_object=[labels[1]],
use_external_render=0, required_object=labels,
camera_optical_frame=False, use_external_pose_list=1,
# model_poses_file=model_poses_file, use_centroid_shifting=0,
model_poses_file=model_poses_file, use_centroid_shifting=1,
predicted_mask_path=predicted_mask_path
)
else:
perch_annotations = top_model_annotations
stats = None
f_accuracy.write("{},".format(image_data['file_name']))
if perch_annotations is not None:
# # # Compare Poses by applying to model and computing distance
add_dict, add_s_dict = fat_image.compare_clouds(annotations, perch_annotations, use_add_s=True, convert_annotation_2=not run_perch)
if add_dict is not None and add_s_dict is not None:
for object_name in required_objects:
if (object_name in add_dict) and (object_name in add_s_dict):
f_accuracy.write("{},{},".format(add_dict[object_name], add_s_dict[object_name]))
else:
f_accuracy.write(" , ,")
if stats is not None:
f_runtime.write("{} {} {}".format(image_data['file_name'], stats['expands'], stats['runtime']))
f_accuracy.write("\n")
f_runtime.write("\n")
f_runtime.close()
f_accuracy.close()
def run_roman_crate(dataset_cfg=None):
image_directory = dataset_cfg['image_dir']
annotation_file = dataset_cfg['image_dir'] + '/instances_newmap1_roman_2018.json'
fat_image = FATImage(
coco_annotation_file=annotation_file,
coco_image_directory=image_directory,
depth_factor=100,
model_dir=dataset_cfg['model_dir'],
model_mesh_in_mm=True,
# model_mesh_scaling_factor=0.005,
model_mesh_scaling_factor=1,
models_flipped=False,
img_width=960,
img_height=540,
distance_scale=100,
env_config="roman_env_config.yaml",
planner_config="roman_planner_config.yaml",
perch_debug_dir=dataset_cfg["perch_debug_dir"],
python_debug_dir=dataset_cfg["python_debug_dir"],
dataset_type=dataset_cfg["type"]
)
f_runtime = open('runtime.txt', "w", 1)
f_accuracy = open('accuracy.txt', "w", 1)
f_runtime.write("{} {} {}\n".format('name', 'expands', 'runtime'))
required_objects = ['crate_test']
f_accuracy.write("name,")
for object_name in required_objects:
f_accuracy.write("{},".format(object_name))
f_accuracy.write("\n")
for img_i in range(0,16):
# for img_i in [16, 17, 19, 22]:
# required_objects = ['coke']
image_name = 'NewMap1_roman/0000{}.left.png'.format(str(img_i).zfill(2))
image_data, annotations = fat_image.get_random_image(name=image_name, required_objects=required_objects)
        # In the case of the crate it's sometimes hard to get the camera pose, as the ground is not visible (RANSAC plane estimation will fail)
        # So get the camera pose from an image where the ground is visible and use that instead
# camera_pose_m = np.array([[0.757996, -0.00567911, 0.652234, -0.779052],
# [0.00430481, 0.999984, 0.00370417, -0.115213],
# [-0.652245, 1.32609e-16, 0.758009, 0.66139],
# [0, 0, 0, 1]])
camera_pose = {
'location_worldframe': np.array([-77.90518933, -11.52125029, 66.13899833]),
'quaternion_xyzw_worldframe': [-0.6445207366760153, 0.6408707673682607, -0.29401548348464, 0.2956899981377745]
}
# Camera pose goes here to get GT in world frame for accuracy computation
yaw_only_objects, max_min_dict, transformed_annotations, _ = \
fat_image.visualize_pose_ros(
image_data, annotations, frame='table', camera_optical_frame=False,
input_camera_pose=camera_pose
)
# max_min_dict['ymax'] = 1
# max_min_dict['ymin'] = -1
# max_min_dict['xmax'] = 0.5
# max_min_dict['xmin'] = -1
max_min_dict['ymax'] = 0.85
max_min_dict['ymin'] = -0.85
max_min_dict['xmax'] = 0.5
max_min_dict['xmin'] = -0.5
fat_image.search_resolution_translation = 0.08
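        # The x/y bounds above and the 0.08 translation resolution define the pose search
        # extent in the table frame (presumably metres, to match the rest of the pipeline).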
perch_annotations, stats = fat_image.visualize_perch_output(
image_data, annotations, max_min_dict, frame='table',
use_external_render=0, required_object=required_objects,
camera_optical_frame=False, use_external_pose_list=0, gt_annotations=transformed_annotations,
input_camera_pose=camera_pose, table_height=0.006, num_cores=8,
compute_type=2
)
# print(perch_annotations)
# print(transformed_annotations)
f_accuracy.write("{},".format(image_data['file_name']))
add_dict, add_s_dict = fat_image.compare_clouds(transformed_annotations, perch_annotations, downsample=True, use_add_s=True)
for object_name in required_objects:
if (object_name in add_dict) and (object_name in add_s_dict):
f_accuracy.write("{},{},".format(add_dict[object_name], add_s_dict[object_name]))
else:
f_accuracy.write(" , ,")
f_accuracy.write("\n")
f_runtime.write("{} {} {}\n".format(image_name, stats['rendered'], stats['runtime']))
f_runtime.close()
def analyze_roman_results(config=None):
import pandas as pd
dataset_cfg = config['dataset']
for device, analysis_cfg in config['analysis']['device'].items():
overall_stats_dict = {}
# Object wise metrics
print("\n### Object Wise AUC ###")
li = []
for accuracy_file in analysis_cfg['result_files']['accuracy']:
# Read file for every object
print("Accuracy file : {}".format(accuracy_file))
df = pd.read_csv(accuracy_file,
header=None,
index_col=None,
names=["filename", "add", "add-s", "blank"],
skiprows=1,
sep=",")
df = df.drop(columns=["add", "blank"])
df = df.set_index('filename')
add_s = np.copy(df['add-s'].to_numpy())
stats = compute_pose_metrics(add_s)
print("AUC : {}, Pose Percentage : {}, Mean ADD-S : {}".format(
stats['auc'], stats['pose_error_less_perc'], stats['mean_pose_error']))
li.append(df)
overall_stats_dict[get_filename_from_path(accuracy_file)] = stats
# Overall Metrics
# print("Dataframe with add-s")
df_acc = pd.concat(li, axis=0, ignore_index=False)
print("\n### Overall AUC ###")
stats = compute_pose_metrics(np.copy(df_acc['add-s'].to_numpy()))
print("AUC : {}, Pose Percentage : {}, Mean ADD-S : {}".format(
stats['auc'], stats['pose_error_less_perc'], stats['mean_pose_error']))
overall_stats_dict["overall"] = stats
## Runtime
print("\n### Object Wise Runtimes ###")
li = []
for runtime_file in analysis_cfg['result_files']['runtime']:
# Read file for every object
print("Runtime file : {}".format(runtime_file))
df = pd.read_csv(runtime_file,
header=0,
index_col=None,
# names=["filename", "runtime", "icp-runtime"],
# skiprows=1,
delim_whitespace=True)
# print(df)
df = df.set_index('name')
mean_runtime = df['runtime'].mean()
mean_rendered = df['expands'].mean()
print("Average runtime : {}, Average Rendered : {}".format(mean_runtime, mean_rendered))
li.append(df)
object_name = get_filename_from_path(runtime_file).replace('_runtime', '')
# overall_stats_dict[object_name]["runtime"] = mean_runtime
# overall_stats_dict[object_name]["rendered"] = mean_rendered
print("\n### Overall Runtime ###")
df_runtime = pd.concat(li, axis=0, ignore_index=False)
mean_runtime = df_runtime['runtime'].mean()
mean_rendered = df_runtime['expands'].mean()
print("Overall average runtime : {}".format(mean_runtime))
print("Overall average rendered : {}".format(mean_rendered))
# overall_stats_dict["overall"]["runtime"] = mean_runtime
# overall_stats_dict["overall"]["rendered"] = mean_rendered
# print("\n### Compiled Stats ###")
# df_overall_stats = \
# pd.DataFrame.from_dict(overall_stats_dict, orient='index')
# print(df_overall_stats)
# df_overall_stats.to_csv(
# os.path.join(fat_image.analysis_output_dir, "compiled_stats.csv"),
# float_format='%.4f')
def run_roman_crate_gpu(dataset_cfg=None):
if dataset_cfg["device"] == "gpu":
perch_config_yaml = "roman_gpu_env_config.yaml"
elif dataset_cfg["device"] == "cpu":
perch_config_yaml = "roman_env_config.yaml"
elif dataset_cfg["device"] == "icp":
perch_config_yaml = "roman_env_config.yaml"
image_directory = dataset_cfg['image_dir']
annotation_file = dataset_cfg['image_dir'] + '/instances_newmap1_roman_2018.json'
fat_image = FATImage(
coco_annotation_file=annotation_file,
coco_image_directory=image_directory,
depth_factor=100,
model_dir=dataset_cfg['model_dir'],
model_mesh_in_mm=True,
# model_mesh_scaling_factor=0.005,
model_mesh_scaling_factor=1,
models_flipped=False,
img_width=960,
img_height=540,
distance_scale=100,
env_config=perch_config_yaml,
planner_config="roman_planner_config.yaml",
perch_debug_dir=dataset_cfg["perch_debug_dir"],
python_debug_dir=dataset_cfg["python_debug_dir"],
dataset_type=dataset_cfg["type"]
)
# f_runtime = open('runtime.txt', "w", 1)
# f_accuracy = open('accuracy.txt', "w", 1)
ts = calendar.timegm(time.gmtime())
f_accuracy = open('{}/accuracy_{}.txt'.format(fat_image.python_debug_dir, ts), "w", 1)
f_runtime = open('{}/runtime_{}.txt'.format(fat_image.python_debug_dir, ts), "w", 1)
f_runtime.write("{} {} {} {} {}\n".format('name', 'expands', 'runtime', 'icp_runtime', 'peak_gpu_mem'))
required_objects = ['crate_test']
f_accuracy.write("name,")
for object_name in required_objects:
f_accuracy.write("{},".format(object_name))
f_accuracy.write("\n")
for img_i in range(0, 26):
# for img_i in [16, 17, 19, 22]:
# required_objects = ['coke']
image_name = 'NewMap1_roman/0000{}.left.png'.format(str(img_i).zfill(2))
image_data, annotations = fat_image.get_random_image(name=image_name, required_objects=required_objects)
        # In the case of the crate it's sometimes hard to get the camera pose, as the ground is not visible (RANSAC plane estimation will fail)
        # So get the camera pose from an image where the ground is visible and use that instead
# camera_pose_m = np.array([[0.757996, -0.00567911, 0.652234, -0.779052],
# [0.00430481, 0.999984, 0.00370417, -0.115213],
# [-0.652245, 1.32609e-16, 0.758009, 0.66139],
# [0, 0, 0, 1]])
camera_pose = {
'location_worldframe': np.array([-77.90518933, -11.52125029, 66.13899833]),
'quaternion_xyzw_worldframe': [-0.6445207366760153, 0.6408707673682607, -0.29401548348464, 0.2956899981377745]
}
# Camera pose goes here to get | |
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
eventlet.monkey_patch()
import pprint
import sys
import time
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import importutils
from oslo_utils import timeutils
from neutron.agent.common import config
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent import rpc as agent_rpc
from neutron.common import config as common_config
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron import context as n_context
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
from neutron.openstack.common import loopingcall
from neutron.openstack.common import periodic_task
from neutron.openstack.common import service
from neutron import service as neutron_service
from networking_cisco.plugins.cisco.cfg_agent import device_status
from networking_cisco.plugins.cisco.common import (
cisco_constants as c_constants)
LOG = logging.getLogger(__name__)
# Constants for agent registration.
REGISTRATION_RETRY_DELAY = 2
MAX_REGISTRATION_ATTEMPTS = 30
class CiscoDeviceManagementApi(object):
"""Agent side of the device manager RPC API."""
def __init__(self, topic, host):
self.host = host
target = oslo_messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
def report_dead_hosting_devices(self, context, hd_ids=None):
"""Report that a hosting device cannot be contacted (presumed dead).
:param: context: session context
:param: hosting_device_ids: list of non-responding hosting devices
:return: None
"""
cctxt = self.client.prepare()
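        # cast() is fire-and-forget: the agent does not wait for a reply from the plugin.
        # register_for_duty() below uses call() because it needs the server's answer.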
cctxt.cast(context, 'report_non_responding_hosting_devices',
host=self.host, hosting_device_ids=hd_ids)
def register_for_duty(self, context):
"""Report that a config agent is ready for duty."""
cctxt = self.client.prepare()
return cctxt.call(context, 'register_for_duty', host=self.host)
class CiscoCfgAgent(manager.Manager):
"""Cisco Cfg Agent.
This class defines a generic configuration agent for cisco devices which
implement network services in the cloud backend. It is based on the
(reference) l3-agent, but has been enhanced to support multiple services
in addition to routing.
    The agent acts as a container for services and does not do any
    service-specific processing or configuration itself.
All service specific processing is delegated to service helpers which
the agent loads. Thus routing specific updates are processed by the
routing service helper, firewall by firewall helper etc.
A further layer of abstraction is implemented by using device drivers for
encapsulating all configuration operations of a service on a device.
Device drivers are specific to a particular device/service VM eg: CSR1kv.
    The main entry points in this class are `process_services()` and
    `_backlog_task()`.
"""
target = oslo_messaging.Target(version='1.1')
OPTS = [
cfg.IntOpt('rpc_loop_interval', default=10,
help=_("Interval when the process_services() loop "
"executes in seconds. This is when the config agent "
"lets each service helper to process its neutron "
"resources.")),
cfg.StrOpt('routing_svc_helper_class',
default='networking_cisco.plugins.cisco.cfg_agent.service_helpers'
'.routing_svc_helper.RoutingServiceHelper',
help=_("Path of the routing service helper class.")),
cfg.StrOpt('fw_svc_helper_class',
default='neutron_fwaas.services.firewall.drivers.cisco'
'.csr_firewall_svc_helper.CsrFirewallServiceHelper',
help=_("Path of the firewall service helper class.")),
]
def __init__(self, host, conf=None):
self.conf = conf or cfg.CONF
self._dev_status = device_status.DeviceStatus()
self.context = n_context.get_admin_context_without_session()
self._initialize_rpc(host)
self._initialize_service_helpers(host)
self._start_periodic_tasks()
super(CiscoCfgAgent, self).__init__(host=self.conf.host)
def _initialize_rpc(self, host):
self.devmgr_rpc = CiscoDeviceManagementApi(topics.L3PLUGIN, host)
def _initialize_service_helpers(self, host):
svc_helper_class = self.conf.cfg_agent.routing_svc_helper_class
try:
self.routing_service_helper = importutils.import_object(
svc_helper_class, host, self.conf, self)
except ImportError as e:
LOG.warning(_LW("Error in loading routing service helper. Class "
"specified is %(class)s. Reason:%(reason)s"),
{'class': self.conf.cfg_agent.routing_svc_helper_class,
'reason': e})
self.routing_service_helper = None
fw_svc_helper_class = self.conf.cfg_agent.fw_svc_helper_class
try:
self.fw_service_helper = importutils.import_object(
fw_svc_helper_class, host, self.conf, self)
except ImportError as e:
            LOG.warning(_LW("Error in loading firewall service helper. Class "
"specified is %(class)s. Reason:%(reason)s"),
{'class': self.conf.cfg_agent.fw_svc_helper_class,
'reason': e})
self.fw_service_helper = None
def _start_periodic_tasks(self):
self.loop = loopingcall.FixedIntervalLoopingCall(self.process_services)
self.loop.start(interval=self.conf.cfg_agent.rpc_loop_interval)
def after_start(self):
LOG.info(_LI("Cisco cfg agent started"))
def get_routing_service_helper(self):
return self.routing_service_helper
## Periodic tasks ##
@periodic_task.periodic_task
def _backlog_task(self, context):
"""Process backlogged devices."""
LOG.debug("Processing backlog.")
self._process_backlogged_hosting_devices(context)
## Main orchestrator ##
@lockutils.synchronized('cisco-cfg-agent', 'neutron-')
def process_services(self, device_ids=None, removed_devices_info=None):
"""Process services managed by this config agent.
This method is invoked by any of three scenarios.
1. Invoked by a periodic task running every `RPC_LOOP_INTERVAL`
seconds. This is the most common scenario.
In this mode, the method is called without any arguments.
2. Called by the `_process_backlogged_hosting_devices()` as part of
the backlog processing task. In this mode, a list of device_ids
are passed as arguments. These are the list of backlogged
hosting devices that are now reachable and we want to sync services
on them.
3. Called by the `hosting_devices_removed()` method. This is when
the config agent has received a notification from the plugin that
some hosting devices are going to be removed. The payload contains
the details of the hosting devices and the associated neutron
resources on them which should be processed and removed.
To avoid race conditions with these scenarios, this function is
protected by a lock.
This method goes on to invoke `process_service()` on the
different service helpers.
:param device_ids : List of devices that are now available and needs
to be processed
:param removed_devices_info: Info about the hosting devices which
are going to be removed and details of the resources hosted on them.
Expected Format:
{
'hosting_data': {'hd_id1': {'routers': [id1, id2, ...]},
'hd_id2': {'routers': [id3, id4, ...]}, ...},
'deconfigure': True/False
}
:return: None
"""
LOG.debug("Processing services started")
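        # A hypothetical scenario-3 payload, matching the removed_devices_info format
        # documented in the docstring above:
        # removed_devices_info = {
        #     'hosting_data': {'hd-uuid-1': {'routers': ['router-id-1', 'router-id-2']}},
        #     'deconfigure': True,
        # }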
# Now we process only routing service, additional services will be
# added in future
if self.routing_service_helper:
self.routing_service_helper.process_service(device_ids,
removed_devices_info)
else:
LOG.warning(_LW("No routing service helper loaded"))
if self.fw_service_helper:
self.fw_service_helper.process_service(device_ids,
removed_devices_info)
LOG.debug("Processing services completed")
def _process_backlogged_hosting_devices(self, context):
"""Process currently backlogged devices.
Go through the currently backlogged devices and process them.
For devices which are now reachable (compared to last time), we call
`process_services()` passing the now reachable device's id.
For devices which have passed the `hosting_device_dead_timeout` and
hence presumed dead, execute a RPC to the plugin informing that.
:param context: RPC context
:return: None
"""
res = self._dev_status.check_backlogged_hosting_devices()
if res['reachable']:
self.process_services(device_ids=res['reachable'])
if res['dead']:
LOG.debug("Reporting dead hosting devices: %s", res['dead'])
self.devmgr_rpc.report_dead_hosting_devices(context,
hd_ids=res['dead'])
def hosting_devices_removed(self, context, payload):
"""Deal with hosting device removed RPC message."""
try:
if payload['hosting_data']:
if payload['hosting_data'].keys():
self.process_services(removed_devices_info=payload)
except KeyError as e:
LOG.error(_LE("Invalid payload format for received RPC message "
"`hosting_devices_removed`. Error is %(error)s. "
"Payload is %(payload)s"),
{'error': e, 'payload': payload})
class CiscoCfgAgentWithStateReport(CiscoCfgAgent):
def __init__(self, host, conf=None):
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
self.agent_state = {
'binary': 'neutron-cisco-cfg-agent',
'host': host,
'topic': c_constants.CFG_AGENT,
'configurations': {},
'start_flag': True,
'agent_type': c_constants.AGENT_TYPE_CFG}
report_interval = cfg.CONF.AGENT.report_interval
self.use_call = True
self._initialize_rpc(host)
self._agent_registration()
super(CiscoCfgAgentWithStateReport, self).__init__(host=host,
conf=conf)
if report_interval:
self.heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
self.heartbeat.start(interval=report_interval)
def _agent_registration(self):
"""Register this agent with the server.
This method registers the cfg agent with the neutron server so hosting
devices can be assigned to it. In case the server is not ready to
accept registration (it sends a False) then we retry registration
for `MAX_REGISTRATION_ATTEMPTS` with a delay of
`REGISTRATION_RETRY_DELAY`. If there is no server response or a
failure to register after the required number of attempts,
the agent stops itself.
"""
for attempts in xrange(MAX_REGISTRATION_ATTEMPTS):
context = n_context.get_admin_context_without_session()
self.send_agent_report(self.agent_state, context)
res = self.devmgr_rpc.register_for_duty(context)
if res is True:
LOG.info(_LI("[Agent registration] Agent successfully "
"registered"))
return
elif res is False:
LOG.warning(_LW("[Agent registration] Neutron server said "
"that device manager was not ready. Retrying "
"in %0.2f seconds "), REGISTRATION_RETRY_DELAY)
time.sleep(REGISTRATION_RETRY_DELAY)
elif res is None:
LOG.error(_LE("[Agent registration] Neutron server said that "
"no device manager was found. Cannot continue. "
"Exiting!"))
raise SystemExit("Cfg Agent exiting")
LOG.error(_LE("[Agent registration] %d unsuccessful registration "
"attempts. Exiting!"), MAX_REGISTRATION_ATTEMPTS)
raise SystemExit("Cfg Agent exiting")
def _report_state(self):
"""Report state to the plugin.
This task run every `report_interval` period.
Collects, creates and sends a summary of the services currently
managed by this agent. Data is collected from the service helper(s).
Refer the `configurations` dict for the parameters reported.
:return: None
"""
LOG.debug("Report state task started")
configurations = {}
if self.routing_service_helper:
configurations = self.routing_service_helper.collect_state(
self.agent_state['configurations'])
non_responding = self._dev_status.get_backlogged_hosting_devices_info()
configurations['non_responding_hosting_devices'] = non_responding
self.agent_state['configurations'] = configurations
self.agent_state['local_time'] = str(timeutils.utcnow())
LOG.debug("State report data: %s", pprint.pformat(self.agent_state))
self.send_agent_report(self.agent_state, self.context)
def send_agent_report(self, report, context):
"""Send the agent report via RPC."""
try:
self.state_rpc.report_state(context, report, self.use_call)
report.pop('start_flag', None)
self.use_call = False
LOG.debug("Send agent report successfully completed")
except AttributeError:
# This means the server does not support report_state
LOG.warning(_LW("Neutron server does not support state report. "
"State report | |
# layerserver/api/dblayer_content.py
import logging
import mimetypes
import os
import warnings
from functools import reduce
from operator import __or__ as OR
from django.conf import settings
from django.contrib.gis.gdal import CoordTransform, SpatialReference
from django.contrib.gis.geos import GEOSGeometry, Polygon
from django.core.files.uploadedfile import UploadedFile
from django.db import transaction
from django.db.models import Q
from django.forms.models import model_to_dict
from django.http import FileResponse, Http404, HttpResponseBadRequest
from django.shortcuts import get_object_or_404
from django.utils.cache import patch_response_headers
from django.utils.functional import cached_property
from rest_framework import filters, parsers, status, views, viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from giscube.cache_utils import giscube_transaction_cache_response
from giscube.models import UserAsset
from ..filters import filterset_factory
from ..model_legacy import create_dblayer_model
from ..models import DataBaseLayer, DBLayerGroup
from ..pagination import create_geojson_pagination_class, create_json_pagination_class
from ..permissions import BulkDBLayerIsValidUser, DBLayerIsValidUser
from ..serializers import create_dblayer_serializer
logger = logging.getLogger(__name__)
class PageSize0NotAllowedException(Exception):
pass
class DBLayerContentViewSetMixin(object):
def get_model_serializer_class(self):
fields = list(self._fields.keys())
if self.request.method == 'GET':
only_fields = self.request.GET.get('fields', None)
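            # e.g. ?fields=name,area keeps only those columns; the pk field (and the geom
            # field when the layer has one) is always included. Field names are illustrative.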
if only_fields is not None:
only_fields = list(filter(None, only_fields.split(',')))
for field in list(only_fields):
if field not in fields:
only_fields.remove(field)
only_fields.append(self.layer.pk_field)
if self.layer.geom_field in fields:
only_fields.append(self.layer.geom_field)
fields = list(set(only_fields))
return create_dblayer_serializer(self.model, fields, self.lookup_field, self._virtual_fields)
def _virtual_fields_get_queryset(self, qs):
for field in self._virtual_fields.values():
qs = field.widget_class.get_queryset(qs, field, self.request)
return qs
@cached_property
def _virtual_fields(self):
return {field.name: field for field in self.layer.virtual_fields.filter(enabled=True)}
def filter_queryset_by_group_data_filter(self, qs):
actions = {
'get': 'view',
'options': 'view',
'head': 'view',
'post': 'add',
'put': 'update',
'patch': 'update',
'delete': 'delete'
}
permission = actions.get(self.request.method.lower())
layer_groups = DBLayerGroup.objects.filter(
**{
'layer': self.layer,
'group__in': self.request.user.groups.all(),
'can_%s' % permission: True
}
).exclude(data_filter={}, data_filter_status='disabled')
for layer_group in layer_groups:
qs = qs.filter(**layer_group.data_filter)
return qs
class DBLayerContentViewSet(DBLayerContentViewSetMixin, viewsets.ModelViewSet):
parser_classes = (parsers.MultiPartParser, parsers.JSONParser)
permission_classes = (DBLayerIsValidUser,)
queryset = []
model = None
pagination_class = None
page_size_query_param = 'page_size'
page_size = 50
ordering_fields = '__all__'
filter_fields = []
filter_class = None
filter_backends = (filters.OrderingFilter,)
lookup_url_kwarg = 'pk'
_fields = {}
def dispatch(self, request, *args, **kwargs):
self.layer = DataBaseLayer.objects.filter(name=kwargs['name']).first()
if self.layer is None:
raise Http404
self.model = create_dblayer_model(self.layer)
self.lookup_field = self.layer.pk_field
self.filter_fields = []
self._fields = {}
for field in self.layer.fields.filter(enabled=True):
if field.search is True:
self.filter_fields.append(field.name)
self._fields[field.name] = {
'fullsearch': field.fullsearch
}
lookup_field_value = kwargs.get(self.lookup_url_kwarg)
defaults = {}
defaults[self.lookup_field] = lookup_field_value
kwargs.update(defaults)
try:
self.pagination_class = self.get_pagination_class(self.layer)
except PageSize0NotAllowedException:
return HttpResponseBadRequest()
return super().dispatch(request, *args, **kwargs)
def get_serializer_class(self):
return self.get_model_serializer_class()
def bbox2wkt(self, bbox, srid):
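        # Turns a "minx,miny,maxx,maxy" string (EPSG:4326) into a polygon reprojected to
        # the layer SRID, e.g. bbox2wkt('2.0,41.3,2.3,41.5', 25831) -- values illustrative.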
bbox = bbox.split(',')
minx, miny, maxx, maxy = tuple(bbox)
wkt = ('SRID=4326;'
'POLYGON ('
'(%s %s, %s %s, %s %s, %s %s, %s %s))' %
(minx, miny, maxx, miny, maxx, maxy, minx, maxy, minx, miny))
geom = GEOSGeometry(wkt, srid=4326)
if srid != 4326:
srs_to = SpatialReference(srid)
srs_4326 = SpatialReference(4326)
trans = CoordTransform(srs_4326, srs_to)
geom.transform(trans)
return geom
def geom_from_intersects_param(self, intersects, srid):
if intersects.startswith('POLYGON'):
geom = GEOSGeometry(intersects, srid=4326)
warnings.warn(
'WKT POLYGON in intersects parameter is deprecated, use coordinates instead', DeprecationWarning
)
else:
coordinates = list(map(float, intersects.split(',')))
pairs = list(zip(coordinates[0::2], coordinates[1::2]))
geom = Polygon(pairs, srid=4326)
if srid != 4326:
srs_to = SpatialReference(srid)
srs_4326 = SpatialReference(4326)
trans = CoordTransform(srs_4326, srs_to)
geom.transform(trans)
return geom
def _geom_filters(self, qs):
in_bbox = self.request.query_params.get('in_bbox', None)
if in_bbox:
poly__bboverlaps = '%s__bboverlaps' % self.layer.geom_field
qs = qs.filter(**{poly__bboverlaps: self.bbox2wkt(
in_bbox, self.layer.srid)})
intersects = self.request.query_params.get('intersects', None)
if intersects:
poly__intersects = '%s__intersects' % self.layer.geom_field
qs = qs.filter(**{poly__intersects: self.geom_from_intersects_param(intersects, self.layer.srid)})
return qs
def _fullsearch_filters(self, qs):
q = self.request.query_params.get('q', None)
if q:
lst = []
for name, field in self._fields.items():
if field['fullsearch'] is True:
if name != self.layer.geom_field:
contains = '%s__icontains' % name
lst.append(Q(**{contains: q}))
if len(lst) > 0:
qs = qs.filter(reduce(OR, lst)) # noqa: E0602
return qs
def _get_queryset(self):
qs = self.model.objects.all()
qs = self._fullsearch_filters(qs)
qs = self._geom_filters(qs)
qs = self._virtual_fields_get_queryset(qs)
model_filter = filterset_factory(self.model, self.filter_fields, self._virtual_fields)
qs = model_filter(data=self.request.query_params, queryset=qs)
qs = qs.filter()
qs = self.filter_queryset_by_group_data_filter(qs)
return qs
def get_queryset(self):
qs = None
try:
qs = self._get_queryset()
except Exception:
qs = self.model.objects_default.none()
raise
return qs
def get_pagination_class(self, layer):
page_size = layer.get_page_size()
max_page_size = layer.get_max_page_size()
if not layer.allow_page_size_0 and self.request.GET.get('page_size', page_size) == '0':
raise PageSize0NotAllowedException()
if self.request.GET.get('page_size', page_size) != '0':
if self.layer.geom_field and self.layer.geom_field in self._fields:
return create_geojson_pagination_class(page_size=page_size, max_page_size=max_page_size)
else:
return create_json_pagination_class(page_size=page_size, max_page_size=max_page_size)
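    # Reading of the fall-through above (behaviour inferred from the code, not documented): when the
    # client requests page_size=0 and the layer allows it, no pagination class is returned, so the
    # whole filtered queryset is serialized in a single response.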
# def delete_multiple(self, request, *args, **kwargs):
# queryset = self.filter_queryset(self.get_queryset())
# queryset.delete()
# return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=True, methods=['get'])
def file_value(self, request, *args, **kwargs):
attribute = kwargs['attribute']
if attribute not in list(self._fields.keys()):
raise Http404
filter = {
self.lookup_field: kwargs['pk'],
}
obj = get_object_or_404(self.model, **filter)
file = getattr(obj, attribute)
full_path = file.path
fd = open(full_path, 'rb')
        file_mime = mimetypes.guess_type(file.name.split('/')[-1])[0]
response = FileResponse(fd, content_type=file_mime)
patch_response_headers(response, cache_timeout=60 * 60 * 24 * 7)
return response
@action(detail=True, methods=['get'])
def thumbnail_value(self, request, *args, **kwargs):
attribute = kwargs['attribute']
if attribute not in list(self._fields.keys()):
raise Http404
filter = {
self.lookup_field: kwargs['pk'],
}
obj = get_object_or_404(self.model, **filter)
file = getattr(obj, attribute)
thumbnail = file.storage.get_thumbnail(file.name, create=True)
full_path = thumbnail['path']
fd = open(full_path, 'rb')
        file_mime = mimetypes.guess_type(file.name.split('/')[-1])[0]
response = FileResponse(fd, content_type=file_mime)
patch_response_headers(response, cache_timeout=60 * 60 * 24 * 7)
return response
class Meta:
filter_overrides = ['geom']
class DBLayerContentBulkViewSet(DBLayerContentViewSetMixin, views.APIView):
ERROR_NOT_EXIST = 'ERROR_NOT_EXIST'
ERROR_ON_SAVE = 'ERROR_ON_SAVE'
csrf_exempt = True
permission_classes = (BulkDBLayerIsValidUser,)
queryset = []
model = None
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._fields = {}
self.opened_files = []
self.created_objects = []
self.original_updated_objects = {}
self.updated_objects = []
self.user_assets = []
self._to_do = []
@giscube_transaction_cache_response()
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
def initial(self, request, *args, **kwargs):
self.layer = DataBaseLayer.objects.get(name=kwargs['name'])
self.model = create_dblayer_model(self.layer)
self.lookup_field = self.layer.pk_field
self.geom_field = self.layer.geom_field
self._fields = {}
for field in self.layer.fields.filter(enabled=True):
self._fields[field.name] = {}
return super().initial(request, *args, **kwargs)
def get_queryset(self):
qs = self.model.objects.all()
qs = self.filter_queryset_by_group_data_filter(qs)
return qs
def to_image(self, field, path):
if path:
media_path = path.replace('media://', '')
self.user_assets.append(media_path)
path = os.path.join(settings.MEDIA_ROOT, media_path)
image_file = open(path, 'rb')
self.opened_files.append(image_file)
file_name = path.split('/')[-1]
            file_mime = mimetypes.guess_type(file_name)[0]
size = os.path.getsize(path)
return UploadedFile(image_file, file_name, file_mime, size)
@cached_property
def _image_fields(self):
from ..models import DataBaseLayerField
image_fields = {}
for field in self.layer.fields.filter(widget=DataBaseLayerField.WIDGET_CHOICES.image):
image_fields[field.name] = field
return image_fields
def apply_widgets(self, items):
image_fields = self._image_fields
for item in items:
item = self.get_properties(item)
for field in image_fields:
if field in item:
item[field] = self.to_image(field, item[field])
def clean_opened_files(self):
for file in self.opened_files:
try:
file.close()
except Exception as e:
if settings.DEBUG:
logger.warning(e)
def undo(self):
self.undo_add()
self.undo_update()
def undo_add(self):
image_fields = self._image_fields
for item in self.created_objects:
for field in image_fields:
file = getattr(item, field, None)
if file:
try:
file.delete(save=False)
except Exception as e:
logger.error(str(e), exc_info=True)
def undo_update(self):
image_fields = self._image_fields
for item in self.updated_objects:
pk = getattr(item, self.lookup_field)
old_model = self.model(**self.original_updated_objects[pk])
for field in image_fields:
old_file = getattr(old_model, field)
if old_file:
file = getattr(item, field, None)
if file.path != old_file.path:
file.delete(save=False)
def add(self, items):
self.apply_widgets(items)
Serializer = self.get_model_serializer_class()
add_serializers = []
for i, item in enumerate(items):
serializer = Serializer(data=item, context={'request': self.request})
if serializer.is_valid():
add_serializers.append(serializer)
else:
return {i: serializer.errors}
for i, serializer in enumerate(add_serializers):
try:
self.created_objects.append(serializer.save())
except Exception:
self.created_objects.append(serializer.instance)
return {i: self.ERROR_ON_SAVE}
def add_result(self, result):
result['ADD'] = []
for obj in self.created_objects:
result['ADD'].append({self.lookup_field: getattr(obj, self.lookup_field)})
def get_lookup_field_value(self, data):
if 'properties' in data and isinstance(data['properties'], dict):
if 'id' in data:
return data['id']
# Case: ADD - code is used as primary key in geojson
elif self.lookup_field in data['properties']:
return data['properties'][self.lookup_field]
# Case using normal pk, pk key doesn't exist in ADD
if self.lookup_field in data:
return data[self.lookup_field]
def get_properties(self, data):
if 'properties' in data and isinstance(data['properties'], dict):
new_data = data['properties']
if self.lookup_field not in new_data:
pk = self.get_lookup_field_value(data)
                # Case: ADD using the normal pk; the pk key doesn't exist in the payload yet
if pk:
new_data[self.lookup_field] = pk
return new_data
return data
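    # Hedged example of the two payload shapes the two helpers above accept (values invented here):
    #   GeoJSON-like: {"id": 12, "properties": {"name": "oak"}} -> lookup value 12, properties
    #                 returned as {"name": "oak", <pk_field>: 12}
    #   flat JSON:    {<pk_field>: 12, "name": "oak"}           -> returned unchanged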
def update(self, items):
self.apply_widgets(items)
Serializer = self.get_model_serializer_class()
update_serializers = []
for i, item in enumerate(items):
filter = {}
filter[self.lookup_field] = self.get_lookup_field_value(item)
obj = self.model.objects.filter(**filter).first()
if obj is None:
return {i: self.ERROR_NOT_EXIST}
self.original_updated_objects[list(filter.values())[0]] = model_to_dict(obj, exclude=['pk'])
serializer = Serializer(instance=obj, data=item, partial=True, context={'request': self.request})
if serializer.is_valid():
update_serializers.append(serializer)
else:
return {i: serializer.errors}
for i, serializer in enumerate(update_serializers):
try:
self.updated_objects.append(serializer.save())
except Exception:
self.updated_objects.append(serializer.instance)
return {i: self.ERROR_ON_SAVE}
def delete(self, items):
filter = {}
filter['%s__in' % self.lookup_field] = items
qs = self.get_queryset().filter(**filter)
image_fields = self._image_fields
for item in qs:
for field in image_fields:
file = getattr(item, field, None)
if file is not None:
                    self._to_do.append(lambda file=file: file.delete(save=False))
item.delete()
def delete_user_assets(self):
if len(self.user_assets) > 0:
user_assets = UserAsset.objects.filter(file__in=self.user_assets)
for asset in user_assets:
asset.delete()
def post(self, request, name):
data = request.data
errors = {}
result = {}
# TODO: schema
self.layer.db_connection.get_connection()
conn = self.layer.db_connection.connection_name()
autocommit = transaction.get_autocommit(using=conn)
transaction.set_autocommit(False, using=conn)
if 'ADD' | |
        cash_flows = [event['daily_perf']['capital_used']
for event in results]
self.assertEqual(cash_flows, [-1000, 0, 1000, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [-1000, -1000, 0, 0, 0, 0])
cash_pos = \
[event['cumulative_perf']['ending_cash'] for event in results]
self.assertEqual(cash_pos, [9000, 9000, 10000, 10000, 10000, 10000])
def test_long_position_receives_stock_dividend(self):
# post some trades in the market
events = {}
for asset in [self.asset1, self.asset2]:
events[asset.sid] = factory.create_trade_history(
asset,
[10, 10, 10, 10, 10, 10],
[100, 100, 100, 100, 100, 100],
oneday,
self.sim_params,
trading_calendar=self.trading_calendar,
)
dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')
writer = SQLiteAdjustmentWriter(
dbpath,
MockDailyBarReader(),
self.trading_calendar.all_sessions
)
splits = mergers = create_empty_splits_mergers_frame()
dividends = pd.DataFrame({
'sid': np.array([], dtype=np.uint32),
'amount': np.array([], dtype=np.float64),
'declared_date': np.array([], dtype='datetime64[ns]'),
'ex_date': np.array([], dtype='datetime64[ns]'),
'pay_date': np.array([], dtype='datetime64[ns]'),
'record_date': np.array([], dtype='datetime64[ns]'),
})
sid_1 = events[1]
stock_dividends = pd.DataFrame({
'sid': np.array([1], dtype=np.uint32),
'payment_sid': np.array([2], dtype=np.uint32),
'ratio': np.array([2], dtype=np.float64),
'declared_date': np.array([sid_1[0].dt], dtype='datetime64[ns]'),
'ex_date': np.array([sid_1[1].dt], dtype='datetime64[ns]'),
'record_date': np.array([sid_1[1].dt], dtype='datetime64[ns]'),
'pay_date': np.array([sid_1[2].dt], dtype='datetime64[ns]'),
})
writer.write(splits, mergers, dividends, stock_dividends)
adjustment_reader = SQLiteAdjustmentReader(dbpath)
data_portal = create_data_portal_from_trade_history(
self.env.asset_finder,
self.trading_calendar,
self.instance_tmpdir,
self.sim_params,
events,
)
data_portal._adjustment_reader = adjustment_reader
txns = [create_txn(self.asset1, events[1][0].dt, 10.0, 100)]
results = calculate_results(
self.sim_params,
self.asset_finder,
data_portal,
txns=txns,
)
self.assertEqual(len(results), 6)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.2, 0.2, 0.2, 0.2])
daily_returns = [event['daily_perf']['returns']
for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.2, 0.0, 0.0, 0.0])
cash_flows = [event['daily_perf']['capital_used']
for event in results]
self.assertEqual(cash_flows, [-1000, 0, 0, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [-1000] * 6)
cash_pos = \
[event['cumulative_perf']['ending_cash'] for event in results]
self.assertEqual(cash_pos, [9000] * 6)
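    # Rough arithmetic behind the expectations above (my reading of the fixture, not a separate
    # derivation): 100 shares of asset1 are bought for $1000 out of $10,000 starting capital; the
    # 2:1 stock dividend pays 200 shares of asset2 on the pay date, worth 200 * $10 = $2000 at the
    # fixture price, so cumulative returns jump to 0.2 with no further cash flow.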
def test_long_position_purchased_on_ex_date_receives_no_dividend(self):
# post some trades in the market
events = factory.create_trade_history(
self.asset1,
[10, 10, 10, 10, 10, 10],
[100, 100, 100, 100, 100, 100],
oneday,
self.sim_params,
trading_calendar=self.trading_calendar
)
dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')
writer = SQLiteAdjustmentWriter(
dbpath,
MockDailyBarReader(),
self.trading_calendar.all_sessions
)
splits = mergers = create_empty_splits_mergers_frame()
dividends = pd.DataFrame({
'sid': np.array([1], dtype=np.uint32),
'amount': np.array([10.00], dtype=np.float64),
'declared_date': np.array([events[0].dt], dtype='datetime64[ns]'),
'ex_date': np.array([events[1].dt], dtype='datetime64[ns]'),
'record_date': np.array([events[1].dt], dtype='datetime64[ns]'),
'pay_date': np.array([events[2].dt], dtype='datetime64[ns]'),
})
writer.write(splits, mergers, dividends)
adjustment_reader = SQLiteAdjustmentReader(dbpath)
data_portal = create_data_portal_from_trade_history(
self.env.asset_finder,
self.trading_calendar,
self.instance_tmpdir,
self.sim_params,
{1: events},
)
data_portal._adjustment_reader = adjustment_reader
# Simulate a transaction being filled on the ex_date.
txns = [create_txn(self.asset1, events[1].dt, 10.0, 100)]
results = calculate_results(
self.sim_params,
self.asset_finder,
data_portal,
txns=txns,
)
self.assertEqual(len(results), 6)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0, 0, 0, 0, 0, 0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0, 0, 0, 0, 0, 0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, -1000, 0, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows,
[0, -1000, -1000, -1000, -1000, -1000])
def test_selling_before_dividend_payment_still_gets_paid(self):
# post some trades in the market
events = factory.create_trade_history(
self.asset1,
[10, 10, 10, 10, 10, 10],
[100, 100, 100, 100, 100, 100],
oneday,
self.sim_params,
trading_calendar=self.trading_calendar,
)
dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')
writer = SQLiteAdjustmentWriter(
dbpath,
MockDailyBarReader(),
self.trading_calendar.all_sessions,
)
splits = mergers = create_empty_splits_mergers_frame()
dividends = pd.DataFrame({
'sid': np.array([1], dtype=np.uint32),
'amount': np.array([10.00], dtype=np.float64),
'declared_date': np.array([events[0].dt], dtype='datetime64[ns]'),
'ex_date': np.array([events[1].dt], dtype='datetime64[ns]'),
'record_date': np.array([events[1].dt], dtype='datetime64[ns]'),
'pay_date': np.array([events[3].dt], dtype='datetime64[ns]'),
})
writer.write(splits, mergers, dividends)
adjustment_reader = SQLiteAdjustmentReader(dbpath)
data_portal = create_data_portal_from_trade_history(
self.env.asset_finder,
self.trading_calendar,
self.instance_tmpdir,
self.sim_params,
{1: events},
)
data_portal._adjustment_reader = adjustment_reader
buy_txn = create_txn(self.asset1, events[0].dt, 10.0, 100)
sell_txn = create_txn(self.asset1, events[2].dt, 10.0, -100)
txns = [buy_txn, sell_txn]
results = calculate_results(
self.sim_params,
self.asset_finder,
data_portal,
txns=txns,
)
self.assertEqual(len(results), 6)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0, 0, 0, 0.1, 0.1, 0.1])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0, 0, 0, 0.1, 0, 0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [-1000, 0, 1000, 1000, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows,
[-1000, -1000, 0, 1000, 1000, 1000])
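    # Sanity check of the numbers above (reading of the fixture): the $10-per-share dividend on the
    # 100 held shares pays $1000 on the pay date (events[3]) even though the position was closed on
    # events[2], hence the extra +1000 cash flow and the 0.1 return on day four.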
def test_buy_and_sell_before_ex(self):
# need a six-day simparam
# post some trades in the market
events = factory.create_trade_history(
self.asset1,
[10, 10, 10, 10, 10, 10],
[100, 100, 100, 100, 100, 100],
oneday,
self.sim_params,
trading_calendar=self.trading_calendar,
)
dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')
writer = SQLiteAdjustmentWriter(
dbpath,
MockDailyBarReader(),
self.trading_calendar.all_sessions,
)
splits = mergers = create_empty_splits_mergers_frame()
dividends = pd.DataFrame({
'sid': np.array([1], dtype=np.uint32),
'amount': np.array([10.0], dtype=np.float64),
'declared_date': np.array([events[3].dt], dtype='datetime64[ns]'),
'ex_date': np.array([events[4].dt], dtype='datetime64[ns]'),
'pay_date': np.array([events[5].dt], dtype='datetime64[ns]'),
'record_date': np.array([events[4].dt], dtype='datetime64[ns]'),
})
writer.write(splits, mergers, dividends)
adjustment_reader = SQLiteAdjustmentReader(dbpath)
data_portal = create_data_portal_from_trade_history(
self.env.asset_finder,
self.trading_calendar,
self.instance_tmpdir,
self.sim_params,
{1: events},
)
data_portal._adjustment_reader = adjustment_reader
buy_txn = create_txn(self.asset1, events[1].dt, 10.0, 100)
sell_txn = create_txn(self.asset1, events[2].dt, 10.0, -100)
txns = [buy_txn, sell_txn]
results = calculate_results(
self.sim_params,
self.asset_finder,
data_portal,
txns=txns,
)
self.assertEqual(len(results), 6)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0, 0, 0, 0, 0, 0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0, 0, 0, 0, 0, 0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, -1000, 1000, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [0, -1000, 0, 0, 0, 0])
def test_ending_before_pay_date(self):
# post some trades in the market
events = factory.create_trade_history(
self.asset1,
[10, 10, 10, 10, 10, 10],
[100, 100, 100, 100, 100, 100],
oneday,
self.sim_params,
trading_calendar=self.trading_calendar,
)
pay_date = self.sim_params.first_open
# find pay date that is much later.
for i in range(30):
pay_date = factory.get_next_trading_dt(pay_date, oneday,
self.trading_calendar)
dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')
writer = SQLiteAdjustmentWriter(
dbpath,
MockDailyBarReader(),
self.trading_calendar.all_sessions,
)
splits = mergers = create_empty_splits_mergers_frame()
dividends = pd.DataFrame({
'sid': np.array([1], dtype=np.uint32),
'amount': np.array([10.00], dtype=np.float64),
'declared_date': np.array([events[0].dt], dtype='datetime64[ns]'),
'ex_date': np.array([events[0].dt], dtype='datetime64[ns]'),
'record_date': np.array([events[0].dt], dtype='datetime64[ns]'),
'pay_date': np.array([pay_date], dtype='datetime64[ns]'),
})
writer.write(splits, mergers, dividends)
adjustment_reader = SQLiteAdjustmentReader(dbpath)
data_portal = create_data_portal_from_trade_history(
self.env.asset_finder,
self.trading_calendar,
self.instance_tmpdir,
self.sim_params,
{1: events},
)
data_portal._adjustment_reader = adjustment_reader
txns = [create_txn(self.asset1, events[1].dt, 10.0, 100)]
results = calculate_results(
self.sim_params,
self.asset_finder,
data_portal,
txns=txns,
)
self.assertEqual(len(results), 6)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0, 0, 0, 0.0, 0.0, 0.0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0, 0, 0, 0, 0, 0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, -1000, 0, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(
cumulative_cash_flows,
[0, -1000, -1000, -1000, -1000, -1000]
)
def test_short_position_pays_dividend(self):
# post some trades in the market
events = factory.create_trade_history(
self.asset1,
[10, 10, 10, 10, 10, 10],
[100, 100, 100, 100, 100, 100],
oneday,
self.sim_params,
trading_calendar=self.trading_calendar,
)
dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')
writer = SQLiteAdjustmentWriter(
dbpath,
MockDailyBarReader(),
self.trading_calendar.all_sessions,
)
splits = mergers = create_empty_splits_mergers_frame()
dividends = pd.DataFrame({
'sid': np.array([1], dtype=np.uint32),
'amount': np.array([10.00], dtype=np.float64),
'declared_date': np.array([events[0].dt], dtype='datetime64[ns]'),
'ex_date': np.array([events[2].dt], dtype='datetime64[ns]'),
'record_date': np.array([events[2].dt], dtype='datetime64[ns]'),
'pay_date': np.array([events[3].dt], dtype='datetime64[ns]'),
})
writer.write(splits, mergers, dividends)
adjustment_reader = SQLiteAdjustmentReader(dbpath)
data_portal = create_data_portal_from_trade_history(
self.env.asset_finder,
self.trading_calendar,
self.instance_tmpdir,
self.sim_params,
{1: events},
)
data_portal._adjustment_reader = adjustment_reader
txns = [create_txn(self.asset1, events[1].dt, 10.0, -100)]
results = calculate_results(
self.sim_params,
self.asset_finder,
data_portal,
txns=txns,
)
self.assertEqual(len(results), 6)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, -0.1, -0.1, -0.1])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.0, -0.1, 0.0, 0.0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, 1000, 0, -1000, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [0, 1000, 1000, 0, 0, 0])
def test_no_position_receives_no_dividend(self):
# post some trades in the market
events = factory.create_trade_history(
self.asset1,
[10, 10, 10, 10, 10, 10],
[100, 100, 100, 100, 100, 100],
oneday,
self.sim_params,
trading_calendar=self.trading_calendar,
)
dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')
writer = SQLiteAdjustmentWriter(
dbpath,
MockDailyBarReader(),
self.trading_calendar.all_sessions,
)
splits = mergers = create_empty_splits_mergers_frame()
dividends = pd.DataFrame({
'sid': np.array([1], dtype=np.uint32),
'amount': np.array([10.00], dtype=np.float64),
'declared_date': np.array([events[0].dt], dtype='datetime64[ns]'),
'ex_date': np.array([events[1].dt], dtype='datetime64[ns]'),
'pay_date': np.array([events[2].dt], dtype='datetime64[ns]'),
'record_date': np.array([events[2].dt], dtype='datetime64[ns]'),
})
writer.write(splits, mergers, dividends)
adjustment_reader = SQLiteAdjustmentReader(dbpath)
data_portal = create_data_portal_from_trade_history(
self.env.asset_finder,
self.trading_calendar,
self.instance_tmpdir,
self.sim_params,
{1: events},
)
data_portal._adjustment_reader = adjustment_reader
results = calculate_results(
self.sim_params,
self.asset_finder,
data_portal,
)
self.assertEqual(len(results), 6)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, 0, 0, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [0, 0, 0, 0, 0, 0])
def test_no_dividend_at_simulation_end(self):
# post some trades in the market
events = factory.create_trade_history(
self.asset1,
[10, 10, 10, 10, 10, 10],
[100, 100, 100, 100, 100, 100],
oneday,
self.sim_params,
trading_calendar=self.trading_calendar,
)
        dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')
    # loop from 1 to n-2, with point 2 to n-1 as other vertex of triangle
# this could definitely be written more nicely
b_point = vertices[1]
root_b_dist = haversine_distance(root_point, b_point)
for i in np.arange(1, n - 1):
a_point = b_point
b_point = vertices[i + 1]
root_a_dist = root_b_dist
root_b_dist = haversine_distance(root_point, b_point)
a_b_dist = haversine_distance(a_point, b_point)
s = (root_a_dist + root_b_dist + a_b_dist) / 2.0
arg = (
np.tan(0.5 * s)
* np.tan(0.5 * (s - root_a_dist))
* np.tan(0.5 * (s - root_b_dist))
* np.tan(0.5 * (s - a_b_dist))
)
totalexcess += 4 * np.arctan(np.sqrt(arg))
return totalexcess * radius ** 2
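# The loop above is L'Huilier's theorem applied per triangle: with side lengths a, b, c and
# s = (a + b + c) / 2, the spherical excess is E = 4 * arctan(sqrt(tan(s/2) * tan((s-a)/2) *
# tan((s-b)/2) * tan((s-c)/2))) and the polygon area is the summed excess times radius**2.
# Quick sanity check (illustrative, not part of the original code): the octant with vertices
# (1,0,0), (0,1,0), (0,0,1) on the unit sphere should come out close to pi / 2.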
class PointsOnSphere:
""" class representing points on an n-dimensional unit sphere """
def __init__(self, points):
"""
Args:
points (:class:`numpy.ndarray`):
The list of points on the unit sphere
"""
self.points = np.asarray(points, dtype=np.double)
# normalize vectors to force them onto the unit-sphere
self.points /= np.linalg.norm(self.points, axis=1)[:, np.newaxis]
self.dim = self.points.shape[-1]
@classmethod
def make_uniform(cls, dim: int, num_points: Optional[int] = None):
"""create uniformly distributed points on a sphere
Args:
dim (int): The dimension of space
num_points (int, optional): The number of points to generate. Note
that for one-dimensional spheres (intervals), only exactly two
points can be generated
"""
if dim == 1:
            # just have two directions in 1d
if num_points is None:
num_points = 2
if num_points != 2:
raise ValueError("Can only place 2 points in 1d")
points = [[-1], [1]]
elif dim == 2:
if num_points is None:
num_points = 8
# distribute points evenly around the circle
φs = np.linspace(0, 2 * π, num_points, endpoint=False)
points = np.c_[np.cos(φs), np.sin(φs)]
elif dim == 3:
# Distribute points on the unit sphere using a sunflower spiral
# (inspired by https://stackoverflow.com/a/44164075/932593)
if num_points is None:
num_points = 18
indices = np.arange(0, num_points) + 0.5
φ = np.arccos(1 - 2 * indices / num_points)
θ = π * (1 + 5 ** 0.5) * indices
# convert to Cartesian coordinates
points = np.c_[np.cos(θ) * np.sin(φ), np.sin(θ) * np.sin(φ), np.cos(φ)]
elif num_points is None:
# use vertices of hypercube in n dimensions
points = [
p # type: ignore
for p in itertools.product([-1, 0, 1], repeat=dim)
if any(c != 0 for c in p)
]
else:
raise NotImplementedError()
# normalize vectors
return cls(points)
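    # Usage sketch (illustrative, not from the original module): PointsOnSphere.make_uniform(2, 4)
    # yields the unit vectors (1,0), (0,1), (-1,0), (0,-1), while make_uniform(3) distributes the
    # default 18 points along the sunflower spiral; the constructor then re-normalizes everything
    # onto the unit sphere.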
@cached_method()
def get_area_weights(self, balance_axes: bool = True):
"""return the weight of each point associated with the unit cell size
Args:
balance_axes (bool): Flag determining whether the weights should be
chosen such that the weighted average of all points is the
zero vector
Returns:
:class:`numpy.ndarray`: The weight associated with each point
"""
points_flat = self.points.reshape(-1, self.dim)
if self.dim == 1:
weights = np.array([0.5, 0.5])
elif self.dim == 2:
# get angles
φ = np.arctan2(points_flat[:, 1], points_flat[:, 0])
idx = np.argsort(φ)
s0 = φ[idx[0]] + 2 * π - φ[idx[-1]]
sizes = np.r_[s0, np.diff(φ[idx]), s0]
weights = (sizes[1:] + sizes[:-1]) / 2
weights /= 2 * π
elif self.dim == 3:
# calculate weights using spherical voronoi construction
voronoi = spatial.SphericalVoronoi(points_flat)
voronoi.sort_vertices_of_regions()
weights = [
get_spherical_polygon_area(voronoi.vertices[ix])
for ix in voronoi.regions
]
weights = np.array(weights, dtype=np.double)
weights /= surface_from_radius(1, dim=self.dim)
else:
raise NotImplementedError()
if balance_axes:
weights /= weights.sum() # normalize weights
# adjust weights such that all distances are weighted equally, i.e.,
# the weighted sum of all shell vectors should vanish. Additionally,
# the sum of all weights needs to be one. To satisfy these
# constraints simultaneously, the weights are adjusted minimally
# (in a least square sense).
matrix = np.c_[points_flat, np.ones(len(points_flat))]
vector = -weights @ matrix + np.r_[np.zeros(self.dim), 1]
weights += np.linalg.lstsq(matrix.T, vector, rcond=None)[0]
return weights.reshape(self.points.shape[:-1])
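    # Summary of the balancing step above, in notation chosen here rather than taken from the source:
    # with point matrix X (N x dim) and weights w, the code looks for the smallest correction dw such
    # that (w + dw) @ X = 0 and sum(w + dw) = 1, i.e. it solves M.T @ dw = -w @ M + e with
    # M = [X | 1] and e = (0, ..., 0, 1) via np.linalg.lstsq.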
def get_distance_matrix(self):
"""calculate the (spherical) distances between each point
Returns:
:class:`numpy.ndarray`: the distance of each point to each other
"""
if self.dim == 1:
raise ValueError("Distances can only be calculated for dim >= 2")
elif self.dim == 2:
# use arc length on unit circle to calculate distances
def metric(a, b):
return np.arccos(a @ b)
elif self.dim == 3:
# calculate distances on sphere using haversine definition
metric = haversine_distance
else:
raise NotImplementedError()
# determine the distances between all points
dists = spatial.distance.pdist(self.points, metric)
return spatial.distance.squareform(dists)
def get_mean_separation(self) -> float:
""" float: calculates the mean distance to the nearest neighbor """
        if len(self.points) < 2:
return float("nan")
dists_sorted = np.sort(self.get_distance_matrix(), axis=1)
return float(dists_sorted[:, 1].mean())
def write_to_xyz(self, path: str, comment: str = "", symbol: str = "S"):
"""write the point coordinates to a xyz file
Args:
filename (str): location of the file where data is written
comment (str, optional): comment that is written to the second line
symbol (str, optional): denotes the symbol used for the atoms
"""
with open(path, "w") as fp:
fp.write("%d\n" % len(self.points))
fp.write(comment + "\n")
for point in self.points:
point_str = " ".join(("%.12g" % v for v in point))
line = "%s %s\n" % (symbol, point_str)
fp.write(line)
def spherical_index_k(degree: int, order: int = 0) -> int:
"""returns the mode `k` from the degree `degree` and order `order`
Args:
degree (int): Degree of the spherical harmonics
order (int): Order of the spherical harmonics
Raises:
ValueError: if `order < -degree` or `order > degree`
Returns:
int: a combined index k
"""
if not -degree <= order <= degree:
raise ValueError("order must lie between -degree and degree")
return degree * (degree + 1) + order
def spherical_index_lm(k: int) -> Tuple[int, int]:
"""returns the degree `l` and the order `m` from the mode `k`
Args:
k (int): The combined index for the spherical harmonics
Returns:
tuple: The degree `l` and order `m` of the spherical harmonics
            associated with the combined index
"""
degree = int(np.floor(np.sqrt(k)))
return degree, k - degree * (degree + 1)
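# Worked example for the two index helpers above (values picked for illustration): degree=2,
# order=1 gives k = 2 * 3 + 1 = 7, and spherical_index_lm(7) recovers (2, 1) because
# floor(sqrt(7)) = 2 and 7 - 2 * 3 = 1.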
def spherical_index_count(l: int) -> int:
"""return the number of modes for all indices <= l
The returned value is one less than the maximal mode `k` required.
Args:
degree (int): Maximal degree of the spherical harmonics
Returns:
int: The number of modes
"""
return 1 + 2 * l + l * l
def spherical_index_count_optimal(k_count: int) -> bool:
"""checks whether the modes captures all orders for maximal degree
Args:
k_count (int): The number of modes considered
"""
is_square = bool(int(np.sqrt(k_count) + 0.5) ** 2 == k_count)
return is_square
def spherical_harmonic_symmetric(degree: int, θ: float) -> float:
r"""axisymmetric spherical harmonics with degree `degree`, so `m=0`.
Args:
degree (int): Degree of the spherical harmonics
θ (float): Azimuthal angle at which the spherical harmonics is
evaluated (in :math:`[0, \pi]`)
Returns:
float: The value of the spherical harmonics
"""
# note that the definition of `sph_harm` has a different convention for the
# usage of the variables φ and θ and we thus have to swap the args
return np.real(sph_harm(0.0, degree, 0.0, θ)) # type: ignore
def spherical_harmonic_real(degree: int, order: int, θ: float, φ: float) -> float:
r"""real spherical harmonics of degree l and order m
Args:
degree (int): Degree :math:`l` of the spherical harmonics
order (int): Order :math:`m` of the spherical harmonics
θ (float): Azimuthal angle (in :math:`[0, \pi]`) at which the
spherical harmonics is evaluated.
φ (float): Polar angle (in :math:`[0, 2\pi]`) at which the spherical
harmonics is evaluated.
Returns:
float: The value of the spherical harmonics
"""
# note that the definition of `sph_harm` has a different convention for the
# usage of the variables φ and θ and we thus have to swap the args
# Moreover, the scipy functions expect first the order and then the degree
if order > 0:
term1 = sph_harm(order, degree, φ, θ)
term2 = (-1) ** order * sph_harm(-order, degree, φ, θ)
return np.real((term1 + term2) / np.sqrt(2)) # type: ignore
elif order == 0:
return np.real(sph_harm(0, degree, φ, θ)) # type: ignore
else: # order < 0
term1 = sph_harm(-order, degree, φ, θ)
term2 = (-1) ** order * sph_harm(order, degree, φ, θ)
return np.real((term1 - term2) / (np.complex(0, np.sqrt(2)))) # type: ignore
def spherical_harmonic_real_k(k: int, θ: float, φ: float) -> float:
r"""real spherical harmonics described | |
#!/usr/bin/env python3
# smugsync v2.3
# to keep my sanity since local directories can be both folders and galleries in SmugMug
# I use the following terminology in the code:
# local filesystem directory intended to be a 'Folder' in SmugMug is called a directory
# local filesystem directory intended to be a 'Gallery' in SmugMug is called an album
# The code determines the directory 'type' by the presence of a file called .smgallery or .smfolder
# Remote SmugMug Folder is called a folder
# Remote SmugMug Gallery is called a gallery
from smugmug import SmugMug
from pathlib import Path
import argparse, sys, os, hashlib, json, time, mimetypes, fnmatch
#
# SmugMug modules
#
def list_albums(verbose):
"""
Download all albums in an account
"""
smugmug = SmugMug(verbose)
album_names = []
albums = smugmug.get_album_names()
for album_name in albums:
print(album_name)
def get_template_id(template_name):
template_id = None
template_id = smugmug.get_template_id(template_name)
    if template_id == None:
        print('Error: Could not find album template named \'' + template_name + '\'')
        sys.exit(1)
    return template_id
def get_root_node_id(self):
"""
Get the root node ID of the account.
Returns node id string
"""
#smugmug = SmugMug() TODO: Add error handling
response = self.request('GET', self.smugmug_api_base_url + "/user/"+self.username, headers={'Accept': 'application/json'})
node = response['Response']['User']['Uris']['Node']
node_id = node['Uri'].rsplit('/',1)[1]
#print(node_id)
return node_id
def get_child_nodes(self, parent_node_id):
"""
Get a list of child nodes given the parents node_id
"""
start = 1
stepsize = 100
nodes = []
while(True):
params = {'start': start, 'count': stepsize}
response = self.request('GET', self.smugmug_api_base_url + "/node/" + parent_node_id + "!children", params=params, headers={'Accept': 'application/json'})
for node in (response['Response']['Node'] if 'Node' in response['Response'] else []):
#print(node)
nodes.append({"Name": node["Name"], "NodeID": node["NodeID"], "HasChildren": node["HasChildren"], "Type": node["Type"]})
#print(nodes)
if 'NextPage' in response['Response']['Pages']:
start += stepsize
else:
break
return nodes
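# Sketch of the paging behaviour above (summarizing this code, not the SmugMug documentation):
# children are fetched up to 100 at a time via the start/count params, `start` advances while the
# response still reports Pages.NextPage, and each child is collected as a dict holding its Name,
# NodeID, HasChildren flag and Type.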
def get_node_id(self, parent_node_id, node_name):
"""
Get/Return the node_id of a given node name directly under the given parent node id
"""
child_nodes = []
child_nodes = get_child_nodes(self, parent_node_id)
for child in child_nodes:
if node_name == child["Name"]:
return child["NodeID"]
if args.verbose:
print('Could not find node ' + node_name + ', under parent NodeID ' + parent_node_id)
return False
def get_starting_node_id(self, path):
parent_node_id = get_root_node_id(smugmug)
node_path = []
    node_path = path.split('/')
    for node_name in node_path:
        node_id = get_node_id(smugmug, parent_node_id, node_name)
        parent_node_id = node_id
    return parent_node_id
def find_node(parent_node_id, node_name):
"""
Find the node_id given a node name starting from a parent node id
Returns array with NodeID and Name
"""
smugmug = SmugMug()
#root_node_id = get_root_node_id(smugmug)
nodes = get_child_nodes(smugmug, parent_node_id)
# Match against folder type nodes
#print(nodes)
for node in nodes[:]:
#print(node)
if node["Type"] == 'Folder':
if node["Name"] == node_name:
#print('Found matching folder node')
return node
break
if node["HasChildren"] == 'False':
nodes.remove(node)
else:
nodes.remove(node)
#If nothing found in starting folder, check sub-folders
else:
try:
if nodes:
#print("Did not find checking sub folders")
for node in nodes:
#print(node)
found = find_node(node["NodeID"], node_name)
if found:
return found
except:
print('Could not find folder ' + node_name)
def create_node(self, parent_node_id, node_name, node_type ):
#Creates a node and returns the nodeid
#TODO still need to add error checking
if node_type == 'Folder':
data = {"Type": node_type, "Name": node_name, "UrlName": smugmug.create_nice_name(node_name)}
response = self.request('POST', self.smugmug_api_base_url + "/node/"+parent_node_id + "!children", data=json.dumps(data), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
return response['Response']['Node']['NodeID']
#TODO add response error checking
if node_type == 'Album':
data = {"Type": node_type, "Name": node_name, "UrlName": smugmug.create_nice_name(node_name), "Privacy": 'Unlisted'}
response = self.request('POST', self.smugmug_api_base_url + "/node/"+parent_node_id + "!children", data=json.dumps(data), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
return response['Response']['Node']['NodeID']
def get_album_key(self, node_id):
response = smugmug.request('GET', smugmug.smugmug_api_base_url + "/node/"+node_id, headers={'Accept': 'application/json'})
albumkey = response['Response']['Node']['Uris']['Album']['Uri'].rsplit('/',1)[1]
#print(albumkey)
return albumkey
def create_tree(self, source, dest):
"""
Create tree structure given localsource directory
and smugmug destination node name & ID
"""
parent_node = dest
#handle initial root folder case
#if dest["Name"] == basenode["Name"]:
#parent_node = basenode
#else:
# parent_node = find_node()
#if entry == args.source:
subdirs = []
subdirs = has_dirs(source)
# for each subfolder
for subdir in subdirs:
#determine if gallery or folder
if has_files(subdir):
album_name = subdir.rsplit('/', 1)[1]
if not find_node(parent_node["NodeID"], album_name):
print(album_name + ' album does not exist, creating')
create_node(smugmug, parent_node["NodeID"], album_name, 'Album' )
else:
print(album_name + ' album exists')
elif has_dirs(subdir):
folder_name = subdir.rsplit('/', 1)[1]
subdir_node = find_node(parent_node["NodeID"], folder_name)
if not subdir_node:
print(folder_name + ' folder does not exist, creating')
create_node(smugmug, parent_node["NodeID"], folder_name, 'Folder' )
time.sleep(30)
else:
print(folder_name + ' folder exists')
create_tree(self, subdir, subdir_node)
def upload_image(self, image_data, image_name, image_type, album_id):
"""Upload an image"""
response = self.request('POST', self.smugmug_upload_uri,
data=image_data,
header_auth = True,
headers={'X-Smug-AlbumUri': "/api/v2/album/"+album_id,
'X-Smug-Version':self.smugmug_api_version,
'X-Smug-ResponseType':'JSON',
'Content-MD5': hashlib.md5(image_data).hexdigest(),
'X-Smug-FileName':image_name,
'Content-Length' : str(len(image_data)),
'Content-Type': image_type})
return response
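# Note on the upload call above (summarizing this code, not SmugMug's documentation): the raw
# image bytes are POSTed to the upload URI with their MD5 digest in Content-MD5 and the target
# gallery passed via the X-Smug-AlbumUri header; upload_files() later reuses the same MD5 to
# detect unchanged files by comparing it against the gallery image's ArchivedMD5.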
def remove_image(self, image_uri):
"""Remove an image"""
#print(image_uri)
response = self.request('DELETE', 'https://api.smugmug.com'+image_uri,
header_auth = True,
headers={'Accept': 'application/json',
'X-Smug-Version':self.smugmug_api_version,
'X-Smug-ResponseType':'JSON'})
#print(response)
return response
def upload_overwrite_image(self, image_data, image_name, image_type, album_id, image_uri):
"""Upload and overwrite an existing image"""
response = self.request('POST', self.smugmug_upload_uri,
data=image_data,
header_auth = True,
headers={'X-Smug-AlbumUri': "/api/v2/album/"+album_id,
'X-Smug-Version':self.smugmug_api_version,
'X-Smug-ResponseType':'JSON',
'Content-MD5': hashlib.md5(image_data).hexdigest(),
'X-Smug-FileName':image_name,
'Content-Length' : str(len(image_data)),
'Content-Type': image_type,
'X-Smug-ImageUri': image_uri})
return response
def upload_files(self, album_id, image_paths):
# Uploading the images
total = len(image_paths)
count = 0
#album_image_names = smugmug.get_album_image_names(album_id)
album_images = smugmug.get_album_images(album_id)
for image_path in image_paths:
if args.verbose == True:
print('----------------------------------------------------')
count += 1
image_name = os.path.basename(image_path)
sys.stdout.write('Checking ' + image_name + ' [' + str(count) + '/' + str(total) + ']... ')
sys.stdout.flush()
if args.verbose == True:
print('')
#print(album_images['FileName'])
for image in album_images:
if image_name == image['FileName']:
try:
image_data = open(image_path, 'rb').read()
filehash = hashlib.md5(image_data).hexdigest()
if filehash == image['ArchivedMD5']:
print('File is the same, skipping.')
sys.stdout.flush()
else:
sys.stdout.write('File has changed, updating... ')
sys.stdout.flush()
# Finding the mime type
image_type = mimetypes.guess_type(image_path)[0]
# Uploading image
#print(image['Uris']['Image']['Uri'])
#sys.exit(1)
result = upload_overwrite_image(self, image_data=image_data, image_name=image_name, image_type=image_type, album_id=album_id, image_uri=image['Uris']['Image']['Uri'])
#print(result)
if result['stat'] != 'ok':
                            print('Error: Upload failed for file \'' + image_name + '\'')
print('Printing server response:')
print(result)
sys.exit(1)
print('Done')
except IOError as e:
print("I/O error({0}): {1}".format(e.errno, e.strerror))
raise
#print('File already exists, skipping.')
sys.stdout.flush()
break
#if image_name in album_images['FileName']:
# print('File already exists, skipping.')
# sys.stdout.flush()
else:
# Loading the image data
try:
image_data = open(image_path, 'rb').read()
except IOError as e:
print("I/O error({0}): {1}".format(e.errno, e.strerror))
raise
sys.stdout.write('File is new, uploading...')
# Finding the mime type
image_type = mimetypes.guess_type(image_path)[0]
# Uploading image
result = upload_image(self, image_data=image_data, image_name=image_name, image_type=image_type, album_id=album_id)
if result['stat'] != 'ok':
                print('Error: Upload failed for file \'' + image_name + '\'')
print('Printing server response:')
print(result)
sys.exit(1)
print('Done')
# Small additional check if the number of images matches
album_images = smugmug.get_album_images(album_id)
    if len(image_paths) != len(album_images):
        print('Warning: You selected ' + str(len(image_paths)) + ' images, but there are ' + str(len(album_images)) + ' in the online album.')
#print('Album done')
def remove_images(self, album_id, local_path):
# Remove files from gallery that do not exist locally
#album_image_names = smugmug.get_album_image_names(album_id)
album_images = smugmug.get_album_images(album_id)
resolved_path = Path(local_path).resolve()
str_path = str(resolved_path)
image_uris = []
for image in album_images:
#print(image)
filename = image['FileName']
#print(local_path)
local_file = str_path + '/' + filename
if not Path(local_file).is_file():
image_uris.append(image['Uri'])
if len(image_uris) == 0:
print("Album done")
elif 0 < len(image_uris) < (len(album_images) * .15) :
print("Removing",len(image_uris),"images from album")
for image_uri in image_uris:
result = remove_image(smugmug, image_uri)
print("Album done")
elif len(image_uris) >= (len(album_images) * .15):
print("More images to remove than reasonably expected, skipping removal.")
print("Album not done")
#
# Local filesystem modules
#
def is_album(dir_path):
"""Determines if a given local directory should map to a SmugMug Folder or
Gallery by presence of a .smgallery file"""
if os.path.isfile(dir_path + "/.smgallery"):
return True
else:
return False
def is_folder(dir_path):
"""Determines if a given local directory should map to a SmugMug Folder or
Gallery by presence of a .smfolder file"""
if os.path.isfile(dir_path + "/.smfolder"):
return True
else:
return False
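# Illustrative local layout for the two marker files (example paths only):
#   Photos/            contains .smfolder  -> treated as a directory, mapped to a SmugMug Folder
#   Photos/2019-Trip/  contains .smgallery -> treated as an album, mapped to a SmugMug Gallery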
def process_dir_as_gallery(directory, parent_node_id):
# Process local-directory as a gallery inside SmugMug Parent NodeID
dir_path = str(Path(directory).resolve())
dirname = dir_path.rsplit('/',1)[-1]
print('Processing album ' + dirname)
album_name = dirname
node_id = get_node_id(smugmug, parent_node_id, album_name)
if not node_id:
response = create_node(smugmug, parent_node_id, album_name, 'Album')
node_id = response
files = has_images(dir_path)
if files:
albumkey = get_album_key(smugmug, node_id)
upload_files(smugmug, albumkey, files)
remove_images(smugmug, albumkey, dir_path)
def process_dir_as_folder(directory, parent_node_id):
# Process local-directory as a folder inside SmugMug Parent NodeID
dir_path = str(Path(directory).resolve())
dirname = dir_path.rsplit('/',1)[-1]
# smugmug=SmugMug(args.verbose)
if args.verbose: print('Working on ' + dirname)
node_id = get_node_id(smugmug, parent_node_id, dirname)
if not node_id:
print('creating node ' + dirname)
response = create_node(smugmug, parent_node_id, dirname, 'Folder')
node_id = response
#Check subdirectories for | |
from django.shortcuts import render
from django.contrib import messages
from django.shortcuts import render_to_response, redirect
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse, HttpResponse
from django.utils import timezone
from django.core.exceptions import ObjectDoesNotExist
from constance import config # For the explicitly user-configurable stuff
from .decorators import site_is_configured, login_if_required_for_dashboard
from . import device_forms, profile_forms, beer_forms, setup_forms
from . import setup_views, mdnsLocator, almost_json, git_integration, connection_debug, udev_integration
import json, datetime, pytz, os, random, sys, subprocess
import fermentrack_django.settings as settings
from app.models import BrewPiDevice, OldControlConstants, NewControlConstants, PinDevice, SensorDevice, BeerLogPoint, Beer
from external_push.models import GenericPushTarget
from django.contrib.auth.models import User
def error_notifications(request):
if config.GIT_UPDATE_TYPE != "none":
# TODO - Reset this to 18 hours
# Check the git status at least every 6 hours
now_time = timezone.now()
try:
if config.LAST_GIT_CHECK < now_time - datetime.timedelta(hours=6):
try:
if git_integration.app_is_current():
config.LAST_GIT_CHECK = now_time
else:
                        messages.info(request, "This app is not at the latest version! " +
                                      '<a href="/upgrade">Upgrade from GitHub</a> to receive the latest version.')
except:
# If we can't check for the latest version info, skip and move on
pass
except:
# So here's the deal. On Python3 conversion, any datetime.datetime objects stored in Constance end up
# getting unpickled poorly. It's truly quite a pickle! Ahhhahahahaha, I crack myself up. Anyways, just
# overwrite it. Git check can happen on next refresh.
config.LAST_GIT_CHECK = now_time - datetime.timedelta(hours=18)
config.FIRMWARE_LIST_LAST_REFRESHED = now_time - datetime.timedelta(hours=72)
if not config.ALLOW_GIT_BRANCH_SWITCHING:
# Ths user is using one of the two "default" branches (dev or master). Make sure that the branch he/she is
# actually using is the same as the one that he/she wanted.
# Don't check if the user has custom branch switching though, as they should be allowed to pick whatever
# branch he/she wants.
if settings.GIT_BRANCH != config.GIT_UPDATE_TYPE:
if config.GIT_UPDATE_TYPE not in [x for x,_ in settings.CONSTANCE_ADDITIONAL_FIELDS['git_update_type_select'][1]['choices']]:
# TODO - Fix this to pick up the default
config.GIT_UPDATE_TYPE = "dev"
else:
messages.warning(request, "You selected to update from the {} code ".format(config.GIT_UPDATE_TYPE) +
"branch, but you are currently using the {} branch. ".format(settings.GIT_BRANCH) +
'Click <a href="/upgrade">here</a> to update to the correct branch.')
# This is a good idea to do, but unfortunately sshwarn doesn't get removed when the password is changed, only when
# the user logs in a second time. Once I have time to make a "help" page for this, I'll readd this check
# TODO - Readd this check
# if os.path.isfile("/var/run/sshwarn"):
# messages.warning(request, "You have SSH enabled on the Raspberry Pi, but the default (pi) user's password is "
# "<PASSWORD>! This is potentially a major security issue. Please SSH in, change the "
# "password, and SSH in one more time to test that it worked. Otherwise, we'll keep "
# "annoying you until you do.")
# Siteroot is a lazy way of determining where to direct the user when they go to http://devicename.local/
def siteroot(request):
# In addition to requiring the site to be configured, we require that there be a user account. Due to the
# setup workflow, the user will generally be created before constance configuration takes place, but if
# the user account gets deleted (for example, in the admin) we want the user to go through that portion
# of account setup.
num_users=User.objects.all().count()
if not config.USER_HAS_COMPLETED_CONFIGURATION or num_users <= 0:
# If things aren't configured, redirect to the guided setup workflow
return redirect('setup_splash')
else:
# Notify the user of things like git being out of date, issues with SSH, etc.
error_notifications(request)
# The default screen is the "lcd list" screen
return render(request, template_name="siteroot.html")
# return device_lcd_list(request=request)
@login_required
@site_is_configured # Checks if the user completed constance configuration
def add_device(request):
# TODO - Add user permissioning
# if not request.user.has_perm('app.add_device'):
# messages.error(request, 'Your account is not permissioned to add devices. Please contact an admin')
# return redirect("/")
if request.POST:
form = device_forms.DeviceForm(request.POST)
if form.is_valid():
# TODO - Add support for editing to this
new_device = BrewPiDevice(
device_name=form.cleaned_data['device_name'],
temp_format=form.cleaned_data['temp_format'],
data_point_log_interval=form.cleaned_data['data_point_log_interval'],
useInetSocket=form.cleaned_data['useInetSocket'],
socketPort=form.cleaned_data['socketPort'],
socketHost=form.cleaned_data['socketHost'],
serial_port=form.cleaned_data['serial_port'],
serial_alt_port=form.cleaned_data['serial_alt_port'],
board_type=form.cleaned_data['board_type'],
socket_name=form.cleaned_data['socket_name'],
connection_type=form.cleaned_data['connection_type'],
wifi_host=form.cleaned_data['wifi_host'],
wifi_port=form.cleaned_data['wifi_port'],
prefer_connecting_via_udev=form.cleaned_data['prefer_connecting_via_udev'],
)
new_device.save()
# Once the device is added, go ahead and autodetect the udev serial number (if we're connecting via serial)
if new_device.connection_type == BrewPiDevice.CONNECTION_SERIAL:
new_device.set_udev_from_port()
messages.success(request, u'Device {} Added.<br>Please wait a few seconds for controller to start'.format(new_device))
return redirect("/")
else:
return render(request, template_name='setup/device_add.html', context={'form': form})
else:
# We don't want two devices to have the same port, and the port number doesn't really matter. Just
# randomize it.
random_port = random.randint(2000,3000)
initial_values = {'socketPort': random_port, 'temp_format': config.TEMPERATURE_FORMAT}
form = device_forms.DeviceForm(initial=initial_values)
return render(request, template_name='setup/device_add.html', context={'form': form})
@site_is_configured
@login_if_required_for_dashboard
def device_lcd_list(request):
# This handles generating the list of LCD screens for each device.
# Loading the actual data for the LCD screens is handled by Vue.js which loads the data via calls to api/lcd.py
return render(request, template_name="device_lcd_list.html")
@login_required
@site_is_configured
def device_control_constants_legacy(request, device_id, control_constants):
# TODO - Add user permissioning
# if not request.user.has_perm('app.add_device'):
# messages.error(request, 'Your account is not permissioned to add devices. Please contact an admin')
# return redirect("/")
active_device = BrewPiDevice.objects.get(id=device_id)
if request.POST:
form = device_forms.OldCCModelForm(request.POST)
if form.is_valid():
# Generate the new_control_constants object from the form data
new_control_constants = form.save(commit=False)
# At this point, we have both the OLD control constants (control_constants) and the NEW control constants
# TODO - Modify the below to only send constants that have changed to the controller
if not new_control_constants.save_all_to_controller(active_device):
return render(request, template_name='device_control_constants_old.html',
context={'form': form, 'active_device': active_device})
# TODO - Make it so if we added a preset name we save the new preset
# new_device.save()
messages.success(request, u'Control constants updated for device {}'.format(active_device))
return redirect("/")
else:
return render(request, template_name='device_control_constants_old.html',
context={'form': form, 'active_device': active_device})
else:
form = device_forms.OldCCModelForm(instance=control_constants)
return render(request, template_name='device_control_constants_old.html',
context={'form': form, 'active_device': active_device})
@login_required
@site_is_configured
def device_control_constants_modern(request, device_id, control_constants):
# TODO - Add user permissioning
# if not request.user.has_perm('app.add_device'):
# messages.error(request, 'Your account is not permissioned to add devices. Please contact an admin')
# return redirect("/")
active_device = BrewPiDevice.objects.get(id=device_id)
if request.POST:
form = device_forms.NewCCModelForm(request.POST)
if form.is_valid():
# Generate the new_control_constants object from the form data
new_control_constants = form.save(commit=False)
# At this point, we have both the OLD control constants (control_constants) and the NEW control constants
# TODO - Modify the below to only send constants that have changed to the controller
if not new_control_constants.save_all_to_controller(active_device):
return render(request, template_name='device_control_constants_new.html',
context={'form': form, 'active_device': active_device})
# TODO - Make it so if we added a preset name we save the new preset
# new_device.save()
messages.success(request, u'Control constants updated for device {}'.format(active_device))
return redirect("/")
else:
return render(request, template_name='device_control_constants_new.html',
context={'form': form, 'active_device': active_device})
else:
        form = device_forms.NewCCModelForm(instance=control_constants)
return render(request, template_name='device_control_constants_new.html',
context={'form': form, 'active_device': active_device})
@login_required
@site_is_configured
def device_control_constants(request, device_id):
try:
active_device = BrewPiDevice.objects.get(id=device_id)
except ObjectDoesNotExist:
messages.error(request, "Unable to load device with ID {}".format(device_id))
return redirect('siteroot')
control_constants, is_legacy = active_device.retrieve_control_constants()
if control_constants is None:
# We weren't able to retrieve the version from the controller.
messages.error(request, u"Unable to reach brewpi-script for device {}".format(active_device))
return redirect('device_dashboard', device_id=device_id)
elif is_legacy:
return device_control_constants_legacy(request, device_id, control_constants)
else:
return device_control_constants_modern(request, device_id, control_constants)
@login_required
@site_is_configured
def sensor_list(request, device_id):
try:
active_device = BrewPiDevice.objects.get(id=device_id)
except ObjectDoesNotExist:
messages.error(request, "Unable to load device with ID {}".format(device_id))
return redirect('siteroot')
devices_loaded = active_device.load_sensors_from_device()
if devices_loaded:
for this_device in active_device.available_devices:
data = {'device_function': this_device.device_function, 'invert': this_device.invert,
'address': this_device.address, 'pin': this_device.pin}
this_device.device_form = device_forms.SensorFormRevised(data)
for this_device in active_device.installed_devices:
data = {'device_function': this_device.device_function, 'invert': this_device.invert,
'address': this_device.address, 'pin': this_device.pin, 'installed': True,
'perform_uninstall': True}
this_device.device_form = device_forms.SensorFormRevised(data)
else:
# If we weren't able to load devices, we should have set an error message instead. Display it.
# (we can't display it directly from load_sensors_from_device() because we aren't passing request)
messages.error(request, active_device.error_message)
return render(request, template_name="pin_list.html",
context={'available_devices': active_device.available_devices, 'active_device': active_device,
'installed_devices': active_device.installed_devices, 'devices_loaded': devices_loaded})
@login_required
@site_is_configured
def sensor_config(request, device_id):
try:
active_device = BrewPiDevice.objects.get(id=device_id)
except ObjectDoesNotExist:
messages.error(request, "Unable to load device with ID {}".format(device_id))
return redirect('siteroot')
active_device.load_sensors_from_device()
if request.POST:
form = device_forms.SensorFormRevised(request.POST)
if form.is_valid():
# OK. Here is where things get a bit tricky - We can't just rely on the form to generate the sensor object
# as all the form really does is specify what about the sensor to change. Let's locate the sensor we need
# to | |
# repo: ZihanWangKi/scratch
import collections
import os
import unicodedata
from transformers.tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip("\n")
vocab[token] = index
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BasicTokenizer(object):
"""
Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
Args:
do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to lowercase the input when tokenizing.
never_split (:obj:`Iterable`, `optional`):
Collection of tokens which will never be split during tokenization. Only has an effect when
:obj:`do_basic_tokenize=True`
tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to tokenize Chinese characters.
This should likely be deactivated for Japanese (see this `issue
<https://github.com/huggingface/transformers/issues/328>`__).
strip_accents: (:obj:`bool`, `optional`):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for :obj:`lowercase` (as in the original BERT).
"""
def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None):
if never_split is None:
never_split = []
self.do_lower_case = do_lower_case
self.never_split = set(never_split)
self.tokenize_chinese_chars = tokenize_chinese_chars
self.strip_accents = strip_accents
def tokenize(self, text, never_split=None):
"""
Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see
WordPieceTokenizer.
Args:
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes. Now implemented directly at the base class level (see
:func:`PreTrainedTokenizer.tokenize`) List of token not to split.
"""
# union() returns a new set by concatenating the two sets.
never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
if self.tokenize_chinese_chars:
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if token not in never_split:
if self.do_lower_case:
token = token.lower()
if self.strip_accents is not False:
token = self._run_strip_accents(token)
elif self.strip_accents:
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token, never_split))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text, never_split=None):
"""Splits punctuation on a piece of text."""
if never_split is not None and text in never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x20000 and cp <= 0x2A6DF) #
or (cp >= 0x2A700 and cp <= 0x2B73F) #
or (cp >= 0x2B740 and cp <= 0x2B81F) #
or (cp >= 0x2B820 and cp <= 0x2CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2F800 and cp <= 0x2FA1F) #
): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xFFFD or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
import random
import numpy as np
import torch
from tqdm import tqdm
class CharBertTokenizer:
def __init__(self, max_char_position_embeddings=20, max_word_position_embeddings=128):
# <pad> <bos> <eos> <bow> <eow> <unk> <mask>
self.pad_token_id = 0
self.bos_token_id = 1
self.eos_token_id = 2
self.bow_token_id = 3
self.eow_token_id = 4
self.unk_token_id = 5
self.mask_token_id = 6
self.id_to_char = {0: '<pad>', 1: '<bos>', 2: '<eos>', 3: '<bow>', 4: '<eow>', 5: '<unk>', 6: '<mask>'}
for ascii_i in range(33, 127):
ascii_c = chr(ascii_i)
self.id_to_char[ascii_i] = ascii_c
self.char_to_id = {
v: k for k, v in self.id_to_char.items()
}
self.basic_tokenizer = BasicTokenizer(do_lower_case=False)
self.max_chars_in_word = max_char_position_embeddings
self.max_words_in_sentence = max_word_position_embeddings
self.bos_word = [self.bow_token_id, self.bos_token_id, self.eow_token_id]
self.eos_word = [self.bow_token_id, self.eos_token_id, self.eow_token_id]
self.mask_word = [self.bow_token_id, self.mask_token_id, self.eow_token_id]
self.pad_word = [self.pad_token_id] * self.max_chars_in_word
def decode_id(self, id):
assert isinstance(id, int)
return self.id_to_char.get(id, '¿')
def decode_word_ids(self, word_ids, char_att_mask=None):
n_chars = len(word_ids)
if char_att_mask is None:
char_att_mask = [1 for _ in range(n_chars)]
return "".join([self.decode_id(id) for id, char_mask in zip(word_ids, char_att_mask) if char_mask == 1])
def decode_sentence_ids(self, sentence_ids, char_attention_mask=None, word_attention_mask=None):
n_words = len(sentence_ids)
n_chars = len(sentence_ids[0])
if char_attention_mask is None:
char_attention_mask = [[1 for _ in range(n_chars)] for _ in range(n_words)]
if word_attention_mask is None:
word_attention_mask = [1 for _ in range(n_words)]
return " ".join([self.decode_word_ids(word_ids, char_att_mask) for
word_ids, word_att_mask, char_att_mask in
zip(sentence_ids, word_attention_mask, char_attention_mask) if word_att_mask == 1])
def tokenize_word(self, word):
if len(word) > self.max_chars_in_word - 2:
word = word[: self.max_chars_in_word - 2]
return [self.bow_token_id] + [self.char_to_id.get(c, self.unk_token_id) for c in word] + [self.eow_token_id]
def tokenize_sentence(self, sentence):
words = self.basic_tokenizer.tokenize(sentence)
if len(words) > self.max_words_in_sentence - 2:
words = words[: self.max_words_in_sentence - 2]
return [self.bos_word] + [self.tokenize_word(word) for word in words] + [self.eos_word]
def tokenize(self, sentences):
input_ids = []
for sentence in sentences:
input_ids.append(self.tokenize_sentence(sentence))
return {
"input_ids": input_ids,
}
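    # Illustrative example of the nested structure produced above, using the special-token
    # ids defined in __init__ (<bos>=1, <eos>=2, <bow>=3, <eow>=4) and ASCII char ids:
    #   tokenize(["Hi"]) -> {"input_ids": [[[3, 1, 4], [3, 72, 105, 4], [3, 2, 4]]]}
    #   i.e. [bos_word, the word "Hi" as char ids, eos_word] for the single sentence.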
def tokenize_sentence_pair(self, sentence_1, sentence_2=None):
assert isinstance(sentence_1, str), sentence_1
        assert sentence_2 is None or isinstance(sentence_2, str), sentence_2
if sentence_2 is None:
return self.tokenize_sentence(sentence_1)
words_1 = self.basic_tokenizer.tokenize(sentence_1)
words_2 = self.basic_tokenizer.tokenize(sentence_2)
if len(words_1) > self.max_words_in_sentence // 2 - 2:
words_1 = words_1[: self.max_words_in_sentence // 2 - 2]
if len(words_2) > self.max_words_in_sentence // 2 - 2:
words_2 = words_2[: self.max_words_in_sentence // 2 - 2]
return [self.bos_word] + [self.tokenize_word(word) for word in words_1] + [self.eos_word] + \
[self.bos_word] + [self.tokenize_word(word) for word in words_2] + [self.eos_word]
def tokenize_for_classification(self, sentences_1, sentences_2=None):
input_ids = []
for i in range(len(sentences_1)):
input_ids.append(self.tokenize_sentence_pair(sentences_1[i], sentences_2[i] if sentences_2 is not None else None))
return {
"input_ids": input_ids,
}
def padding(self, input_ids):
padded_input_ids = []
char_attention_mask = []
word_attention_mask = []
for word_ids in input_ids:
length = len(word_ids)
char_attention_mask.append([1] * length + [0] * (self.max_chars_in_word - length))
padded_input_ids.append(word_ids + [self.pad_token_id] * (self.max_chars_in_word - length))
word_attention_mask.append(1)
for _ in range(self.max_words_in_sentence - len(input_ids)):
padded_input_ids.append(self.pad_word)
char_attention_mask.append([0] * self.max_chars_in_word)
word_attention_mask.append(0)
return {
"padded_input_ids": padded_input_ids,
"char_attention_mask": char_attention_mask,
"word_attention_mask": word_attention_mask
}
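    # With the default sizes (max_chars_in_word=20, max_words_in_sentence=128) the outputs
    # above have shapes: padded_input_ids and char_attention_mask -> 128 x 20,
    # word_attention_mask -> 128.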
def perform_padding_for_classification(self, batched_input):
input_ids = []
char_attention_mask = []
word_attention_mask = []
decoder_input_ids = []
decoder_attention_mask = []
labels = []
n_batches = len(batched_input)
for batch_no in range(n_batches):
batch_input_ids = batched_input[batch_no]["input_ids"]
shifted_batch_input_ids = [
[word_ids[-1]] + word_ids[: -1] for word_ids in batch_input_ids
]
padded = self.padding(shifted_batch_input_ids)
decoder_input_ids.append(padded["padded_input_ids"])
decoder_attention_mask.append(padded["char_attention_mask"])
padded = self.padding(batch_input_ids)
input_ids.append(padded["padded_input_ids"])
char_attention_mask.append(padded["char_attention_mask"])
word_attention_mask.append(padded["word_attention_mask"])
labels.append(batched_input[batch_no]["label"])
input_ids = torch.tensor(input_ids)
char_attention_mask = torch.tensor(char_attention_mask)
word_attention_mask = torch.tensor(word_attention_mask)
decoder_input_ids = torch.tensor(decoder_input_ids)
decoder_attention_mask = torch.tensor(decoder_attention_mask)
labels = torch.tensor(labels)
return {
"input_ids": input_ids,
"char_attention_mask": char_attention_mask,
"word_attention_mask": word_attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"labels": labels,
}
def perform_masking(self, batched_input, apply_noise_prob=0.2, mask_word_cutoff=0.3, shuffle_cutoff=0.6, swap_cutoff=0.9):
input_ids = []
char_attention_mask = []
word_attention_mask = []
decoder_input_ids = []
decoder_attention_mask = []
labels = []
n_batches = len(batched_input)
for batch_no in range(n_batches):
batch_input_ids = batched_input[batch_no]["input_ids"]
shifted_batch_input_ids = [
                [word_ids[-1]] + word_ids[: -1] for word_ids in batch_input_ids
            ]
    # cannot really do much to recover, because cluster is not in usable state anyway
metadata = flink.get("metadata")
labels = metadata.get("labels")
config_sha = labels.get(paasta_prefixed("config_sha"))
if config_sha is None:
raise ValueError(f"expected config sha on Flink, but received {metadata}")
if config_sha.startswith("config"):
config_sha = config_sha[6:]
output.append(f" Config SHA: {config_sha}")
status_config = status["config"]
if verbose:
output.append(
f" Flink version: {status_config['flink-version']} {status_config['flink-revision']}"
)
else:
output.append(f" Flink version: {status_config['flink-version']}")
# Annotation "flink.yelp.com/dashboard_url" is populated by flink-operator
dashboard_url = metadata["annotations"].get("flink.yelp.com/dashboard_url")
output.append(f" URL: {dashboard_url}/")
color = PaastaColors.green if status["state"] == "running" else PaastaColors.yellow
output.append(f" State: {color(status['state'].title())}")
pod_running_count = pod_evicted_count = pod_other_count = 0
# default for evicted in case where pod status is not available
evicted = f"{pod_evicted_count}"
for pod in status["pod_status"]:
if pod["phase"] == "Running":
pod_running_count += 1
elif pod["phase"] == "Failed" and pod["reason"] == "Evicted":
pod_evicted_count += 1
else:
pod_other_count += 1
evicted = (
PaastaColors.red(f"{pod_evicted_count}")
if pod_evicted_count > 0
else f"{pod_evicted_count}"
)
output.append(
" Pods:"
f" {pod_running_count} running,"
f" {evicted} evicted,"
f" {pod_other_count} other"
)
if not should_job_info_be_shown(status["state"]):
        # If the cluster's jobmanager is in crashloopbackoff, the pods for the cluster
        # will still be available and we need to show them, so that `paasta status -v`
        # and `kubectl get pods` show the same, consistent result.
if verbose and len(status["pod_status"]) > 0:
append_pod_status(status["pod_status"], output)
output.append(f" No other information available in non-running state")
return 0
output.append(
" Jobs:"
f" {status['overview']['jobs-running']} running,"
f" {status['overview']['jobs-finished']} finished,"
f" {status['overview']['jobs-failed']} failed,"
f" {status['overview']['jobs-cancelled']} cancelled"
)
output.append(
" "
f" {status['overview']['taskmanagers']} taskmanagers,"
f" {status['overview']['slots-available']}/{status['overview']['slots-total']} slots available"
)
    # Avoid cutting the job name: instead of the default hardcoded value of 32, use the maximum job-name length
if status["jobs"]:
max_job_name_length = max(
[len(get_flink_job_name(job)) for job in status["jobs"]]
)
else:
max_job_name_length = 10
    # Apart from this column, the rest of a row takes around 52 columns, so the remaining
    # terminal columns are used for the job name.
    # Note: for terminals narrower than 90 columns the row will overflow in verbose printing.
allowed_max_job_name_length = min(
max(10, shutil.get_terminal_size().columns - 52), max_job_name_length
)
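    # e.g. on a 120-column terminal with a longest job name of 40 characters:
    # min(max(10, 120 - 52), 40) = 40, so the name column is not truncated.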
output.append(f" Jobs:")
if verbose > 1:
output.append(
f' {"Job Name": <{allowed_max_job_name_length}} State Job ID Started'
)
else:
output.append(
f' {"Job Name": <{allowed_max_job_name_length}} State Started'
)
# Use only the most recent jobs
unique_jobs = (
sorted(jobs, key=lambda j: -j["start-time"])[0]
for _, jobs in groupby(
sorted(
(j for j in status["jobs"] if j.get("name") and j.get("start-time")),
key=lambda j: j["name"],
),
lambda j: j["name"],
)
)
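    # Conceptually, given jobs {"name": "a", "start-time": 2}, {"name": "a", "start-time": 5}
    # and {"name": "b", "start-time": 1}, the generator above yields only the "a" job with
    # start-time 5 (the most recent per name) followed by the "b" job.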
allowed_max_jobs_printed = 3
job_printed_count = 0
for job in unique_jobs:
job_id = job["jid"]
if verbose > 1:
fmt = """ {job_name: <{allowed_max_job_name_length}.{allowed_max_job_name_length}} {state: <11} {job_id} {start_time}
{dashboard_url}"""
else:
fmt = " {job_name: <{allowed_max_job_name_length}.{allowed_max_job_name_length}} {state: <11} {start_time}"
start_time = datetime.fromtimestamp(int(job["start-time"]) // 1000)
if verbose or job_printed_count < allowed_max_jobs_printed:
job_printed_count += 1
color_fn = (
PaastaColors.green
if job.get("state") and job.get("state") == "RUNNING"
else PaastaColors.red
if job.get("state") and job.get("state") in ("FAILED", "FAILING")
else PaastaColors.yellow
)
job_info_str = fmt.format(
job_id=job_id,
job_name=get_flink_job_name(job),
allowed_max_job_name_length=allowed_max_job_name_length,
                state=color_fn((job.get("state") or "unknown").title()),
start_time=f"{str(start_time)} ({humanize.naturaltime(start_time)})",
dashboard_url=PaastaColors.grey(f"{dashboard_url}/#/jobs/{job_id}"),
)
output.append(job_info_str)
else:
output.append(
PaastaColors.yellow(
f" Only showing {allowed_max_jobs_printed} Flink jobs, use -v to show all"
)
)
break
if verbose > 1 and job_id in status["exceptions"]:
exceptions = status["exceptions"][job_id]
root_exception = exceptions["root-exception"]
if root_exception is not None:
output.append(f" Exception: {root_exception}")
ts = exceptions["timestamp"]
if ts is not None:
exc_ts = datetime.fromtimestamp(int(ts) // 1000)
output.append(
f" {str(exc_ts)} ({humanize.naturaltime(exc_ts)})"
)
if verbose and len(status["pod_status"]) > 0:
append_pod_status(status["pod_status"], output)
if verbose == 1 and status["exceptions"]:
output.append(PaastaColors.yellow(f" Use -vv to view exceptions"))
return 0
def print_kubernetes_status_v2(
cluster: str,
service: str,
instance: str,
output: List[str],
status: InstanceStatusKubernetesV2,
verbose: int = 0,
) -> int:
instance_state = get_instance_state(status)
output.append(f" State: {instance_state}")
output.append(" Running versions:")
output.append(" " + PaastaColors.green("Rerun with -v to see all replicas"))
output.extend([f" {line}" for line in get_versions_table(status.replicasets)])
if status.error_message:
output.append(" " + PaastaColors.red(status.error_message))
return 1
else:
return 0
# TODO: Make an enum class or similar for the various instance states
def get_instance_state(status: InstanceStatusKubernetesV2) -> str:
num_replicasets = len(status.replicasets)
num_ready_replicas = sum(r.ready_replicas for r in status.replicasets)
if status.desired_state == "stop":
if num_replicasets == 1 and status.replicasets[0].replicas == 0:
return PaastaColors.red("Stopped")
else:
return PaastaColors.red("Stopping")
elif status.desired_state == "start":
if num_replicasets == 0:
return PaastaColors.yellow("Starting")
if num_replicasets == 1:
if num_ready_replicas < status.desired_instances:
return PaastaColors.yellow("Launching replicas")
else:
return PaastaColors.green("Running")
else:
replicasets = sorted(status.replicasets, key=lambda x: x.create_timestamp)
git_shas = {r.git_sha for r in replicasets}
config_shas = {r.config_sha for r in replicasets}
bouncing_to = []
if len(git_shas) > 1:
bouncing_to.append(replicasets[0].git_sha[:8])
if len(config_shas) > 1:
bouncing_to.append(replicasets[0].config_sha)
bouncing_to_str = ", ".join(bouncing_to)
return PaastaColors.yellow(f"Bouncing to {bouncing_to_str}")
else:
return PaastaColors.red("Unknown")
def get_versions_table(replicasets: List[KubernetesReplicaSetV2]) -> List[str]:
if len(replicasets) == 0:
return [PaastaColors.red("There are no running versions for this instance")]
elif len(replicasets) == 1:
return get_version_table_entry(replicasets[0])
else:
replicasets = sorted(replicasets, key=lambda x: x.create_timestamp)
config_shas = {r.config_sha for r in replicasets}
if len(config_shas) > 1:
show_config_sha = True
else:
show_config_sha = False
table: List[str] = []
table.extend(
get_version_table_entry(
replicasets[0],
version_name_suffix="new",
show_config_sha=show_config_sha,
)
)
for replicaset in replicasets[1:]:
table.extend(
get_version_table_entry(
replicaset,
version_name_suffix="old",
show_config_sha=show_config_sha,
)
)
return table
def get_version_table_entry(
replicaset: KubernetesReplicaSetV2,
version_name_suffix: str = None,
show_config_sha: bool = False,
) -> List[str]:
version_name = replicaset.git_sha[:8]
if show_config_sha:
version_name += f", {replicaset.config_sha}"
if version_name_suffix is not None:
version_name += f" ({version_name_suffix})"
version_name = PaastaColors.blue(version_name)
start_datetime = datetime.fromtimestamp(replicaset.create_timestamp)
humanized_start_time = humanize.naturaltime(start_datetime)
entry = [f"{version_name} - Started {start_datetime} ({humanized_start_time})"]
replica_states = get_replica_states(replicaset.pods)
entry.append(f" Replica States: {replica_states}")
return entry
# TODO(PAASTA-17287): Implement replica states
def get_replica_states(pods: List[KubernetesPodV2]) -> str:
return PaastaColors.green(f"{len(pods)} Running")
def print_kubernetes_status(
cluster: str,
service: str,
instance: str,
output: List[str],
kubernetes_status,
verbose: int = 0,
) -> int:
bouncing_status = bouncing_status_human(
kubernetes_status.app_count, kubernetes_status.bounce_method
)
desired_state = desired_state_human(
kubernetes_status.desired_state, kubernetes_status.expected_instance_count
)
output.append(f" State: {bouncing_status} - Desired state: {desired_state}")
status = KubernetesDeployStatus.fromstring(kubernetes_status.deploy_status)
deploy_status = kubernetes_app_deploy_status_human(
status, kubernetes_status.deploy_status_message
)
output.append(
" {}".format(
status_kubernetes_job_human(
service=service,
instance=instance,
deploy_status=deploy_status,
desired_app_id=kubernetes_status.app_id,
app_count=kubernetes_status.app_count,
running_instances=kubernetes_status.running_instance_count,
normal_instance_count=kubernetes_status.expected_instance_count,
evicted_count=kubernetes_status.evicted_count,
)
)
)
if kubernetes_status.create_timestamp and verbose > 0:
create_datetime = datetime.fromtimestamp(kubernetes_status.create_timestamp)
output.append(
" App created: {} ({}). Namespace: {}".format(
create_datetime,
humanize.naturaltime(create_datetime),
kubernetes_status.namespace,
)
)
if kubernetes_status.pods and len(kubernetes_status.pods) > 0:
output.append(" Pods:")
pods_table = format_kubernetes_pod_table(kubernetes_status.pods, verbose)
output.extend([f" {line}" for line in pods_table])
if kubernetes_status.replicasets and len(kubernetes_status.replicasets) > 0:
output.append(" ReplicaSets:")
replicasets_table = format_kubernetes_replicaset_table(
kubernetes_status.replicasets
)
output.extend([f" {line}" for line in replicasets_table])
autoscaling_status = kubernetes_status.autoscaling_status
if autoscaling_status and verbose > 0:
output.append(" Autoscaling status:")
output.append(f" min_instances: {autoscaling_status['min_instances']}")
output.append(f" max_instances: {autoscaling_status['max_instances']}")
output.append(
f" Desired instances: {autoscaling_status['desired_replicas']}"
)
output.append(
f" Last scale time: {autoscaling_status['last_scale_time']}"
)
output.append(f" Dashboard: y/sfx-autoscaling")
NA = PaastaColors.red("N/A")
if len(autoscaling_status["metrics"]) > 0:
output.append(f" Metrics:")
metrics_table: List[List[str]] = [["Metric", "Current", "Target"]]
for metric in autoscaling_status["metrics"]:
                current_metric = (
                    NA
                    if metric.get("current_value") is None
                    else metric["current_value"]
                )
                target_metric = (
                    NA
                    if metric.get("target_value") is None
                    else metric["target_value"]
                )
                metrics_table.append([metric["name"], current_metric, target_metric])
output.extend([" " + s for s in format_table(metrics_table)])
if kubernetes_status.smartstack is not None:
smartstack_status_human = get_smartstack_status_human(
kubernetes_status.smartstack.registration,
kubernetes_status.smartstack.expected_backends_per_location,
kubernetes_status.smartstack.locations,
)
output.extend([f" {line}" for line in smartstack_status_human])
if kubernetes_status.envoy is not None:
envoy_status_human = get_envoy_status_human(
kubernetes_status.envoy.registration,
kubernetes_status.envoy.expected_backends_per_location,
kubernetes_status.envoy.locations,
)
output.extend([f" {line}" for line in envoy_status_human])
error_message = kubernetes_status.error_message
if error_message:
output.append(" " + PaastaColors.red(error_message))
return 1
return 0
def print_tron_status(
cluster: str,
service: str,
instance: str,
output: List[str],
tron_status,
verbose: int = 0,
) -> int:
output.append(f" Tron job: {tron_status.job_name}")
if verbose:
output.append(f" Status: {tron_status.job_status}")
output.append(f" Schedule: {tron_status.job_schedule}")
output.append(" Dashboard: {}".format(PaastaColors.blue(tron_status.job_url)))
output.append(f" Action: {tron_status.action_name}")
output.append(f" Status: {tron_status.action_state}")
if verbose:
output.append(f" Start time: {tron_status.action_start_time}")
output.append(f" Command: {tron_status.action_command}")
if verbose > 1:
output.append(f" Raw Command: {tron_status.action_raw_command}")
output.append(f" Stdout: \n{tron_status.action_stdout}")
output.append(f" Stderr: \n{tron_status.action_stderr}")
return 0
def print_kafka_status(
cluster: str,
service: str,
instance: str,
output: List[str],
kafka_status: Mapping[str, Any],
verbose: int = 0,
) -> int:
status = kafka_status.get("status")
if status is None:
output.append(PaastaColors.red(" Kafka cluster is not available yet"))
return 1
# print kafka view url before operator status because if the kafka cluster is not available for some reason
    # at least the user can still get hold of the kafka view url
if status.get("kafka_view_url") is not None:
output.append(f" Kafka View Url: {status.get('kafka_view_url')}")
output.append(f" Zookeeper: {status['zookeeper']}")
annotations = kafka_status.get("metadata").get("annotations")
desired_state = annotations.get(paasta_prefixed("desired_state"))
if desired_state is None:
raise ValueError(
f"expected desired state in kafka annotation, but received none"
| |
# environment.py
from tkinter import *
from tkinter import ttk
import time
import numpy as np
import copy
import random
from mujoco_py import load_model_from_path, MjSim, MjViewer
from gym.envs.robotics import rotations, utils
from gym.utils import seeding
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def goal_distance(goal_a, goal_b):
assert goal_a.shape == goal_b.shape
return np.linalg.norm(goal_a - goal_b, axis=-1)
def bound_angle(angle):
bounded_angle = np.absolute(angle) % (2*np.pi)
if angle < 0:
bounded_angle = -bounded_angle
return bounded_angle
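# e.g. bound_angle(7.0) ≈ 0.717 and bound_angle(-7.0) ≈ -0.717:
# the magnitude is wrapped into [0, 2*pi) and the original sign is kept.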
class Environment():
def __init__(self, model_name, goal_space_train, goal_space_test, project_state_to_end_goal, end_goal_thresholds, initial_state_space, subgoal_bounds, project_state_to_subgoal, subgoal_thresholds, max_actions = 1200, num_frames_skip = 10, show = False):
self.name = model_name
# Create Mujoco Simulation
self.model = load_model_from_path("./mujoco_files/" + model_name)
self.sim = MjSim(self.model)
# Set dimensions and ranges of states, actions, and goals in order to configure actor/critic networks
if model_name == "pendulum.xml":
self.state_dim = 2*len(self.sim.data.qpos) + len(self.sim.data.qvel)
else:
self.state_dim = len(self.sim.data.qpos) + len(self.sim.data.qvel) # State will include (i) joint angles and (ii) joint velocities
self.action_dim = len(self.sim.model.actuator_ctrlrange) # low-level action dim
self.action_bounds = self.sim.model.actuator_ctrlrange[:,1] # low-level action bounds
self.action_offset = np.zeros((len(self.action_bounds))) # Assumes symmetric low-level action ranges
self.end_goal_dim = len(goal_space_test)
self.subgoal_dim = len(subgoal_bounds)
self.subgoal_bounds = subgoal_bounds
# Projection functions
self.project_state_to_end_goal = project_state_to_end_goal
self.project_state_to_subgoal = project_state_to_subgoal
# Convert subgoal bounds to symmetric bounds and offset. Need these to properly configure subgoal actor networks
self.subgoal_bounds_symmetric = np.zeros((len(self.subgoal_bounds)))
self.subgoal_bounds_offset = np.zeros((len(self.subgoal_bounds)))
for i in range(len(self.subgoal_bounds)):
self.subgoal_bounds_symmetric[i] = (self.subgoal_bounds[i][1] - self.subgoal_bounds[i][0])/2
self.subgoal_bounds_offset[i] = self.subgoal_bounds[i][1] - self.subgoal_bounds_symmetric[i]
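        # e.g. a bound of [-4, 4] gives symmetric half-range 4 and offset 0,
        # while [0, 1.5] gives half-range 0.75 and offset 0.75.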
# End goal/subgoal thresholds
self.end_goal_thresholds = end_goal_thresholds
self.subgoal_thresholds = subgoal_thresholds
# Set inital state and goal state spaces
self.initial_state_space = initial_state_space
self.goal_space_train = goal_space_train
self.goal_space_test = goal_space_test
self.subgoal_colors = ["Magenta", "Green", "Red", "Blue", "Cyan", "Orange", "Maroon", "Gray", "White", "Black"]
self.max_actions = max_actions
# Implement visualization if necessary
self.visualize = show # Visualization boolean
if self.visualize:
self.viewer = MjViewer(self.sim)
self.num_frames_skip = num_frames_skip
# The following variables are hardcoded for the tower building env:
self.n_objects = 1 # try out 2-3 later
self.min_tower_height = 1 # try out 2 later
self.max_tower_height = 4 # try out 2-3 later
self.initial_state = copy.deepcopy(self.sim.get_state())
self.block_gripper = False
self.n_substeps = 20
self.gripper_extra_height = 0.0
self.target_in_the_air = True # maybe make dynamic?
self.target_offset = 0.0
self.obj_range = 0.15
self.target_range = 0.15
self.distance_threshold = 0.05
self.initial_qpos = {
'robot0:slide0': 0.0,
'robot0:slide1': 0.0,
'robot0:slide2': 0.0,
'object0:joint': [0.1, 0.0, 0.05, 1., 0., 0., 0.],
'object1:joint': [0.2, 0.0, 0.05, 1., 0., 0., 0.],
'object2:joint': [0.3, 0.0, 0.05, 1., 0., 0., 0.],
'object3:joint': [0.4, 0.0, 0.05, 1., 0., 0., 0.],
'object4:joint': [0.5, 0.0, 0.05, 1., 0., 0., 0.],
}
self.reward_type = 'sparse'
self.gripper_goal = 'gripper_none' # can be 'gripper_none', 'gripper_random' and 'gripper_above'
self.goal_size = (self.n_objects * 3)
if self.gripper_goal != 'gripper_none':
self.goal_size += 3
gripperSkipIdx = 0 if self.gripper_goal == "gripper_none" else 3
objectIdx = gripperSkipIdx + self.n_objects * 3
self.table_height = 0.5
self.obj_height = 0.05
if self.name == "assets/fetch/build_tower.xml":
self._env_setup(self.initial_qpos)
self.state_dim = self._get_obs_len()
self.action_dim = 4
#self.action_bounds = np.array([[-1, 1], [-1, 1], [-1, 1], [-1, 1]])#self.sim.model.actuator_ctrlrange[:, 1]
self.action_bounds = np.ones((self.action_dim))
self.action_offset = np.zeros((self.action_dim)) # Assumes symmetric low-level action ranges
self.end_goal_dim = self.goal_size
self.project_state_to_end_goal = self._obs2goal_tower
self.end_goal_thresholds = [0.05 for i in range(self.goal_size)]
self.use_full_state_space_as_subgoal_space = True
if not self.use_full_state_space_as_subgoal_space:
self.subgoal_dim = self.goal_size
self.project_state_to_subgoal = self._obs2goal_tower
self.subgoal_bounds_offset = np.concatenate([[1, 0.25, 0.55] for i in range(self.subgoal_dim // 3)])
self.subgoal_bounds_symmetric = np.ones((self.subgoal_dim))
self.subgoal_bounds = [([self.table_height, 1.5] if (i+1) % 3 == 0 else [0, 1.5]) for i in
range(self.subgoal_dim)]
else:
#self.subgoal_dim = self.state_dim
self.subgoal_dim = 3 + self.n_objects * 3
self.project_state_to_subgoal = self._obs2goal_subgoal
self.subgoal_bounds = [[0, 1.5] for _ in range(self.subgoal_dim)]
#self.subgoal_bounds_offset = np.zeros((self.subgoal_dim))
#objects_offset = np.concatenate([[1, 0.25, 0.55] for i in range(self.n_objects)])
#self.subgoal_bounds_offset = np.zeros(self.subgoal_dim)
#self.subgoal_bounds_offset[3:3+3*self.n_objects] = objects_offset
#self.subgoal_bounds_offset = np.concatenate([[1, 0.25, 0.55] if gripperSkipIdx <= i <= objectIdx
# else [0, 0, 0] for i in range(0, self.subgoal_dim, 3)])
#self.subgoal_bounds_symmetric = np.ones((self.subgoal_dim)) * 2
#self.subgoal_bounds = [([self.table_height, 1.5] if (i+1) % 3 == 0 else [0, 1.5]) if
# 3 <= i < 3+3*self.n_objects else [-7, 7] for i in range(self.subgoal_dim)]
# in ur5 subgoal bound: np.array([[-2 * np.pi, 2 * np.pi], [-2 * np.pi, 2 * np.pi], [-2 * np.pi, 2 * np.pi], [-4, 4], [-4, 4], [-4, 4]])
# Try the following, borrowed from original code
self.subgoal_bounds_symmetric = np.zeros((len(self.subgoal_bounds)))
self.subgoal_bounds_offset = np.zeros((len(self.subgoal_bounds)))
for i in range(len(self.subgoal_bounds)):
self.subgoal_bounds_symmetric[i] = (self.subgoal_bounds[i][1] - self.subgoal_bounds[i][0]) / 2
self.subgoal_bounds_offset[i] = self.subgoal_bounds[i][1] - self.subgoal_bounds_symmetric[i]
print("Subgoal offset:", self.subgoal_bounds_offset)
print("Subgoal bounds:", self.subgoal_bounds)
#pos_threshold = 0.05
#angle_threshold = np.deg2rad(10)
#velo_threshold = 2
#objects_threshold = np.concatenate([[pos_threshold for _ in range(3)] for _ in range(self.n_objects * 2)])
#objects_rotation = np.concatenate([[angle_threshold for _ in range(3)] for _ in range(self.n_objects)])
#objects_velo_pos = np.concatenate([[velo_threshold for _ in range(3)] for _ in range(self.n_objects)])
#self.subgoal_thresholds = np.concatenate((np.array([pos_threshold for i in range(3)]), objects_threshold,
# np.array([pos_threshold for i in range(2)]), objects_rotation,
# objects_velo_pos, objects_rotation * 4,
# np.array([velo_threshold for i in range(3)]),
# np.array([velo_threshold for i in range(2)])))
self.subgoal_thresholds = [0.05 for i in range(self.subgoal_dim)]
print("Subgoal thresholds: ", self.subgoal_thresholds)
print("Shape thresholds:", np.shape(self.subgoal_thresholds))
if __debug__:
print("Action bounds: ", self.action_bounds)
def _is_success(self, achieved_goal, desired_goal):
d = goal_distance(achieved_goal, desired_goal)
return (d < self.distance_threshold).astype(np.float32)
def get_agent_params(self, agent_params):
agent_params["subgoal_noise"] = [0.03 for i in range(self.subgoal_dim)]
return agent_params
def _obs2goal_tower(self, sim, obs):
if self.gripper_goal != 'gripper_none':
goal = obs[:self.goal_size]
else:
goal = obs[3:self.goal_size + 3]
return goal
def _obs2goal_subgoal(self, sim, obs):
#return np.concatenate((np.array([bound_angle(sim.data.qpos[i]) for i in range(len(sim.data.qpos))]),
# np.array([4 if sim.data.qvel[i] > 4 else -4 if sim.data.qvel[i] < -4 else sim.data.qvel[i] for i in
# range(len(sim.data.qvel))])))
#return obs
return obs[:3+self.n_objects*3]
# Get state for tower building env:
def _get_obs(self):
dt = self.sim.nsubsteps * self.sim.model.opt.timestep
# positions
grip_pos = self.sim.data.get_site_xpos('robot0:grip')
grip_velp = self.sim.data.get_site_xvelp('robot0:grip') * dt
robot_qpos, robot_qvel = self.sim.data.qpos, self.sim.data.qvel
object_pos, object_rot, object_velp, object_velr = ([] for _ in range(4))
object_rel_pos = []
if self.n_objects > 0:
for n_o in range(self.n_objects):
oname = 'object{}'.format(n_o)
this_object_pos = self.sim.data.get_site_xpos(oname)
# rotations
this_object_rot = rotations.mat2euler(self.sim.data.get_site_xmat(oname))
# velocities
this_object_velp = self.sim.data.get_site_xvelp(oname) * dt
this_object_velr = self.sim.data.get_site_xvelr(oname) * dt
# gripper state
this_object_rel_pos = this_object_pos - grip_pos
this_object_velp -= grip_velp
object_pos = np.concatenate([object_pos, this_object_pos])
object_rot = np.concatenate([object_rot, this_object_rot])
object_velp = np.concatenate([object_velp, this_object_velp])
object_velr = np.concatenate([object_velr, this_object_velr])
object_rel_pos = np.concatenate([object_rel_pos, this_object_rel_pos])
else:
object_pos = object_rot = object_velp = object_velr = object_rel_pos = np.array(np.zeros(3))
gripper_state = robot_qpos[-2:]
gripper_vel = robot_qvel[-2:] * dt # change to a scalar if the gripper is made symmetric
obs = np.concatenate([
grip_pos, object_pos.ravel(), object_rel_pos.ravel(), gripper_state, object_rot.ravel(),
object_velp.ravel(), object_velr.ravel(), grip_velp, gripper_vel,
])
return obs
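    # Layout of the observation above: grip_pos (3), object_pos (3*n), object_rel_pos (3*n),
    # gripper_state (2), object_rot (3*n), object_velp (3*n), object_velr (3*n),
    # grip_velp (3), gripper_vel (2) -> 10 + 15*n_objects entries (25 for n_objects=1).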
# Get state len:
def _get_obs_len(self):
dt = self.sim.nsubsteps * self.sim.model.opt.timestep
# positions
grip_pos = self.sim.data.get_site_xpos('robot0:grip')
grip_velp = self.sim.data.get_site_xvelp('robot0:grip') * dt
robot_qpos, robot_qvel = self.sim.data.qpos, self.sim.data.qvel
object_pos, object_rot, object_velp, object_velr = ([] for _ in range(4))
object_rel_pos = []
if self.n_objects > 0:
for n_o in range(self.n_objects):
oname = 'object{}'.format(n_o)
this_object_pos = self.sim.data.get_site_xpos(oname)
# rotations
this_object_rot = rotations.mat2euler(self.sim.data.get_site_xmat(oname))
# velocities
this_object_velp = self.sim.data.get_site_xvelp(oname) * dt
this_object_velr = self.sim.data.get_site_xvelr(oname) * dt
# gripper state
this_object_rel_pos = this_object_pos - grip_pos
this_object_velp -= grip_velp
object_pos = np.concatenate([object_pos, this_object_pos])
object_rot = np.concatenate([object_rot, this_object_rot])
object_velp = np.concatenate([object_velp, this_object_velp])
object_velr = np.concatenate([object_velr, this_object_velr])
object_rel_pos = np.concatenate([object_rel_pos, this_object_rel_pos])
else:
object_pos = object_rot = object_velp = object_velr = object_rel_pos = np.array(np.zeros(3))
gripper_state = robot_qpos[-2:]
gripper_vel = robot_qvel[-2:] * dt # change to a scalar if the gripper is made symmetric
obs = np.concatenate([
grip_pos, object_pos.ravel(), object_rel_pos.ravel(), gripper_state, object_rot.ravel(),
object_velp.ravel(), object_velr.ravel(), grip_velp, gripper_vel,
])
print("obs len: ", len(obs))
return len(obs)
# Get state, which concatenates joint positions and velocities
def get_state(self):
if self.name == "pendulum.xml":
return np.concatenate([np.cos(self.sim.data.qpos),np.sin(self.sim.data.qpos),
self.sim.data.qvel])
elif self.name == "assets/fetch/build_tower.xml":
return self._get_obs()
else:
return np.concatenate((self.sim.data.qpos, self.sim.data.qvel))
def _env_setup(self, initial_qpos):
for name, value in initial_qpos.items():
self.sim.data.set_joint_qpos(name, value)
utils.reset_mocap_welds(self.sim)
self.sim.forward()
# Move end effector into position.
gripper_target = np.array([-0.498, 0.005, -0.431 + self.gripper_extra_height]) + self.sim.data.get_site_xpos(
'robot0:grip')
gripper_rotation = np.array([1., 0., 1., 0.])
self.sim.data.set_mocap_pos('robot0:mocap', gripper_target)
self.sim.data.set_mocap_quat('robot0:mocap', gripper_rotation)
for _ in range(10):
self.sim.step()
# Extract information for sampling goals.
self.initial_gripper_xpos = self.sim.data.get_site_xpos('robot0:grip').copy()
print("inital gripper xpos", self.initial_gripper_xpos)
if self.n_objects > 0:
self.height_offset = self.sim.data.get_site_xpos('object0')[2]
def _reset_sim_tower(self):
self.step_ctr = 0
self.sim.set_state(self.initial_state)
# Randomize start position of objects.
for o in range(self.n_objects):
oname = 'object{}'.format(o)
object_xpos = self.initial_gripper_xpos[:2]
close = True
while close:
# | |
# repo: turicas/nltk
# Natural Language Toolkit: Concordance Application
#
# Copyright (C) 2001-2013 NLTK Project
# Author: <NAME> <<EMAIL>>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function
import re
import threading
import tkFont
from Tkinter import (Tk, Button, END, Entry, Frame, IntVar, LEFT,
Label, Menu, OptionMenu, SUNKEN, Scrollbar,
StringVar, Text)
from nltk.corpus import (cess_cat, brown, nps_chat, treebank, sinica_treebank,
alpino, indian, floresta, mac_morpho, cess_esp)
from nltk.util import in_idle
from nltk.draw.util import ShowText
WORD_OR_TAG = '[^/ ]+'
BOUNDARY = r'\b'
CORPUS_LOADED_EVENT = '<<CL_EVENT>>'
SEARCH_TERMINATED_EVENT = '<<ST_EVENT>>'
SEARCH_ERROR_EVENT = '<<SE_EVENT>>'
ERROR_LOADING_CORPUS_EVENT = '<<ELC_EVENT>>'
# NB All corpora must be specified in a lambda expression so as not to be
# loaded when the module is imported.
_DEFAULT = 'English: Brown Corpus (Humor, simplified)'
_CORPORA = {
'Catalan: CESS-CAT Corpus (simplified)':
lambda: cess_cat.tagged_sents(simplify_tags=True),
'English: Brown Corpus':
lambda: brown.tagged_sents(),
'English: Brown Corpus (simplified)':
lambda: brown.tagged_sents(simplify_tags=True),
'English: Brown Corpus (Press, simplified)':
lambda: brown.tagged_sents(categories=['news', 'editorial', 'reviews'], simplify_tags=True),
'English: Brown Corpus (Religion, simplified)':
lambda: brown.tagged_sents(categories='religion', simplify_tags=True),
'English: Brown Corpus (Learned, simplified)':
lambda: brown.tagged_sents(categories='learned', simplify_tags=True),
'English: Brown Corpus (Science Fiction, simplified)':
lambda: brown.tagged_sents(categories='science_fiction', simplify_tags=True),
'English: Brown Corpus (Romance, simplified)':
lambda: brown.tagged_sents(categories='romance', simplify_tags=True),
'English: Brown Corpus (Humor, simplified)':
lambda: brown.tagged_sents(categories='humor', simplify_tags=True),
'English: NPS Chat Corpus':
lambda: nps_chat.tagged_posts(),
'English: NPS Chat Corpus (simplified)':
lambda: nps_chat.tagged_posts(simplify_tags=True),
'English: Wall Street Journal Corpus':
lambda: treebank.tagged_sents(),
'English: Wall Street Journal Corpus (simplified)':
lambda: treebank.tagged_sents(simplify_tags=True),
'Chinese: Sinica Corpus':
lambda: sinica_treebank.tagged_sents(),
'Chinese: Sinica Corpus (simplified)':
lambda: sinica_treebank.tagged_sents(simplify_tags=True),
'Dutch: Alpino Corpus':
lambda: alpino.tagged_sents(),
'Dutch: Alpino Corpus (simplified)':
lambda: alpino.tagged_sents(simplify_tags=True),
'Hindi: Indian Languages Corpus':
lambda: indian.tagged_sents(files='hindi.pos'),
'Hindi: Indian Languages Corpus (simplified)':
lambda: indian.tagged_sents(files='hindi.pos', simplify_tags=True),
'Portuguese: Floresta Corpus (Portugal)':
lambda: floresta.tagged_sents(),
'Portuguese: Floresta Corpus (Portugal, simplified)':
lambda: floresta.tagged_sents(simplify_tags=True),
'Portuguese: MAC-MORPHO Corpus (Brazil)':
lambda: mac_morpho.tagged_sents(),
'Portuguese: MAC-MORPHO Corpus (Brazil, simplified)':
lambda: mac_morpho.tagged_sents(simplify_tags=True),
'Spanish: CESS-ESP Corpus (simplified)':
lambda: cess_esp.tagged_sents(simplify_tags=True),
}
class ConcordanceSearchView(object):
_BACKGROUND_COLOUR='#FFF' #white
#Colour of highlighted results
_HIGHLIGHT_WORD_COLOUR='#F00' #red
_HIGHLIGHT_WORD_TAG='HL_WRD_TAG'
_HIGHLIGHT_LABEL_COLOUR='#C0C0C0' # dark grey
_HIGHLIGHT_LABEL_TAG='HL_LBL_TAG'
#Percentage of text left of the scrollbar position
_FRACTION_LEFT_TEXT=0.30
def __init__(self):
self.model = ConcordanceSearchModel()
self.model.add_listener(self)
self.top = Tk()
self._init_top(self.top)
self._init_menubar()
self._init_widgets(self.top)
self._bind_event_handlers()
self.load_corpus(self.model.DEFAULT_CORPUS)
def _init_top(self, top):
top.geometry('950x680+50+50')
top.title('NLTK Concordance Search')
top.bind('<Control-q>', self.destroy)
top.minsize(950,680)
def _init_widgets(self, parent):
self.main_frame = Frame(parent, dict(background=self._BACKGROUND_COLOUR, padx=1, pady=1, border=1))
self._init_corpus_select(self.main_frame)
self._init_query_box(self.main_frame)
self._init_results_box(self.main_frame)
self._init_paging(self.main_frame)
self._init_status(self.main_frame)
self.main_frame.pack(fill='both', expand=True)
def _init_menubar(self):
self._result_size = IntVar(self.top)
self._cntx_bf_len = IntVar(self.top)
self._cntx_af_len = IntVar(self.top)
menubar = Menu(self.top)
filemenu = Menu(menubar, tearoff=0, borderwidth=0)
filemenu.add_command(label='Exit', underline=1,
command=self.destroy, accelerator='Ctrl-q')
menubar.add_cascade(label='File', underline=0, menu=filemenu)
editmenu = Menu(menubar, tearoff=0)
rescntmenu = Menu(editmenu, tearoff=0)
rescntmenu.add_radiobutton(label='20', variable=self._result_size,
underline=0, value=20,
command=self.set_result_size)
rescntmenu.add_radiobutton(label='50', variable=self._result_size,
underline=0, value=50,
command=self.set_result_size)
rescntmenu.add_radiobutton(label='100', variable=self._result_size,
underline=0, value=100,
command=self.set_result_size)
rescntmenu.invoke(1)
editmenu.add_cascade(label='Result Count', underline=0, menu=rescntmenu)
cntxmenu = Menu(editmenu, tearoff=0)
cntxbfmenu = Menu(cntxmenu, tearoff=0)
cntxbfmenu.add_radiobutton(label='60 characters',
variable=self._cntx_bf_len,
underline=0, value=60,
command=self.set_cntx_bf_len)
cntxbfmenu.add_radiobutton(label='80 characters',
variable=self._cntx_bf_len,
underline=0, value=80,
command=self.set_cntx_bf_len)
cntxbfmenu.add_radiobutton(label='100 characters',
variable=self._cntx_bf_len,
underline=0, value=100,
command=self.set_cntx_bf_len)
cntxbfmenu.invoke(1)
cntxmenu.add_cascade(label='Before', underline=0, menu=cntxbfmenu)
cntxafmenu = Menu(cntxmenu, tearoff=0)
cntxafmenu.add_radiobutton(label='70 characters',
variable=self._cntx_af_len,
underline=0, value=70,
command=self.set_cntx_af_len)
cntxafmenu.add_radiobutton(label='90 characters',
variable=self._cntx_af_len,
underline=0, value=90,
command=self.set_cntx_af_len)
cntxafmenu.add_radiobutton(label='110 characters',
variable=self._cntx_af_len,
underline=0, value=110,
command=self.set_cntx_af_len)
cntxafmenu.invoke(1)
cntxmenu.add_cascade(label='After', underline=0, menu=cntxafmenu)
editmenu.add_cascade(label='Context', underline=0, menu=cntxmenu)
menubar.add_cascade(label='Edit', underline=0, menu=editmenu)
self.top.config(menu=menubar)
def set_result_size(self, **kwargs):
self.model.result_count = self._result_size.get()
def set_cntx_af_len(self, **kwargs):
self._char_after = self._cntx_af_len.get()
def set_cntx_bf_len(self, **kwargs):
self._char_before = self._cntx_bf_len.get()
def _init_corpus_select(self, parent):
innerframe = Frame(parent, background=self._BACKGROUND_COLOUR)
self.var = StringVar(innerframe)
self.var.set(self.model.DEFAULT_CORPUS)
Label(innerframe, justify=LEFT, text=' Corpus: ',
background=self._BACKGROUND_COLOUR, padx = 2, pady = 1, border = 0).pack(side='left')
om = OptionMenu(innerframe, self.var, self.model.DEFAULT_CORPUS, command=self.corpus_selected, *self.model.non_default_corpora())
om['borderwidth'] = 0
om['highlightthickness'] = 1
om.pack(side='left')
innerframe.pack(side='top', fill='x', anchor='n')
def _init_status(self, parent):
self.status = Label(parent, justify=LEFT, relief=SUNKEN, background=self._BACKGROUND_COLOUR, border=0, padx = 1, pady = 0)
self.status.pack(side='top', anchor='sw')
def _init_query_box(self, parent):
innerframe = Frame(parent, background=self._BACKGROUND_COLOUR)
another = Frame(innerframe, background=self._BACKGROUND_COLOUR)
self.query_box = Entry(another, width=60)
self.query_box.pack(side='left', fill='x', pady=25, anchor='center')
self.search_button = Button(another, text='Search', command=self.search, borderwidth=1, highlightthickness=1)
self.search_button.pack(side='left', fill='x', pady=25, anchor='center')
self.query_box.bind('<KeyPress-Return>', self.search_enter_keypress_handler)
another.pack()
innerframe.pack(side='top', fill='x', anchor='n')
def search_enter_keypress_handler(self, *event):
self.search()
def _init_results_box(self, parent):
innerframe = Frame(parent)
i1 = Frame(innerframe)
i2 = Frame(innerframe)
vscrollbar = Scrollbar(i1, borderwidth=1)
hscrollbar = Scrollbar(i2, borderwidth=1, orient='horiz')
self.results_box = Text(i1,
font=tkFont.Font(family='courier', size='16'),
state='disabled', borderwidth=1,
yscrollcommand=vscrollbar.set,
xscrollcommand=hscrollbar.set, wrap='none', width='40', height = '20', exportselection=1)
self.results_box.pack(side='left', fill='both', expand=True)
self.results_box.tag_config(self._HIGHLIGHT_WORD_TAG, foreground=self._HIGHLIGHT_WORD_COLOUR)
self.results_box.tag_config(self._HIGHLIGHT_LABEL_TAG, foreground=self._HIGHLIGHT_LABEL_COLOUR)
vscrollbar.pack(side='left', fill='y', anchor='e')
vscrollbar.config(command=self.results_box.yview)
hscrollbar.pack(side='left', fill='x', expand=True, anchor='w')
hscrollbar.config(command=self.results_box.xview)
#there is no other way of avoiding the overlap of scrollbars while using pack layout manager!!!
Label(i2, text=' ', background=self._BACKGROUND_COLOUR).pack(side='left', anchor='e')
i1.pack(side='top', fill='both', expand=True, anchor='n')
i2.pack(side='bottom', fill='x', anchor='s')
innerframe.pack(side='top', fill='both', expand=True)
def _init_paging(self, parent):
innerframe = Frame(parent, background=self._BACKGROUND_COLOUR)
self.prev = prev = Button(innerframe, text='Previous', command=self.previous, width='10', borderwidth=1, highlightthickness=1, state='disabled')
prev.pack(side='left', anchor='center')
self.next = next = Button(innerframe, text='Next', command=self.next, width='10', borderwidth=1, highlightthickness=1, state='disabled')
next.pack(side='right', anchor='center')
innerframe.pack(side='top', fill='y')
self.current_page = 0
def previous(self):
self.clear_results_box()
self.freeze_editable()
self.model.prev(self.current_page - 1)
def next(self):
self.clear_results_box()
self.freeze_editable()
self.model.next(self.current_page + 1)
def about(self, *e):
ABOUT = ("NLTK Concordance Search Demo\n")
TITLE = 'About: NLTK Concordance Search Demo'
try:
from tkMessageBox import Message
Message(message=ABOUT, title=TITLE, parent=self.main_frame).show()
except:
ShowText(self.top, TITLE, ABOUT)
def _bind_event_handlers(self):
self.top.bind(CORPUS_LOADED_EVENT, self.handle_corpus_loaded)
self.top.bind(SEARCH_TERMINATED_EVENT, self.handle_search_terminated)
self.top.bind(SEARCH_ERROR_EVENT, self.handle_search_error)
self.top.bind(ERROR_LOADING_CORPUS_EVENT, self.handle_error_loading_corpus)
def handle_error_loading_corpus(self, event):
self.status['text'] = 'Error in loading ' + self.var.get()
self.unfreeze_editable()
self.clear_all()
self.freeze_editable()
def handle_corpus_loaded(self, event):
self.status['text'] = self.var.get() + ' is loaded'
self.unfreeze_editable()
self.clear_all()
self.query_box.focus_set()
def handle_search_terminated(self, event):
#todo: refactor the model such that it is less state sensitive
results = self.model.get_results()
self.write_results(results)
self.status['text'] = ''
if len(results) == 0:
self.status['text'] = 'No results found for ' + self.model.query
else:
self.current_page = self.model.last_requested_page
self.unfreeze_editable()
self.results_box.xview_moveto(self._FRACTION_LEFT_TEXT)
def handle_search_error(self, event):
self.status['text'] = 'Error in query ' + self.model.query
self.unfreeze_editable()
def corpus_selected(self, *args):
new_selection = self.var.get()
self.load_corpus(new_selection)
def load_corpus(self, selection):
if self.model.selected_corpus != selection:
self.status['text'] = 'Loading ' + selection + '...'
self.freeze_editable()
self.model.load_corpus(selection)
def search(self):
self.current_page = 0
self.clear_results_box()
self.model.reset_results()
query = self.query_box.get()
if (len(query.strip()) == 0): return
self.status['text'] = 'Searching for ' + query
self.freeze_editable()
self.model.search(query, self.current_page + 1, )
def write_results(self, results):
self.results_box['state'] = 'normal'
row = 1
for each in results:
sent, pos1, pos2 = each[0].strip(), each[1], each[2]
if len(sent) != 0:
if (pos1 < self._char_before):
sent, pos1, pos2 = self.pad(sent, pos1, pos2)
sentence = sent[pos1-self._char_before:pos1+self._char_after]
if not row == len(results):
sentence += '\n'
self.results_box.insert(str(row) + '.0', sentence)
word_markers, label_markers = self.words_and_labels(sent, pos1, pos2)
for marker in word_markers: self.results_box.tag_add(self._HIGHLIGHT_WORD_TAG, str(row) + '.' + str(marker[0]), str(row) + '.' + str(marker[1]))
for marker in label_markers: self.results_box.tag_add(self._HIGHLIGHT_LABEL_TAG, str(row) + '.' + str(marker[0]), str(row) + '.' + str(marker[1]))
row += 1
self.results_box['state'] = 'disabled'
def words_and_labels(self, sentence, pos1, pos2):
search_exp = sentence[pos1:pos2]
words, labels = [], []
labeled_words = search_exp.split(' ')
index = 0
for each in labeled_words:
if each == '':
index += 1
else:
word, label = each.split('/')
words.append((self._char_before + index, self._char_before + index + len(word)))
index += len(word) + 1
labels.append((self._char_before + index, self._char_before + index + len(label)))
index += len(label)
index += 1
return words, labels
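    # Illustrative example with self._char_before == 80:
    #   words_and_labels("The/DT dog/NN", 0, 13) -> words  [(80, 83), (87, 90)]
    #                                               labels [(84, 86), (91, 93)]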
def pad(self, sent, hstart, hend):
if hstart >= self._char_before:
return sent, hstart, hend
d = self._char_before - hstart
sent = ''.join([' '] * d) + sent
return sent, hstart + d, hend + d
def destroy(self, *e):
if self.top is None: return
self.top.destroy()
self.top = None
def clear_all(self):
self.query_box.delete(0, END)
self.model.reset_query()
self.clear_results_box()
def clear_results_box(self):
self.results_box['state'] = 'normal'
self.results_box.delete("1.0", END)
self.results_box['state'] = 'disabled'
def freeze_editable(self):
self.query_box['state'] = 'disabled'
self.search_button['state'] = 'disabled'
self.prev['state'] = 'disabled'
self.next['state'] = 'disabled'
def unfreeze_editable(self):
self.query_box['state'] = 'normal'
self.search_button['state'] = 'normal'
self.set_paging_button_states()
def set_paging_button_states(self):
if self.current_page == 0 or self.current_page == 1:
self.prev['state'] = 'disabled'
else:
self.prev['state'] = 'normal'
if self.model.has_more_pages(self.current_page):
self.next['state'] = 'normal'
else:
self.next['state'] = 'disabled'
def fire_event(self, event):
#Firing an event so that rendering of widgets happen in the mainloop thread
self.top.event_generate(event, when='tail')
def mainloop(self, *args, **kwargs):
if in_idle(): return
self.top.mainloop(*args, **kwargs)
class ConcordanceSearchModel(object):
def __init__(self):
self.listeners = []
self.CORPORA = _CORPORA
self.DEFAULT_CORPUS = _DEFAULT
self.selected_corpus = None
self.reset_query()
self.reset_results()
self.result_count = None
self.last_sent_searched = 0
def non_default_corpora(self):
copy = []
copy.extend(self.CORPORA.keys())
copy.remove(self.DEFAULT_CORPUS)
copy.sort()
return copy
def load_corpus(self, name):
self.selected_corpus = name
self.tagged_sents = []
runner_thread = self.LoadCorpus(name, self)
runner_thread.start()
def search(self, query, page):
self.query = query
self.last_requested_page = page
self.SearchCorpus(self, page, self.result_count).start()
def next(self, page):
self.last_requested_page = page
if len(self.results) < page:
self.search(self.query, page)
else:
self.notify_listeners(SEARCH_TERMINATED_EVENT)
def prev(self, page):
self.last_requested_page = page
self.notify_listeners(SEARCH_TERMINATED_EVENT)
def add_listener(self, listener):
self.listeners.append(listener)
def notify_listeners(self, event):
for each in self.listeners:
each.fire_event(event)
def reset_results(self):
self.last_sent_searched = 0
self.results = []
self.last_page = None
def reset_query(self):
        self.query = None
            est_dt_i = reward_network.estimate_temporal_difference(mu1[t1_i].unsqueeze(0), mu1[t2_i].unsqueeze(0))
est_dt_j = reward_network.estimate_temporal_difference(mu2[t1_j].unsqueeze(0), mu2[t2_j].unsqueeze(0))
real_dt_i = (times_i[t2_i] - times_i[t1_i])/100.0
real_dt_j = (times_j[t2_j] - times_j[t1_j])/100.0
actions_1 = reward_network.estimate_inverse_dynamics(mu1[0:-1], mu1[1:])
actions_2 = reward_network.estimate_inverse_dynamics(mu2[0:-1], mu2[1:])
target_actions_1 = torch.LongTensor(actions_i[1:]).to(device)
target_actions_2 = torch.LongTensor(actions_j[1:]).to(device)
#print((actions_1, target_actions_1))
#print((actions_2, target_actions_2))
inverse_dynamics_loss_1 = inverse_dynamics_loss(actions_1, target_actions_1)/1.9
inverse_dynamics_loss_2 = inverse_dynamics_loss(actions_2, target_actions_2)/1.9
forward_dynamics_distance = 5 #1 if epoch <= 1 else np.random.randint(1, min(1, max(epoch, 4)))
forward_dynamics_actions1 = target_actions_1
forward_dynamics_actions2 = target_actions_2
forward_dynamics_onehot_actions_1 = torch.zeros((num_frames-1, ACTION_SPACE_SIZE), dtype=torch.float32, device=device)
forward_dynamics_onehot_actions_2 = torch.zeros((num_frames-1, ACTION_SPACE_SIZE), dtype=torch.float32, device=device)
forward_dynamics_onehot_actions_1.scatter_(1, forward_dynamics_actions1.unsqueeze(1), 1.0)
forward_dynamics_onehot_actions_2.scatter_(1, forward_dynamics_actions2.unsqueeze(1), 1.0)
forward_dynamics_1 = reward_network.forward_dynamics(mu1[:-forward_dynamics_distance], forward_dynamics_onehot_actions_1[:(num_frames-forward_dynamics_distance)])
forward_dynamics_2 = reward_network.forward_dynamics(mu2[:-forward_dynamics_distance], forward_dynamics_onehot_actions_2[:(num_frames-forward_dynamics_distance)])
for fd_i in range(forward_dynamics_distance-1):
forward_dynamics_1 = reward_network.forward_dynamics(forward_dynamics_1, forward_dynamics_onehot_actions_1[fd_i+1:(num_frames-forward_dynamics_distance+fd_i+1)])
forward_dynamics_2 = reward_network.forward_dynamics(forward_dynamics_2, forward_dynamics_onehot_actions_2[fd_i+1:(num_frames-forward_dynamics_distance+fd_i+1)])
forward_dynamics_loss_1 = 100 * forward_dynamics_loss(forward_dynamics_1, mu1[forward_dynamics_distance:])
forward_dynamics_loss_2 = 100 * forward_dynamics_loss(forward_dynamics_2, mu2[forward_dynamics_distance:])
#print("est_dt: " + str(est_dt_i) + ", real_dt: " + str(real_dt_i))
#print("est_dt: " + str(est_dt_j) + ", real_dt: " + str(real_dt_j))
dt_loss_i = 4*temporal_difference_loss(est_dt_i, torch.tensor(((real_dt_i,),), dtype=torch.float32, device=device))
dt_loss_j = 4*temporal_difference_loss(est_dt_j, torch.tensor(((real_dt_j,),), dtype=torch.float32, device=device))
#l1_loss = 0.5 * (torch.norm(z1, 1) + torch.norm(z2, 1))
#trex_loss = loss_criterion(outputs, labels)
#loss = trex_loss + l1_reg * abs_rewards + reconstruction_loss_1 + reconstruction_loss_2 + dt_loss_i + dt_loss_j + inverse_dynamics_loss_1 + inverse_dynamics_loss_2
#reconstruction_loss_1 + reconstruction_loss_2 +
loss = dt_loss_i + dt_loss_j + (inverse_dynamics_loss_1 + inverse_dynamics_loss_2) + forward_dynamics_loss_1 + forward_dynamics_loss_2 + reconstruction_loss_1 + reconstruction_loss_2
if i < len(training_labels) * validation_split:
print("TRAINING LOSS", end=" ")
else:
print("VALIDATION LOSS", end=" ")
print("dt_loss", dt_loss_i.item(), dt_loss_j.item(), "inverse_dynamics", inverse_dynamics_loss_1.item(), inverse_dynamics_loss_2.item(), "forward_dynamics", forward_dynamics_loss_1.item(), forward_dynamics_loss_2.item(), "reconstruction", reconstruction_loss_1.item(), reconstruction_loss_2.item(), end=" ")
#loss = dt_loss_i + dt_loss_j + inverse_dynamics_loss_1 + inverse_dynamics_loss_2 + forward_dynamics_loss_1 + forward_dynamics_loss_2 + l1_loss
#loss = forward_dynamics_loss_1 + forward_dynamics_loss_2
#loss = inverse_dynamics_loss_1 + inverse_dynamics_loss_2
#TODO add l2 reg
#print("!LOSSDATA " + str(reconstruction_loss_1.data.numpy()) + " " + str(reconstruction_loss_2.data.numpy()) + " " + str(dt_loss_i.data.numpy()) + " " + str(dt_loss_j.data.numpy()) + " " + str(trex_loss.data.numpy()) + " " + str(loss.data.numpy()) + " " + str(inverse_dynamics_loss_1.data.numpy()) + " " + str(inverse_dynamics_loss_2.data.numpy()))
#print("!LOSSDATA " + str(reconstruction_loss_1.data.numpy()) + " " + str(reconstruction_loss_2.data.numpy()) + " " + str(dt_loss_i.data.numpy()) + " " + str(dt_loss_j.data.numpy()) + " " + str(loss.data.numpy()) + " " + str(inverse_dynamics_loss_1.data.numpy()) + " " + str(inverse_dynamics_loss_2.data.numpy()) + " " + str(forward_dynamics_loss_1.data.numpy()) + " " + str(forward_dynamics_loss_2.data.numpy()))
#loss = inverse_dynamics_loss_1 + inverse_dynamics_loss_2
#print(loss.data.numpy())
#sys.stdout.flush()
if i < len(training_labels) * validation_split:
loss.backward()
optimizer.step()
#print stats to see if learning
item_loss = loss.item()
print("total", item_loss)
cum_loss += item_loss
if i % 100 == 99:
#print(i)
print("epoch {}:{} loss {}".format(epoch,i, cum_loss))
print(abs_rewards)
cum_loss = 0.0
print("check pointing")
torch.save(reward_network.state_dict(), checkpoint_dir)
print("finished training")
def calc_accuracy(reward_network, training_inputs, training_outputs):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
loss_criterion = nn.CrossEntropyLoss()
num_correct = 0.
with torch.no_grad():
for i in range(len(training_inputs)):
label = training_outputs[i]
traj_i, traj_j = training_inputs[i]
traj_i = np.array(traj_i)
traj_j = np.array(traj_j)
traj_i = torch.from_numpy(traj_i).float().to(device)
traj_j = torch.from_numpy(traj_j).float().to(device)
#forward to get logits
outputs, abs_return, z1, z2, _, _, _, _ = reward_network.forward(traj_i, traj_j)
_, pred_label = torch.max(outputs,0)
if pred_label.item() == label:
num_correct += 1.
return num_correct / len(training_inputs)
def predict_reward_sequence(net, traj):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
rewards_from_obs = []
with torch.no_grad():
for s in traj:
r = net.cum_return(torch.from_numpy(np.array([s])).float().to(device))[0].item()
rewards_from_obs.append(r)
return rewards_from_obs
def predict_traj_return(net, traj):
return sum(predict_reward_sequence(net, traj))
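# Hedged usage sketch (illustrative only): predict_traj_return simply sums the
# per-frame outputs of net.cum_return over a trajectory. The stub network below
# is an assumption used to show the expected call signature; the real model is
# the trained reward network handled elsewhere in this script.
def _demo_predict_traj_return():
    import numpy as np
    import torch

    class _StubNet:
        # Mimics the only piece of the API used above: cum_return(obs) -> (r, |r|).
        def cum_return(self, obs):
            r = obs.mean()
            return r, r.abs()

    fake_traj = [np.random.rand(84, 84, 4).astype(np.float32) for _ in range(3)]
    return predict_traj_return(_StubNet(), fake_traj)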
from tkinter import Tk, Text, TOP, BOTH, X, Y, N, LEFT, RIGHT, Frame, Label, Entry, Scale, HORIZONTAL, Listbox, END, Button, Canvas
"""
from PIL import Image, ImageTk
#from tkinter.ttk import Frame, Label, Entry, Style
import os
import sys
if len(sys.argv) < 2:
print("Usage: " + sys.argv[0] + " <folder>")
sys.exit()
ENCODING_DIMS = 64
folder_name = sys.argv[1]
vals = os.listdir(folder_name)
for nname in vals:
if "zz_run" in nname:
continue
if "_data" in nname:
continue
if ".zip" in nname:
continue
if not nname.endswith(".params"):
continue
file_name = folder_name + "/" + nname
data_name = file_name + "_data/"
if os.path.exists(data_name):
print("Already exists: " + data_name)
continue
os.mkdir(data_name)
state_dict = torch.load(file_name)
action_space_size, encoding_dims_times_two = state_dict['inverse_dynamics1.weight'].shape
if encoding_dims_times_two % 2 != 0:
print("uh ohhhhh")
encoding_dims = encoding_dims_times_two // 2
net = Net(encoding_dims, action_space_size)
#net.cum_return(torch.zeros((1, 84, 84, 4)))
net.load_state_dict(state_dict)
with torch.no_grad():
x = [0] * ENCODING_DIMS
tarray = torch.FloatTensor(x).unsqueeze(dim=0)
decoded = (net.decode(tarray).permute(0, 3, 1, 2).reshape(84*4, 84).numpy() * 255).astype(np.uint8)
img = Image.fromarray(decoded)
img.save(data_name + "zero.png")
first_frames = []
noise_multiplier = 1
with open(data_name + "noise.txt", "w") as f:
f.write("Noise multiplier: " + str(noise_multiplier))
for k in range(4):
for i in range(ENCODING_DIMS):
x[i] = np.random.randn() * noise_multiplier
tarray = torch.FloatTensor(x).unsqueeze(dim=0)
decoded = (net.decode(tarray).permute(0, 3, 1, 2).reshape(84*4, 84).numpy() * 255).astype(np.uint8)
img = Image.fromarray(decoded)
img.save(data_name + "sample_" + str(k) + ".png")
first_frames.append((net.decode(tarray).permute(0, 3, 1, 2)[0][0].numpy() * 255).astype(np.uint8))
Image.fromarray(np.concatenate(first_frames, axis=1)).save(data_name + "first_frame_sample.png")
os.mkdir(data_name + "forward_dynamics")
for k in range(action_space_size):
for i in range(ENCODING_DIMS):
x[i] = np.random.randn() * 2
fwd_name = data_name + "forward_dynamics/action_" + str(k) + "/"
os.mkdir(fwd_name)
tarray = torch.FloatTensor(x).unsqueeze(dim=0)
actions = [0] * action_space_size
actions[k] = 1
taction = torch.FloatTensor(actions).unsqueeze(dim=0)
for l in range(11):
decoded = (net.decode(tarray).permute(0, 3, 1, 2).reshape(84*4, 84).numpy() * 255).astype(np.uint8)
img = Image.fromarray(decoded)
img.save(fwd_name + "index_" + str(l) + ".png")
tarray = net.forward_dynamics(tarray, taction)
tarray = torch.FloatTensor(x).unsqueeze(dim=0)
zero_out = (net.decode(tarray).permute(0, 3, 1, 2).reshape(84*4, 84).numpy() * 255).astype(np.uint8)
best_dims = []
for dim in range(ENCODING_DIMS):
for i in range(ENCODING_DIMS):
x[i] = 0
total_diff = 0
for v in np.linspace(-12, 12, 4):
x[dim] = v
tarray = torch.FloatTensor(x).unsqueeze(dim=0)
decoded = (net.decode(tarray).permute(0, 3, 1, 2).reshape(84*4, 84).numpy() * 255).astype(np.uint8)
total_diff += np.sum(np.absolute(zero_out - decoded))
best_dims.append((dim, total_diff))
best_dims.sort(key=lambda k: -k[1])
with open(data_name + "best_dims.txt", "w") as f:
f.write(str(best_dims))
os.mkdir(data_name + "special_dims")
special = []
if "spaceinvaders" in data_name:
special = [53, 1]
for m in range(5):
if best_dims[m][0] not in special:
special.append(best_dims[m][0])
for sp in special:
spdir = data_name + "special_dims/" + str(sp) + "/"
os.mkdir(spdir)
for i in range(ENCODING_DIMS):
x[i] = 0
index = 0
for v in np.linspace(-12, 12, 6):
x[sp] = v
tarray = torch.FloatTensor(x).unsqueeze(dim=0)
decoded = (net.decode(tarray).permute(0, 3, 1, 2).reshape(84*4, 84).numpy() * 255).astype(np.uint8)
img = Image.fromarray(decoded)
img.save(spdir + str(index) + ".png")
index += 1
"""
#s = Style()
#s.configure('My.Red', background='red')
#s.configure('My.Blue', background='blue')
class Example(Frame):
def __init__(self):
super().__init__()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.initUI()
def set_boxes(self):
raw = [x.strip() for x in self.entry.get().split(",")]
ones_to_add = []
if len(raw[0]) > 0:
for x in raw:
if "-" in x:
start, end = [int(y) for y in x.split("-")]
for k in range(start, end+1):
if k not in ones_to_add:
ones_to_add.append(k)
else:
if int(x) not in ones_to_add:
ones_to_add.append(int(x))
for slider in self.sliders:
if slider[2] and slider[3] not in ones_to_add:
slider[0].pack_forget()
slider[2] = False
elif slider[2] == False and slider[3] in ones_to_add:
slider[0].pack()
slider[2] = True
def update_img(self):
with torch.no_grad():
tarray = torch.FloatTensor(self.slider_data).unsqueeze(dim=0)
decoded = net.decode(tarray).permute(0, 3, 1, 2).reshape(84*4, 84).numpy() * 255
img = ImageTk.PhotoImage(image=Image.fromarray(decoded))
self.canvas.itemconfig(self.image_on_canvas, image=img)
self.canvas.image = img
def make_array_setter(self, array, index):
def ret(value):
array[index] = float(value)
self.update_img()
return ret
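# Closure factory: every Scale widget gets its own callback that writes the new
# slider value into slider_data[index] and refreshes the decoded image; see the
# Scale(..., command=self.make_array_setter(self.slider_data, x)) call in initUI.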
def make_set_to_zero(self):
def set_to_zero():
for x in range(0, len(self.slider_data)):
if self.sliders[x][2]:
self.slider_data[x] = 0
self.sliders[x][1].set(0)
self.update_img()
return set_to_zero
def make_set_to_random(self):
def set_to_random():
for x in range(0, len(self.slider_data)):
if self.sliders[x][2]:
self.slider_data[x] = np.random.randn() * 3.5
self.sliders[x][1].set(self.slider_data[x])
self.update_img()
return set_to_random
def initUI(self):
self.master.title("Latent space visualizer")
self.pack(fill=BOTH, expand=True)
#frame1 = Frame(self, bg="red")
#frame1.pack(fill=Y, side=LEFT)
array = np.ones((84*4,84))*200
img = ImageTk.PhotoImage(image=Image.fromarray(array))
#img.pack(fill=Y, side=LEFT, expand=TRUE)
self.canvas = Canvas(self,width=84,height=84*4)
self.canvas.pack(side=LEFT)
self.image_on_canvas = self.canvas.create_image(0, 0, anchor="nw", image=img)
self.canvas.image = img
#lbl1 = Label(frame1, text="Title", width=6)
#lbl1.pack(padx=5, pady=5, expand=True)
frame2 = Frame(self)
frame2.pack(fill=BOTH, expand=True)
list_container = Frame(frame2)
list_container.pack()
Label(list_container, text="Which dims to explore").pack(side=LEFT)
Button(list_container, text="Clear", command=self.make_set_to_zero()).pack(side=RIGHT)
Button(list_container, text="Randomize", command=self.make_set_to_random()).pack(side=RIGHT)
Label(list_container, text="|").pack(side=RIGHT)
Button(list_container, text="Set", command=lambda: self.set_boxes()).pack(side=RIGHT)
self.entry = Entry(list_container)
self.entry.insert(0, "4, 2, 0-" + str(ENCODING_DIMS-1))
self.entry.pack()
slider_container = Frame(frame2)
slider_container.pack()
self.sliders = []
self.slider_data = []
for x in range(0, ENCODING_DIMS):
scale_frame = Frame(slider_container)
Label(scale_frame, text="Dim " + str(x)).pack(side=LEFT)
scale_frame.pack()
self.slider_data.append(0)
scale = Scale(scale_frame, from_=-12.0, to=12.0, length=600, resolution=0.01, orient=HORIZONTAL, command=self.make_array_setter(self.slider_data, x))
self.sliders.append([scale_frame, scale, True, x])
scale.pack()
self.update_img()
" ""
entry1 = Entry(frame1)
entry1.pack(fill=X, padx=5, expand=True)
frame2 = Frame(self)
frame2.pack(fill=X)
lbl2 = Label(frame2, text="Author", width=6)
lbl2.pack(side=LEFT, padx=5, pady=5)
entry2 = Entry(frame2)
entry2.pack(fill=X, padx=5, expand=True)
frame3 = Frame(self)
frame3.pack(fill=BOTH, expand=True)
lbl3 = Label(frame3, text="Review", width=6)
lbl3.pack(side=LEFT, anchor=N, padx=5, pady=5)
txt = Text(frame3)
txt.pack(fill=BOTH, pady=5, padx=5, expand=True)
" ""
def main():
root = Tk()
root.geometry("800x600+300+100")
app = Example()
root.mainloop()
if __name__ == '__main__':
main()
"""
"""
if __name__=="__main__":
parser = argparse.ArgumentParser(description=None)
parser.add_argument('--env_name', default='', help='Select the environment name to run, i.e. pong')
parser.add_argument('--reward_model_path', default='',
'sections in model': sec_in_model,
'recorded sections': recorded_sections,
'spacings': spacing, 'max spacing': max(spacing), 'min spacing': min(spacing)}
line_color_coding, puls_method_map, puls_sp_or_up_map = \
{}, {None: 0, 'buckling': 0.5, 'ultimate': 1}, {None:0, 'SP': 0.5, 'UP': 1}
cmap_sections = plt.get_cmap('jet')
thk_sort_unique = return_dict['color code']['all thicknesses']
spacing_sort_unique = return_dict['color code']['spacings']
structure_type_unique = return_dict['color code']['structure types map']
tot_weight, weight_mult_dist_x, weight_mult_dist_y = 0, 0,0
for line, line_data in self._line_to_struc.items():
if self._PULS_results is None:
puls_color, buc_uf, puls_uf, puls_method, puls_sp_or_up = 'black', 0, 0, None, None
elif self._PULS_results.get_utilization(line, self._line_to_struc[line][1].get_puls_method(),
self._new_puls_uf.get()) == None:
puls_color, buc_uf, puls_uf, puls_method, puls_sp_or_up = 'black', 0,0, None, None
else:
puls_method = self._line_to_struc[line][1].get_puls_method()
puls_uf = self._PULS_results.get_utilization(
line, puls_method,
self._new_puls_uf.get())
puls_color = matplotlib.colors.rgb2hex(cmap_sections(puls_uf))
puls_sp_or_up = self._line_to_struc[line][1].get_puls_sp_or_up()
rp_uf = rec_for_color[line]['rp buckling']
tot_uf_rp = max([rec_for_color[line]['fatigue'], rp_uf,
rec_for_color[line]['section modulus'], rec_for_color[line]['shear'],
rec_for_color[line]['plate thickness']])
tot_uf_puls = max([rec_for_color[line]['fatigue'], puls_uf,
rec_for_color[line]['section modulus'], rec_for_color[line]['shear'],
rec_for_color[line]['plate thickness']])
try:
this_pressure = self.get_highest_pressure(line)['normal']
except KeyError:
this_pressure = 0
rp_util = max(list(return_dict['utilization'][line].values()))
res = list()
for stress_list, this_stress in zip([sig_x, sig_y1, sig_y2, tau_xy],
[line_data[1].get_sigma_x(), line_data[1].get_sigma_y1(),
line_data[1].get_sigma_y2(), line_data[1].get_tau_xy()]):
if len(stress_list) == 1:
res.append(1)
elif max(stress_list) == 0 and min(stress_list) == 0:
res.append(0)
elif this_stress < 0:
res.append(this_stress /min(stress_list))
elif this_stress >= 0:
res.append(this_stress/ max(stress_list))
sig_x_uf, sig_y1_uf, sig_y2_uf , tau_xy_uf = res
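# Each *_uf value is the ratio of this line's stress component to the extreme
# value of the same sign across all lines (sig_x, sig_y1, sig_y2, tau_xy), i.e.
# a 0..1 scaling used below to pick a colour from the jet colormap.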
line_color_coding[line] = {'plate': matplotlib.colors.rgb2hex(cmap_sections(thk_sort_unique.index(round(line_data[1]
.get_pl_thk(),10))/len(thk_sort_unique))),
'spacing': matplotlib.colors.rgb2hex(
cmap_sections(spacing_sort_unique.index(round(line_data[1]
.get_s(), 10)) / len(
spacing_sort_unique))),
'section': matplotlib.colors.rgb2hex(cmap_sections(sec_in_model[line_data[1]
.get_beam_string()]
/len(list(recorded_sections)))),
'structure type': matplotlib.colors.rgb2hex(
cmap_sections(structure_type_unique.index(line_data[1].get_structure_type())
/len(structure_type_unique))),
'pressure color': 'black' if all_pressures in [[0],[0,1]] else matplotlib.colors.rgb2hex(cmap_sections(
this_pressure/highest_pressure)),
'pressure': this_pressure,
'rp uf color': matplotlib.colors.rgb2hex(cmap_sections(rp_util)),
'rp uf': rp_util,
'PULS method': puls_method,
'PULS sp or up':puls_sp_or_up,
'section modulus color': matplotlib.colors.rgb2hex(
cmap_sections(rec_for_color[line]['section modulus'])),
'fatigue color': matplotlib.colors.rgb2hex(
cmap_sections(rec_for_color[line]['fatigue'])),
'Total uf color rp' : matplotlib.colors.rgb2hex(
cmap_sections(tot_uf_rp)),
'Total uf rp': tot_uf_rp,
'Total uf color puls': matplotlib.colors.rgb2hex(
cmap_sections(tot_uf_puls)),
'Total uf puls': tot_uf_puls,
'PULS uf': round(puls_uf,2),
'PULS uf color': puls_color,
'fatigue uf' : rec_for_color[line]['fatigue'],
'section uf' : rec_for_color[line]['section modulus'],
'sigma x': matplotlib.colors.rgb2hex(cmap_sections(sig_x_uf)),
'sigma y1': matplotlib.colors.rgb2hex(cmap_sections(sig_y1_uf)),
'sigma y2': matplotlib.colors.rgb2hex(cmap_sections(sig_y2_uf)),
'tau xy':matplotlib.colors.rgb2hex(cmap_sections(tau_xy_uf)),
}
return_dict['color code']['lines'] = line_color_coding
# COG calculations
# Steel
tot_weight += return_dict['weights'][line]['line weight']
weight_mult_dist_x += return_dict['weights'][line]['line weight']\
*return_dict['weights'][line]['mid_coord'][0]
weight_mult_dist_y += return_dict['weights'][line]['line weight']\
*return_dict['weights'][line]['mid_coord'][1]
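# Steel centre of gravity as the weight-weighted mean of the line mid-coordinates:
# COG = [sum(w_i * x_i) / sum(w_i), sum(w_i * y_i) / sum(w_i)]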
tot_cog = [weight_mult_dist_x/tot_weight, weight_mult_dist_y/tot_weight]
else:
tot_cog = [0,0]
tot_weight = 0
return_dict['COG'] = tot_cog
return_dict['Total weight'] = tot_weight
return return_dict
def draw_canvas(self, state = None, event = None):
'''
Canvas is drawn here.
'''
self._main_canvas.delete('all')
color = 'black' #by default
# Drawing the shifted lines
if any([self._new_shift_viz_coord_hor.get()!=0, self._new_shift_viz_coord_ver.get()!= 0]) and self._new_shifted_coords.get():
self._main_canvas.create_line(self._canvas_draw_origo[0]+self._canvas_scale*self._new_shift_viz_coord_hor.get()/1000, 0,
self._canvas_draw_origo[0]+self._canvas_scale*self._new_shift_viz_coord_hor.get()/1000,
self._canvas_dim[1] + 500,
stipple='gray50', fill = 'peru')
self._main_canvas.create_line(0, self._canvas_draw_origo[1]-self._canvas_scale*self._new_shift_viz_coord_ver.get()/1000,
self._canvas_dim[0] + 500,
self._canvas_draw_origo[1]-self._canvas_scale*self._new_shift_viz_coord_ver.get()/1000,
stipple='gray50', fill = 'peru')
else:
# Drawing lines at (0, 0)
self._main_canvas.create_line(self._canvas_draw_origo[0], 0, self._canvas_draw_origo[0], self._canvas_dim[1]+500,
stipple= 'gray50')
self._main_canvas.create_line(0, self._canvas_draw_origo[1], self._canvas_dim[0] +500, self._canvas_draw_origo[1],
stipple='gray50')
self._main_canvas.create_text(self._canvas_draw_origo[0] - 30 * 1,
self._canvas_draw_origo[1] + 12 * 1, text='(0,0)',
font='Text 10')
# Drawing COG and COB
if self._new_show_cog.get():
pt_size = 5
if 'COG' in state.keys():
if self._new_shifted_coords.get():
point_coord_x = self._canvas_draw_origo[0] + (state['COG'][0] +
self._new_shift_viz_coord_hor.get()/1000) * \
self._canvas_scale
point_coord_y = self._canvas_draw_origo[1] - (state['COG'][1] +
self._new_shift_viz_coord_ver.get()/1000) * \
self._canvas_scale
else:
point_coord_x = self._canvas_draw_origo[0] + state['COG'][0]*self._canvas_scale
point_coord_y = self._canvas_draw_origo[1] - state['COG'][1]*self._canvas_scale
self._main_canvas.create_oval(point_coord_x - pt_size + 2,
point_coord_y - pt_size + 2,
point_coord_x + pt_size + 2,
point_coord_y + pt_size + 2, fill='yellow')
self._main_canvas.create_text(point_coord_x + 5,
point_coord_y - 14, text='steel COG: x=' + str(round(state['COG'][0], 2)) +
' y=' +str(round(state['COG'][1],2)),
font=self._text_size["Text 8 bold"], fill='black')
if self._center_of_buoyancy != {}:
for draft, cob in self._center_of_buoyancy.items():
if self._new_shifted_coords.get():
point_coord_x = self._canvas_draw_origo[0] + (cob[1] +
self._new_shift_viz_coord_hor.get() / 1000) * \
self._canvas_scale
point_coord_y = self._canvas_draw_origo[1] - (cob[0] +
self._new_shift_viz_coord_ver.get() / 1000) * \
self._canvas_scale
else:
point_coord_x = self._canvas_draw_origo[0] + cob[1] * self._canvas_scale
point_coord_y = self._canvas_draw_origo[1] - cob[0] * self._canvas_scale
self._main_canvas.create_oval(point_coord_x - pt_size + 2,
point_coord_y - pt_size + 2,
point_coord_x + pt_size + 2,
point_coord_y + pt_size + 2, fill='blue')
self._main_canvas.create_text(point_coord_x + 5,
point_coord_y + 14,
text='COB d='+str(draft) +': x=' + str(round(cob[1], 2)) +
' y=' + str(round(cob[0], 2)),
font=self._text_size["Text 8"], fill='blue')
chk_box_active = [self._new_colorcode_beams.get(), self._new_colorcode_plates.get(),
self._new_colorcode_pressure.get(), self._new_colorcode_utilization.get(),
self._new_colorcode_sigmax.get(), self._new_colorcode_sigmay1.get(), self._new_colorcode_sigmay2.get(),
self._new_colorcode_tauxy.get(), self._new_colorcode_structure_type.get(),
self._new_colorcode_fatigue.get(), self._new_colorcode_section_modulus.get(),
self._new_colorcode_total.get(), self._new_colorcode_puls_acceptance.get(),
self._new_colorcode_puls_sp_or_up.get(), self._new_colorcode_spacing.get()].count(True)> 0
if chk_box_active and state != None:
self.color_code_text(state)
# Drawing shortcut information if selected.
if self._new_shortcut_backdrop.get() == True:
self._main_canvas.create_text(self._main_canvas.winfo_width()*0.87, self._main_canvas.winfo_height()*0.16,
text = self._shortcut_text,
font=self._text_size["Text 8"],
fill = 'black')
# drawing the point dictionary
pt_size = 3
for key, value in self._point_dict.items():
if self._new_shifted_coords.get():
x_coord = round(self.get_point_actual_coord(key)[0] - self._new_shift_viz_coord_hor.get() / 1000, 3)
y_coord = round(self.get_point_actual_coord(key)[1] - self._new_shift_viz_coord_ver.get() / 1000, 3)
coord_color = 'peru'
else:
x_coord = round(self.get_point_actual_coord(key)[0], 3)
y_coord = round(self.get_point_actual_coord(key)[1], 3)
coord_color = 'black'
if self._point_is_active and key == self._active_point :
self._main_canvas.create_oval(self.get_point_canvas_coord(key)[0] - pt_size+2,
self.get_point_canvas_coord(key)[1] - pt_size+2,
self.get_point_canvas_coord(key)[0] + pt_size+2,
self.get_point_canvas_coord(key)[1] + pt_size+2, fill='blue')
if self._new_draw_point_name.get():
# drawing the name of the point
self._main_canvas.create_text(self.get_point_canvas_coord(key)[0] + 5,
self.get_point_canvas_coord(key)[1] - 14, text='pt.'+str(get_num(key)),
font=self._text_size["Text 12 bold"], fill = 'red')
# drawing the coordinates of the point
self._main_canvas.create_text(self.get_point_canvas_coord(key)[0]+30,
self.get_point_canvas_coord(key)[1]-40,
text='(' + str(x_coord) + ' , ' +
str(y_coord) + ')',
font="Text 14", fill = 'red')
else:
self._main_canvas.create_oval(self.get_point_canvas_coord(key)[0] - pt_size,
self.get_point_canvas_coord(key)[1] - pt_size,
self.get_point_canvas_coord(key)[0] + pt_size,
self.get_point_canvas_coord(key)[1] + pt_size, fill='red')
if self._new_draw_point_name.get():
#printing 'pt.#'
self._main_canvas.create_text(self.get_point_canvas_coord(key)[0] + 15,
self.get_point_canvas_coord(key)[1] - 10, text='pt.'+str(get_num(key)),
font="Text 10")
#printing the coordinates of the point
self._main_canvas.create_text(self.get_point_canvas_coord(key)[0]+35,
self.get_point_canvas_coord(key)[1]+10 ,
text='(' + str(x_coord) + ' , ' +
str(y_coord) + ')',
font="Text 10", fill = coord_color)
# drawing the line dictionary.
if len(self._line_dict) != 0:
for line, value in self._line_dict.items():
coord1 = self.get_point_canvas_coord('point' + str(value[0]))
coord2 = self.get_point_canvas_coord('point' + str(value[1]))
if not chk_box_active and state != None:
try:
if self._new_buckling_slider.get() == 2:
if 'black' in state['PULS colors'][line].values():
color = 'black'
else:
col1, col2 = state['PULS colors'][line]['buckling'], \
state['PULS colors'][line]['ultimate']
if self._line_to_struc[line][1].get_puls_method() == 'buckling':
color = 'red' if any([col1 == 'red', col2 == 'red']) else 'green'
else:
color = col2
if color == 'green':
color = 'green' if all([state['colors'][line][key] == 'green' for key in
['fatigue', 'section', 'shear','thickness']]) else 'red'
elif self._new_buckling_slider.get() == 1:
color = 'red' if 'red' in state['colors'][line].values() else 'green'
elif self._new_buckling_slider.get() == 3:
if 'black' in state['ML buckling colors'][line].values():
color = 'black'
else:
col1, col2 = state['ML buckling colors'][line]['buckling'], \
state['ML buckling colors'][line]['ultimate']
if self._line_to_struc[line][1].get_puls_method() == 'buckling':
color = col1
else:
color = col2
if color == 'green':
color = 'green' if all([state['colors'][line][key] == 'green' for key in
['fatigue', 'section', 'shear','thickness']]) else 'red'
except (KeyError, TypeError):
color = 'black'
elif chk_box_active and state != None and self._line_to_struc != {}:
color = self.color_code_line(state, line, coord1, [coord2[0] - coord1[0], coord2[1] - coord1[1]])
else:
color = 'black'
vector = [coord2[0] - coord1[0], coord2[1] - coord1[1]]
# drawing a bold line if it is selected
if line == self._active_line and self._line_is_active:
self._main_canvas.create_line(coord1, coord2, width=6, fill = color)
if self._new_line_name.get():
self._main_canvas.create_text(coord1[0] + vector[0] / 2 + 5, coord1[1] + vector[1] / 2+10,
text='Line ' + str(get_num(line)),
font=self._text_size["Text 10 bold"],
fill = 'red')
else:
self._main_canvas.create_line(coord1, coord2, width=3, fill = color)
if self._new_line_name.get():
self._main_canvas.create_text(coord1[0]-20 + vector[0] / 2 + 5, coord1[1] + vector[1] / 2+10,
text='l.' + str(get_num(line)), font="Text 8", fill = 'black')
if line in self._multiselect_lines:
self._main_canvas.create_text(coord1[0] + vector[0] / 2 +5, coord1[1] + vector[1] / 2 -10,
text=self._new_toggle_var.get(),
font=self._text_size["Text 8 bold"],
fill='orange')
# drawing waterline
if len(self._load_dict) != 0:
for load, data in self._load_dict.items():
if data[0].is_static():
draft = self.get_canvas_coords_from_point_coords((0,data[0].get_static_draft()))[1]
self._main_canvas.create_line(0,draft,self._canvas_dim[0]+500,draft, fill="blue", dash=(4, 4))
self._main_canvas.create_text(900,draft-10,text=str(get_num(data[0].get_name()))+' [m]',fill ='blue')
else:
pass
def color_code_text(self, state):
'''
return_dict['color code'] = {'thickest plate': thickest_plate, 'thickness map': thk_map,
'highest pressure': highest_pressure, 'lowest pressure': lowest_pressure,
'pressure map': press_map,
'all utilizations': all_utils, 'utilization map': util_map,
'max sigma x': max(sig_x), 'min sigma x': min(sig_x), 'sigma x map': sig_x_map,
'max sigma y1': max(sig_y1), 'min sigma y1': min(sig_y1),
'sigma y1 map': sig_y1_map,
'max sigma y2': max(sig_y2), 'min sigma y2': min(sig_y2),
'sigma y2 map': sig_y2_map,
'max tau xy': max(tau_xy), 'min tau xy': min(tau_xy), 'tau_xy map': tau_xy_map,
'structure types map': set(structure_type), 'sections in model': sec_in_model,
'recorded sections': recorded_sections}
:param state:
:return:
'''
cc_state = state['color code']
if cc_state == {}:
return
start_text, start_text_shift = 190,191
cmap_sections = plt.get_cmap('jet')
if self._new_colorcode_beams.get() == True and self._line_to_struc != {}:
sec_in_model = cc_state['sections in model']
for section, idx in sec_in_model.items():
if section =='length':
continue
self._main_canvas.create_text(11, start_text_shift+20*idx, text=section,
font=self._text_size["Text 10 bold"],
fill='black',
anchor="nw")
self._main_canvas.create_text(10, start_text+20*idx, text=section,
font=self._text_size["Text 10 bold"],
fill=matplotlib.colors.rgb2hex(cmap_sections(idx/sec_in_model['length'])),
anchor="nw")
elif self._new_colorcode_plates.get()
defined one. Note that this may happen
# on a first poll after server side config for a bot has changed. The bot
# doesn't know about new server-assigned dimensions yet in this case. Also
# don't report ['default'], bot sends it in the handshake before it knows
# anything at all.
for dim_key, from_cfg in bot_group_cfg.dimensions.iteritems():
from_bot = sorted(dimensions.get(dim_key) or [])
from_cfg = sorted(from_cfg)
if from_bot and from_bot != ['default'] and from_bot != from_cfg:
logging.warning(
'Dimensions in bots.cfg don\'t match ones provided by the bot\n'
'bot_id: "%s", key: "%s", from_bot: %s, from_cfg: %s',
bot_id, dim_key, from_bot, from_cfg)
dimensions[dim_key] = from_cfg
# Fill in all result fields except 'quarantined_msg'.
result = _ProcessResult(
request=request,
bot_id=bot_id,
version=version,
state=state,
dimensions=dimensions,
bot_group_cfg=bot_group_cfg,
lease_expiration_ts=lease_expiration_ts,
leased_indefinitely=leased_indefinitely,
maintenance_msg=state.get('maintenance'))
# The bot may decide to "self-quarantine" itself. Accept both via
# dimensions or via state. See bot_management._BotCommon.quarantined for
# more details.
if (bool(dimensions.get('quarantined')) or
bool(state.get('quarantined'))):
result.quarantined_msg = 'Bot self-quarantined'
return result
quarantined_msg = None
# Use a dummy 'for' to be able to break early from the block.
for _ in [0]:
quarantined_msg = has_unexpected_keys(
self.EXPECTED_KEYS, request, 'keys')
if quarantined_msg:
break
quarantined_msg = has_missing_keys(
self.REQUIRED_STATE_KEYS, state, 'state')
if quarantined_msg:
break
if not bot_id:
quarantined_msg = 'Missing bot id'
break
if not dimensions.get('pool'):
quarantined_msg = 'Missing \'pool\' dimension'
break
if not all(
config.validate_dimension_key(key) and
isinstance(values, list) and
all(config.validate_dimension_value(value) for value in values) and
len(values) == len(set(values))
for key, values in dimensions.iteritems()):
quarantined_msg = (
'Invalid dimensions type:\n%s' % json.dumps(dimensions,
sort_keys=True, indent=2, separators=(',', ': ')))
break
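# Example of a rejected payload: {"pool": "default"} (a bare string) fails the
# isinstance(values, list) check above; the expected shape is {"pool": ["default"]}.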
if quarantined_msg:
line = 'Quarantined Bot\nhttps://%s/restricted/bot/%s\n%s' % (
app_identity.get_default_version_hostname(), bot_id,
quarantined_msg)
ereporter2.log_request(self.request, source='bot', message=line)
result.quarantined_msg = quarantined_msg
return result
# Look for admin enforced quarantine.
if bool(bot_settings and bot_settings.quarantined):
result.quarantined_msg = 'Quarantined by admin'
return result
# TODO(maruel): Parallelise.
bot_root_key = bot_management.get_root_key(bot_id)
task_queues.assert_bot_async(bot_root_key, dimensions).get_result()
return result
class BotHandshakeHandler(_BotBaseHandler):
"""First request to be called to get initial data like bot code version.
The bot is server-controlled so the server doesn't have to support multiple
API versions. When running a task, the bot syncs the version-specific URL.
Once a bot finishes its currently running task, it'll be upgraded immediately
on its next poll.
This endpoint does not return commands to the bot, for example to upgrade
itself. It'll be told so when it does its first poll.
Response body is a JSON dict:
{
"bot_version": <sha-1 of swarming_bot.zip uncompressed content>,
"server_version": "138-193f1f3",
"bot_group_cfg_version": "0123abcdef",
"bot_group_cfg": {
"dimensions": { <server-defined dimensions> },
}
}
"""
@auth.public # auth happens in self._process()
def post(self):
res = self._process()
bot_management.bot_event(
event_type='bot_connected', bot_id=res.bot_id,
external_ip=self.request.remote_addr,
authenticated_as=auth.get_peer_identity().to_bytes(),
dimensions=res.dimensions, state=res.state,
version=res.version, quarantined=bool(res.quarantined_msg),
maintenance_msg=res.maintenance_msg,
task_id='', task_name=None, message=res.quarantined_msg)
data = {
'bot_version': bot_code.get_bot_version(self.request.host_url)[0],
'server_version': utils.get_app_version(),
'bot_group_cfg_version': res.bot_group_cfg.version,
'bot_group_cfg': {
# Let the bot know its server-side dimensions (from bots.cfg file).
'dimensions': res.bot_group_cfg.dimensions,
},
}
if res.bot_group_cfg.bot_config_script_content:
logging.info(
'Injecting %s: %d bytes',
res.bot_group_cfg.bot_config_script,
len(res.bot_group_cfg.bot_config_script_content))
data['bot_config'] = res.bot_group_cfg.bot_config_script_content
self.send_response(data)
class BotPollHandler(_BotBaseHandler):
"""The bot polls for a task; returns either a task, update command or sleep.
In case of exception on the bot, this is enough to get it just far enough to
eventually self-update to a working version. This is to ensure that coding
errors in bot code don't kill the whole fleet at once; bots should still be up
just enough to be able to self-update again even if they no longer get tasks
assigned.
"""
@auth.public # auth happens in self._process()
def post(self):
"""Handles a polling request.
Be very permissive on missing values. This can happen because of errors
on the bot; *we don't want to deny them the capacity to update*, so that the
bot code is eventually fixed and the bot self-updates to the working code.
It makes recovery of the fleet in case of catastrophic failure much easier.
"""
logging.debug('Request started')
if config.settings().force_bots_to_sleep_and_not_run_task:
# Ignore everything, just sleep. Tell the bot it is quarantined to inform
# it that it won't be running anything anyway. Use a large streak so it
# will sleep for 60s.
self._cmd_sleep(1000, True)
return
res = self._process()
sleep_streak = res.state.get('sleep_streak', 0)
quarantined = bool(res.quarantined_msg)
# Note bot existence at two places, one for stats at 1 minute resolution,
# the other for the list of known bots.
def bot_event(event_type, task_id=None, task_name=None):
bot_management.bot_event(
event_type=event_type, bot_id=res.bot_id,
external_ip=self.request.remote_addr,
authenticated_as=auth.get_peer_identity().to_bytes(),
dimensions=res.dimensions, state=res.state,
version=res.version, quarantined=quarantined,
maintenance_msg=res.maintenance_msg, task_id=task_id,
task_name=task_name, message=res.quarantined_msg)
# Bot version is host-specific because the host URL is embedded in
# swarming_bot.zip
logging.debug('Fetching bot code version')
expected_version, _ = bot_code.get_bot_version(
self.request.host_url)
if res.version != expected_version:
bot_event('request_update')
self._cmd_update(expected_version)
return
if quarantined:
bot_event('request_sleep')
self._cmd_sleep(sleep_streak, quarantined)
return
# If the server-side per-bot config for the bot has changed, we need
# to restart this particular bot, so it picks up new config in /handshake.
# Do this check only for bots that know about server-side per-bot configs
# already (such bots send 'bot_group_cfg_version' state attribute).
cur_bot_cfg_ver = res.state.get('bot_group_cfg_version')
if cur_bot_cfg_ver and cur_bot_cfg_ver != res.bot_group_cfg.version:
bot_event('request_restart')
self._cmd_bot_restart('Restarting to pick up new bots.cfg config')
return
#
# At that point, the bot should be in relatively good shape since it's
# running the right version. It is still possible that invalid code was
# pushed to the server, so be diligent about it.
#
# If a bot advertise itself with a key state 'maintenance', do not give
# a task to it until this key is removed.
#
# It's a 'hack' because this is not listed in the DB as a separate state,
# which hinders system monitoring. See bot_management.BotInfo. In practice,
# ts_mon_metrics.py can look at BotInfo.get('maintenance') to determine if a
# bot is in maintenance or idle.
if res.state.get('maintenance'):
bot_event('request_sleep')
# Tell the bot it's considered quarantined.
self._cmd_sleep(sleep_streak, True)
return
# The bot is in good shape. Try to grab a task.
try:
# This is a fairly complex function call, exceptions are expected.
request, secret_bytes, run_result = task_scheduler.bot_reap_task(
res.dimensions, res.version, res.lease_expiration_ts)
if not request:
# No task found, tell it to sleep a bit.
bot_event('request_sleep')
self._cmd_sleep(sleep_streak, quarantined)
return
try:
# This part is tricky since it intentionally runs a transaction after
# another one.
if request.task_slice(
run_result.current_task_slice).properties.is_terminate:
bot_event('bot_terminate', task_id=run_result.task_id)
self._cmd_terminate(run_result.task_id)
else:
bot_event(
'request_task', task_id=run_result.task_id,
task_name=request.name)
self._cmd_run(
request, secret_bytes, run_result, res.bot_id, res.os,
res.bot_group_cfg)
except:
logging.exception('Dang, exception after reaping')
raise
except runtime.DeadlineExceededError:
# If the timeout happened before a task was assigned there is no problem.
# If the timeout occurred after a task was assigned, that task will
# timeout (BOT_DIED) since the bot didn't get the details required to
# run it, and it will automatically get retried (TODO) when the task times
# out.
# TODO(maruel): Note the task if possible and hand it out on next poll.
# https://code.google.com/p/swarming/issues/detail?id=130
self.abort(500, 'Deadline')
def _cmd_run(
self, request, secret_bytes, run_result, bot_id, oses, bot_group_cfg):
logging.info('Run: %s', request.task_id)
props = request.task_slice(run_result.current_task_slice).properties
caches = [c.to_dict() for c in props.caches]
names = [c.name for c in props.caches]
pool = props.dimensions['pool'][0]
# Warning: this is doing a DB GET on the cold path, which will increase the
# reap failure.
for i, hint in enumerate(named_caches.get_hints(pool, oses, names)):
caches[i]['hint'] = str(hint)
out = {
'cmd': 'run',
'manifest': {
'bot_id': bot_id,
'bot_authenticated_as': auth.get_peer_identity().to_bytes(),
'caches': caches,
'cipd_input': {
'client_package': props.cipd_input.client_package.to_dict(),
'packages': [p.to_dict() for p in props.cipd_input.packages],
'server': props.cipd_input.server,
} if props.cipd_input else None,
'command': props.command,
'dimensions': props.dimensions,
'env': props.env,
'env_prefixes': props.env_prefixes,
'extra_args': props.extra_args,
'grace_period': props.grace_period_secs,
'hard_timeout': props.execution_timeout_secs,
'host': utils.get_versioned_hosturl(),
'io_timeout': props.io_timeout_secs,
'secret_bytes': (secret_bytes.secret_bytes.encode('base64')
if secret_bytes else None),
'isolated': {
'input': props.inputs_ref.isolated,
'namespace': props.inputs_ref.namespace,
'server': props.inputs_ref.isolatedserver,
} if props.inputs_ref else None,
'outputs': props.outputs,
'relative_cwd': props.relative_cwd,
'service_accounts': {
'system': {
# 'none', 'bot' or email. Bot interprets 'none' and 'bot' locally.
# When it sees something else, it uses /oauth_token API endpoint to
# grab tokens through server.
'service_account': bot_group_cfg.system_service_account or 'none',
},
'task': {
# Same here.
'service_account': request.service_account,
},
},
'task_id': task_pack.pack_run_result_key(run_result.key),
},
}
self.send_response(utils.to_json_encodable(out))
def _cmd_sleep(self, sleep_streak, quarantined):
duration = task_scheduler.exponential_backoff(sleep_streak)
logging.debug(
'Sleep: streak: %d; duration: %ds; quarantined: %s',
sleep_streak, duration, quarantined)
if X_embedding is None:
if vkey == "velocity_S":
X_embedding = adata.obsm["X_" + basis]
else:
adata = reduceDimension(adata, layer=layer, reduction_method=basis)
X_embedding = adata.obsm[layer + "_" + basis]
if X.shape[0] != X_embedding.shape[0] and X.shape[1] > X_embedding.shape[1]:
raise Exception(f"X and X_embedding doesn't have the same sample dimension or "
f"X doesn't have the higher feature dimension!")
V_mat = V_mat.A if issparse(V_mat) else V_mat
X = X.A if issparse(X) else X
finite_inds = get_finite_inds(V_mat)
X, V_mat = X[:, finite_inds], V_mat[:, finite_inds]
if method == 'kmc' and n_pca_components is None: n_pca_components = 30
if n_pca_components is not None:
X = log1p_(adata, X)
X_plus_V = log1p_(adata, X + V_mat)
if (
"velocity_pca_fit" not in adata.uns_keys()
or type(adata.uns["velocity_pca_fit"]) == str
):
pca = PCA(
n_components=min(n_pca_components, X.shape[1] - 1),
svd_solver="arpack",
random_state=0,
)
pca_fit = pca.fit(X)
X_pca = pca_fit.transform(X)
adata.uns["velocity_pca_fit"] = pca_fit
adata.uns["velocity_PCs"] = pca_fit.components_.T
adata.obsm["X_velocity_pca"] = X_pca
X_pca, PCs, pca_fit = (
adata.obsm["X_velocity_pca"],
adata.uns["velocity_PCs"],
adata.uns["velocity_pca_fit"],
)
Y_pca = pca_fit.transform(X_plus_V)
V_pca = Y_pca - X_pca
# V_pca = (V_mat - V_mat.mean(0)).dot(PCs)
adata.obsm["velocity_pca_raw"] = V_pca
X, V_mat = X_pca[:, :n_pca_components], V_pca[:, :n_pca_components]
if neighbors_from_basis:
if X.shape[0] > 200000 and X.shape[1] > 2:
from pynndescent import NNDescent
nbrs = NNDescent(X, metric='euclidean', n_neighbors=30, n_jobs=-1,
random_state=19490110, **kwargs)
indices, _ = nbrs.query(X, k=30)
else:
alg = "ball_tree" if X.shape[1] > 10 else 'kd_tree'
nbrs = NearestNeighbors(n_neighbors=30, algorithm=alg, n_jobs=-1).fit(X)
_, indices = nbrs.kneighbors(X)
# add both source and sink distribution
if method == "kmc":
if method + '_transition_matrix' in adata.uns_keys() and not enforce:
T = adata.uns[method + '_transition_matrix']
kmc = KernelMarkovChain(P=T)
else:
kmc = KernelMarkovChain()
kmc_args = {
"n_recurse_neighbors": 2,
"M_diff": 2,
"epsilon": None,
"adaptive_local_kernel": True,
"tol": 1e-7,
}
kmc_args = update_dict(kmc_args, kmc_kwargs)
if method + '_transition_matrix' not in adata.uns_keys() or not enforce:
kmc.fit(
X,
V_mat,
neighbor_idx=indices,
sample_fraction=sample_fraction,
**kmc_args
) #
T = kmc.P
if correct_density:
delta_X = kmc.compute_density_corrected_drift(
X_embedding, kmc.Idx, normalize_vector=True, scale=scale
) # indices, k = 500
else:
delta_X = kmc.compute_drift(
X_embedding, num_prop=1, scale=scale
) # indices, k = 500
# P = kmc.compute_stationary_distribution()
# adata.obs['stationary_distribution'] = P
X_grid, V_grid, D = velocity_on_grid(
X_embedding, delta_X, xy_grid_nums=xy_grid_nums
)
if calc_rnd_vel:
kmc = KernelMarkovChain()
permute_rows_nsign(V_mat)
kmc.fit(X, V_mat, **kmc_args) # neighbor_idx=indices,
T_rnd = kmc.P
if correct_density:
delta_X_rnd = kmc.compute_density_corrected_drift(
X_embedding, kmc.Idx, normalize_vector=True
) # indices, k = 500
else:
delta_X_rnd = kmc.compute_drift(X_embedding)
# P_rnd = kmc.compute_stationary_distribution()
# adata.obs['stationary_distribution_rnd'] = P_rnd
X_grid_rnd, V_grid_rnd, D_rnd = velocity_on_grid(
X_embedding, delta_X_rnd, xy_grid_nums=xy_grid_nums
)
adata.uns["kmc"] = kmc
elif method in ["pearson", "cosine"]:
vs_kwargs = {"n_recurse_neighbors": 2,
"max_neighs": None,
"transform": 'sqrt',
"use_neg_vals": True,
}
vs_kwargs = update_dict(vs_kwargs, other_kernels_dict)
if method + '_transition_matrix' in adata.uns_keys() and not enforce:
T = adata.uns[method + '_transition_matrix']
delta_X = projection_with_transition_matrix(X.shape[0], T, X_embedding)
X_grid, V_grid, D = velocity_on_grid(
X_embedding[:, :2], (X_embedding + delta_X)[:, :2], xy_grid_nums=xy_grid_nums
)
else:
T, delta_X, X_grid, V_grid, D = kernels_from_velocyto_scvelo(
X, X_embedding, V_mat, indices, neg_cells_trick, xy_grid_nums, neighbors,
method, **vs_kwargs
)
if calc_rnd_vel:
permute_rows_nsign(V_mat)
T_rnd, delta_X_rnd, X_grid_rnd, V_grid_rnd, D_rnd = kernels_from_velocyto_scvelo(
X, X_embedding, V_mat, indices, neg_cells_trick, xy_grid_nums, neighbors,
method, **vs_kwargs
)
elif method == "transform":
umap_trans, n_pca_components = (
adata.uns["umap_fit"]["fit"],
adata.uns["umap_fit"]["n_pca_components"],
)
if "pca_fit" not in adata.uns_keys() or type(adata.uns["pca_fit"]) == str:
CM = adata.X[:, adata.var.use_for_dynamics.values]
from ..preprocessing.utils import pca
adata, pca_fit, X_pca = pca(adata, CM, n_pca_components, "X")
adata.uns["pca_fit"] = pca_fit
X_pca, pca_fit = adata.obsm["X"], adata.uns["pca_fit"]
V = (
adata[:, adata.var.use_for_dynamics.values].layers[vkey]
if vkey in adata.layers.keys()
else None
)
CM, V = CM.A if issparse(CM) else CM, V.A if issparse(V) else V
V[np.isnan(V)] = 0
Y_pca = pca_fit.transform(CM + V)
Y = umap_trans.transform(Y_pca)
delta_X = Y - X_embedding
X_grid, V_grid, D = velocity_on_grid(
X_embedding, delta_X, xy_grid_nums=xy_grid_nums
)
if preserve_len:
basis_len, high_len = np.linalg.norm(delta_X, axis=1), np.linalg.norm(V_mat, axis=1)
scaler = np.nanmedian(basis_len) / np.nanmedian(high_len)
for i in tqdm(range(adata.n_obs), desc=f"rescaling velocity norm..."):
idx = T[i].indices
high_len_ = high_len[idx]
T_i = T[i].data
delta_X[i] *= T_i.dot(high_len_) / basis_len[i] * scaler
if key is None:
adata.uns[method + "_transition_matrix"] = T
adata.obsm["velocity_" + basis] = delta_X
adata.uns["grid_velocity_" + basis] = {"X_grid": X_grid, "V_grid": V_grid, "D": D}
else:
adata.uns[key + '_' + method + "_transition_matrix"] = T
adata.obsm[key + '_' + basis] = delta_X
adata.uns["grid_" + key + '_' + basis] = {"X_grid": X_grid, "V_grid": V_grid, "D": D}
if calc_rnd_vel:
if key is None:
adata.uns[method + "_transition_matrix_rnd"] = T_rnd
adata.obsm["X_" + basis + "_rnd"] = X_embedding
adata.obsm["velocity_" + basis + "_rnd"] = delta_X_rnd
adata.uns["grid_velocity_" + basis + "_rnd"] = {
"X_grid": X_grid_rnd,
"V_grid": V_grid_rnd,
"D": D_rnd,
}
else:
adata.uns[key + '_' + method + "_transition_matrix_rnd"] = T_rnd
adata.obsm["X_" + key + "_" + basis + "_rnd"] = X_embedding
adata.obsm[key + "_" + basis + "_rnd"] = delta_X_rnd
adata.uns["grid_" + key + '_' + basis + "_rnd"] = {
"X_grid": X_grid_rnd,
"V_grid": V_grid_rnd,
"D": D_rnd,
}
return adata
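# Minimal standalone sketch (numpy only) of the projection idea used above: a
# row-stochastic transition matrix T over cells is turned into an expected
# displacement on the embedding. This mirrors the concept behind
# projection_with_transition_matrix / the density-corrected drift, not the
# exact dynamo implementation.
def _transition_projection_sketch(T, X_embedding):
    """delta_i = sum_j T_ij * (x_j - x_i) = (T @ X)_i - x_i when rows of T sum to 1."""
    import numpy as np
    T = np.asarray(T, dtype=float)
    X_embedding = np.asarray(X_embedding, dtype=float)
    return T @ X_embedding - X_embedding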
def cell_accelerations(adata,
vf_basis='pca',
basis='umap',
enforce=True,
preserve_len=True,
other_kernels_dict={},
**kwargs):
"""Compute transition probability and project high dimension acceleration vector to existing low dimension embedding.
In classical physics, including fluidics and aerodynamics, velocity and acceleration vector fields are used as
fundamental tools to describe motion or external force of objects, respectively. In analogy, RNA velocity or
accelerations estimated from single cells can be regarded as samples in the velocity (La Manno et al. 2018) or
acceleration vector field (Gorin, Svensson, and Pachter 2019). In general, a vector field can be defined as a
vector-valued function f that maps any points (or cells’ expression state) x in a domain Ω with D dimension (or the
gene expression system with D transcripts / proteins) to a vector y (for example, the velocity or acceleration for
different genes or proteins), that is f(x) = y.
In two or three dimensions, a velocity vector field is often visualised as a quiver plot where a collection of arrows
with a given magnitude and direction is drawn. For example, the velocity estimates of unspliced transcriptome of
sampled cells projected into two dimensions is drawn to show the prediction of the future cell states in RNA velocity
(La Manno et al. 2018). During the differentiation process, external signal factors perturb cells and thus change
the vector field. Since we perform genome-wide profiling of cell states and the experiments performed are often done
in a short time scale, we assume a constant vector field without loss of generality (See also Discussion). Assuming
an asymptotic deterministic system, the trajectory of the cells travelling in the gene expression space follows the
vector field and can be calculated using numerical integration methods, for example Runge-Kutta algorithm. In two or
three dimensions, a streamline plot can be used to visualize the paths of cells will follow if released in different
regions of the gene expression state space under a steady flow field. Another, more intuitive, way to visualize the
structure of a vector field is the so-called line integral convolution method or LIC (Cabral and Leedom 1993), which
works by adding random black-and-white paint sources on the vector field and letting the particles flowing on the
vector field pick up some texture, ensuring that the same streamline has similar intensity. Although we have not
provided such functionality in dynamo, with a vector field that changes over time similar methods, for example
streaklines, pathlines, timelines, etc., can be used to visualize the evolution of single cells or cell populations.
Arguments
---------
adata: :class:`~anndata.AnnData`
an Annodata object.
vf_basis: 'int' (optional, default `pca`)
The dictionary key that corresponds to the low dimensional embedding where the vector field function
reconstructed.
basis: 'int' (optional, default `umap`)
The dictionary key that corresponds to the reduced dimension in `.obsm` attribute.
enforce: `bool` (default: `True`)
Whether to enforce 1) redefining use_for_velocity column in obs attribute;
2) recalculation of transition matrix.
preserve_len: `bool` (default: `True`)
Whether to preserve the length of high dimension vector length. When set to be True, the length of low
dimension projected vector will be proportionally scaled to that of the high dimensional vector. Note that
when preserve_len is set to be True, the acceleration field may seem to be
" to method proxy_get_namespaced_node_19" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `proxy_get_namespaced_node_19`")
# verify the required parameter 'path' is set
if ('path' not in params) or (params['path'] is None):
raise ValueError("Missing the required parameter `path` when calling `proxy_get_namespaced_node_19`")
resource_path = '/api/v1/proxy/nodes/{name}/{path}'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'path' in params:
path_params['path'] = params['path']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['*/*'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def proxy_head_namespaced_node_20(self, name, path, **kwargs):
"""
proxy HEAD requests to Node
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.proxy_head_namespaced_node_20(name, path, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the Node (required)
:param str path: path to the resource (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'path']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method proxy_head_namespaced_node_20" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `proxy_head_namespaced_node_20`")
# verify the required parameter 'path' is set
if ('path' not in params) or (params['path'] is None):
raise ValueError("Missing the required parameter `path` when calling `proxy_head_namespaced_node_20`")
resource_path = '/api/v1/proxy/nodes/{name}/{path}'.replace('{format}', 'json')
method = 'HEAD'
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'path' in params:
path_params['path'] = params['path']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['*/*'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def proxy_put_namespaced_node_21(self, name, path, **kwargs):
"""
proxy PUT requests to Node
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.proxy_put_namespaced_node_21(name, path, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the Node (required)
:param str path: path to the resource (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'path']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method proxy_put_namespaced_node_21" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `proxy_put_namespaced_node_21`")
# verify the required parameter 'path' is set
if ('path' not in params) or (params['path'] is None):
raise ValueError("Missing the required parameter `path` when calling `proxy_put_namespaced_node_21`")
resource_path = '/api/v1/proxy/nodes/{name}/{path}'.replace('{format}', 'json')
method = 'PUT'
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'path' in params:
path_params['path'] = params['path']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['*/*'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def proxy_post_namespaced_node_22(self, name, path, **kwargs):
"""
proxy POST requests to Node
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.proxy_post_namespaced_node_22(name, path, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the Node (required)
:param str path: path to the resource (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'path']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method proxy_post_namespaced_node_22" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `proxy_post_namespaced_node_22`")
# verify the required parameter 'path' is set
if ('path' not in params) or (params['path'] is None):
raise ValueError("Missing the required parameter `path` when calling `proxy_post_namespaced_node_22`")
resource_path = '/api/v1/proxy/nodes/{name}/{path}'.replace('{format}', 'json')
method = 'POST'
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'path' in params:
path_params['path'] = params['path']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['*/*'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def proxy_delete_namespaced_node_23(self, name, path, **kwargs):
"""
proxy DELETE requests to Node
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.proxy_delete_namespaced_node_23(name, path, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the Node (required)
:param str path: path to the resource (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'path']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method proxy_delete_namespaced_node_23" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `proxy_delete_namespaced_node_23`")
# verify the required parameter 'path' is set
if ('path' not in params) or (params['path'] is None):
raise ValueError("Missing the required parameter `path` when calling `proxy_delete_namespaced_node_23`")
resource_path = '/api/v1/proxy/nodes/{name}/{path}'.replace('{format}', 'json')
method = 'DELETE'
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'path' in params:
path_params['path'] = params['path']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['*/*'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def proxy_options_namespaced_node_24(self, name, path, **kwargs):
"""
proxy OPTIONS requests to Node
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.proxy_options_namespaced_node_24(name, path, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the Node (required)
:param str path: path to the resource (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
        all_params = ['name', 'path']
import argparse
import glob
import logging
import os
import subprocess
from collections import defaultdict
from datetime import datetime, timedelta
import pandas as pd
from tqdm import tqdm
from nanocompare.global_config import set_log_debug_level, pic_base_dir, logger
run_log_dir = '/projects/li-lab/Nanopore_compare/result/running-logs'
basedir = run_log_dir
tool_names = ['Tombo', 'DeepMod', 'DeepSignal', 'Nanopolish', 'Megalodon']
tool_list_on_sumner = ['Tombo', 'DeepMod', 'DeepSignal', 'Nanopolish']
tool_list_on_winter = ['Guppy', 'Megalodon']
ntarget_dict = {'HL60': 50, 'K562': 50, 'APL': 50, 'NA19240': 50, 'NA12878': 168}
pkldir = '/projects/li-lab/yang/results/share_prj/result/running-logs'
sunmer_pkl = os.path.join(pkldir, 'sumner.task.resource.summary.pkl')
winter_pkl = os.path.join(pkldir, 'winter.task.resource.summary.pkl')
batch_fast5_pkl = os.path.join(pkldir, 'dsname.batch.fast5.summary.pkl')
def get_jobid_and_taskid(fn):
"""
Sample input file name bascal.Guppy.K562.N50.batch49.22818.err
:param fn:
    :return: tuple (taskid, batchid), both as strings parsed from the file name
"""
fn = os.path.basename(fn)
last1_index = fn.rindex('.')
last2_index = fn[:last1_index].rindex('.')
taskid = fn[last2_index + 1: last1_index]
last3_index = fn[:last2_index].rindex('.')
batchstr = fn[last3_index + 1:last2_index].replace('batch', '')
return taskid, batchstr
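# Worked example for the sample file name given in the docstring above:
#
#     >>> get_jobid_and_taskid('bascal.Guppy.K562.N50.batch49.22818.err')
#     ('22818', '49')
#
# i.e. the Slurm task id is the token just before the extension and the batch id is
# the number carried in the 'batchNN' token.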
def winter_task_summary():
dataset = defaultdict(list)
for dsname in ntarget_dict:
logging.info(dsname)
        ## Basecall collection
# HL60-Runs/HL60-N50-basecall/log
basecall_logdir = os.path.join(basedir, f'{dsname}-Runs', f'{dsname}-N{ntarget_dict[dsname]}-basecall', 'log')
pat_fns = os.path.join(basecall_logdir, 'bascal.*.out')
fnlist = glob.glob(pat_fns)
logging.info(f'Basecall collect: {len(fnlist)}')
for fn in tqdm(fnlist):
taskid, batchid = get_jobid_and_taskid(fn)
command = f"""
seff {taskid}
"""
ret = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read().decode("utf-8")
dataset['dsname'].append(dsname)
dataset['batchid'].append(int(batchid))
dataset['type'].append('basecall')
dataset['job.results'].append(ret)
## Tool methcall collection
for tool in tool_list_on_winter:
methcall_logdir = os.path.join(basedir, f'{dsname}-Runs', f'{dsname}-{tool}-N{ntarget_dict[dsname]}',
f'{dsname}-{tool}-N{ntarget_dict[dsname]}-methcall', 'log')
pat_fns = os.path.join(methcall_logdir, '*.mcal.*.batch*.*.out')
fnlist = glob.glob(pat_fns)
logging.info(f'Methcall of {tool} collect: {len(fnlist)}')
for fn in tqdm(fnlist):
taskid, batchid = get_jobid_and_taskid(fn)
# logging.debug(taskid)
command = f"""
seff {taskid}
"""
ret = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read().decode("utf-8")
dataset['dsname'].append(dsname)
dataset['batchid'].append(int(batchid))
dataset['type'].append(tool)
dataset['job.results'].append(ret)
df = pd.DataFrame.from_dict(dataset)
outfn = os.path.join(pic_base_dir, 'winter.task.resource.summary.pkl')
df.to_pickle(outfn)
logging.info(f'save to {outfn}')
outfn = os.path.join(pic_base_dir, 'winter.task.resource.summary.xlsx')
df.to_excel(outfn)
def winter_task_summary_na19240():
dsname = 'NA19240'
dataset = defaultdict(list)
logging.info(dsname)
    ## Basecall collection
# HL60-Runs/HL60-N50-basecall/log
basecall_logdir = os.path.join(basedir, f'{dsname}-Runs', f'{dsname}-N{ntarget_dict[dsname]}-basecall', 'log')
pat_fns = os.path.join(basecall_logdir, 'bascal.*.out')
fnlist = glob.glob(pat_fns)
logging.info(f'Basecall collect: {len(fnlist)}')
for fn in tqdm(fnlist):
taskid, batchid = get_jobid_and_taskid(fn)
command = f"""
seff {taskid}
"""
ret = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read().decode("utf-8")
rt_secs, mem_gb, jobid, array_jobid = str_extract_time_mem(ret)
        if not rt_secs:  # Failed tasks are not considered
continue
dataset['dsname'].append(dsname)
dataset['tool'].append('basecall')
dataset['batchid'].append(int(batchid))
dataset['running.time.seconds'].append(rt_secs)
dataset['mem.usage.gb'].append(mem_gb)
dataset['jobid'].append(jobid)
dataset['array.jobid'].append(array_jobid)
dataset['job.results'].append(ret)
## Tool methcall collection
for tool in tool_list_on_winter:
methcall_logdir = os.path.join(basedir, f'{dsname}-Runs', f'{dsname}-{tool}-N{ntarget_dict[dsname]}',
f'{dsname}-{tool}-N{ntarget_dict[dsname]}-methcall', 'log')
pat_fns = os.path.join(methcall_logdir, '*.mcal.*.batch*.*.out')
fnlist = glob.glob(pat_fns)
logging.info(f'Methcall of {tool} collect: {len(fnlist)}')
for fn in tqdm(fnlist):
taskid, batchid = get_jobid_and_taskid(fn)
# logging.debug(taskid)
command = f"""
seff {taskid}
"""
ret = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read().decode("utf-8")
rt_secs, mem_gb, jobid, array_jobid = str_extract_time_mem(ret)
            if not rt_secs:  # Failed tasks are not considered
continue
dataset['dsname'].append(dsname)
dataset['tool'].append(tool)
dataset['batchid'].append(int(batchid))
dataset['running.time.seconds'].append(rt_secs)
dataset['mem.usage.gb'].append(mem_gb)
dataset['jobid'].append(jobid)
dataset['array.jobid'].append(array_jobid)
dataset['job.results'].append(ret)
df = pd.DataFrame.from_dict(dataset)
outfn = os.path.join(pic_base_dir, 'na19240.winter.task.resource.summary.pkl')
df.to_pickle(outfn)
logging.info(f'save to {outfn}')
outfn = os.path.join(pic_base_dir, 'na19240.winter.task.resource.summary.xlsx')
df.to_excel(outfn)
def winter_megalodon_task_summary():
dataset = defaultdict(list)
for dsname in ntarget_dict:
logging.info(dsname)
methdir = os.path.join(basedir, f'{dsname}-Runs', f'{dsname}-Megalodon-N{ntarget_dict[dsname]}',
f'{dsname}-Megalodon-N{ntarget_dict[dsname]}-methcall', 'log')
pat_fns = os.path.join(methdir, '*.mcal.*.out')
fnlist = glob.glob(pat_fns)
logging.info(f'Megalodon of {dsname} collect: {len(fnlist)}')
for fn in tqdm(fnlist):
taskid, batchid = get_jobid_and_taskid(fn)
command = f"""
seff {taskid}
"""
jobret = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read().decode("utf-8")
dataset['dsname'].append(dsname)
dataset['batchid'].append(int(batchid))
dataset['tool'].append('Megalodon')
dataset['job.results'].append(jobret)
runtime_seconds, mem_gb, jobid, array_jobid = str_extract_time_mem(jobret)
dataset['running.time.seconds'].append(runtime_seconds)
dataset['mem.usage.gb'].append(mem_gb)
dataset['jobid'].append(jobid)
dataset['array.jobid'].append(array_jobid)
df1 = pd.DataFrame.from_dict(dataset)
df2 = pd.read_pickle(batch_fast5_pkl)
df = df1.merge(df2, on=['dsname', 'batchid'], how='left')
dfout = df[
['dsname', 'tool', 'batchid', 'jobid', 'array.jobid', 'fast5', 'running.time.seconds', 'mem.usage.gb',
'job.results']]
outfn = os.path.join(pic_base_dir, 'recalculate.running.summary.Megalodon.xlsx')
dfout.to_excel(outfn)
dfout = df[
['dsname', 'tool', 'batchid', 'jobid', 'array.jobid', 'fast5', 'running.time.seconds', 'mem.usage.gb']]
outfn = os.path.join(pic_base_dir, 'recalculate.running.summary.Megalodon.csv')
dfout.to_csv(outfn)
def sunmer_task_summary():
dataset = defaultdict(list)
for dsname in ntarget_dict:
logging.info(dsname)
## Resquiggle collection
        # HL60-Runs/HL60-N50-resquiggle/log
basecall_logdir = os.path.join(basedir, f'{dsname}-Runs', f'{dsname}-N{ntarget_dict[dsname]}-resquiggle', 'log')
pat_fns = os.path.join(basecall_logdir, 'rsquigl.*.out')
fnlist = glob.glob(pat_fns)
logging.info(f'Resquiggle collect: {len(fnlist)}')
for fn in tqdm(fnlist):
taskid, batchid = get_jobid_and_taskid(fn)
command = f"""
seff {taskid}
"""
ret = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read().decode("utf-8")
dataset['dsname'].append(dsname)
dataset['batchid'].append(int(batchid))
dataset['type'].append('resquiggle')
dataset['job.results'].append(ret)
## Tool methcall collection
for tool in tool_list_on_sumner:
# HL60-Runs/HL60-Nanopolish-N50/HL60-Nanopolish-N50-methcall/log
methcall_logdir = os.path.join(basedir, f'{dsname}-Runs', f'{dsname}-{tool}-N{ntarget_dict[dsname]}',
f'{dsname}-{tool}-N{ntarget_dict[dsname]}-methcall', 'log')
# logging.debug(meth_logdir)
pat_fns = os.path.join(methcall_logdir, '*.mcal.*.batch*.*.out')
fnlist = glob.glob(pat_fns)
logging.info(f'Methcall of {tool} collect: {len(fnlist)}')
for fn in tqdm(fnlist):
taskid, batchid = get_jobid_and_taskid(fn)
# logging.debug(taskid)
command = f"""
seff {taskid}
"""
ret = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read().decode("utf-8")
dataset['dsname'].append(dsname)
dataset['batchid'].append(int(batchid))
dataset['type'].append(tool)
dataset['job.results'].append(ret)
df = pd.DataFrame.from_dict(dataset)
# logging.info(df)
outfn = os.path.join(pic_base_dir, 'sumner.task.resource.summary.pkl')
df.to_pickle(outfn)
logging.info(f'save to {outfn}')
outfn = os.path.join(pic_base_dir, 'sumner.task.resource.summary.xlsx')
df.to_excel(outfn)
def sunmer_task_summary_na19240():
dsname = 'NA19240'
dataset = defaultdict(list)
resquiggle_dir = os.path.join(basedir, f'{dsname}-Runs', f'{dsname}-N{ntarget_dict[dsname]}-resquiggle', 'log')
pat_fns = os.path.join(resquiggle_dir, 'rsquigl.*.out')
logger.debug(f'pat_fns={pat_fns}')
fnlist = glob.glob(pat_fns)
logging.info(f'Resquiggle collect: {len(fnlist)}')
for fn in tqdm(fnlist):
taskid, batchid = get_jobid_and_taskid(fn)
command = f"""
seff {taskid}
"""
ret = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read().decode("utf-8")
rt_secs, mem_gb, jobid, array_jobid = str_extract_time_mem(ret)
        if not rt_secs:  # Failed tasks are not considered
continue
dataset['dsname'].append(dsname)
dataset['tool'].append('resquiggle')
dataset['batchid'].append(int(batchid))
dataset['running.time.seconds'].append(rt_secs)
dataset['mem.usage.gb'].append(mem_gb)
dataset['jobid'].append(jobid)
dataset['array.jobid'].append(array_jobid)
dataset['job.results'].append(ret)
## Tool methcall collection
for tool in tool_list_on_sumner:
# HL60-Runs/HL60-Nanopolish-N50/HL60-Nanopolish-N50-methcall/log
methcall_logdir = os.path.join(basedir, f'{dsname}-Runs', f'{dsname}-{tool}-N{ntarget_dict[dsname]}',
f'{dsname}-{tool}-N{ntarget_dict[dsname]}-methcall', 'log')
# logging.debug(meth_logdir)
pat_fns = os.path.join(methcall_logdir, '*.mcal.*.batch*.*.out')
fnlist = glob.glob(pat_fns)
logging.info(f'Methcall of {tool} collect: {len(fnlist)}')
for fn in tqdm(fnlist):
taskid, batchid = get_jobid_and_taskid(fn)
# logging.debug(taskid)
command = f"""
seff {taskid}
"""
ret = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read().decode("utf-8")
rt_secs, mem_gb, jobid, array_jobid = str_extract_time_mem(ret)
            if not rt_secs:  # Failed tasks are not considered
continue
dataset['dsname'].append(dsname)
dataset['tool'].append(tool)
dataset['batchid'].append(int(batchid))
dataset['running.time.seconds'].append(rt_secs)
dataset['mem.usage.gb'].append(mem_gb)
dataset['jobid'].append(jobid)
dataset['array.jobid'].append(array_jobid)
dataset['job.results'].append(ret)
df = pd.DataFrame.from_dict(dataset)
# logging.info(df)
outfn = os.path.join(pic_base_dir, 'na19240.sumner.task.resource.summary.pkl')
df.to_pickle(outfn)
logging.info(f'save to {outfn}')
outfn = os.path.join(pic_base_dir, 'na19240.sumner.task.resource.summary.xlsx')
df.to_excel(outfn)
def dataset_batch_summary():
dataset = defaultdict(list)
for dsname in ntarget_dict:
logging.info(dsname)
for batchid in range(1, ntarget_dict[dsname] + 1):
septdir = os.path.join(basedir, f'{dsname}-Runs', f'{dsname}-N{ntarget_dict[dsname]}-sept', f'{batchid}')
command = f"""
ls {septdir}/*.fast5 | wc -l
"""
ret = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read().decode("utf-8")
logging.debug(f'batchid={batchid}, ret={ret}')
dataset['dsname'].append(dsname)
dataset['batchid'].append(batchid)
dataset['fast5'].append(int(ret.strip()))
df = pd.DataFrame.from_dict(dataset)
outfn = os.path.join(pic_base_dir, 'dsname.batch.fast5.summary.pkl')
df.to_pickle(outfn)
logging.info(f'save to {outfn}')
outfn = os.path.join(pic_base_dir, 'dsname.batch.fast5.summary.xlsx')
df.to_excel(outfn)
def parse_time(time_string):
    """
    Parse a Slurm elapsed-time string such as '00:16:13' or '1-02:00:00' (D-HH:MM:SS)
    into a timedelta, e.g. parse_time('1-02:00:00') -> timedelta(days=1, hours=2)
    :param time_string:
    :return: timedelta duration
    """
    if '-' in time_string:
        day_part, hms_part = time_string.split('-', 1)
        days = int(day_part)
    else:
        days, hms_part = 0, time_string
    hours, minutes, seconds = [int(part) for part in hms_part.split(':')]
    return timedelta(days=days, hours=hours, minutes=minutes, seconds=seconds)
def str_extract_time_mem(jobret):
"""
Example:
Job ID: 23443
Array Job ID: 23398_23
Cluster: winter
User/Group: liuya/jaxuser
State: COMPLETED (exit code 0)
Nodes: 1
Cores per node: 16
CPU Utilized: 00:24:01
CPU Efficiency: 9.26% of 04:19:28 core-walltime
Job Wall-clock time: 00:16:13
Memory Utilized: 2.52 GB
Memory Efficiency: 0.16% of 1.56 TB
:param jobret:
    :return: tuple (cpu_seconds, wall_clock_seconds, mem_gb, jobid) parsed from the seff output
"""
cpu_use_time = None
wall_clock_time = None
mem_use = None
jobid = None
array_jobid = None
for line in jobret.splitlines():
# if line.strip().startswith('State: F') or line.strip().startswith('State: CANCELLED'):
# return None, None, None, None
if line.strip().startswith('Job ID:'):
jobid = line.strip().replace('Job ID:', '').strip()
if line.strip().startswith('Array Job ID:'):
array_jobid = line.strip().replace('Array Job ID:', '').strip()
if line.strip().startswith('CPU Utilized:'):
cpu_use_time = line.strip().replace('CPU Utilized:', '').strip()
elif line.strip().startswith('Memory Utilized:'):
mem_use = line.strip().replace('Memory Utilized:', '').strip()
elif line.strip().startswith('Job Wall-clock time:'):
wall_clock_time = line.strip().replace('Job Wall-clock time:', '').strip()
cpu_use_time = parse_time(cpu_use_time)
wall_clock_time = parse_time(wall_clock_time)
# 71.49 MB, we report GB
if mem_use.endswith('MB'):
mem_gb = float(mem_use.replace('MB', '').strip()) / 1000
elif mem_use.endswith('GB'):
mem_gb = float(mem_use.replace('GB', '').strip())
else:
raise Exception(f'Unrecognized mem={mem_use} from jobret={jobret}')
return cpu_use_time.total_seconds(), wall_clock_time.total_seconds(), mem_gb, jobid
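# Worked example: for the sample `seff` output shown in the docstring above this
# returns
#
#     (1441.0, 973.0, 2.52, '23443')
#
# i.e. 00:24:01 of CPU time, 00:16:13 of wall-clock time, 2.52 GB of memory and the
# job id; the parsed Array Job ID is currently not part of the returned tuple.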
def running_resouce_extraction(row):
"""
:param row:
:return:
"""
jobret = row['job.results']
cpu_time, wall_clock_time, mem_gb, jobid = str_extract_time_mem(jobret)
return pd.Series([cpu_time / 60 / 60, wall_clock_time / 60 / 60, mem_gb, jobid],
index=['cpu.time', 'wall.clock.time', 'mem.usage', 'jobid'])
def unify_data_df():
df1 = pd.read_pickle(winter_pkl)
df2 = pd.read_pickle(sunmer_pkl)
df3 = pd.read_pickle(batch_fast5_pkl)
df = pd.concat([df1, df2])
df = df.merge(df3, on=['dsname', 'batchid'], how='left')
df[['running.time', 'mem.usage', 'running.time.seconds', 'mem.usage.gb']] = df.apply(running_resouce_extraction,
axis=1)
logger.info(df)
logger.info(list(df.columns))
run_report_columns = ['dsname', 'batchid', 'type', 'fast5', 'running.time', 'mem.usage', 'running.time.seconds',
'mem.usage.gb', 'job.results']
outdf = df[run_report_columns]
outfn = os.path.join(pic_base_dir, 'running.summary.table.xlsx')
outdf.to_excel(outfn)
logger.info(f'save to {outfn}')
outdf = df[run_report_columns[:-1]]
outfn = os.path.join(pic_base_dir, 'running.summary.table.csv')
outdf.to_csv(outfn)
logger.info(f'save to {outfn}')
def recalculate(fnlist=['na19240.sumner.task.resource.summary.pkl', 'na19240.winter.task.resource.summary.pkl']):
dflist = []
for fn in fnlist:
dflist.append(pd.read_pickle(os.path.join(pkldir, fn)))
df = pd.concat(dflist)
df3 = pd.read_pickle(batch_fast5_pkl)
df = df.merge(df3, on=['dsname', 'batchid'], how='left')
logger.debug(df)
dataset = defaultdict(list)
for index, row in df.iterrows():
if row['tool'] not in tool_list_on_sumner + tool_list_on_winter:
continue
dsname = row['dsname']
batchid = row['batchid']
runt = row['running.time.seconds']
memg = row['mem.usage.gb']
basecall_row = df[(df['dsname'] == dsname) & (df['batchid'] == batchid) & (df['tool'] == 'basecall')].iloc[0, :]
resquiggle_row = df[(df['dsname'] == dsname) & (df['batchid'] == batchid) & (df['tool'] == 'resquiggle')].iloc[
0, :]
if row['tool'] in ['DeepSignal', 'Tombo']:
runt += basecall_row['running.time.seconds'] + resquiggle_row['running.time.seconds']
memg += basecall_row['mem.usage.gb'] + resquiggle_row['mem.usage.gb']
elif row['tool'] in ['Nanopolish', 'DeepMod']:
runt += basecall_row['running.time.seconds']
memg += basecall_row['mem.usage.gb']
dataset['dsname'].append(dsname)
dataset['tool'].append(row['tool'])
dataset['batchid'].append(row['batchid'])
dataset['fast5'].append(row['fast5'])
dataset['running.time.seconds'].append(runt)
dataset['mem.usage.gb'].append(memg)
outdf = pd.DataFrame.from_dict(dataset)
logger.info(outdf)
outfn = os.path.join(pic_base_dir, 'recalculate.running.summary.na19240.csv')
outdf.to_csv(outfn)
def parse_arguments():
"""
:return:
"""
parser = argparse.ArgumentParser(description='Resource-summary')
parser.add_argument('--cpu-task', action='store_true')
parser.add_argument('--gpu-task', action='store_true')
parser.add_argument('--dataset-batch', action='store_true')
parser.add_argument('--unify', action='store_true')
parser.add_argument('--recalculate', action='store_true')
parser.add_argument('--megalodon', action='store_true')
parser.add_argument('--na19240-winter', action='store_true')
parser.add_argument('--na19240-sumner', action='store_true')
parser.add_argument('--collect-data', action='store_true')
return parser.parse_args()
def collect_log_data():
fnlist = glob.glob(os.path.join(basedir, '*.summary*xlsx'))
logger.info(fnlist)
dflist = []
for fn in fnlist:
df = pd.read_excel(fn)
        df =
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation as ga_operation
from google.cloud.aiplatform_v1beta1.services.dataset_service import pagers
from google.cloud.aiplatform_v1beta1.types import annotation
from google.cloud.aiplatform_v1beta1.types import annotation_spec
from google.cloud.aiplatform_v1beta1.types import data_item
from google.cloud.aiplatform_v1beta1.types import dataset
from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset
from google.cloud.aiplatform_v1beta1.types import dataset_service
from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
from google.protobuf import empty_pb2 as empty # type: ignore
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
from google.protobuf import struct_pb2 as struct # type: ignore
from google.protobuf import timestamp_pb2 as timestamp # type: ignore
from .transports.base import DatasetServiceTransport
from .transports.grpc import DatasetServiceGrpcTransport
class DatasetServiceClientMeta(type):
"""Metaclass for the DatasetService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[DatasetServiceTransport]]
_transport_registry["grpc"] = DatasetServiceGrpcTransport
def get_transport_class(cls, label: str = None,) -> Type[DatasetServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class DatasetServiceClient(metaclass=DatasetServiceClientMeta):
""""""
DEFAULT_OPTIONS = ClientOptions.ClientOptions(
api_endpoint="aiplatform.googleapis.com"
)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
            DatasetServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@staticmethod
def dataset_path(project: str, location: str, dataset: str,) -> str:
"""Return a fully-qualified dataset string."""
return "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
)
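    # Illustrative example (hypothetical project, location and dataset id values):
    #
    #     >>> DatasetServiceClient.dataset_path("my-project", "us-central1", "1234")
    #     'projects/my-project/locations/us-central1/datasets/1234'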
def __init__(
self,
*,
credentials: credentials.Credentials = None,
transport: Union[str, DatasetServiceTransport] = None,
client_options: ClientOptions.ClientOptions = DEFAULT_OPTIONS,
) -> None:
"""Instantiate the dataset service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.DatasetServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client.
"""
if isinstance(client_options, dict):
client_options = ClientOptions.from_dict(client_options)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, DatasetServiceTransport):
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
host=client_options.api_endpoint or "aiplatform.googleapis.com",
)
def create_dataset(
self,
request: dataset_service.CreateDatasetRequest = None,
*,
parent: str = None,
dataset: gca_dataset.Dataset = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ga_operation.Operation:
r"""Creates a Dataset.
Args:
request (:class:`~.dataset_service.CreateDatasetRequest`):
The request object. Request message for
``DatasetService.CreateDataset``.
parent (:class:`str`):
Required. The resource name of the Location to create
the Dataset in. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
dataset (:class:`~.gca_dataset.Dataset`):
Required. The Dataset to create.
This corresponds to the ``dataset`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.ga_operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`~.gca_dataset.Dataset`: A collection of
DataItems and Annotations on them.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([parent, dataset]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = dataset_service.CreateDatasetRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if dataset is not None:
request.dataset = dataset
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.create_dataset,
default_timeout=None,
client_info=_client_info,
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = ga_operation.from_gapic(
response,
self._transport.operations_client,
gca_dataset.Dataset,
metadata_type=dataset_service.CreateDatasetOperationMetadata,
)
# Done; return the response.
return response
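    # Usage sketch (hypothetical values; assumes application-default credentials and an
    # existing Google Cloud project):
    #
    #     client = DatasetServiceClient()
    #     operation = client.create_dataset(
    #         parent="projects/my-project/locations/us-central1",
    #         dataset=gca_dataset.Dataset(
    #             display_name="my-dataset",
    #             metadata_schema_uri="gs://google-cloud-aiplatform/schema/dataset/metadata/image_1.0.0.yaml",
    #         ),
    #     )
    #     created = operation.result()  # blocks until the long-running operation finishes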
def get_dataset(
self,
request: dataset_service.GetDatasetRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dataset.Dataset:
r"""Gets a Dataset.
Args:
request (:class:`~.dataset_service.GetDatasetRequest`):
The request object. Request message for
``DatasetService.GetDataset``.
name (:class:`str`):
Required. The name of the Dataset
resource.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.dataset.Dataset:
A collection of DataItems and
Annotations on them.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = dataset_service.GetDatasetRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.get_dataset, default_timeout=None, client_info=_client_info,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def update_dataset(
self,
request: dataset_service.UpdateDatasetRequest = None,
*,
dataset: gca_dataset.Dataset = None,
update_mask: field_mask.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_dataset.Dataset:
r"""Updates a Dataset.
Args:
request (:class:`~.dataset_service.UpdateDatasetRequest`):
The request object. Request message for
``DatasetService.UpdateDataset``.
dataset (:class:`~.gca_dataset.Dataset`):
Required. The Dataset which replaces
the resource on the server.
This corresponds to the ``dataset`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`~.field_mask.FieldMask`):
Required. The update mask applies to the resource. For
the ``FieldMask`` definition, see
[FieldMask](https://tinyurl.com/dev-google-protobuf#google.protobuf.FieldMask).
Updatable fields:
- ``display_name``
- ``description``
- ``labels``
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
# -*- coding: utf-8 -*-
"""Simple functions and lookup tables for nucleic acid and amino acid sequences.
"""
import logging
import re
from string import ascii_lowercase
_logger = logging.getLogger(__name__)
aa3_to_aa1_lut = {
"Ala": "A",
"Arg": "R",
"Asn": "N",
"Asp": "D",
"Cys": "C",
"Gln": "Q",
"Glu": "E",
"Gly": "G",
"His": "H",
"Ile": "I",
"Leu": "L",
"Lys": "K",
"Met": "M",
"Phe": "F",
"Pro": "P",
"Ser": "S",
"Thr": "T",
"Trp": "W",
"Tyr": "Y",
"Val": "V",
"Xaa": "X",
"Ter": "*",
"Sec": "",
}
aa1_to_aa3_lut = {v: k for k, v in aa3_to_aa1_lut.items()}
dna_to_aa1_lut = { # NCBI standard translation table
'AAA': 'K',
'AAC': 'N',
'AAG': 'K',
'AAT': 'N',
'ACA': 'T',
'ACC': 'T',
'ACG': 'T',
'ACT': 'T',
'AGA': 'R',
'AGC': 'S',
'AGG': 'R',
'AGT': 'S',
'ATA': 'I',
'ATC': 'I',
'ATG': 'M',
'ATT': 'I',
'CAA': 'Q',
'CAC': 'H',
'CAG': 'Q',
'CAT': 'H',
'CCA': 'P',
'CCC': 'P',
'CCG': 'P',
'CCT': 'P',
'CGA': 'R',
'CGC': 'R',
'CGG': 'R',
'CGT': 'R',
'CTA': 'L',
'CTC': 'L',
'CTG': 'L',
'CTT': 'L',
'GAA': 'E',
'GAC': 'D',
'GAG': 'E',
'GAT': 'D',
'GCA': 'A',
'GCC': 'A',
'GCG': 'A',
'GCT': 'A',
'GGA': 'G',
'GGC': 'G',
'GGG': 'G',
'GGT': 'G',
'GTA': 'V',
'GTC': 'V',
'GTG': 'V',
'GTT': 'V',
'TAA': '*',
'TAC': 'Y',
'TAG': '*',
'TAT': 'Y',
'TCA': 'S',
'TCC': 'S',
'TCG': 'S',
'TCT': 'S',
'TGA': '*',
'TGC': 'C',
'TGG': 'W',
'TGT': 'C',
'TTA': 'L',
'TTC': 'F',
'TTG': 'L',
'TTT': 'F',
}
complement_transtable = bytes.maketrans(b"ACGT", b"TGCA")
def aa_to_aa1(seq):
"""Coerces string of 1- or 3-letter amino acids to 1-letter representation.
Args:
seq (str): An amino acid sequence.
Returns:
str: The sequence as one of 1-letter amino acids.
Examples:
>>> aa_to_aa1("CATSARELAME")
'CATSARELAME'
>>> aa_to_aa1("CysAlaThrSerAlaArgGluLeuAlaMetGlu")
'CATSARELAME'
>>> aa_to_aa1(None)
"""
if seq is None:
return None
return aa3_to_aa1(seq) if looks_like_aa3_p(seq) else seq
def aa_to_aa3(seq):
"""Coerces string of 1- or 3-letter amino acids to 3-letter representation.
Args:
seq (str): An amino acid sequence.
Returns:
str: The sequence as one of 3-letter amino acids.
Examples:
>>> aa_to_aa3("CATSARELAME")
'CysAlaThrSerAlaArgGluLeuAlaMetGlu'
>>> aa_to_aa3("CysAlaThrSerAlaArgGluLeuAlaMetGlu")
'CysAlaThrSerAlaArgGluLeuAlaMetGlu'
>>> aa_to_aa3(None)
"""
if seq is None:
return None
return aa1_to_aa3(seq) if not looks_like_aa3_p(seq) else seq
def aa1_to_aa3(seq):
"""Converts string of 1-letter amino acids to 3-letter amino acids.
Should only be used if the format of the sequence is known; otherwise use ``aa_to_aa3()``.
Args:
seq (str): An amino acid sequence as 1-letter amino acids.
Returns:
str: The sequence as 3-letter amino acids.
Raises:
KeyError: If the sequence is not of 1-letter amino acids.
Examples:
>>> aa1_to_aa3("CATSARELAME")
'CysAlaThrSerAlaArgGluLeuAlaMetGlu'
>>> aa1_to_aa3(None)
"""
if seq is None:
return None
return "".join(aa1_to_aa3_lut[aa1] for aa1 in seq)
def aa3_to_aa1(seq):
"""Converts string of 3-letter amino acids to 1-letter amino acids.
Should only be used if the format of the sequence is known; otherwise use ``aa_to_aa1()``.
Args:
seq (str): An amino acid sequence as 3-letter amino acids.
Returns:
str: The sequence as 1-letter amino acids.
Raises:
KeyError: If the sequence is not of 3-letter amino acids.
Examples:
>>> aa3_to_aa1("CysAlaThrSerAlaArgGluLeuAlaMetGlu")
'CATSARELAME'
>>> aa3_to_aa1(None)
"""
if seq is None:
return None
return "".join(aa3_to_aa1_lut[aa3]
for aa3 in [seq[i:i + 3] for i in range(0, len(seq), 3)])
def complement(seq):
"""Retrieves the complement of a sequence.
Args:
seq (str): A nucleotide sequence.
Returns:
str: The complement of the sequence.
Examples:
>>> complement("ATCG")
'TAGC'
>>> complement(None)
"""
if seq is None:
return None
return seq.translate(complement_transtable)
def elide_sequence(s, flank=5, elision="..."):
"""Trims the middle of the sequence, leaving the right and left flanks.
Args:
s (str): A sequence.
flank (int, optional): The length of each flank. Defaults to five.
elision (str, optional): The symbol used to represent the part trimmed. Defaults to '...'.
Returns:
str: The sequence with the middle replaced by ``elision``.
Examples:
>>> elide_sequence("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
'ABCDE...VWXYZ'
>>> elide_sequence("ABCDEFGHIJKLMNOPQRSTUVWXYZ", flank=3)
'ABC...XYZ'
>>> elide_sequence("ABCDEFGHIJKLMNOPQRSTUVWXYZ", elision="..")
'ABCDE..VWXYZ'
>>> elide_sequence("ABCDEFGHIJKLMNOPQRSTUVWXYZ", flank=12)
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
>>> elide_sequence("ABCDEFGHIJKLMNOPQRSTUVWXYZ", flank=12, elision=".")
'ABCDEFGHIJKL.OPQRSTUVWXYZ'
"""
elided_sequence_len = flank + flank + len(elision)
if len(s) <= elided_sequence_len:
return s
return s[:flank] + elision + s[-flank:]
def looks_like_aa3_p(seq):
"""Indicates whether a string looks like a 3-letter AA string.
Args:
seq (str): A sequence.
Returns:
bool: Whether the string is of the format of a 3-letter AA string.
"""
return (seq is not None and (len(seq) % 3 == 0) and
(len(seq) == 0 or seq[1] in ascii_lowercase))
def normalize_sequence(seq):
"""Converts sequence to normalized representation for hashing.
Essentially, removes whitespace and asterisks, and uppercases the string.
Args:
seq (str): The sequence to be normalized.
Returns:
str: The sequence as a string of uppercase letters.
Raises:
RuntimeError: If the sequence contains non-alphabetic characters (besides '*').
Examples:
>>> normalize_sequence("ACGT")
'ACGT'
>>> normalize_sequence(" A C G T * ")
'ACGT'
>>> normalize_sequence("ACGT1")
Traceback (most recent call last):
...
RuntimeError: Normalized sequence contains non-alphabetic characters
"""
nseq = re.sub(r"[\s\*]", "", seq).upper()
m = re.search("[^A-Z]", nseq)
if m:
_logger.debug("Original sequence: " + seq)
_logger.debug("Normalized sequence: " + nseq)
_logger.debug("First non-[A-Z] at {}".format(m.start()))
raise RuntimeError("Normalized sequence contains non-alphabetic characters")
return nseq
def reverse_complement(seq):
"""Converts a sequence to its reverse complement.
Args:
seq (str): A nucleotide sequence.
Returns:
str: The reverse complement of the sequence.
Examples:
>>> reverse_complement("ATCG")
'CGAT'
>>> reverse_complement(None)
"""
if seq is None:
return None
return "".join(reversed(complement(seq)))
def replace_t_to_u(seq):
"""Replaces the T's in a sequence with U's.
Args:
seq (str): A nucleotide sequence.
Returns:
str: The sequence with the T's replaced by U's.
Examples:
>>> replace_t_to_u("ACGT")
'ACGU'
>>> replace_t_to_u(None)
"""
if seq is None:
return None
return seq.replace("T", "U").replace("t", "u")
def replace_u_to_t(seq):
"""Replaces the U's in a sequence with T's.
Args:
seq (str): A nucleotide sequence.
Returns:
str: The sequence with the U's replaced by T's.
Examples:
>>> replace_u_to_t("ACGU")
'ACGT'
>>> replace_u_to_t(None)
"""
if seq is None:
return None
return seq.replace("U", "T").replace("u", "t")
def translate_cds(seq, full_codons=True, ter_symbol="*"):
"""Translates a DNA or RNA sequence into a single-letter amino acid sequence.
Uses the NCBI standard translation table.
Args:
seq (str): A nucleotide sequence.
full_codons (bool, optional): If ``True``, forces sequence to have length
that is a multiple of 3 and raises an error otherwise.
If False, ``ter_symbol`` will be added as the last amino acid.
This corresponds to biopython's behavior of padding the last codon with ``N``s.
Defaults to ``True``.
ter_symbol (str, optional): Placeholder for the last amino acid if
sequence length is not divisible by three and ``full_codons`` is False.
Defaults to ``'*'``
Returns:
str: The corresponding single letter amino acid sequence.
Raises:
ValueError: If ``full_codons`` and the sequence is not a multiple of three.
ValueError: If a codon is undefined in the table.
Examples:
>>> translate_cds("ATGCGA")
'MR'
>>> translate_cds("AUGCGA")
'MR'
>>> translate_cds(None)
>>> translate_cds("")
''
>>> translate_cds("AUGCG")
Traceback (most recent call last):
...
ValueError: Sequence length must be a multiple of three
>>> translate_cds("AUGCG", full_codons=False)
'M*'
>>> translate_cds("ATGTAN")
'MX'
>>> translate_cds("CCN")
'X'
>>> translate_cds("TRA")
'X'
>>> translate_cds("TRATA", full_codons=False)
'X*'
>>> translate_cds("AUGCGQ")
Traceback (most recent call last):
...
ValueError: Codon CGQ at position 4..6 is undefined in codon table
"""
if seq is None:
return None
if len(seq) == 0:
return ""
if full_codons and len(seq) % 3 != 0:
raise ValueError("Sequence length must be a multiple of three")
seq = replace_u_to_t(seq)
seq = seq.upper()
protein_seq = list()
for i in range(0, len(seq) - len(seq) % 3, 3):
try:
codon = seq[i:i + 3]
iupac_ambiguity_codes = "BDHVNUWSMKRYZ"
if any([iupac_ambiguity_code in codon for iupac_ambiguity_code in iupac_ambiguity_codes]):
aa = "X"
else:
aa = dna_to_aa1_lut[codon]
except KeyError:
raise ValueError("Codon {} at position {}..{} is undefined in codon table".format(
seq[i:i + 3], i+1, i+3))
protein_seq.append(aa)
# check for trailing bases and add the ter symbol if required
if not full_codons and len(seq) % 3 != 0:
protein_seq.append(ter_symbol)
return ''.join(protein_seq)
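# Putting the helpers together (illustrative):
#
#     >>> translate_cds(reverse_complement("TCACATGGT"))
#     'TM*'
#
# i.e. the reverse complement "ACCATGTGA" is translated codon by codon
# (ACC -> T, ATG -> M, TGA -> *).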
## <LICENSE>
## Copyright 2014 Bioutils Contributors
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
##
# mcos/optimizer.py
from __future__ import division
from abc import ABC, abstractmethod
from typing import Dict, List
import numpy as np
import pandas as pd
import scipy.cluster.hierarchy as sch
from numpy.linalg import inv, pinv
# inv computes the (multiplicative) inverse of a matrix; pinv its pseudo-inverse.
from pypfopt.efficient_frontier import EfficientFrontier
from scipy.optimize import minimize
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples
from mcos.covariance_transformer import cov_to_corr
class AbstractOptimizer(ABC):
"""Helper class that provides a standard way to create a new Optimizer using inheritance"""
@abstractmethod
def allocate(self, mu: np.array, cov: np.array) -> np.array:
"""
Create an optimal portfolio allocation given the expected returns vector and covariance matrix. See section 4.3
of the "A Robust Estimator of the Efficient Frontier" paper.
@param mu: Expected return vector
@param cov: Expected covariance matrix
@return Vector of weights
"""
pass
@property
@abstractmethod
def name(self) -> str:
"""
Name of this optimizer. The name will be displayed in the MCOS results DataFrame.
"""
pass
class MarkowitzOptimizer(AbstractOptimizer):
"""Optimizer based on the Modern Portfolio Theory pioneered by <NAME>owitz's paper 'Portfolio Selection'"""
def allocate(self, mu: np.array, cov: np.array) -> np.array:
ef = EfficientFrontier(mu, cov)
ef.max_sharpe()
weights = ef.clean_weights()
return np.array(list(weights.values()))
@property
def name(self) -> str:
return 'markowitz'
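# Usage sketch (hypothetical expected returns and covariance matrix; in practice these
# would be estimated from historical return data):
#
#     mu = np.array([0.05, 0.07, 0.06])
#     cov = np.array([[0.10, 0.02, 0.01],
#                     [0.02, 0.12, 0.03],
#                     [0.01, 0.03, 0.09]])
#     weights = MarkowitzOptimizer().allocate(mu, cov)  # array of weights summing to 1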
class NCOOptimizer(AbstractOptimizer):
"""
Nested clustered optimization (NCO) optimizer based on section 4.3 of "A Robust Estimator of the Efficient Frontier
"""
def __init__(self, max_num_clusters: int = None, num_clustering_trials=10):
"""
Set optional variables used during calculations
:param max_num_clusters: max number of clusters to use during KMeans clustering
:param num_clustering_trials: number of times to perform KMeans clustering with [1,max_num_clusters] clusters
"""
self.max_num_clusters = max_num_clusters
self.num_clustering_trials = num_clustering_trials
@property
def name(self) -> str:
return 'NCO'
def allocate(self, mu: np.array, cov: np.array) -> np.array:
"""
Perform the NCO method described in section 4.3 of "A Robust Estimator of the Efficient Frontier"
Excerpt from section 4.3:
The NCO method estimates 𝜔̂∗ while controlling for the signal-induced estimation errors
explained in section 3.2. NCO works as follows:
First, we cluster the covariance matrix into subsets of highly-correlated variables.
One possible clustering algorithm is the partitioning method discussed in López de Prado and Lewis [2019],
but hierarchical methods may also be applied. The result is a partition of the original set,
that is, a collection of mutually disjoint nonempty subsets of variables.
Second, we compute optimal allocations for each of these clusters separately.
This allows us to collapse the original covariance matrix into a reduced covariance matrix,
where each cluster is represented as a single variable. The collapsed correlation matrix is
closer to an identity matrix than the original correlation matrix was, and therefore more
amenable to optimization problems (recall the discussion in section 3.2).
Third, we compute the optimal allocations across the reduced covariance matrix.
Fourth,
the final allocations are the dot-product of the intra-cluster allocations and the inter-cluster allocations.
By splitting the problem into two separate tasks, NCO contains the instability within each cluster:
the instability caused by intra-cluster noise does not propagate across clusters.
See López de Prado [2019] for examples, code and additional details regarding NCO.
:param cov: Covariance matrix
:param mu: Expected return vector
:return: Min variance portfolio if mu is None, max sharpe ratio portfolio if mu is not None
"""
cov = pd.DataFrame(cov)
if mu is not None:
mu = pd.Series(mu.flatten())
assert mu.size == cov.shape[0], 'mu and cov dimension must be the same size'
# get correlation matrix
corr = cov_to_corr(cov)
# find the optimal partition of clusters
clusters = self._cluster_k_means_base(corr)
# calculate intra-cluster allocations by finding the optimal portfolio for each cluster
intra_cluster_allocations = pd.DataFrame(0, index=cov.index, columns=clusters.keys())
for cluster_id, cluster in clusters.items():
cov_ = cov.loc[cluster, cluster].values
mu_ = mu.loc[cluster].values.reshape(-1, 1) if mu is not None else None
intra_cluster_allocations.loc[cluster, cluster_id] = self._get_optimal_portfolio(cov_, mu_)
# reduce covariance matrix
cov = intra_cluster_allocations.T.dot(np.dot(cov, intra_cluster_allocations))
mu = intra_cluster_allocations.T.dot(mu) if mu is not None else None
# calculate inter_cluster allocations on reduced covariance matrix
inter_cluster_allocations = pd.Series(self._get_optimal_portfolio(cov, mu), index=cov.index)
# final allocations are the dot-product of the intra-cluster allocations and the inter-cluster allocations
return intra_cluster_allocations \
.mul(inter_cluster_allocations, axis=1) \
.sum(axis=1).values \
.reshape(-1, 1) \
.flatten()
def _cluster_k_means_base(self, corr: np.array) -> Dict[int, int]:
"""
Using KMeans clustering, group the matrix into groups of highly correlated variables.
The result is a partition of the original set,
that is, a collection of mutually disjoint nonempty subsets of variables.
:param corr: correlation matrix
:return: The optimal partition of clusters
"""
distance_matrix = ((1 - corr.fillna(0)) / 2.) ** .5
silhouettes = pd.Series()
max_num_clusters = self.max_num_clusters
if max_num_clusters is None:
# if the max number of clusters wasn't specified, declare it based on corr
max_num_clusters = corr.shape[0] // 2
for _ in range(self.num_clustering_trials):
for i in range(2, max_num_clusters + 1): # find optimal num clusters
kmeans_ = KMeans(n_clusters=i, n_jobs=1, n_init=1, random_state=42)
kmeans_ = kmeans_.fit(distance_matrix)
silhouettes_ = silhouette_samples(distance_matrix, kmeans_.labels_)
new_calc = silhouettes_.mean() / silhouettes_.std()
old_calc = silhouettes.mean() / silhouettes.std()
if np.isnan(old_calc) or new_calc > old_calc:
silhouettes, kmeans = silhouettes_, kmeans_
clusters = {
i: corr.columns[np.where(kmeans.labels_ == i)].tolist()
for i in np.unique(kmeans.labels_)
} # cluster members
return clusters
def _get_optimal_portfolio(self, cov: np.array, mu: np.array) -> np.array:
"""
compute the optimal allocations across the reduced covariance matrix
:param cov: covariance matrix
:param mu: vector of expected returns
:return: optimal portfolio allocation
"""
try:
inv = np.linalg.inv(cov)
except np.linalg.LinAlgError: # get the pseudo-inverse if the matrix is singular
inv = np.linalg.pinv(cov)
ones = np.ones(shape=(inv.shape[0], 1))
if mu is None:
mu = ones
w = np.dot(inv, mu)
w /= np.dot(ones.T, w)
return w.flatten()
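    # Closed-form solution used above: w is proportional to inv(cov) @ 1 for the minimum
    # variance portfolio and to inv(cov) @ mu for the maximum Sharpe ratio portfolio, then
    # normalised to sum to one. Worked example with a diagonal covariance matrix (up to
    # floating-point rounding):
    #
    #     >>> NCOOptimizer()._get_optimal_portfolio(np.array([[0.04, 0.0], [0.0, 0.01]]), None)
    #     array([0.2, 0.8])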
class HRPOptimizer(AbstractOptimizer):
"""
    Hierarchical Risk Parity Optimizer based on Dr. Marcos López de Prado's paper 'Building Diversified Portfolios that
Outperform Out-of-Sample'
"""
def allocate(self, mu: np.array, cov: np.array) -> np.array:
"""
        Gets position weights according to the hierarchical risk parity method as outlined in Marcos López de Prado's
book
:param cov: covariance matrix
:param mu: vector of expected returns
:return: List of position weights.
"""
corr = cov_to_corr(cov)
dist = self._correlation_distance(corr)
link = sch.linkage(dist, 'single') # this step also calculates the Euclidean distance of 'dist'
sorted_indices = self._quasi_diagonal_cluster_sequence(link)
ret = self._hrp_weights(cov, sorted_indices)
if ret.sum() > 1.001 or ret.sum() < 0.999:
raise ValueError("Portfolio allocations don't sum to 1.")
return ret
@property
def name(self) -> str:
return 'HRP'
def _inverse_variance_weights(self, cov: np.ndarray) -> np.ndarray:
# Compute the inverse-variance portfolio
ivp = 1. / np.diag(cov)
ivp /= ivp.sum()
return ivp
def _cluster_sub_sequence(self, clustering_data: pd.DataFrame, combined_node: int) -> List:
        # recursively extracts the list of cluster indices that belong to the children of combined_node
row = clustering_data[clustering_data['combined_node'] == combined_node]
if row.empty:
return [combined_node]
return self._cluster_sub_sequence(clustering_data, row.iloc[0]['node1']) + \
self._cluster_sub_sequence(clustering_data, row.iloc[0]['node2'])
def _quasi_diagonal_cluster_sequence(self, link: np.ndarray) -> List:
# Sort clustered items by distance
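        # scipy's linkage matrix: columns 0 and 1 are the indices of the merged clusters,
        # column 2 the merge distance and column 3 the number of original items in the new
        # cluster, so link[-1, 3] is the total number of leaves.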
num_items = link[-1, 3].astype('int')
clustering_data = pd.DataFrame(link[:, 0:2].astype('int'), columns=['node1', 'node2'])
clustering_data['combined_node'] = clustering_data.index + num_items
return self._cluster_sub_sequence(clustering_data, clustering_data.iloc[-1]['combined_node'])
def _cluster_var(self, cov: np.ndarray) -> np.ndarray:
# calculates the overall variance assuming the inverse variance portfolio weights of the constituents
w_ = self._inverse_variance_weights(cov).reshape(-1, 1)
return np.dot(np.dot(w_.T, cov), w_)[0, 0]
def _hrp_weights(self, cov: np.ndarray, sorted_indices: List) -> np.ndarray:
"""
Gets position weights using hierarchical risk parity
:param cov: covariance matrix
:param sorted_indices: clustering scheme
:return: array of position weights
"""
if len(sorted_indices) == 0:
raise ValueError('sorted_indices is empty')
if len(sorted_indices) == 1:
return np.array([1.])
split_indices = np.array_split(np.array(sorted_indices), 2)
left_var = self._cluster_var(cov[:, split_indices[0]][split_indices[0]])
right_var = self._cluster_var(cov[:, split_indices[1]][split_indices[1]])
alloc_factor = 1. - left_var / (left_var + right_var)
return np.concatenate([
np.multiply(self._hrp_weights(cov, split_indices[0]), alloc_factor),
np.multiply(self._hrp_weights(cov, split_indices[1]), 1. - alloc_factor)
])
def _correlation_distance(self, corr: np.ndarray) -> np.ndarray:
# A distance matrix based on correlation, where 0<=d[i,j]<=1
# This is a proper distance metric
dist = np.sqrt((1. - corr) / 2.)
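        # corr = 1 gives d = 0, corr = 0 gives d ~ 0.707 and corr = -1 gives d = 1,
        # so highly correlated assets end up closest together in the clustering.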
for i in range(dist.shape[0]):
dist[i, i] = 0. # diagonals should always be 0, but sometimes it's only close to 0
return dist
class RiskParityOptimizer(AbstractOptimizer):
"""
Risk Parity Optimizer
"""
def __init__(self, target_risk: np.array = None):
self.target_risk = target_risk
def allocate(self, mu: np.array, cov: np.array) -> np.array:
"""
Gets position weights according to the risk parity method
:param cov: covariance matrix
:param mu: vector of expected returns
:return: List of position weights.
"""
        if
Node.TEXT_NODE) :
data['fromPeriod']=node2.data
for node1 in xmlTree.getElementsByTagName("error") :
for node2 in node1.childNodes:
if(node2.nodeType == Node.TEXT_NODE) :
data['error']=node2.data
for node1 in xmlTree.getElementsByTagName("status") :
for node2 in node1.childNodes:
if(node2.nodeType == Node.TEXT_NODE) :
data['status']=node2.data
for node1 in xmlTree.getElementsByTagName("SECURITY_KEY") :
for node2 in node1.childNodes:
if(node2.nodeType == Node.TEXT_NODE) :
data['SECURITY_KEY']=node2.data
for node1 in xmlTree.getElementsByTagName("SECURITY_CODE") :
for node2 in node1.childNodes:
if(node2.nodeType == Node.TEXT_NODE) :
data['SECURITY_CODE']=node2.data
for node1 in xmlTree.getElementsByTagName("CIPHER") :
for node2 in node1.childNodes:
if(node2.nodeType == Node.TEXT_NODE) :
data['CIPHER']=node2.data
for node1 in xmlTree.getElementsByTagName("MESSAGE") :
for node2 in node1.childNodes:
if(node2.nodeType == Node.TEXT_NODE) :
data['MESSAGE']=node2.data
studentlist = xmlTree.getElementsByTagName('studentlist')
data['studentlist']=[]
for i in studentlist:
p={}
p[str(i.attributes['rollNumber'].value)]=0
data['studentlist'].append(p)
print(data)
elif str(config['METHOD']['REQ_METHOD']) == 'CSV':
        # accepting data from the request, storing it in file.txt and logging the action
data = request.body
try:
data = data.decode('utf-8')
except:
logging.error(str(datetime.datetime.now())+"\tCSV Data not received in correct format.") #Logging Error message if data not received in correct format.
logging.info(str(datetime.datetime.now())+"\tCSV Data received in correct format.") #Logging message that data received in correct format.
with open('file.txt', 'w') as file:
file.write(data)
        # extracting the data from the CSV data structure and storing it in JSON data structures
# Here we have the option of changing the delimiter
import csv
l=[]
with open('file.txt','r') as csvfile:
spamreader = csv.reader(csvfile,delimiter=config['METHOD']['DELIMITOR'],quotechar='|')
for row in spamreader:
l.append(row)
count=0
counter=0
x=0
y=0
data={}
for item in l:
count+=1
counter+=1
if item[0]=='roll numbers start':
x=count
if item[0]=='roll numbers end':
y=counter-1
if item[0]=='classRoom':
data[item[0]]=str(item[1])
if item[0]=='courseNumber':
data[item[0]]=str(item[1])
if item[0]=='attendanceDate':
data[item[0]]=str(item[1])
if item[0]=='fromPeriod':
data[item[0]]=str(item[1])
if item[0]=='toPeriod':
data[item[0]]=str(item[1])
if item[0]=='status':
data[item[0]]=""
if item[0]=='error':
data[item[0]]=""
if item[0]=='SECURITY_KEY':
data[item[0]]=str(item[1])
if item[0]=='SECURITY_CODE':
data[item[0]]=str(item[1])
if item[0]=='MESSAGE':
data[item[0]]=str(item[1])
if item[0]=='CIPHER':
data[item[0]]=str(item[1])
data["studentlist"]=[]
for i in range(x,y):
p={}
p[l[i][0]]=0
data["studentlist"].append(p)
print(data)
elif str(config['METHOD']['REQ_METHOD']) == 'JSON':
        # accepting the data from the request, storing it in a JSON data structure and logging the action
data = request.body
try:
data = json.loads(str(data, 'utf-8'))
except :
logging.error(str(datetime.datetime.now())+"\tJSON Data not received in correct format.") #Logging Error message if data not received in correct format.
logging.info(str(datetime.datetime.now())+"\tJSON Data received in correct format.") #Logging message that data received in correct format.
print(data)
# data = {
# "classRoom": "102",
# "courseNumber": "ICS200",
# "attendanceDate": "08/06/2018",
# "fromPeriod": "07:00",
# "toPeriod": "07:30",
# "status": "",
# "error": "",
# "studentlist": [
# {
# "DSC_0688": 0
# },
# {
# "DSC_0626": 0
# },
# {
# "DSC_0011": 0
# },
# {
# "DSC_0847": 0
# },
# {
# "DSC_0824": 0
# }
# ],
# "SECURITY_KEY": "QWERTYUIOPASDFGH",
# "SECURITY_CODE": "ZXCVBNMASDFGHJKL",
# #"CIPHER": b':\xdd\n\x8b\xb5\xdf\xdfb\x07\xd8'
# "CIPHER": ':Ý\n\x8bµßßb\x07Ø',
# "MESSAGE": "Attendence"
# }
data1={}
for i in range(0,len(data["studentlist"])):
for key in data["studentlist"][i].keys():
data1[key]=0
data.update(studentlist=data1)
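    # The list-of-dicts student list is flattened into a single dict keyed by roll number,
    # e.g. [{"DSC_0688": 0}, {"DSC_0626": 0}] -> {"DSC_0688": 0, "DSC_0626": 0},
    # so the recognition loop below can simply increment data1[name].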
# encryption of cipher and checking it with config file data
# from Crypto.Cipher import AES
# obj = AES.new(data['SECURITY_KEY'], AES.MODE_CFB, data['SECURITY_CODE'])
# message = data['MESSAGE']
# cipher1 = obj.encrypt(message)
# obj2 = AES.new(config['SECURITY']['KEY'], AES.MODE_CFB, config['SECURITY']['CODE'])
# message2 = config['SECURITY']['MESSAGE']
# cipher2 = obj2.encrypt(message2)
#DECODED = obj2.decrypt(cipher).decode('utf-8')
#print(DECODED)
    # check the working of the cipher later
if data['MESSAGE'] == 'Attendence':
PATH = str(config['PATHS']['Sessions']) + str(data['classRoom']) + '/' + str(data['courseNumber'])
# extracting the 5 frames from the video file (Equally spaced)
'''vidcap = cv2.VideoCapture(PATH + '/AttendenceVideo.mp4')
success,image = vidcap.read()
success = True
length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
div = math.ceil(length / 5)
count = 0
while success:
success,image = vidcap.read()
if count%div == 0 :
cv2.imwrite(PATH + '/Images/frame%d.jpg'%count,image) # storing the images in PATH = str(config['PATHS']['Sessions']) + str(data['classRoom']) + '/' + str(data['courseNumber'])/Images folder
count+=1'''
count = 0
        # for all the images in the Images folder (group photos), face recognition is applied
for image_file in os.listdir(PATH + '/Images'):
full_file_path = os.path.join(PATH + '/Images', image_file)
if config['USE']['DATABASE'] == 'YES':
# connecting to the database
import MySQLdb
conn = MySQLdb.connect(user=config['DATABASE']['USERNAME'], passwd=config['DATABASE']['PASSWORD'], db=config['DATABASE']['DB_NAME'])
cursor = conn.cursor()
# RAW mysql query for getting images and roll numbers
cursor.execute("SELECT " + config['DATABASE']['PHOTO_CLM'] + ',' + config['DATABASE']['ROLL_CLM'] + " FROM " + config['DATABASE']['TABLE_NAME'])
row = cursor.fetchone()
# accessing one row of the table at a time
while row is not None:
from PIL import Image
import base64
img_str = row[0]
roll = row[1]
                    # converting the base64 string to an image and saving the photo to the KnownImages directory
imgdata = base64.b64decode(img_str)
if not os.path.exists(PATH + '/KnownImages'):
os.mkdir(PATH + '/KnownImages')
if not os.path.exists(PATH + '/KnownImages/' + str(roll)):
os.mkdir(PATH + '/KnownImages/' + str(roll))
filename = PATH + '/KnownImages/' + str(roll) + '/' + str(roll) + '.jpg'
with open(filename, 'wb') as f:
f.write(imgdata)
row = cursor.fetchone()
# If a trained classifier already exists for that class, training is skipped
if not os.path.exists(PATH + '/trained_knn_model.clf'):
print("Training KNN classifier...")
classifier = train(PATH + '/KnownImages', model_save_path=PATH + "/trained_knn_model.clf", n_neighbors=2)
print("Training complete!")
print("Looking for faces in {}".format(image_file))
# Find all people in the image using a trained classifier model
# Note: You can pass in either a classifier file name or a classifier model instance
predictions = predict(full_file_path, model_path=PATH + "/trained_knn_model.clf")
# Print results on the console
for name, (top, right, bottom, left) in predictions:
print("- Found {} at ({}, {})".format(name, left, top))
if name in data['studentlist']:
data1[name] += 1
count += 1
show_prediction_labels_on_image(os.path.join(PATH + '/Images', image_file), predictions,data, count)
# delete the KnownImages folder after the attendance has been taken
# optional - delete the classifier after the attendance has been taken
if config['USE']['DATABASE'] == 'YES':
shutil.rmtree(PATH + '/KnownImages')
os.remove(PATH + '/trained_knn_model.clf')
elif config['USE']['DATABASE'] == 'NO':
os.remove(PATH + '/trained_knn_model.clf')
#print('ggwp')
# restructure the data according to the format required by the ERP
data["studentlist"]=[]
for key in data1.keys():
p={}
p[key]=data1[key]
data["studentlist"].append(p)
data["imagepaths"]=[]
p={}
p["Frame1"]='Frame1.jpg'
p["Frame2"]='Frame2.jpg'
p["Frame3"]='Frame3.jpg'
p["Frame4"]='Frame4.jpg'
p["Frame5"]='Frame5.jpg'
data["imagepaths"].append(p)
# restructure the data in XML format and render an XML response
if config['METHOD']['RSP_METHOD'] == 'XML':
import xml.etree.cElementTree as ET
root = ET.Element("data")
cr = ET.SubElement(root, "classRoom").text = data['classRoom']
cn = ET.SubElement(root, "courseNumber").text = data['courseNumber']
ad = ET.SubElement(root, "attendanceDate").text = data['attendanceDate']
fp = ET.SubElement(root, "fromPeriod").text = data['fromPeriod']
tp = ET.SubElement(root, "toPeriod").text = data['toPeriod']
err = ET.SubElement(root, "error").text = data['error']
sta = ET.SubElement(root, "status").text = data['status']
sec_key = ET.SubElement(root, "SECURITY_KEY").text = data['SECURITY_KEY']
sec_code = ET.SubElement(root, "SECURITY_CODE").text = data['SECURITY_CODE']
#ci = ET.SubElement(root, "CIPHER").text = data['CIPHER']
msg = ET.SubElement(root, "MESSAGE").text = data['MESSAGE']
for i in data['studentlist']:
for j in i.keys():
sl = ET.SubElement(root, "studentlist",rollNumber=j).text = str(i[j])
for i in data['imagepaths']:
for j in i.keys():
sl = ET.SubElement(root, "imagepaths",rollNumber=j).text = str(i[j])
tree = ET.ElementTree(root)
tree.write("output.xml")
logging.info(str(datetime.datetime.now())+"\t"+str(len(data['studentlist']))+" students XML data sent successfully.") #Logging info that data has been sent successfully.
return HttpResponse(open('output.xml').read())
# restructure the data in CSV format and render it as a plain-text response
elif config['METHOD']['RSP_METHOD'] == 'CSV':
f = open('output.txt','w')
for i in data:
if(i=='studentlist'):
f.write('studentlist\nroll numbers start\n')
for j in data[i]:
for k in j.keys():
f.write(str(k)+str(config['METHOD']['DELIMITOR'])+str(j[k])+'\n')
f.write('roll numbers end\n')
elif(i=='imagepaths'):
f.write('imagepaths\nimagepath start\n')
for j in data[i]:
for k in j.keys():
f.write(str(k)+str(config['METHOD']['DELIMITOR'])+str(j[k])+'\n')
f.write('imagepath end\n')
else:
f.write(i+str(config['METHOD']['DELIMITOR'])+data[i]+'\n')
logging.info(str(datetime.datetime.now())+"\t"+str(len(data['studentlist']))+" students CSV data sent successfully.") #Logging info that data has been sent successfully.
with open('output.txt', 'r') as f:
data = f.read()
return HttpResponse(data, content_type='text/plain')
# rendering JSON response
elif config['METHOD']['RSP_METHOD'] == 'JSON':
logging.info(str(datetime.datetime.now())+"\t"+str(len(data['studentlist']))+" students JSON data sent successfully.") #Logging info that data has been sent successfully.
return JsonResponse(data)
# if authorisation failed while comparing the token, an error response is rendered
else:
data['status'] = 'error occured during validation'
data['error'] = 'UNAUTHORISED ACCESS'
logging.info(str(datetime.datetime.now())+"\tUnauthorized user trying to send and receive data.") #Logging info that there was an unauthorized access
data["studentlist"]=[]
for key in data1.keys():
p={}
p[key]=data1[key]
data["studentlist"].append(p)
# restructure the data in XML format and render an XML response
if config['METHOD']['RSP_METHOD'] == 'XML':
import xml.etree.cElementTree as ET
root = ET.Element("data")
cr = ET.SubElement(root, "classRoom").text = data['classRoom']
cn = ET.SubElement(root, "courseNumber").text = data['courseNumber']
ad = ET.SubElement(root, "attendanceDate").text = data['attendanceDate']
fp = ET.SubElement(root, "fromPeriod").text = data['fromPeriod']
tp = ET.SubElement(root, "toPeriod").text = data['toPeriod']
err = ET.SubElement(root, "error").text = data['error']
sta = ET.SubElement(root, "status").text = data['status']
sec_key = ET.SubElement(root, "SECURITY_KEY").text = data['SECURITY_KEY']
sec_code = ET.SubElement(root, "SECURITY_CODE").text = data['SECURITY_CODE']
#ci = ET.SubElement(root, "CIPHER").text = data['CIPHER']
msg = ET.SubElement(root, "MESSAGE").text = data['MESSAGE']
for i in data['studentlist']:
for j in i.keys():
sl | |
# Advanced Multi-Mission Operations System (AMMOS) Instrument Toolkit (AIT)
# Bespoke Link to Instruments and Small Satellites (BLISS)
#
# Copyright 2018, by the California Institute of Technology. ALL RIGHTS
# RESERVED. United States Government Sponsorship acknowledged. Any
# commercial use must be negotiated with the Office of Technology Transfer
# at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws. By accepting
# this software, the user agrees to comply with all applicable U.S. export
# laws and regulations. User has the responsibility to obtain export licenses,
# or other export authority as may be required before exporting such
# information to foreign countries or providing access to foreign persons.
import os
import socket
import time
import traceback
import gevent
import gevent.queue
import gevent.socket
from ait.dsn.cfdp.events import Event
from ait.dsn.cfdp.machines import Receiver1, Sender1
from ait.dsn.cfdp.mib import MIB
from ait.dsn.cfdp.pdu import make_pdu_from_bytes, split_multiple_pdu_byte_array, Header
from ait.dsn.cfdp.primitives import RequestType, TransmissionMode, FileDirective, Role, ConditionCode
from ait.dsn.cfdp.request import create_request_from_type
from ait.dsn.cfdp.util import write_to_file
from .exceptions import InvalidTransaction
import ait.core
import ait.core.log
class CFDP(object):
"""CFDP processor class. Handles sending and receiving of PDUs and management of transactions.
"""
mib = MIB(ait.config.get('dsn.cfdp.mib.path', '/tmp/cfdp/mib'))
transaction_counter = 0
pdu_counter = 1
outgoing_pdu_queue = gevent.queue.Queue()
incoming_pdu_queue = gevent.queue.Queue()
def __init__(self, entity_id, *args, **kwargs):
"""
Initialize CFDP entity with specified entity ID.
Args
entity_id (int): unique entity identifier
**file_sys (bool): set to True to use file system instead of sockets for PDU transfer
"""
# State machines for current transactions (basically just transactions).
# Can be Class 1 or 2 sender or receiver
self._machines = {}
# set sending and receiving handlers depending on transfer method
if kwargs.get('file_sys', None):
self._read_pdu_handler = gevent.spawn(read_pdus_from_filesys, self)
self._sending_handler = gevent.spawn(send_to_filesys_handler, self)
else:
self._read_pdu_handler = gevent.spawn(read_pdus_from_socket, self)
self._sending_handler = gevent.spawn(send_to_socket_handler, self)
# Spawn handlers for incoming and outgoing data
self._receiving_handler = gevent.spawn(receiving_handler, self)
# cycle through transactions to progress state machines
self._transaction_handler = gevent.spawn(transaction_handler, self)
# set entity id in MIB
self.mib.load()
self.mib.local_entity_id = entity_id
# temporary list for holding PDUs that have been read from file
self.received_pdu_files = []
self._data_paths = {}
self._data_paths['pdusink'] = ait.config.get('dsn.cfdp.datasink.pdusink.path')
self._data_paths['outgoing'] = ait.config.get('dsn.cfdp.datasink.outgoing.path')
self._data_paths['incoming'] = ait.config.get('dsn.cfdp.datasink.incoming.path')
self._data_paths['tempfiles'] = ait.config.get('dsn.cfdp.datasink.tempfiles.path')
# create needed paths if they don't exist
for name, path in self._data_paths.items():
if not os.path.exists(path):
os.makedirs(path)
def connect(self, rcv_host, send_host=None):
"""
Connect to UDP sockets for sending and receiving PDUs. Will only connect to socket for
receiving if send socket is not specified.
Args
rcv_host (tuple): (hostname, port) to receive PDUs on.
send_host (Optional, tuple): (hostname, port) to send PDUs to, defaults to None.
"""
# setup receive socket
self.rcv_host = rcv_host
self._rcvr_socket = gevent.socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# setup send socket if specified
if send_host:
self.send_host = send_host
self._sender_socket = gevent.socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to receiver socket (no bind to sender socket)
connected = False
while not connected:
ait.core.log.info('Attempting CFDP socket connection...')
try:
self._rcvr_socket.bind(self.rcv_host)
connected = True
except socket.error as e:
ait.core.log.error('Error connecting to CFDP receive socket: {}'.format(e))
gevent.sleep(1)
ait.core.log.info('Connected to CFDP receiving socket')
def disconnect(self):
"""Close sockets, kill handlers, dump MIB"""
try:
self._rcvr_socket.close()
self._sender_socket.close()
except Exception:
pass
self._receiving_handler.kill()
self._sending_handler.kill()
self.mib.dump()
def _increment_tx_counter(self):
self.transaction_counter += 1
return self.transaction_counter
def send(self, pdu):
"""Send a PDU. Adds the PDU to the outbound queue.
Arguments:
pdu:
An instance of a PDU subclass (EOF, MD, etc)
"""
ait.core.log.debug('Adding pdu ' + str(pdu) + ' to queue')
self.outgoing_pdu_queue.put(pdu)
def put(self, destination_id, source_path, destination_path, transmission_mode=None):
"""Initiates a Put request by invoking Transaction Start procedures and Copy File procedures
Other parameters not yet implemented:
- segmentation control
- fault handler overrides
- flow label
- messages to user
- filestore requests
"""
# Do some file checks before starting anything
if source_path.startswith('/'):
ait.core.log.error('Source path should be a relative path.')
return
if destination_path.startswith('/'):
ait.core.log.error('Destination path should be a relative path.')
return
# (A) Transaction Start Notification Procedure
# 1. Issue Tx ID sequentially
transaction_num = self._increment_tx_counter()
# (B) Copy File Procedure
# Determine transmission mode so we know what kind of machine to make
# Use destination id to get the default MIB setting for that entity id
if transmission_mode is None:
transmission_mode = self.mib.transmission_mode(destination_id)
if transmission_mode == TransmissionMode.ACK:
# TODO raise invalid transmission mode since we don't support ACK right now
pass
# Create a `Request` which contains all the parameters for a Put.request
# This is passed to the machine to progress the state
request = create_request_from_type(RequestType.PUT_REQUEST,
destination_id=destination_id,
source_path=source_path,
destination_path=destination_path,
transmission_mode=transmission_mode)
# if transmission_mode == TransmissionMode.ACK:
# machine = Sender2(self, transaction_num, request=request)
# else:
machine = Sender1(self, transaction_num)
# Send the Put.request `Request` to the newly created machine
# This is where the rest of the Put request procedures are done
machine.update_state(event=Event.RECEIVED_PUT_REQUEST, request=request)
# Add transaction to list, indexed by Tx #
self._machines[transaction_num] = machine
return transaction_num
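# A minimal usage sketch of this class (not part of the library; the host/port
# values and file names below are assumptions for illustration only):
#
#   cfdp = CFDP(entity_id=1)
#   cfdp.connect(rcv_host=('127.0.0.1', 9001), send_host=('127.0.0.1', 9002))
#   tx_num = cfdp.put(destination_id=2,
#                     source_path='outgoing/example.txt',
#                     destination_path='incoming/example.txt')
#   ...  # wait for the transaction to complete
#   cfdp.disconnect()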
def ingest(self, pdu_path):
"""Ingest pdu from file
"""
if pdu_path not in self.received_pdu_files:
ait.core.log.debug("Ingesting PDU at path: {0}".format(pdu_path))
# cache file so that we know we read it
self.received_pdu_files.append(pdu_path)
# add to incoming so that receiving handler can deal with it
with open(pdu_path, 'rb') as pdu_file:
# add raw file contents to incoming queue
pdu_file_bytes = pdu_file.read()
self.incoming_pdu_queue.put(pdu_file_bytes)
def report(self, transaction_id):
"""Report.request -- user request for status report of transaction"""
request = create_request_from_type(RequestType.REPORT_REQUEST, transaction_id=transaction_id)
machine = self._machines.get(transaction_id, None)
if machine is None:
raise InvalidTransaction(transaction_id)
else:
machine.update_state(event=Event.RECEIVED_REPORT_REQUEST, request=request)
def cancel(self, transaction_id):
"""Cancel.request -- user request to cancel transaction"""
request = create_request_from_type(RequestType.CANCEL_REQUEST, transaction_id=transaction_id)
machine = self._machines.get(transaction_id, None)
if machine is None:
raise InvalidTransaction(transaction_id)
else:
machine.update_state(event=Event.RECEIVED_CANCEL_REQUEST, request=request)
def suspend(self, transaction_id):
"""Suspend.request -- user request to suspend transaction"""
request = create_request_from_type(RequestType.SUSPEND_REQUEST, transaction_id=transaction_id)
machine = self._machines.get(transaction_id, None)
if machine is None:
raise InvalidTransaction(transaction_id)
else:
machine.update_state(event=Event.RECEIVED_SUSPEND_REQUEST, request=request)
def resume(self, transaction_id):
"""Resume.request -- user request to resume transaction"""
request = create_request_from_type(RequestType.RESUME_REQUEST, transaction_id=transaction_id)
machine = self._machines.get(transaction_id, None)
if machine is None:
raise InvalidTransaction(transaction_id)
else:
machine.update_state(event=Event.RECEIVED_RESUME_REQUEST, request=request)
def read_pdus_from_filesys(instance):
"""Read PDUs that have been written to file (in place of receiving over socket)
"""
while True:
gevent.sleep(0)
try:
# Get files from pdusink directory in order of creation
pdusink_path = instance._data_paths['pdusink']
pdu_files = [os.path.join(pdusink_path, f) for f in os.listdir(pdusink_path) if f.endswith('.pdu')]
pdu_files.sort(key=lambda x: os.path.getmtime(x))
for pdu_filename in pdu_files:
if pdu_filename not in instance.received_pdu_files:
# cache file so that we know we read it
instance.received_pdu_files.append(pdu_filename)
# add to incoming so that receiving handler can deal with it
pdu_full_path = os.path.join(pdusink_path, pdu_filename)
with open(pdu_full_path, 'rb') as pdu_file:
# add raw file contents to incoming queue
pdu_file_bytes = pdu_file.read()
instance.incoming_pdu_queue.put(pdu_file_bytes)
break
except Exception as e:
ait.core.log.warn("EXCEPTION: " + e.message)
ait.core.log.warn(traceback.format_exc())
def read_pdus_from_socket(instance):
""" Read PDUs from a socket over UDP """
while True:
gevent.sleep(0)
try:
all_bytes, addr = instance._rcvr_socket.recvfrom(4096)
if all_bytes:
# create PDUs from bytes received
all_bytes = [b for b in bytearray(all_bytes)]
for pdu_bytes in split_multiple_pdu_byte_array(all_bytes):
pdu = make_pdu_from_bytes(pdu_bytes)
pdu_filename = 'entity{0}_tx{1}_{2}.pdu'.format(pdu.header.destination_entity_id,
pdu.header.transaction_id,
instance.pdu_counter)
# cache file so that we know we read it
instance.received_pdu_files.append(pdu_filename)
# add to incoming so that receiving handler can deal with it
instance.incoming_pdu_queue.put(pdu_bytes)
else:
break
except Exception as e:
ait.core.log.warn("EXCEPTION: " + str(e))
ait.core.log.warn(traceback.format_exc())
def receiving_handler(instance):
"""Receives incoming PDUs on `incoming_pdu_queue` and routes them to the intended state machine instance
"""
while True:
gevent.sleep(0)
try:
pdu_bytes = instance.incoming_pdu_queue.get(block=False)
pdu = read_incoming_pdu(pdu_bytes)
ait.core.log.debug('Incoming PDU Type: ' + str(pdu.header.pdu_type))
if pdu.header.destination_entity_id != instance.mib.local_entity_id:
ait.core.log.debug('Skipping PDU with mismatched destination entity id {0}'.format(pdu.header.destination_entity_id))
continue
transaction_num = pdu.header.transaction_id
machine = instance._machines[transaction_num] if transaction_num in instance._machines else None
if pdu.header.pdu_type == Header.FILE_DATA_PDU:
# If it's file data we'll concatenate it to the file
ait.core.log.debug('Received File Data Pdu')
if machine is None:
ait.core.log.info(
'Ignoring File Data for transaction that doesn\'t exist: {}'.format(transaction_num))
else:
# Restart inactivity timer here when PDU is being given to a machine
machine.inactivity_timer.restart()
machine.update_state(Event.RECEIVED_FILEDATA_PDU, pdu=pdu)
elif pdu.header.pdu_type == Header.FILE_DIRECTIVE_PDU:
ait.core.log.debug('Received File Directive Pdu: ' + str(pdu.file_directive_code))
if pdu.file_directive_code == FileDirective.METADATA:
# If machine doesn't exist, create a machine for this transaction
transmission_mode = pdu.header.transmission_mode
if machine is None:
# if transmission_mode == TransmissionMode.NO_ACK:
machine = Receiver1(instance, transaction_num)
instance._machines[transaction_num] = machine
machine.update_state(Event.RECEIVED_METADATA_PDU, pdu=pdu)
elif pdu.file_directive_code == FileDirective.EOF:
if machine is None:
ait.core.log.info('Ignoring EOF for transaction that doesn\'t exist: {}'
.format(transaction_num))
else:
if pdu.condition_code == | |
# util_write_cap.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utilities to prepare files for CAP
For reference see versions prior to Aug 25, 2016 for:
getwaveform_iris.py
getwaveform_llnl.py
20160825 cralvizuri <<EMAIL>>
"""
import obspy
from obspy.io.sac import SACTrace
import obspy.signal.rotate as rotate
import os
from scipy import signal
import numpy as np
import util_helpers
import json
import matplotlib.pyplot as plt
import shutil
from obspy.taup import TauPyModel
from obspy.geodetics import kilometer2degrees
import math
from obspy.core import UTCDateTime
from util_helpers import copy_trace, remove_trace
def zerophase_chebychev_lowpass_filter(trace, freqmax):
"""
Custom Chebychev type two zerophase lowpass filter useful for
decimation filtering.
This filter is stable up to a reduction in frequency with a factor of
10. If more reduction is desired, simply decimate in steps.
Partly based on a filter in ObsPy.
:param trace: The trace to be filtered.
:param freqmax: The desired lowpass frequency.
Will be replaced once ObsPy has a proper decimation filter.
"""
# rp - maximum ripple of passband, rs - attenuation of stopband
rp, rs, order = 1, 96, 1e99
ws = freqmax / (trace.stats.sampling_rate * 0.5) # stop band frequency
wp = ws # pass band frequency
while True:
if order <= 12:
break
wp *= 0.99
order, wn = signal.cheb2ord(wp, ws, rp, rs, analog=0)
b, a = signal.cheby2(order, rs, wn, btype="low", analog=0, output="ba")
# Apply twice to get rid of the phase distortion.
trace.data = signal.filtfilt(b, a, trace.data)
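# A hedged usage sketch: apply this filter before decimating a trace. Here `tr` is
# assumed to be an obspy.Trace and the decimation factor of 4 is an illustrative
# assumption, not a value used elsewhere in this file.
#
#   factor = 4
#   # keep energy below the new Nyquist frequency, then decimate without
#   # ObsPy's default FIR filter (the data is already lowpassed)
#   zerophase_chebychev_lowpass_filter(tr, freqmax=tr.stats.sampling_rate * 0.5 / factor)
#   tr.decimate(factor, no_filter=True)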
#------------rotations---------------------
def rotate2ENZ(stream, evname_key, isave_ENZ=True, icreateNull=False, ifverbose = False):
print('\n---> Try rotate components. Function rotate2ENZ')
outdir = evname_key
if not os.path.exists(outdir):
os.makedirs(outdir)
# Directory is made, now rotate
# Sorted stream makes for structured loop
stream.sort()
# Get list of unique stations + location (example: 'KDAK.00')
stalist = []
for tr in stream.traces:
#stalist.append(tr.stats.station)
stalist.append(tr.stats.network + '.' + tr.stats.station +'.'+ tr.stats.location + '.'+ tr.stats.channel[:-1])
stalist = list(set(stalist))
# Initialize stream object
# For storing extra traces in case there are fewer than 3 components
st_new = obspy.Stream()
for stn in stalist:
# split STNM.LOC
tmp = stn.split('.')
netw = tmp[0]
station = tmp[1]
location = tmp[2]
chan = tmp[3] + '*'
# Get 3 traces (subset based on matching station name and location code)
substr = stream.select(network=netw,station=station,
location=location,channel=chan)
substr.sort()
#stakey = '%s.%s.%s.%s' % (netw, station, location, chan)
stakey = '%s' % substr[0].id
print('Working on station %s' % stakey)
# what components are in substr?
components = list()
for trace in substr:
components.append(trace.stats.channel[-1].upper())
components.sort()
if len(substr)<3:
if not icreateNull:
continue
# Station is missing one or more components. Checking to see if the
# remaining components are usable
if len(components) < 3:
print('\nWARNING. Missing components. Available: ', components)
print('substr: ', substr)
# 2021-05-19 fixed: use permutation in case components not always in same order.
# Maybe use itertools to apply each permutation.
if components == ['N', 'Z'] or components == ['Z', 'N'] \
or components == ['E', 'Z'] or components == ['Z', 'E'] \
or components == ['1', 'Z'] or components == ['Z', '1'] \
or components == ['2', 'Z'] or components == ['Z', '2'] \
or components == ['Z']:
print('WARNING: Missing horizontal component(s). Substituting with zeros')
for component in ['N', 'E', '1', '2']:
remove_trace(substr, component)
trace = copy_trace(substr, component='Z')
trace.data[:] = 0.
trace.stats.channel = trace.stats.channel[:-1]+'E'
trace.stats.sac['cmpaz'] = 90.
trace.stats.sac['cmpinc'] = 0.
substr.append(trace)
trace = copy_trace(substr, component='Z')
trace.data[:] = 0.
trace.stats.channel = trace.stats.channel[:-1]+'N'
trace.stats.sac['cmpaz'] = 0.
trace.stats.sac['cmpinc'] = 0.
substr.append(trace)
substr.sort()
elif components==['E', 'N']:
print('WARNING: Missing vertical component. Substituting with zeros')
trace = copy_trace(substr, component='N')
trace.data[:] = 0.
trace.stats.channel = trace.stats.channel[:-1]+'Z'
trace.stats.sac['cmpinc'] = -90.
substr.append(trace)
elif components==['1', '2']:
print('WARNING: Missing vertical component. Substituting with zeros')
trace = copy_trace(substr)
trace.data[:] = 0.
trace.stats.channel = trace.stats.channel[:-1]+'Z'
trace.stats.sac['cmpaz'] = 0.
trace.stats.sac['cmpinc'] = -90.
substr.append(trace)
else:
print('\nWARNING: No usable components found. Skipping (CHECK IF YOU WANT TO SKIP!)\n')
continue
# Rotate to NEZ first
# Sometimes channels are not orthogonal (example: 12Z instead of NEZ)
# Run iex = 5 (run_getwaveform.py) for one such case
d1 = substr[0].data
d2 = substr[1].data
d3 = substr[2].data
az1 = substr[0].stats.sac['cmpaz']
az2 = substr[1].stats.sac['cmpaz']
az3 = substr[2].stats.sac['cmpaz']
dip1 = substr[0].stats.sac['cmpinc']
dip2 = substr[1].stats.sac['cmpinc']
dip3 = substr[2].stats.sac['cmpinc']
if ifverbose:
print_rotation_parameters(substr)
# 2021-05-11
# Not clear why some stations can't rotate.
# TODO Find bug that crashes rotation for some stations.
# Still cleaning up vipuls codes.
# The rotation crashes often and breaks the downloading.
# Use `try` to recover and continue with other stations, but the issue is still not resolved.
# It may be the dummy zeros created for the horizontal components when a station only has a vertical component.
try:
data_array = rotate.rotate2zne(d1, az1, dip1, d2, az2, dip2, d3, az3, dip3)
except:
print ("ERROR: rotate.rotate2zne(...) failed, skipping...")
continue
# Rotates an arbitrarily oriented three-component vector to ZNE( [0]-Z, [1]-N, [2]-E)
# XXX: Check 012 in correct order?
substr[0].data = data_array[2] # E
substr[1].data = data_array[1] # N
substr[2].data = data_array[0] # Z
# Fix the channel names in the traces.stats
if len(substr[0].stats.channel)==3:
substr[0].stats.channel = substr[0].stats.channel[0:2] + 'E'
substr[1].stats.channel = substr[0].stats.channel[0:2] + 'N'
substr[2].stats.channel = substr[0].stats.channel[0:2] + 'Z'
else: # sometimes channel code are R,T,V instead of BHE,BHN,BHZ (or HFE,HFN,HFZ).
# This needs to be done so that rotation can happen
substr[0].stats.channel = 'XXE'
substr[1].stats.channel = 'XXN'
substr[2].stats.channel = 'XXZ'
#print(substr[0].stats.channel,substr[1].stats.channel,substr[2].stats.channel)
# Fix the sac headers since the traces have been rotated now
substr[0].stats.sac['cmpaz'] = 90.0
substr[1].stats.sac['cmpaz'] = 0.0
substr[2].stats.sac['cmpaz'] = 0.0
# matlab files had following cmpinc: E = 90, N = 90, Z = 0
# XXX: Will this cause problem??
substr[0].stats.sac['cmpinc'] = 0.0
substr[1].stats.sac['cmpinc'] = 0.0
substr[2].stats.sac['cmpinc'] = -90.0
# Fix sac headers
substr[0].stats.sac['kcmpnm'] = substr[0].stats.channel
substr[1].stats.sac['kcmpnm'] = substr[1].stats.channel
substr[2].stats.sac['kcmpnm'] = substr[2].stats.channel
if ifverbose:
print_rotation_parameters(substr)
# save NEZ waveforms
if isave_ENZ:
# Create output directory if it doesn't exist
outdir_enz = os.path.join(outdir, 'ENZ')
if not(os.path.exists(outdir_enz)):
os.makedirs(outdir_enz)
for tr in substr:
outfnam = os.path.join(outdir_enz, evname_key + '.' \
+ tr.stats.network + '.' + tr.stats.station + '.' \
+ tr.stats.location + '.' + tr.stats.channel[:-1] + '.' \
+ tr.stats.channel[-1].lower())
tr.write(outfnam, format='SAC')
# append substream to the main stream
st_new = st_new + substr
# replace stream object
stream = st_new
return stream
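# A hedged usage sketch (the event key below is a made-up example, and the stream
# is assumed to already carry the SAC headers, e.g. cmpaz/cmpinc, that this
# workflow sets elsewhere):
#
#   st = rotate2ENZ(st, evname_key='20090407201255351')   # first get E, N, Z
#   rotate2RTZ(st, evname_key='20090407201255351')        # then rotate to R, T, Z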
def rotate2UVW(stream, evname_key):
print('\n---> Try rotate components. Function rotate2UVW')
# Directory is made, now rotate
# Sorted stream makes for structured loop
stream.sort()
# Get list of unique stations + location (example: 'KDAK.00')
stalist = []
for tr in stream.traces:
#stalist.append(tr.stats.station)
stalist.append(tr.stats.network + '.' + tr.stats.station +'.'+ tr.stats.location + '.'+ tr.stats.channel[:-1])
stalist = list(set(stalist))
for stn in stalist:
# split STNM.LOC
netw, station, location, tmp = stn.split('.')
chan = tmp + '*'
# Get 3 traces (subset based on matching station name and location code)
substr = stream.select(network=netw,station=station,
location=location,channel=chan)
substr.sort()
substr2 = substr.copy()
rotate2UVW_station(substr2,evname_key)
def rotate2RTZ(stream, evname_key, ifverbose=False):
print('\n---> Try rotate components. Function rotate2RTZ')
outdir = evname_key
# Directory is made, now rotate
# Sorted stream makes for structured loop
stream.sort()
# Get list of unique stations + location (example: 'KDAK.00')
stalist = []
for tr in stream.traces:
#stalist.append(tr.stats.station)
stalist.append(tr.stats.network + '.' + tr.stats.station +'.'+ tr.stats.location + '.'+ tr.stats.channel[:-1])
stalist = list(set(stalist))
# XXX: Perhaps not using the subtream can speed up the rotation
for stn in stalist:
# split STNM.LOC
netw, station, location, tmp = stn.split('.')
chan = tmp + '*'
# Get 3 traces (subset based on matching station name and location code)
substr = stream.select(network=netw, station=station,
location=location, channel=chan)
substr.sort()
# stream.rotate('NE->RT') #And then boom, obspy rotates everything!
stakey = '%s.%s.%s.%s' % (netw, station, location, chan)
#stakey = '%s' % substr[0].id
print('Working on station %s. Rotating ENZ to RTZ' % stakey)
try:
if ifverbose:
print_rotation_parameters(substr)
substr.rotate('NE->RT')
if ifverbose:
print_rotation_parameters(substr)
# Fix cmpaz metadata for Radial and Transverse components
for tr in substr.traces:
if tr.stats.channel[-1] == 'R':
tr.stats.sac['kcmpnm'] = tr.stats.channel[0:2] + 'R'
tr.stats.sac['cmpaz'] = tr.stats.sac['az']
elif tr.stats.channel[-1] == 'T':
tr.stats.sac['kcmpnm'] = tr.stats.channel[0:2] + 'T'
tr.stats.sac['cmpaz'] = tr.stats.sac['az']+90.0
if tr.stats.sac['cmpaz'] > 360.0:
tr.stats.sac['cmpaz'] += -360
# Now Write
# 20160805 <EMAIL> -- some llnl stations have traces with
# multiple channel types. The | |
import argparse
import collections
import copy
import json
import os
import pprint
import re
from operator import itemgetter
from warnings import warn
import pandas as pd
import plotly
import plotly.graph_objs as go
from docopt import docopt
from matplotlib import cm
from matplotlib.colors import rgb2hex
from munkres import Munkres
import commonFunctions
LINENUM = "lineNum"
STMT_PENALTY = 5
class Block:
""" Class to represent information about a route-policy term/clause.
:ivar lineNum: The line number in the sequence of terms.
:ivar termJson: The representation of a parsed term in JSON.
"""
def __init__(self, lineNum, guard, trueStatements):
self.guardCmds = list()
self.trueCmds = list()
self.action = {}
# Default action is taken as permit (then next term)
self.action["type"] = " permit "
self.action[LINENUM] = lineNum[0]
lineNum[0] += 1
if guard:
if "Conjunction" in guard["class"] and "conjuncts" in guard:
for cmd in guard["conjuncts"]:
innerConjuncts = False
if "Disjunction" in cmd["class"]:
copyCmd = self.checkGuardCmdSyntax(self.flattenDisjuncts(cmd["disjuncts"]))
elif "Conjunction" in cmd["class"] and "conjuncts" in cmd:
innerConjuncts = True
# TODO: Remove duplicate guard cmds generated by conjuncts
for cmd2 in cmd["conjuncts"]:
copyCmd = self.checkGuardCmdSyntax((cmd2))
copyCmd[LINENUM] = lineNum[0]
lineNum[0] += 1
self.guardCmds.append(copyCmd)
else:
copyCmd = self.checkGuardCmdSyntax((cmd))
if not innerConjuncts:
copyCmd[LINENUM] = lineNum[0]
lineNum[0] += 1
self.guardCmds.append(copyCmd)
elif "Disjunction" in guard["class"]:
copyCmd = self.checkGuardCmdSyntax(self.flattenDisjuncts(guard["disjuncts"]))
copyCmd[LINENUM] = lineNum[0]
lineNum[0] += 1
self.guardCmds.append(copyCmd)
else:
copyCmd = self.checkGuardCmdSyntax(guard)
copyCmd[LINENUM] = lineNum[0]
lineNum[0] += 1
self.guardCmds.append(copyCmd)
if trueStatements:
for stmt in trueStatements:
# Cisco/Juniper Last Sentence
# Buffered Statement found in CISCO_IOS_XR
if "BufferedStatement" in stmt["class"]:
stmt = stmt["statement"]
if "Statements$StaticStatement" in stmt["class"]:
if "True" in stmt["type"] or "Accept" in stmt["type"]:
self.action["type"] = " permit "
elif "False" in stmt["type"] or "Reject" in stmt["type"]:
self.action["type"] = " deny "
else:
warn("unKNOWN Static Statement")
#Fall through is taken as Permit
self.action["type"] = " permit "
# Juniper Last Sentence
elif "If" in stmt["class"]:
if "Statements$StaticStatement" in stmt["trueStatements"][0]["class"]:
if "True" in stmt["trueStatements"][0]["type"] or "Accept" in stmt["trueStatements"][0]["type"]:
self.action["type"] = " permit "
elif "False" in stmt["trueStatements"][0]["type"] or "Reject" in stmt["trueStatements"][0]["type"]:
self.action["type"] = " deny "
else:
warn("unhandled Static Statement in Juniper")
#Fall through is taken as Permit for now
self.action["type"] = " permit "
else:
warn("unhandled Static Statement in Juniper")
# "Juniper will not commit this configuration: 'then community add ANCHOR' is not valid because ANCHOR does not contain any literal communities"
elif "Comment" in stmt["class"]:
pass
elif "PrependAsPath" in stmt["class"] and "LiteralAsList" in stmt["expr"]["class"]:
copyCmd = copy.deepcopy(stmt)
copyCmd[LINENUM] = lineNum[0]
lineNum[0] += 1
aslist = []
for asN in copyCmd["expr"]["list"]:
aslist.append(asN["as"])
copyCmd["expr"]["list"] = aslist
self.trueCmds.append(copyCmd)
elif "SetCommunities" in stmt["class"]:
# Juniper Communities
copyCmd = copy.deepcopy(stmt)
copyCmd[LINENUM] = lineNum[0]
lineNum[0] += 1
if "exprs" in copyCmd["communitySetExpr"]:
names = list()
for n in copyCmd["communitySetExpr"]["exprs"]:
if "name" in n:
names.append(n["name"])
del copyCmd["communitySetExpr"]["exprs"]
copyCmd["communitySetExpr"]["name"] = names
if "removalCriterion" in copyCmd["communitySetExpr"]:
copyCmd["communitySetExpr"]["name"] = copyCmd["communitySetExpr"]["removalCriterion"]["name"]
del copyCmd["communitySetExpr"]["removalCriterion"]
self.trueCmds.append(copyCmd)
else:
copyCmd = copy.deepcopy(stmt)
copyCmd[LINENUM] = lineNum[0]
lineNum[0] += 1
self.trueCmds.append(copyCmd)
def checkGuardCmdSyntax(self, cmd):
if "ConjunctionChain" in cmd["class"]:
# Juniper Other policy calls
if "subroutines" in cmd:
called = []
for sub in cmd["subroutines"]:
if "calledPolicyName" in sub:
called.append(sub["calledPolicyName"])
if called:
copyCmd = copy.deepcopy(cmd)
copyCmd["subroutines"] = called
return copyCmd
elif "MatchAsPath" in cmd["class"] and "ExplicitAsPathSet" in cmd["expr"]["class"]:
# Juniper AS-PATH
copyCmd = copy.deepcopy(cmd)
newElem = list()
for ele in copyCmd["expr"]["elems"]:
if "regex" in ele:
newElem.append(ele["regex"])
else:
warn("Unknown ExplicitAsPathSet class")
del copyCmd["expr"]["elems"]
copyCmd["expr"]["elems"] = newElem
return copyCmd
return copy.deepcopy(cmd)
def flattenDisjuncts(self, disjuncts):
"""
Assumption: a disjunct list contains either nested disjuncts/conjuncts or plain entities,
and each entity has at most one more layer of depth.
"""
flatcmd = {}
for cmd in disjuncts:
if "Disjunction" in cmd["class"] or "Conjunction" in cmd["class"]:
warn(
"Unhandled flatten Disjuncts - Disjunction/Conjunction in a Disjunction encountered")
else:
for key in cmd:
if key not in flatcmd:
flatcmd[key] = cmd[key]
else:
if flatcmd[key] != cmd[key]:
if isinstance(flatcmd[key], dict):
for k in cmd[key]:
if k not in flatcmd[key]:
flatcmd[key][k] = cmd[key][k]
elif flatcmd[key][k] != cmd[key][k]:
mergedValues = list()
if isinstance(flatcmd[key][k], list):
mergedValues.extend(
flatcmd[key][k])
else:
mergedValues.append(
flatcmd[key][k])
mergedValues.append(cmd[key][k])
flatcmd[key][k] = mergedValues
elif isinstance(flatcmd[key], list):
mergedValues = list()
mergedValues.extend(flatcmd[key])
mergedValues.extend(cmd[key])
flatcmd[key] = mergedValues
else:
warn("Flatten Disjuncts - non-dict/list encountered")
return flatcmd
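# A hedged example of what flattenDisjuncts produces (the key names here are
# placeholders, not taken from a real Batfish model):
#
#   disjuncts = [{"class": "C", "expr": {"name": "A"}},
#                {"class": "C", "expr": {"name": "B"}}]
#   flattened == {"class": "C", "expr": {"name": ["A", "B"]}}
#
# i.e. differing values for the same nested key are merged into a list on a single
# flattened command.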
class RoutePolicy:
""" Class to represent information about a route policy.
:ivar format: Configuration format of the router
:ivar name: Name of the route policy
:ivar router: Router in which the route policy is defined
:ivar routePolicyJson: The representation of the parsed route policy in JSON format.
"""
def __init__(self, name, router, routePolicyJson, format_):
self.format = format_
self.deviceName = router
self.name = name
self.blocks = list()
self.generateClauses(routePolicyJson, [0])
def generateClauses(self, routePolicyJson, lineNum):
if ("CISCO" in self.format.upper() or "ARISTA" in self.format.upper()) and len(routePolicyJson) > 0:
# If there is a IF clause
if "If" in routePolicyJson[0]["class"]:
self.blocks.append(
Block(lineNum, routePolicyJson[0]["guard"], routePolicyJson[0]["trueStatements"]))
if "falseStatements" in routePolicyJson[0]:
self.generateClauses(
routePolicyJson[0]["falseStatements"], lineNum)
# If it is the ending clause, it is ignored in the syntactic version.
elif "Statements$StaticStatement" in routePolicyJson[0]["class"] and "ReturnLocalDefaultAction" in routePolicyJson[0]["type"]:
pass
# Else all are just true statements without guardCmds
else:
self.blocks.append(Block(lineNum, None, routePolicyJson))
if "JUNIPER" in self.format.upper():
soFarstmts = list()
for stmt in routePolicyJson:
if "If" in stmt["class"]:
# The last IF which doesn't have a True Statement is a filler for now and is ignored in the syntactic version.
if "trueStatements" not in stmt or len(stmt["trueStatements"]) == 0:
if len(soFarstmts) > 0:
self.blocks.append(
Block(lineNum, None, soFarstmts))
soFarstmts = list()
# The last rule might actually have no guard in which case all the set commands are without IF statements.
elif "Statements$StaticStatement" in stmt["trueStatements"][0]["class"]:
if 'BooleanExprs$StaticBooleanExpr' in stmt["guard"]["class"]:
soFarstmts.extend(stmt["trueStatements"])
self.blocks.append(
Block(lineNum, None, soFarstmts))
soFarstmts = list()
else:
if len(soFarstmts) > 0:
self.blocks.append(
Block(lineNum, None, soFarstmts))
soFarstmts = list()
self.blocks.append(
Block(lineNum, stmt["guard"], None))
else:
#In Juniper there are terms without any action (relying on fall-through semantics), but we treat them as one clause with permit.
if len(soFarstmts) > 0:
self.blocks.append(
Block(lineNum, None, soFarstmts))
soFarstmts = list()
self.blocks.append(
Block(lineNum, stmt["guard"], stmt["trueStatements"]))
else:
if "type" not in stmt or 'SetDefaultActionReject' not in stmt["type"]:
soFarstmts.append(stmt)
def GetBlockSequence(device, deviceInfo, pattern, foundDevices, emptyDefDevices, exactDefMatchMap):
""" Generates block sequence for route policy from the parsed JSON object.
:ivar device: The name of the device.
:ivar deviceInfo: The JSON model of the configuration.
:ivar pattern: The routepolicy pattern that is templated.
:ivar foundDevices: The set of devices which have at least one routepolicy matching the pattern.
:ivar emptyDefDevices: The set of devices that have an empty definition for the routepolicy.
:ivar exactDefMatchMap: The bookkeeping used for exact equality optimization.
"""
patternMatchPolicies = []
patternMatchPoliciesLineCounts = []
if deviceInfo.get("routingPolicies"):
routePolicies = deviceInfo.get("routingPolicies")
for policyName in routePolicies:
if pattern.match(policyName):
if device in foundDevices:
rname = device + "#" + policyName
else:
rname = device
routePolicy = RoutePolicy(
policyName, rname, routePolicies[policyName]["statements"], deviceInfo['configurationFormat'])
if len(routePolicy.blocks) > 0:
foundDevices.add(rname)
if not commonFunctions.checkJSONEquality(exactDefMatchMap, routePolicies[policyName], rname):
if len(routePolicy.blocks[-1].trueCmds) > 0:
totalLines = routePolicy.blocks[-1].trueCmds[-1][LINENUM]
elif len(routePolicy.blocks[-1].guardCmds) > 0:
totalLines = routePolicy.blocks[-1].guardCmds[-1][LINENUM]
else:
totalLines = routePolicy.blocks[-1].action[LINENUM]
patternMatchPolicies.append(routePolicy)
patternMatchPoliciesLineCounts.append(totalLines)
else:
emptyDefDevices.add(rname)
return patternMatchPolicies, patternMatchPoliciesLineCounts
def GapPenalty(block):
"""Returns the score for matching the input block with a gap."""
return len(block.guardCmds)*STMT_PENALTY + \
len(block.trueCmds) * STMT_PENALTY
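# For example, a block with 2 guard commands and 3 true commands scores
# 2*STMT_PENALTY + 3*STMT_PENALTY = 25 when matched against a gap.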
def LineSequence(block):
"""Returns the line sequences for a block - returns guard and true cmds as a list of lists."""
combinedCmds = []
combinedCmds.append(block.guardCmds)
combinedCmds.append(block.trueCmds)
return combinedCmds
def ConvertToString(value):
if isinstance(value, str):
return [value]
elif isinstance(value, int):
return [str(value)]
elif isinstance(value, list):
newlist = []
for v in value:
newlist.append(str(v))
return newlist
else:
raise TypeError("Value other than int, list and string found!!")
def LineScoreHelper(value1, value2, paramValueMap):
score = abs(len(value1)-len(value2))*STMT_PENALTY
templateValues = copy.deepcopy(value1)
for v in value2:
if v in templateValues:
templateValues.remove(v)
else:
found = False
for p in templateValues:
if p in paramValueMap:
if v not in paramValueMap[p]:
score += (STMT_PENALTY/2)
templateValues.remove(p)
found = True
break
if not found:
score += STMT_PENALTY
return score
def LineScore(cmd, stmt, paramValueMap):
"""
Given two cmds, this functions returns their penalty score
"""
score = 0
if cmd["class"] != stmt["class"]:
return commonFunctions.INFINITY
else:
for key in cmd:
if key != LINENUM:
if isinstance(cmd[key], dict):
# Assuming only depth of 1.
if "class" | |
# Omid55
# Test module for group dynamics logs library.
from __future__ import division, print_function, absolute_import, unicode_literals
import os
import unittest
import numpy as np
import pandas as pd
from pandas import testing as pd_testing
from numpy import testing as np_testing
from parameterized import parameterized
import groupdynamics_logs_lib as gll
import utils
class TestTeamLogsLoaderLoad(unittest.TestCase):
@classmethod
def setUp(cls):
cls.loader = gll.TeamLogsLoader(
directory=os.getcwd() + '/src/testing_log/with_confidence')
cls.loader_no_confidence = gll.TeamLogsLoader(
directory=os.getcwd() + '/src/testing_log/without_confidence')
@classmethod
def tearDown(cls):
del cls.loader
del cls.loader_no_confidence
# =========================================================================
# ================================ _load ==================================
# =========================================================================
def test_load_answers_are_correct(self):
expected_answers = pd.DataFrame({
"sender":{0:"pogs10.1",1:"pogs10.1",2:"pogs10.2",3:"pogs10.2",4:"pogs10.1",5:"pogs10.1",6:"pogs10.2",7:"pogs10.2",8:"pogs10.2",9:"pogs10.1",10:"pogs10.1",11:"pogs10.2",12:"pogs10.2",13:"pogs10.2",14:"pogs10.1",15:"pogs10.2",16:"pogs10.2"},
"question":{0:"GD_solo_surgery0",1:"GD_solo_surgery0",2:"GD_solo_surgery0",3:"GD_solo_surgery0",4:"GD_solo_surgery1",5:"GD_solo_surgery1",6:"GD_solo_surgery1",7:"GD_solo_surgery2",8:"GD_solo_surgery2",9:"GD_solo_surgery3",10:"GD_solo_surgery3",11:"GD_solo_surgery3",12:"GD_solo_surgery3",13:"GD_solo_surgery3",14:"GD_solo_sports0",15:"GD_solo_sports0",16:"GD_solo_sports0"},
"input":{0:"answer",1:"confidence",2:"answer",3:"confidence",4:"answer",5:"confidence",6:"answer",7:"answer",8:"confidence",9:"answer",10:"confidence",11:"answer",12:"confidence",13:"answer",14:"confidence",15:"answer",16:"confidence"},
"value":{0:"0.7",1:"79%",2:"0.5",3:"55%",4:"0.8",5:"88.88%",6:"0.6",7:"1",8:"100%",9:"0.85",10:"90%",11:"0.85",12:"100%",13:"0.8",14:"50%",15:"0.1111",16:"10"},
"timestamp":{0:"2020-01-16 14:10:22",1:"2020-01-16 14:10:32",2:"2020-01-16 14:10:34",3:"2020-01-16 14:10:41",4:"2020-01-16 14:14:34",5:"2020-01-16 14:14:38",6:"2020-01-16 14:14:41",7:"2020-01-16 14:18:39",8:"2020-01-16 14:18:42",9:"2020-01-16 14:21:50",10:"2020-01-16 14:21:54",11:"2020-01-16 14:21:56",12:"2020-01-16 14:21:59",13:"2020-01-16 14:22:05",14:"2020-01-16 14:24:08",15:"2020-01-16 14:24:20",16:"2020-01-16 14:24:28"}},
columns=['sender', 'question', 'input', 'value', 'timestamp'])
pd_testing.assert_frame_equal(
expected_answers, self.loader.answers)
def test_load_influences_are_correct(self):
expected_influences = pd.DataFrame({
"sender":{0:"pogs10.1",1:"pogs10.2",2:"pogs10.2",3:"pogs10.1",4:"pogs10.1",5:"pogs10.2",6:"pogs10.2"},
"question":{0:"GD_influence_surgery1",1:"GD_influence_surgery1",2:"GD_influence_surgery1",3:"GD_influence_surgery2",4:"GD_influence_surgery2",5:"GD_influence_surgery2",6:"GD_influence_surgery2"},
"input":{0:"self",1:"self",2:"other",3:"self",4:"other",5:"self",6:"other"},
"value":{0:"90",1:"51",2:"49",3:"1",4:"99",5:"100",6:"0"},
"timestamp":{0:"2020-01-16 14:15:11",1:"2020-01-16 14:15:20",2:"2020-01-16 14:15:22",3:"2020-01-16 14:19:07",4:"2020-01-16 14:19:09",5:"2020-01-16 14:19:10",6:"2020-01-16 14:19:12"}},
columns=['sender', 'question', 'input', 'value', 'timestamp'])
pd_testing.assert_frame_equal(
expected_influences, self.loader.influences)
def test_load_frustrations_are_correct(self):
expected_frustrations = pd.DataFrame({
"sender":{0:"pogs10.2",1:"pogs10.1",2:"pogs10.1",3:"pogs10.2"},
"question":{0:"GD_frustration_surgery",1:"GD_frustration_surgery",2:"GD_frustration_surgery",3:"GD_frustration_surgery"},
"value":{0:"[\"Yes\",\"\",\"\",\"\"]",1:"[\"\",\"No\",\"\",\"\"]",2:"0",3:"5"},
"timestamp":{0:"2020-01-16 14:22:48",1:"2020-01-16 14:22:59",2:"2020-01-16 14:23:07",3:"2020-01-16 14:23:09"}},
columns=['sender', 'question', 'value', 'timestamp'])
pd_testing.assert_frame_equal(
expected_frustrations, self.loader.frustrations)
def test_load_messages_are_correct(self):
expected_messages = pd.DataFrame({
"sender":{0:"pogs10.1",1:"pogs10.2",2:"pogs10.2",3:"pogs10.2",4:"pogs10.1",5:"pogs10.1",6:"pogs10.2"},
"question":{0:"GD_group_surgery1",1:"GD_group_surgery1",2:"GD_group_surgery1",3:"GD_group_surgery1",4:"GD_group_surgery2",5:"GD_group_sports1",6:"GD_group_sports1"},
"text":{0:"Hello there",1:"Hi!!!",2:"I have no clue",3:":)",4:"sup?",5:"bye!",6:"BYE"},
"timestamp":{0:"2020-01-16 14:12:20",1:"2020-01-16 14:12:23",2:"2020-01-16 14:12:39",3:"2020-01-16 14:12:56",4:"2020-01-16 14:17:02",5:"2020-01-16 14:26:04",6:"2020-01-16 14:26:10"}},
columns=['sender', 'question', 'text', 'timestamp'])
pd_testing.assert_frame_equal(
expected_messages, self.loader.messages)
def test_load_answers_are_correct_in_log_with_no_confidence(self):
expected_answers = pd.DataFrame([
{"sender":"subj1","question":"GD_solo_disaster0","input":"answer","value":"50","timestamp":"2020-03-04 18:38:42"},
{"sender":"subj2","question":"GD_solo_disaster0","input":"answer","value":"200","timestamp":"2020-03-04 18:38:51"},
{"sender":"subj1","question":"GD_solo_disaster1","input":"answer","value":"55","timestamp":"2020-03-04 18:42:58"},
{"sender":"subj2","question":"GD_solo_disaster1","input":"answer","value":"1000 mil","timestamp":"2020-03-04 18:43:02"},
{"sender":"subj1","question":"GD_solo_disaster2","input":"answer","value":"100","timestamp":"2020-03-04 18:47:08"},
{"sender":"subj2","question":"GD_solo_disaster2","input":"answer","value":"$88","timestamp":"2020-03-04 18:47:18"}],
columns=['sender', 'question', 'input', 'value', 'timestamp'])
pd_testing.assert_frame_equal(
expected_answers, self.loader_no_confidence.answers)
def test_load_influences_are_correct_in_log_with_no_confidence(self):
expected_influences = pd.DataFrame([
{"sender":"subj1","question":"GD_influence_disaster1","input":"self","value":"100","timestamp":"2020-03-04 18:43:47"},
{"sender":"subj2","question":"GD_influence_disaster1","input":"self","value":"99","timestamp":"2020-03-04 18:43:54"},
{"sender":"subj2","question":"GD_influence_disaster1","input":"other","value":"1","timestamp":"2020-03-04 18:43:57"},
{"sender":"subj2","question":"GD_influence_disaster2","input":"self","value":"50","timestamp":"2020-03-04 18:47:43"},
{"sender":"subj2","question":"GD_influence_disaster2","input":"other","value":"55","timestamp":"2020-03-04 18:47:45"},
{"sender":"subj2","question":"GD_influence_disaster2","input":"self","value":"45","timestamp":"2020-03-04 18:47:46"}],
columns=['sender', 'question', 'input', 'value', 'timestamp'])
pd_testing.assert_frame_equal(
expected_influences, self.loader_no_confidence.influences)
def test_load_messages_are_correct_in_log_with_no_confidence(self):
expected_messages = pd.DataFrame([
{"sender":"subj1","question":"GD_group_disaster1","text":"hello","timestamp":"2020-03-04 18:40:50"},
{"sender":"subj2","question":"GD_group_disaster1","text":"hi there","timestamp":"2020-03-04 18:40:54"},
{"sender":"subj2","question":"GD_group_disaster1","text":"sup???","timestamp":"2020-03-04 18:41:58"},
{"sender":"subj1","question":"GD_group_disaster2","text":"cooooooooool","timestamp":"2020-03-04 18:45:26"}],
columns=['sender', 'question', 'text', 'timestamp'])
pd_testing.assert_frame_equal(
expected_messages, self.loader_no_confidence.messages)
# =========================================================================
# =================== get_answers_in_simple_format ========================
# =========================================================================
def test_get_answers_in_simple_format(self):
expected = pd.DataFrame({
"Question":{0:"sports0",1:"surgery0",2:"surgery1",3:"surgery2",4:"surgery3"},
"pogs10.1's answer":{0:"",1:"0.7",2:"0.8",3:"",4:"0.85"},
"pogs10.1's confidence":{0:"50%",1:"79%",2:"88.88%",3:"",4:"90%"},
"pogs10.2's answer":{0:"0.1111",1:"0.5",2:"0.6",3:"1",4:"0.8"},
"pogs10.2's confidence":{0:"10",1:"55%",2:"",3:"100%",4:"100%"}})
computed = self.loader.get_answers_in_simple_format()
pd_testing.assert_frame_equal(expected, computed)
# =========================================================================
# ===================== get_influence_matrices2x2 ==========================
# =========================================================================
def test_get_influence_matrices2x2(self):
expected_question_orders = ['surgery1', 'surgery2']
expected_influence_matrices = [
np.array([[0.9, 0.1],
[0.49, 0.51]]),
np.array([[0.01, 0.99],
[0.0, 1.0]])]
expected_influences_from_data = [
np.array([[True, False], [True, True]]),
np.array([[True, True], [True, True]])
]
computed_questions_order, computed_influence_matrices, computed_influences_from_data = (
self.loader.get_influence_matrices2x2(make_it_row_stochastic=True))
np_testing.assert_array_equal(
expected_question_orders, computed_questions_order)
np_testing.assert_array_equal(
expected_influence_matrices,
computed_influence_matrices)
np_testing.assert_array_equal(
expected_influences_from_data,
computed_influences_from_data)
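# Note (inferred from the expected values above and in the parameterized cases
# below, not from the library's documentation): reported weights are on a 0-100
# scale and each row is normalized to sum to 1, e.g. self=60 and other=90 become
# [0.6, 0.4]; a missing "other" entry is filled with the complement of "self"
# (self=90 with no reported other gives [0.9, 0.1]), which is also why the
# corresponding influences_from_data flag is False there.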
@parameterized.expand([
['with_one_missing',
pd.DataFrame({
"sender":{"0":"pogs10.1","1":"pogs10.2","2":"pogs10.2"},
"question":{"0":"GD_influence_surgery1","1":"GD_influence_surgery1","2":"GD_influence_surgery1"},
"input":{"0":"self","1":"self","2":"other"},"value":{"0":"90","1":"51","2":"49"},
"timestamp":{"0":"2020-01-16 14:15:11","1":"2020-01-16 14:15:20","2":"2020-01-16 14:15:22"}}),
[np.array([[0.9, 0.1],
[0.49, 0.51]])],
],
['with_one_missing_and_one_empty',
pd.DataFrame({
"sender":{"0":"pogs10.1","1":"pogs10.2","2":"pogs10.2"},
"question":{"0":"GD_influence_surgery1","1":"GD_influence_surgery1","2":"GD_influence_surgery1"},
"input":{"0":"self","1":"self","2":"other"},"value":{"0":"","1":"51","2":"49"},
"timestamp":{"0":"2020-01-16 14:15:11","1":"2020-01-16 14:15:20","2":"2020-01-16 14:15:22"}}),
[np.array([[0.5, 0.5],
[0.49, 0.51]])],
],
['with_only_one',
pd.DataFrame({
"sender":{"0":"pogs10.1"},
"question":{"0":"GD_influence_surgery1"},
"input":{"0":"self"},"value":{"0":"50",},
"timestamp":{"0":"2020-01-16 14:15:11"}}),
[np.array([[0.50, 0.50],
[0.50, 0.50]])],
],
['with_larger_values',
pd.DataFrame({
"sender":{"0":"pogs10.1","1":"pogs10.2","2":"pogs10.2"},
"question":{"0":"GD_influence_surgery1","1":"GD_influence_surgery1","2":"GD_influence_surgery1"},
"input":{"0":"self","1":"self","2":"other"},"value":{"0":"","1":"60","2":"90"},
"timestamp":{"0":"2020-01-16 14:15:11","1":"2020-01-16 14:15:20","2":"2020-01-16 14:15:22"}}),
[np.array([[0.5, 0.5],
[0.6, 0.4]])],
],
['with_duplicate_due_to_change_of_value',
pd.DataFrame({
"sender":{"0":"pogs10.1","1":"pogs10.2","2":"pogs10.2"},
"question":{"0":"GD_influence_surgery1","1":"GD_influence_surgery1","2":"GD_influence_surgery1"},
"input":{"0":"self","1":"self","2":"other"},
"value":{"0":"5","1":"51","2":"49"},
"timestamp":{"0":"2020-01-16 14:15:12","1":"2020-01-16 14:15:20","2":"2020-01-16 14:15:22"}}),
[np.array([[0.05, 0.95],
[0.49, 0.51]])],
]])
def test_get_influence_matrices2x2_mocked(self, name, influences, expected_influence_matrices):
self.loader.influences = influences
_, computed_influence_matrices, _ = (
self.loader.get_influence_matrices2x2(make_it_row_stochastic=True))
np_testing.assert_array_equal(
expected_influence_matrices,
computed_influence_matrices)
# =========================================================================
# ============== get_frustrations_in_simple_format ========================
# =========================================================================
def test_get_frustrations_in_simple_format(self):
expected = pd.DataFrame({
"Question":{0: "surgery"},
"pogs10.1's answer":{0: "0"},
"pogs10.2's answer":{0: "5"}})
computed = self.loader.get_frustrations_in_simple_format()
pd_testing.assert_frame_equal(expected, computed)
# =========================================================================
# =============== get_all_groups_info_in_one_dataframe ====================
# =========================================================================
def test_get_all_groups_info_in_one_dataframe(self):
teams_log_list = {'s10': self.loader}
dt = [
['s10', '1', 'asbestos', '', '', '', '', '', '', '', '', '', ''],
['s10', '1', 'disaster', '', '', '', '', '', '', '', '', '', ''],
['s10', '1', 'sports', '', '', '', '', '', '', '', '', '', ''],
['s10', '1', 'school', '', '', '', '', '', '', '', '', '', ''],
['s10', '1', 'surgery', '0.7', '0.8', '0.9', '0.1', '', '0.01', '0.99', '0.85', '', ''],
['s10', '2', 'asbestos', '', '', '', '', '', '', '', '', '', ''],
['s10', '2', 'disaster', '', '', '', '', '', '', '', '', '', ''],
['s10', '2', 'sports', '0.1111', '', '', '', '', '', '', '', '', ''],
['s10', '2', 'school', '', '', '', '', '', '', '', '', '', ''],
['s10', '2', 'surgery', '0.5', '0.6', '0.51', '0.49', '1', '1.0', '0.0', '0.8', '', '']]
expected = pd.DataFrame(dt, columns = [
'Group', 'Person', 'Issue', 'Initial opinion',
'Period1 opinion', 'Period1 wii', 'Period1 wij',
'Period2 opinion', 'Period2 wii', 'Period2 wij',
'Period3 opinion', 'Period3 wii', 'Period3 wij'])
computed = gll.get_all_groups_info_in_one_dataframe(
teams_log_list)
pd_testing.assert_frame_equal(expected, computed)
# =========================================================================
# ===================== compute_attachment ================================
# =========================================================================
# ====================== to initial opinion ===============================
def test_compute_attachment_raises_when_not_matching_opinions(self):
x1 = [0.1, 0.2, 0.6, 0.4]
x2 = [0.9, 0.4, 0.7]
w12 = [0.1, 0.0, 0.2]
with self.assertRaises(ValueError):
gll.compute_attachment(x1, x2, w12, gll.AttachmentType.TO_INITIAL)
def test_compute_attachment_raises_when_not_matching_opinions_influence(
self):
x1 = [0.1, 0.2, 0.6, 0.4]
x2 = [0.9, 0.4, 0.7, 0.5]
w12 = [0.1, 0.0]
with self.assertRaises(ValueError):
gll.compute_attachment(x1, x2, w12, gll.AttachmentType.TO_INITIAL)
def test_compute_attachment_raises_when_start_k_was_not_0_or_1(
self):
x1 = [0.1, 0.2, 0.6, 0.4]
x2 = [0.9, 0.4, 0.7, 0.5]
w12 = [0.1, 0.0]
with self.assertRaises(ValueError):
gll.compute_attachment(
x1, x2, w12, start_k=-1,
to_opinion=gll.AttachmentType.TO_INITIAL)
def test_compute_attachment_to_initial_op_when_denom_to_sum_almost_zero(
self):
x1 = [0.8, 0.85, 0.92, 0.92]
x2 = [0.6, 0.6, 0.7, 0.7]
w12 = [0.1, 0.2, 0.1]
expected_a11 = [
(0.85 - 0.8) / (0.1 * (0.6 - 0.8)),
np.nan, # (0.92 - 0.8) / (0.85 - 0.8 + 0.2 * (0.6 - 0.85)),
(0.92 - 0.8) / (0.92 - 0.8 + 0.1 * (0.7 - 0.92))
]
expected_details = [{}, {'n/0': 1}, {}]
computed_a11, details = gll.compute_attachment(
xi=x1, xj=x2, wij=w12, eps=0,
to_opinion=gll.AttachmentType.TO_INITIAL)
np_testing.assert_array_almost_equal(expected_a11, computed_a11)
utils.assert_dict_equals({'1': expected_details}, {'1': details})
def test_compute_attachment_to_initial_opinion(self):
x1 = [0.1, 0.2, 0.6, 0.4]
x2 = [0.9, 0.4, 0.7, 0.5]
w12 = [0.1, 0.0, 0.2]
expected_a11 = [0.1/0.08, 0.5/0.1, 0.3/0.52]
expected_details = [{}, {}, {}]
computed_a11, details = gll.compute_attachment(
xi=x1, xj=x2, wij=w12, eps=0,
to_opinion=gll.AttachmentType.TO_INITIAL)
np_testing.assert_array_almost_equal(expected_a11, computed_a11)
utils.assert_dict_equals({'1': expected_details}, {'1': details})
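# The expected values above follow the relation this test exercises (written out
# here only as a reading aid, not as the library's documented formula):
#   a11[k] = (x1[k+1] - x1[0]) / (x1[k] - x1[0] + w12[k] * (x2[k] - x1[k]))
# e.g. k=0: (0.2 - 0.1) / (0.0 + 0.1 * (0.9 - 0.1)) = 0.1 / 0.08.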
def test_compute_attachment_when_start_k_equals_1(self):
x1 = [0.1, 0.2, 0.6, 0.4]
x2 = [0.9, 0.4, 0.7, 0.5]
w12 = [0.1, 0.0, 0.2]
expected_a11 = [0.5/0.1, 0.3/0.52]
computed_a11, _ = gll.compute_attachment(
xi=x1, xj=x2, wij=w12, start_k=1, eps=0,
to_opinion=gll.AttachmentType.TO_INITIAL)
np_testing.assert_array_almost_equal(expected_a11, computed_a11)
def test_compute_attachment_when_division_by_zero(self):
x1 = [0.2, 0.2, 0.2]
x2 = [0.4, 0.4, 0.4]
w12 = [0.1, 0.0]
expected_a11 = [0 / 0.02,
np.nan]
computed_a11, _ = gll.compute_attachment(
xi=x1, xj=x2, wij=w12, eps=0,
to_opinion=gll.AttachmentType.TO_INITIAL)
np_testing.assert_array_almost_equal(expected_a11, computed_a11)
def test_compute_attachment_to_initial_when_division_by_zero_with_eps(self):
x1 = [0.2, 0.2, 0.2]
x2 = [0.4, 0.4, 0.4]
w12 = [0.1, 0.0]
eps = 0.01
expected_a11 = [0 / (0.02 + eps),
0 / (0 + eps)]
computed_a11, _ = gll.compute_attachment(
xi=x1, xj=x2, wij=w12, eps=eps,
to_opinion=gll.AttachmentType.TO_INITIAL)
np_testing.assert_array_almost_equal(expected_a11, computed_a11)
# ====================== to previous opinion ==============================
def test_compute_attachment_before_disc_raises_when_not_matching_opinions(
self):
x1 = [0.1, 0.2, 0.6, 0.4]
x2 = [0.9, 0.4, 0.7]
w12 = [0.1, 0.0, 0.2]
with self.assertRaises(ValueError):
gll.compute_attachment(x1, x2, w12,
to_opinion=gll.AttachmentType.TO_PREVIOUS)
def test_compute_attachment_bef_disc_raises_when_not_matching_op_influence(
self):
x1 = [0.1, 0.2, 0.6, 0.4]
x2 = [0.9, 0.4, 0.7, 0.5]
w12 = [0.1, 0.0]
with self.assertRaises(ValueError):
gll.compute_attachment(x1, x2, w12,
to_opinion=gll.AttachmentType.TO_PREVIOUS)
def test_compute_attachment_before_disc_raises_when_start_k_was_not_0_or_1(
self):
x1 = [0.1, 0.2, 0.6, 0.4]
x2 = [0.9, 0.4, 0.7, 0.5]
w12 = [0.1, 0.0]
with self.assertRaises(ValueError):
gll.compute_attachment(
x1, x2, w12, start_k=-1,
to_opinion=gll.AttachmentType.TO_PREVIOUS)
def test_compute_attachment_to_previous_opinion(self):
x1 = [0.1, 0.2, 0.6, 0.4]
x2 = [0.9, 0.4, 0.7, 0.5]
w12 = [0.1, 0.0, 0.2]
expected_a11 = [
0.1/0.08,
(0.6-0.1)/(0.2-0.1),
(0.4-0.2)/(0.6-0.2+0.2*(0.7-0.6))]
computed_a11, _ = gll.compute_attachment(
xi=x1, xj=x2, wij=w12, eps=0,
to_opinion=gll.AttachmentType.TO_PREVIOUS)
np_testing.assert_array_almost_equal(expected_a11, computed_a11)
def test_compute_attachment_to_opinion_before_disc_when_start_k_equals_1(
self):
x1 = [0.1, 0.2, 0.6, 0.4]
x2 = [0.9, 0.4, 0.7, 0.5]
w12 = [0.1, 0.0, 0.2]
expected_a11 = [(0.6-0.1)/(0.2-0.1),
(0.4-0.2)/(0.6-0.2+0.2*(0.7-0.6))]
computed_a11, _ = gll.compute_attachment(
xi=x1, xj=x2, wij=w12, start_k=1, eps=0,
to_opinion=gll.AttachmentType.TO_PREVIOUS)
np_testing.assert_array_almost_equal(expected_a11, computed_a11)
def test_compute_attachment_when_div_by_zero(
self):
x1 = [0.2, 0.2, 0.2]
x2 = [0.4, 0.4, 0.4]
w12 = [0.1, 0.0]
expected_a11 = [0, np.nan]
computed_a11, _ = gll.compute_attachment(
xi=x1, xj=x2, wij=w12, eps=0,
to_opinion=gll.AttachmentType.TO_PREVIOUS)
np_testing.assert_array_almost_equal(expected_a11, computed_a11)
def test_compute_attachment_to_opinion_bef_disc_when_div_by_zero_with_eps(
self):
x1 = [0.2, 0.2, 0.2]
x2 = [0.4, 0.4, 0.4]
w12 = [0.1, 0.0]
eps = 0.01
expected_a11 = [(0.2 - 0.2) / (0.1 * (0.4 - 0.2) + eps),
0 / eps]
computed_a11, _ = gll.compute_attachment(
xi=x1, xj=x2, wij=w12, eps=eps,
to_opinion=gll.AttachmentType.TO_PREVIOUS)
np_testing.assert_array_almost_equal(expected_a11, computed_a11)
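# Illustrative note (not part of the original test module): the expected values
# in the attachment tests above are consistent with an estimate of the form
#     a[k] = (xi[k+1] - ref) / (xi[k] - ref + wij[k] * (xj[k] - xi[k]) + eps)
# where ref = xi[0] for AttachmentType.TO_INITIAL and ref = xi[k-1] (falling back
# to xi[0] at k == 0) for AttachmentType.TO_PREVIOUS. A minimal sketch of that
# reading, kept commented out so it is not mistaken for the real gll code:
#
#   def attachment_sketch(xi, xj, wij, eps=0.0, to_previous=False):
#       estimates = []
#       for k in range(len(wij)):
#           ref = xi[k - 1] if (to_previous and k > 0) else xi[0]
#           denom = xi[k] - ref + wij[k] * (xj[k] - xi[k]) + eps
#           estimates.append((xi[k + 1] - ref) / denom if denom else float('nan'))
#       return estimates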
# =========================================================================
# ================== compute_all_teams_attachments ========================
# =========================================================================
def test_compute_all_teams_attachments(self):
# Attachment to the initial opinion.
teams_data = {
55: {
'asbestos': {
'w12': [0.1, 0.0, 0.2],
'w21': [0.0, 0.0, 0.0],
'x1': [0.1, 0.2, 0.6, 0.4],
'x2': [0.9, 0.4, 0.7, 0.5]},
'surgery': {
'w12': [0.35, 0.4, 0.5],
'w21': [0.25, 0.3, 0.3],
'x1': [0.6, 0.65, 0.7, 0.7],
'x2': [0.75, 0.5, 0.6, 0.7]}}}
expected_attachments = {
55: {
'asbestos': {
'a11': [0.1/0.08, 0.5/0.1, 0.3/0.52],
'a22': [np.nan, -0.2/-0.5, -0.4/-0.2],  # NaN was -0.5/0.
'a11_nan_details': [{}, {}, {}],
'a22_nan_details': [
{'n/0': 1, 'xi[k]-xi[0]==0': 1, 'wij[k]==0': 1}, {}, {}]
class LayerTransparencyDialog(object):
""" LayerTransparencyDialog() """
def ShowDialog(self):
""" ShowDialog(self: LayerTransparencyDialog) -> DialogResult """
pass
def ShowModal(self):
""" ShowModal(self: LayerTransparencyDialog) -> Nullable[bool] """
pass
Percent = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Percent(self: LayerTransparencyDialog) -> UInt32
Set: Percent(self: LayerTransparencyDialog) = value
"""
class LinetypeDialog(object):
""" LinetypeDialog() """
def ShowDialog(self):
""" ShowDialog(self: LinetypeDialog) -> DialogResult """
pass
def ShowModal(self):
""" ShowModal(self: LinetypeDialog) -> Nullable[bool] """
pass
IncludeByBlockByLayer = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IncludeByBlockByLayer(self: LinetypeDialog) -> bool
Set: IncludeByBlockByLayer(self: LinetypeDialog) = value
"""
Linetype = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Linetype(self: LinetypeDialog) -> ObjectId
Set: Linetype(self: LinetypeDialog) = value
"""
class LineWeightDialog(object):
""" LineWeightDialog() """
def ShowDialog(self):
""" ShowDialog(self: LineWeightDialog) -> DialogResult """
pass
def ShowModal(self):
""" ShowModal(self: LineWeightDialog) -> Nullable[bool] """
pass
IncludeByBlockByLayer = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IncludeByBlockByLayer(self: LineWeightDialog) -> bool
Set: IncludeByBlockByLayer(self: LineWeightDialog) = value
"""
LineWeight = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: LineWeight(self: LineWeightDialog) -> LineWeight
Set: LineWeight(self: LineWeightDialog) = value
"""
class MenuItem(Menu):
"""
MenuItem(value: str, icon: Icon)
MenuItem(value: str)
"""
@staticmethod # known case of __new__
def __new__(self, value, icon=None):
"""
__new__(cls: type, value: str, icon: Icon)
__new__(cls: type, value: str)
"""
pass
Checked = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Checked(self: MenuItem) -> bool
Set: Checked(self: MenuItem) = value
"""
Enabled = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Enabled(self: MenuItem) -> bool
Set: Enabled(self: MenuItem) = value
"""
Icon = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Icon(self: MenuItem) -> Icon
Set: Icon(self: MenuItem) = value
"""
Text = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Text(self: MenuItem) -> str
Set: Text(self: MenuItem) = value
"""
Visible = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Visible(self: MenuItem) -> bool
Set: Visible(self: MenuItem) = value
"""
Click = None
class MenuItemCollection(object):
""" MenuItemCollection(owner: Menu) """
def Add(self, value):
""" Add(self: MenuItemCollection, value: MenuItem) -> int """
pass
def Clear(self):
""" Clear(self: MenuItemCollection) """
pass
def Contains(self, value):
""" Contains(self: MenuItemCollection, value: MenuItem) -> bool """
pass
def CopyTo(self, array, index):
""" CopyTo(self: MenuItemCollection, array: Array[MenuItem], index: int) """
pass
def GetEnumerator(self):
""" GetEnumerator(self: MenuItemCollection) -> IEnumerator[IMenuItem] """
pass
def IndexOf(self, value):
""" IndexOf(self: MenuItemCollection, value: MenuItem) -> int """
pass
def Insert(self, index, value):
""" Insert(self: MenuItemCollection, index: int, value: MenuItem) """
pass
def Remove(self, value):
""" Remove(self: MenuItemCollection, value: MenuItem) """
pass
def RemoveAt(self, index):
""" RemoveAt(self: MenuItemCollection, index: int) """
pass
def __add__(self, *args): #cannot find CLR method
""" x.__add__(y) <==> x+y """
pass
def __contains__(self, *args): #cannot find CLR method
""" __contains__(self: IList, value: object) -> bool """
pass
def __getitem__(self, *args): #cannot find CLR method
""" x.__getitem__(y) <==> x[y] """
pass
def __iter__(self, *args): #cannot find CLR method
""" __iter__(self: IEnumerable) -> object """
pass
def __len__(self, *args): #cannot find CLR method
""" x.__len__() <==> len(x) """
pass
@staticmethod # known case of __new__
def __new__(self, owner):
""" __new__(cls: type, owner: Menu) """
pass
def __setitem__(self, *args): #cannot find CLR method
""" x.__setitem__(i, y) <==> x[i]= """
pass
Count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Count(self: MenuItemCollection) -> int
"""
IsFixedSize = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IsFixedSize(self: MenuItemCollection) -> bool
"""
IsReadOnly = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IsReadOnly(self: MenuItemCollection) -> bool
"""
class OpenFileDialog(object):
""" OpenFileDialog(title: str, defaultName: str, extension: str, dialogName: str, flags: OpenFileDialogFlags) """
def GetFilenames(self):
""" GetFilenames(self: OpenFileDialog) -> Array[str] """
pass
def ShowDialog(self):
""" ShowDialog(self: OpenFileDialog) -> DialogResult """
pass
def ShowModal(self):
""" ShowModal(self: OpenFileDialog) -> Nullable[bool] """
pass
@staticmethod # known case of __new__
def __new__(self, title, defaultName, extension, dialogName, flags):
""" __new__(cls: type, title: str, defaultName: str, extension: str, dialogName: str, flags: OpenFileDialogFlags) """
pass
Filename = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Filename(self: OpenFileDialog) -> str
"""
OpenFileDialogFlags = None
class OpenFileOrFolderDialog(object):
""" OpenFileOrFolderDialog(title: str, defaultName: str, extension: str, dialogName: str, flags: OpenFileDialogFlags) """
def ShowDialog(self):
""" ShowDialog(self: OpenFileOrFolderDialog) -> DialogResult """
pass
@staticmethod # known case of __new__
def __new__(self, title, defaultName, extension, dialogName, flags):
""" __new__(cls: type, title: str, defaultName: str, extension: str, dialogName: str, flags: OpenFileDialogFlags) """
pass
FileOrFoldername = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: FileOrFoldername(self: OpenFileOrFolderDialog) -> str
"""
class Palette(object):
# no doc
Name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Name(self: Palette) -> str
Set: Name(self: Palette) = value
"""
PaletteSet = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: PaletteSet(self: Palette) -> PaletteSet
"""
class PaletteActivatedEventArgs(EventArgs):
""" PaletteActivatedEventArgs(activated: Palette, deactivated: Palette) """
@staticmethod # known case of __new__
def __new__(self, activated, deactivated):
""" __new__(cls: type, activated: Palette, deactivated: Palette) """
pass
Activated = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Activated(self: PaletteActivatedEventArgs) -> Palette
"""
Deactivated = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Deactivated(self: PaletteActivatedEventArgs) -> Palette
"""
class PaletteActivatedEventHandler(MulticastDelegate):
""" PaletteActivatedEventHandler(A_0: object, A_1: IntPtr) """
def BeginInvoke(self, sender, e, callback, obj):
""" BeginInvoke(self: PaletteActivatedEventHandler, sender: object, e: PaletteActivatedEventArgs, callback: AsyncCallback, obj: object) -> IAsyncResult """
pass
def EndInvoke(self, result):
""" EndInvoke(self: PaletteActivatedEventHandler, result: IAsyncResult) """
pass
def Invoke(self, sender, e):
""" Invoke(self: PaletteActivatedEventHandler, sender: object, e: PaletteActivatedEventArgs) """
pass
@staticmethod # known case of __new__
def __new__(self, A_0, A_1):
""" __new__(cls: type, A_0: object, A_1: IntPtr) """
pass
class PaletteAddContextMenuEventArgs(EventArgs):
""" PaletteAddContextMenuEventArgs(menuitems: List[MenuItem], removeMenuItems: List[int], nHitFlag: int, nRightClkTab: int) """
@staticmethod # known case of __new__
def __new__(self, menuitems, removeMenuItems, nHitFlag, nRightClkTab):
""" __new__(cls: type, menuitems: List[MenuItem], removeMenuItems: List[int], nHitFlag: int, nRightClkTab: int) """
pass
HitFlag = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: HitFlag(self: PaletteAddContextMenuEventArgs) -> int
"""
MenuItems = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: MenuItems(self: PaletteAddContextMenuEventArgs) -> List[MenuItem]
"""
RemoveMenuItems = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: RemoveMenuItems(self: PaletteAddContextMenuEventArgs) -> List[int]
"""
RightClickTab = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: RightClickTab(self: PaletteAddContextMenuEventArgs) -> int
"""
class PaletteAddContextMenuEventHandler(MulticastDelegate):
""" PaletteAddContextMenuEventHandler(A_0: object, A_1: IntPtr) """
def BeginInvoke(self, sender, e, callback, obj):
""" BeginInvoke(self: PaletteAddContextMenuEventHandler, sender: object, e: PaletteAddContextMenuEventArgs, callback: AsyncCallback, obj: object) -> IAsyncResult """
pass
def EndInvoke(self, result):
""" EndInvoke(self: PaletteAddContextMenuEventHandler, result: IAsyncResult) """
pass
def Invoke(self, sender, e):
""" Invoke(self: PaletteAddContextMenuEventHandler, sender: object, e: PaletteAddContextMenuEventArgs) """
pass
@staticmethod # known case of __new__
def __new__(self, A_0, A_1):
""" __new__(cls: type, A_0: object, A_1: IntPtr) """
pass
class PaletteEnterSizeMoveEventArgs(EventArgs):
""" PaletteEnterSizeMoveEventArgs(bEnterSizeMove: bool) """
@staticmethod # known case of __new__
def __new__(self, bEnterSizeMove):
""" __new__(cls: type, bEnterSizeMove: bool) """
pass
EnterSizeMove = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: EnterSizeMove(self: PaletteEnterSizeMoveEventArgs) -> bool
"""
class PaletteEnterSizeMoveEventHandler(MulticastDelegate):
""" PaletteEnterSizeMoveEventHandler(A_0: object, A_1: IntPtr) """
def BeginInvoke(self, sender, e, callback, obj):
""" BeginInvoke(self: PaletteEnterSizeMoveEventHandler, sender: object, e: PaletteEnterSizeMoveEventArgs, callback: AsyncCallback, obj: object) -> IAsyncResult """
pass
def EndInvoke(self, result):
""" EndInvoke(self: PaletteEnterSizeMoveEventHandler, result: IAsyncResult) """
pass
def Invoke(self, sender, e):
""" Invoke(self: PaletteEnterSizeMoveEventHandler, sender: object, e: PaletteEnterSizeMoveEventArgs) """
pass
@staticmethod # known case of __new__
def __new__(self, A_0, A_1):
""" __new__(cls: type, A_0: object, A_1: IntPtr) """
pass
class PalettePersistEventArgs(EventArgs):
""" PalettePersistEventArgs(configurationSection: IConfigurationSection) """
@staticmethod # known case of __new__
def __new__(self, configurationSection):
""" __new__(cls: type, configurationSection: IConfigurationSection) """
pass
ConfigurationSection = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: ConfigurationSection(self: PalettePersistEventArgs) -> IConfigurationSection
"""
class PalettePersistEventHandler(MulticastDelegate):
""" PalettePersistEventHandler(A_0: object, A_1: IntPtr) """
def BeginInvoke(self, sender, e, callback, obj):
""" BeginInvoke(self: PalettePersistEventHandler, sender: object, e: PalettePersistEventArgs, callback: AsyncCallback, obj: object) -> IAsyncResult """
pass
def EndInvoke(self, result):
""" EndInvoke(self: PalettePersistEventHandler, result: IAsyncResult) """
pass
# Source repository: nitram509/nitram-micro-font
#!/usr/bin/python
# -*- coding: utf-8 -*-
nitram_micro_mono_CP437 = [
0, 0, 0, 0, 0,
10, 0, 4, 17, 14,
10, 0, 0, 14, 17,
27, 31, 31, 14, 4,
0, 0, 0, 0, 0,
0, 4, 10, 4, 14,
4, 14, 14, 4, 14,
0, 14, 14, 14, 0,
0, 0, 0, 0, 0,
0, 4, 10, 4, 0,
0, 0, 0, 0, 0,
30, 28, 31, 21, 7,
5, 13, 31, 12, 4,
20, 22, 31, 6, 4,
15, 10, 10, 10, 5,
21, 14, 27, 14, 21,
4, 12, 28, 12, 4,
4, 6, 7, 6, 4,
4, 14, 4, 14, 4,
10, 10, 10, 0, 10,
12, 11, 10, 10, 10,
0, 0, 0, 0, 0,
0, 0, 0, 31, 31,
0, 0, 0, 0, 0,
4, 14, 21, 4, 4,
4, 4, 21, 14, 4,
4, 8, 31, 8, 4,
4, 2, 31, 2, 4,
0, 2, 2, 30, 0,
0, 14, 14, 14, 0,
4, 14, 31, 0, 0,
0, 0, 31, 14, 4,
0, 0, 0, 0, 0,
4, 4, 4, 0, 4,
10, 10, 0, 0, 0,
10, 31, 10, 31, 10,
31, 5, 31, 20, 31,
17, 8, 4, 2, 17,
6, 9, 22, 9, 22,
8, 4, 0, 0, 0,
8, 4, 4, 4, 8,
2, 4, 4, 4, 2,
21, 14, 31, 14, 21,
0, 4, 14, 4, 0,
0, 0, 0, 4, 2,
0, 0, 14, 0, 0,
0, 0, 0, 0, 2,
8, 4, 4, 4, 2,
14, 25, 21, 19, 14,
4, 6, 4, 4, 14,
14, 8, 14, 2, 14,
14, 8, 12, 8, 14,
2, 2, 10, 14, 8,
14, 2, 14, 8, 14,
6, 2, 14, 10, 14,
14, 8, 12, 8, 8,
14, 10, 14, 10, 14,
14, 10, 14, 8, 14,
0, 4, 0, 4, 0,
0, 4, 0, 4, 2,
8, 4, 2, 4, 8,
0, 14, 0, 14, 0,
2, 4, 8, 4, 2,
14, 17, 12, 0, 4,
14, 9, 5, 1, 14,
6, 9, 17, 31, 17,
7, 9, 15, 17, 15,
14, 17, 1, 17, 14,
15, 25, 17, 17, 15,
31, 1, 15, 1, 31,
31, 1, 15, 1, 1,
14, 1, 25, 17, 14,
9, 17, 31, 17, 17,
14, 4, 4, 4, 14,
12, 8, 8, 10, 14,
9, 5, 3, 5, 9,
1, 1, 1, 1, 15,
17, 27, 21, 17, 17,
17, 19, 21, 25, 17,
14, 25, 17, 17, 14,
7, 9, 7, 1, 1,
14, 17, 17, 25, 30,
7, 9, 7, 5, 9,
30, 1, 14, 16, 15,
31, 4, 4, 4, 4,
9, 17, 17, 17, 14,
10, 10, 10, 10, 4,
9, 17, 21, 21, 10,
17, 10, 4, 10, 17,
17, 10, 4, 4, 4,
31, 8, 4, 2, 31,
12, 4, 4, 4, 12,
2, 4, 4, 4, 8,
6, 4, 4, 4, 6,
4, 10, 0, 0, 0,
0, 0, 0, 0, 14,
4, 8, 0, 0, 0,
6, 9, 17, 31, 17,
7, 9, 15, 17, 15,
14, 17, 1, 17, 14,
15, 25, 17, 17, 15,
31, 1, 15, 1, 31,
31, 1, 15, 1, 1,
14, 1, 25, 17, 14,
9, 17, 31, 17, 17,
14, 4, 4, 4, 14,
12, 8, 8, 10, 14,
18, 10, 6, 10, 18,
1, 1, 1, 1, 15,
17, 27, 21, 17, 17,
17, 19, 21, 25, 17,
14, 25, 17, 17, 14,
7, 9, 7, 1, 1,
14, 17, 17, 25, 30,
7, 9, 7, 5, 9,
30, 1, 14, 16, 15,
31, 4, 4, 4, 4,
9, 17, 17, 17, 14,
10, 10, 10, 10, 4,
9, 17, 21, 21, 10,
17, 10, 4, 10, 17,
17, 10, 4, 4, 4,
31, 8, 4, 2, 31,
12, 4, 2, 4, 12,
4, 4, 4, 4, 4,
6, 4, 8, 4, 6,
10, 5, 0, 0, 0,
0, 4, 10, 10, 14,
0, 0, 0, 0, 0,
10, 0, 10, 10, 14,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
10, 0, 14, 10, 30,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
31, 17, 17, 17, 31,
0, 14, 10, 14, 0,
0, 0, 4, 0, 0,
0, 0, 0, 0, 0,
0, 0, 4, 0, 0,
0, 14, 10, 14, 0,
0, 0, 0, 0, 0,
10, 0, 14, 10, 30,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
10, 0, 14, 10, 14,
0, 0, 0, 0, 0,
3, 25, 11, 9, 11,
28, 23, 21, 21, 29,
0, 3, 1, 1, 1,
10, 0, 14, 10, 14,
10, 0, 10, 10, 14,
0, 0, 0, 0, 31,
0, 0, 0, 0, 0,
0, 0, 0, 0, 31,
0, 0, 0, 0, 0,
0, 0, 0, 0, 31,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
4, 0, 6, 17, 14,
0, 0, 28, 4, 4,
0, 0, 7, 4, 4,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
4, 0, 4, 4, 4,
4, 18, 9, 18, 4,
4, 9, 18, 9, 4,
0, 10, 0, 10, 0,
10, 21, 10, 21, 10,
21, 10, 21, 10, 21,
4, 4, 4, 4, 4,
4, 4, 7, 4, 4,
4, 7, 4, 7, 4,
10, 10, 11, 10, 10,
0, 0, 15, 10, 10,
0, 7, 4, 7, 4,
10, 11, 8, 11, 10,
10, 10, 10, 10, 10,
0, 15, 8, 11, 10,
10, 11, 8, 15, 0,
10, 10, 15, 0, 0,
4, 7, 4, 7, 0,
0, 0, 7, 4, 4,
4, 4, 28, 0, 0,
4, 4, 31, 0, 0,
0, 0, 31, 4, 4,
4, 4, 28, 4, 4,
0, 0, 31, 0, 0,
4, 4, 31, 4, 4,
4, 28, 4, 28, 4,
10, 10, 26, 10, 10,
10, 26, 2, 30, 0,
0, 30, 2, 26, 10,
10, 27, 0, 31, 0,
0, 31, 0, 27, 10,
10, 26, 2, 26, 10,
0, 31, 0, 31, 0,
10, 27, 0, 27, 10,
4, 31, 0, 31, 0,
10, 10, 31, 0, 0,
0, 31, 0, 31, 4,
0, 0, 31, 10, 10,
10, 10, 30, 0, 0,
4, 28, 4, 28, 0,
0, 28, 4, 28, 4,
0, 0, 30, 10, 10,
10, 10, 31, 10, 10,
4, 31, 4, 31, 4,
4, 4, 7, 0, 0,
0, 0, 28, 4, 4,
31, 31, 31, 31, 31,
0, 0, 31, 31, 31,
3, 3, 3, 3, 3,
24, 24, 24, 24, 24,
31, 31, 31, 0, 0,
0, 0, 0, 0, 0,
6, 9, 13, 17, 13,
0, 0, 0, 0, 0,
14, 17, 17, 17, 14,
0, 4, 10, 4, 0,
0, 0, 4, 0, 0,
0, 0, 0, 0, 0,
0, 0, 4, 0, 0,
0, 4, 10, 4, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 14, 31, 14, 0,
16, 14, 10, 14, 1,
12, 2, 14, 2, 12,
6, 9, 9, 9, 9,
14, 0, 14, 0, 14,
4, 14, 4, 0, 14,
2, 4, 8, 4, 14,
8, 4, 2, 4, 14,
8, 20, 4, 4, 4,
4, 4, 4, 5, 2,
4, 0, 14, 0, 4,
10, 5, 0, 10, 5,
4, 14, 4, 0, 0,
0, 14, 14, 14, 0,
0, 0, 4, 0, 0,
24, 8, 11, 10, 4,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0,
import codecs
import errno
import fcntl
import io
import os
import pty
import resource
import signal
import struct
import sys
import termios
import time
try:
import builtins # Python 3
except ImportError:
import __builtin__ as builtins # Python 2
# Constants
from pty import (STDIN_FILENO, CHILD)
from .util import which, PtyProcessError
_platform = sys.platform.lower()
# Solaris uses internal __fork_pty(). All others use pty.fork().
_is_solaris = (
_platform.startswith('solaris') or
_platform.startswith('sunos'))
if _is_solaris:
use_native_pty_fork = False
from . import _fork_pty
else:
use_native_pty_fork = True
PY3 = sys.version_info[0] >= 3
if PY3:
def _byte(i):
return bytes([i])
else:
def _byte(i):
return chr(i)
class FileNotFoundError(OSError): pass
class TimeoutError(OSError): pass
_EOF, _INTR = None, None
def _make_eof_intr():
"""Set constants _EOF and _INTR.
This avoids doing potentially costly operations on module load.
"""
global _EOF, _INTR
if (_EOF is not None) and (_INTR is not None):
return
# inherit EOF and INTR definitions from controlling process.
try:
from termios import VEOF, VINTR
fd = None
for name in 'stdin', 'stdout':
stream = getattr(sys, '__%s__' % name, None)
if stream is None or not hasattr(stream, 'fileno'):
continue
try:
fd = stream.fileno()
except ValueError:
continue
if fd is None:
# no fd, raise ValueError to fall back on CEOF, CINTR
raise ValueError("No stream has a fileno")
intr = ord(termios.tcgetattr(fd)[6][VINTR])
eof = ord(termios.tcgetattr(fd)[6][VEOF])
except (ImportError, OSError, IOError, ValueError, termios.error):
# This fails when the controlling process is not a terminal (for example
# under cron(1)), or when stdin and stdout are both closed.
# Fall back to using CEOF and CINTR.
try:
from termios import CEOF, CINTR
(intr, eof) = (CINTR, CEOF)
except ImportError:
# ^C, ^D
(intr, eof) = (3, 4)
_INTR = _byte(intr)
_EOF = _byte(eof)
# setecho and setwinsize are pulled out here because on some platforms, we need
# to do this from the child before we exec()
def _setecho(fd, state):
errmsg = 'setecho() may not be called on this platform (it may still be possible to enable/disable echo when spawning the child process)'
try:
attr = termios.tcgetattr(fd)
except termios.error as err:
if err.args[0] == errno.EINVAL:
raise IOError(err.args[0], '%s: %s.' % (err.args[1], errmsg))
raise
if state:
attr[3] = attr[3] | termios.ECHO
else:
attr[3] = attr[3] & ~termios.ECHO
try:
# I tried TCSADRAIN and TCSAFLUSH, but these were inconsistent and
# blocked on some platforms. TCSADRAIN would probably be ideal.
termios.tcsetattr(fd, termios.TCSANOW, attr)
except IOError as err:
if err.args[0] == errno.EINVAL:
raise IOError(err.args[0], '%s: %s.' % (err.args[1], errmsg))
raise
def _setwinsize(fd, rows, cols):
# Some very old platforms have a bug that causes the value for
# termios.TIOCSWINSZ to be truncated. There was a hack here to work
# around this, but it caused problems with newer platforms so has been
# removed. For details see https://github.com/pexpect/pexpect/issues/39
TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
# Note, assume ws_xpixel and ws_ypixel are zero.
s = struct.pack('HHHH', rows, cols, 0, 0)
fcntl.ioctl(fd, TIOCSWINSZ, s)
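# Example (illustrative, not part of the original module): _setwinsize() and
# _setecho() can be exercised directly against a freshly opened pty pair; the
# 24x80 geometry is an arbitrary choice. Kept commented out so importing this
# module has no side effects.
#
#   import pty
#   master_fd, slave_fd = pty.openpty()
#   _setwinsize(slave_fd, 24, 80)    # rows, cols
#   _setecho(slave_fd, False)        # turn off echo on the slave end
#   os.close(master_fd)
#   os.close(slave_fd)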
class PtyProcess(object):
'''This class represents a process running in a pseudoterminal.
The main constructor is the :meth:`spawn` classmethod.
'''
string_type = bytes
if PY3:
linesep = os.linesep.encode('ascii')
crlf = '\r\n'.encode('ascii')
@staticmethod
def write_to_stdout(b):
try:
return sys.stdout.buffer.write(b)
except AttributeError:
# If stdout has been replaced, it may not have .buffer
return sys.stdout.write(b.decode('ascii', 'replace'))
else:
linesep = os.linesep
crlf = '\r\n'
write_to_stdout = sys.stdout.write
encoding = None
argv = None
env = None
launch_dir = None
def __init__(self, pid, fd):
_make_eof_intr() # Ensure _EOF and _INTR are calculated
self.pid = pid
self.fd = fd
readf = io.open(fd, 'rb', buffering=0)
writef = io.open(fd, 'wb', buffering=0, closefd=False)
self.fileobj = io.BufferedRWPair(readf, writef)
self.terminated = False
self.closed = False
self.exitstatus = None
self.signalstatus = None
# status returned by os.waitpid
self.status = None
self.flag_eof = False
# Used by close() to give kernel time to update process status.
# Time in seconds.
self.delayafterclose = 0.1
# Used by terminate() to give kernel time to update process status.
# Time in seconds.
self.delayafterterminate = 0.1
@classmethod
def spawn(
cls, argv, cwd=None, env=None, echo=True, preexec_fn=None,
dimensions=(24, 80)):
'''Start the given command in a child process in a pseudo terminal.
This does all the fork/exec type of stuff for a pty, and returns an
instance of PtyProcess.
If preexec_fn is supplied, it will be called with no arguments in the
child process before exec-ing the specified command.
It may, for instance, set signal handlers to SIG_DFL or SIG_IGN.
Dimensions of the pseudoterminal used for the subprocess can be
specified as a tuple (rows, cols), or the default (24, 80) will be used.
'''
# Note that it is difficult for this method to fail.
# You cannot detect if the child process cannot start.
# So the only way you can tell if the child process started
# or not is to try to read from the file descriptor. If you get
# EOF immediately then it means that the child is already dead.
# That may not necessarily be bad because you may have spawned a child
# that performs some task; creates no stdout output; and then dies.
if not isinstance(argv, (list, tuple)):
raise TypeError("Expected a list or tuple for argv, got %r" % argv)
# Shallow copy of argv so we can modify it
argv = argv[:]
command = argv[0]
command_with_path = which(command)
if command_with_path is None:
raise FileNotFoundError('The command was not found or was not ' +
'executable: %s.' % command)
command = command_with_path
argv[0] = command
# [issue #119] To prevent the case where exec fails and the user is
# stuck interacting with a python child process instead of whatever
# was expected, we implement the solution from
# http://stackoverflow.com/a/3703179 to pass the exception to the
# parent process
# [issue #119] 1. Before forking, open a pipe in the parent process.
exec_err_pipe_read, exec_err_pipe_write = os.pipe()
if use_native_pty_fork:
pid, fd = pty.fork()
else:
# Use internal fork_pty, for Solaris
pid, fd = _fork_pty.fork_pty()
# Some platforms must call setwinsize() and setecho() from the
# child process, and others from the master process. We do both,
# allowing IOError for either.
if pid == CHILD:
# set window size
try:
_setwinsize(STDIN_FILENO, *dimensions)
except IOError as err:
if err.args[0] not in (errno.EINVAL, errno.ENOTTY):
raise
# disable echo if spawn argument echo was unset
if not echo:
try:
_setecho(STDIN_FILENO, False)
except (IOError, termios.error) as err:
if err.args[0] not in (errno.EINVAL, errno.ENOTTY):
raise
# [issue #119] 3. The child closes the reading end and sets the
# close-on-exec flag for the writing end.
os.close(exec_err_pipe_read)
fcntl.fcntl(exec_err_pipe_write, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
# Do not allow child to inherit open file descriptors from parent,
# with the exception of the exec_err_pipe_write of the pipe
# Impose ceiling on max_fd: AIX bugfix for users with unlimited
# nofiles where resource.RLIMIT_NOFILE is 2^63-1 and os.closerange()
# occasionally raises out of range error
max_fd = min(1048576, resource.getrlimit(resource.RLIMIT_NOFILE)[0])
os.closerange(3, exec_err_pipe_write)
os.closerange(exec_err_pipe_write+1, max_fd)
if cwd is not None:
os.chdir(cwd)
if preexec_fn is not None:
try:
preexec_fn()
except Exception as e:
ename = type(e).__name__
tosend = '{}:0:{}'.format(ename, str(e))
if PY3:
tosend = tosend.encode('utf-8')
os.write(exec_err_pipe_write, tosend)
os.close(exec_err_pipe_write)
os._exit(1)
try:
if env is None:
os.execv(command, argv)
else:
os.execvpe(command, argv, env)
except OSError as err:
# [issue #119] 5. If exec fails, the child writes the error
# code back to the parent using the pipe, then exits.
tosend = 'OSError:{}:{}'.format(err.errno, str(err))
if PY3:
tosend = tosend.encode('utf-8')
os.write(exec_err_pipe_write, tosend)
os.close(exec_err_pipe_write)
os._exit(os.EX_OSERR)
# Parent
inst = cls(pid, fd)
# Set some informational attributes
inst.argv = argv
if env is not None:
inst.env = env
if cwd is not None:
inst.launch_dir = cwd
# [issue #119] 2. After forking, the parent closes the writing end
# of the pipe and reads from the reading end.
os.close(exec_err_pipe_write)
exec_err_data = os.read(exec_err_pipe_read, 4096)
os.close(exec_err_pipe_read)
# [issue #119] 6. The parent reads eof (a zero-length read) if the
# child successfully performed exec, since close-on-exec made
# successful exec close the writing end of the pipe. Or, if exec
# failed, the parent reads the error code and can proceed
# accordingly. Either way, the parent blocks until the child either calls
# exec or exits after reporting the error.
# Source repository: bb-froggy/tpm2-pkcs11
# SPDX-License-Identifier: BSD-2-Clause
# python stdlib dependencies
import binascii
import io
import os
import struct
import sys
import yaml
from tempfile import mkstemp
# local imports
from .command import Command
from .command import commandlet
from .db import Db
from .objects import PKCS11ObjectFactory as PKCS11ObjectFactory
from .objects import PKCS11X509
from .utils import AESAuthUnwrapper
from .utils import TemporaryDirectory
from .utils import hash_pass
from .utils import rand_hex_str
from .utils import pemcert_to_attrs
from .utils import str2bool
from .utils import str2bytes
from .utils import asn1parse_tss_key
from .utils import get_pobject
from .tpm2 import Tpm2
from .pkcs11t import * # noqa
class NewKeyCommandBase(Command):
'''
Creates a key on a token within a tpm2-pkcs11 store.
'''
def generate_options(self, group_parser):
group_parser.add_argument(
'--label',
help='The label of the token to import the key to.\n',
required=True)
group_parser.add_argument(
'--key-label',
help='The label of the key imported. Defaults to an integer value.\n')
group_parser.add_argument(
'--id',
help='The key id. Defaults to a random 8 bytes of hex.\n',
default=binascii.hexlify(os.urandom(8)).decode())
group_parser.add_argument(
'--attr-always-authenticate',
action='store_true',
help='Sets the CKA_ALWAYS_AUTHENTICATE attribute to CK_TRUE.\n')
group_parser.add_argument(
'--hierarchy-auth',
help='The hierarchyauth, required for transient pobjects.\n',
default='')
pinopts = group_parser.add_mutually_exclusive_group(required=True)
pinopts.add_argument('--sopin', help='The Administrator pin.\n'),
pinopts.add_argument('--userpin', help='The User pin.\n'),
# Implemented by derived class
def new_key_create(self, pobj, objauth, hierarchyauth, tpm2, alg, privkey, d):
raise NotImplementedError('Implement: new_key')
def new_key_init(self, label, sopin, userpin, hierarchyauth, pobj, sealobjects, tpm2, d):
pobj_handle = get_pobject(pobj, tpm2, hierarchyauth, d)
# Get the primary object's encrypted auth value and the SO/user seal key
# information needed to decode it, based on the incoming pin.
is_so = sopin != None
pin = sopin if is_so else userpin
pubkey = '%spub' % ('so' if is_so else 'user')
privkey = '%spriv' % ('so' if is_so else 'user')
saltkey = '%sauthsalt' % ('so' if is_so else 'user')
sealpub = sealobjects[pubkey]
sealpriv = sealobjects[privkey]
sealsalt = sealobjects[saltkey]
sealctx = tpm2.load(pobj_handle, pobj['objauth'], sealpriv, sealpub)
sealauth = hash_pass(pin, salt=sealsalt)['hash']
wrappingkey = tpm2.unseal(sealctx, sealauth)
wrapper = AESAuthUnwrapper(wrappingkey)
#create an auth value for the tertiary object.
objauth = rand_hex_str()
encobjauth = wrapper.wrap(str2bytes(objauth))
return (encobjauth, objauth)
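# Illustrative note (not from the original source): new_key_init() unseals the
# pin-protected wrapping key and uses it to encrypt a freshly generated auth
# value for the new tertiary object. Conceptually the flow is:
#
#   wrappingkey = tpm2.unseal(sealctx, sealauth)     # gated by the user/SO pin
#   wrapper = AESAuthUnwrapper(wrappingkey)
#   objauth = rand_hex_str()                         # plaintext auth, used at key creation
#   encobjauth = wrapper.wrap(str2bytes(objauth))    # only this is persisted
#
# (Whether AESAuthUnwrapper also exposes an inverse unwrap() used at runtime is
# an assumption here; only wrap() is exercised in this file.)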
@staticmethod
def new_key_save(alg, keylabel, tid, label, privblob, pubblob,
tertiarypubdata, encobjauth, db, tpm2, extra_privattrs=None, extra_pubattrs=None):
token = db.gettoken(label)
#
# Cache the object's attributes from the public structure and other sources
# and populate the db with the data. This allows use of the public data
# without needing to load any objects, which would require a pin.
#
y = yaml.safe_load(tertiarypubdata)
initial_pubattrs = {}
initial_privattrs = {}
# add the id
initial_privattrs.update({CKA_ID: binascii.hexlify(tid.encode()).decode()})
initial_pubattrs.update({CKA_ID: binascii.hexlify(tid.encode()).decode()})
# Add keylabel for ALL objects if set
if keylabel is not None:
initial_privattrs.update({
CKA_LABEL: binascii.hexlify(keylabel.encode()).decode()
})
initial_pubattrs.update({
CKA_LABEL: binascii.hexlify(keylabel.encode()).decode()
})
# add additional attrs
if extra_privattrs:
initial_privattrs.update(extra_privattrs)
if initial_pubattrs and extra_pubattrs:
initial_pubattrs.update(extra_pubattrs)
objects = PKCS11ObjectFactory(y, tpm2, encobjauth, initial_pubattrs, initial_privattrs, tpm_pub=pubblob, tpm_priv=privblob)
# Store private to database
db.addtertiary(token['id'], objects['private'])
# if it's asymmetric, add a public object too
if 'public' in objects and objects['public'] is not None:
db.addtertiary(token['id'], objects['public'])
return objects
@staticmethod
def output(objects, action):
d = {
'action' : action,
}
for k, v in objects.items():
if v is not None:
d[k] = { 'CKA_ID' : objects[k][CKA_ID] }
yaml.safe_dump(d, sys.stdout, default_flow_style=False)
def __call__(self, args):
path = args['path']
with Db(path) as db:
with TemporaryDirectory() as d:
tpm2 = Tpm2(d)
label = args['label']
sopin = args['sopin']
userpin = args['userpin']
alg = args['algorithm'] if 'algorithm' in args else None
key_label = args['key_label']
tid = args['id']
hierarchyauth = args['hierarchy_auth']
privkey = None
try:
privkey = args['privkey']
except KeyError:
privkey = None
token = db.gettoken(label)
pobjectid = token['pid']
pobj = db.getprimary(pobjectid)
sealobjects = db.getsealobject(token['id'])
encobjauth, objauth = self.new_key_init(
label, sopin, userpin, hierarchyauth,
pobj, sealobjects, tpm2, d)
tertiarypriv, tertiarypub, tertiarypubdata = self.new_key_create(
pobj, objauth, hierarchyauth, tpm2, alg, privkey, d)
# handle options that can add additional attributes
always_auth = args['attr_always_authenticate']
priv_attrs = {CKA_ALWAYS_AUTHENTICATE : always_auth}
return NewKeyCommandBase.new_key_save(
alg, key_label, tid, label, tertiarypriv, tertiarypub,
tertiarypubdata, encobjauth, db, tpm2, extra_privattrs=priv_attrs)
@commandlet("import")
class ImportCommand(NewKeyCommandBase):
'''
Imports an RSA or ECC key to a token within a tpm2-pkcs11 store.
'''
# adhere to an interface
# pylint: disable=no-self-use
def generate_options(self, group_parser):
super(ImportCommand, self).generate_options(group_parser)
group_parser.add_argument(
'--privkey',
help='Full path of the private key to be imported.\n',
required=True)
group_parser.add_argument(
'--algorithm',
help='The type of the key.\n',
choices=['rsa', 'ecc'],
required=True)
# Imports a new key
def new_key_create(self, pobj, objauth, hierarchyauth, tpm2, alg, privkey, d):
pobj_handle = get_pobject(pobj, tpm2, hierarchyauth, d)
tertiarypriv, tertiarypub, tertiarypubdata = tpm2.importkey(
pobj_handle, pobj['objauth'], objauth, privkey=privkey, alg=alg)
return (tertiarypriv, tertiarypub, tertiarypubdata)
def __call__(self, args):
objects = super(ImportCommand, self).__call__(args)
NewKeyCommandBase.output(objects, 'import')
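# Example (illustrative): the options wired up above suggest an invocation along
# these lines, assuming the usual tpm2_ptool entry point and an existing token
# labelled "mytoken"; the exact command name and flag spellings should be
# checked against the installed tool.
#
#   tpm2_ptool import --label=mytoken --userpin=myuserpin \
#       --privkey=/path/to/key.pem --algorithm=rsa --key-label=imported-rsa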
@commandlet("addkey")
class AddKeyCommand(NewKeyCommandBase):
'''
Adds a key to a token within a tpm2-pkcs11 store.
'''
# adhere to an interface
# pylint: disable=no-self-use
def generate_options(self, group_parser):
super(AddKeyCommand, self).generate_options(group_parser)
group_parser.add_argument(
'--algorithm',
help='The type of the key.\n',
choices=Tpm2.ALGS,
required=True)
# Creates a new key
def new_key_create(self, pobj, objauth, hierarchyauth, tpm2, alg, privkey, d):
pobj_handle = get_pobject(pobj, tpm2, hierarchyauth, d)
tertiarypriv, tertiarypub, tertiarypubdata = tpm2.create(
pobj_handle, pobj['objauth'], objauth, alg=alg)
return (tertiarypriv, tertiarypub, tertiarypubdata)
def __call__(self, args):
objects = super(AddKeyCommand, self).__call__(args)
NewKeyCommandBase.output(objects, 'add')
@commandlet("addcert")
class AddCert(Command):
'''
Adds a certificate object
'''
# adhere to an interface
# pylint: disable=no-self-use
def generate_options(self, group_parser):
group_parser.add_argument(
'--label', help='The profile label to remove.\n', required=True)
group_parser.add_argument(
'cert', help='The x509 PEM certificate to add.\n')
sub_group = group_parser.add_mutually_exclusive_group()
sub_group.add_argument(
'--key-label',
help='The associated private key label.\n')
group_parser.add_argument(
'--key-id',
help='The associated private key id in hex.\n')
def __call__(self, args):
path = args['path']
label = args['label']
keylabel = args['key_label']
keyid = args['key_id']
certpath = args['cert']
if (keylabel is None) == (keyid is None):
sys.exit('Expected --key-label or --key-id to be specified')
attrs = pemcert_to_attrs(certpath)
pkcs11_object = PKCS11X509(attrs)
with Db(path) as db:
# get token to add to
token = db.gettoken(label)
# verify that key is existing
# XXX we should be verifying that it's expected, but I guess one could always load up a cert
# not associated with a key.
tobjs = db.gettertiary(token['id'])
# look up the private key
missing_id_or_label = None
for t in tobjs:
if keylabel is not None:
missing_id_or_label = AddCert.get_id_by_label(t, keylabel)
else:
missing_id_or_label = AddCert.get_label_by_id(t, keyid)
if missing_id_or_label is not None:
break
if missing_id_or_label is None:
raise RuntimeError('Cannot find key with id "%s"' % keylabel)
# have valid keylabel needed id
if keylabel:
pkcs11_object.update({CKA_ID: missing_id_or_label})
pkcs11_object.update({CKA_LABEL: binascii.hexlify(keylabel.encode()).decode()})
# have valid id needed keylabel
else:
pkcs11_object.update({CKA_LABEL: missing_id_or_label})
pkcs11_object.update({CKA_ID: keyid})
# TODO verify that cert is cryptographically bound to key found
# add the cert
db.addtertiary(token['id'], pkcs11_object)
NewKeyCommandBase.output({'cert' : pkcs11_object}, 'add')
@staticmethod
def get_id_by_label(tobj, keylabel):
attrs = yaml.safe_load(io.StringIO(tobj['attrs']))
if CKA_LABEL in attrs:
x = attrs[CKA_LABEL]
x = binascii.unhexlify(x).decode()
if x == keylabel and attrs[CKA_CLASS] == CKO_PRIVATE_KEY:
return attrs[CKA_ID]
return None
@staticmethod
def get_label_by_id(tobj, keyid):
attrs = yaml.safe_load(io.StringIO(tobj['attrs']))
if CKA_ID in attrs:
x = attrs[CKA_ID]
if x == keyid and attrs[CKA_CLASS] == CKO_PRIVATE_KEY:
return attrs[CKA_LABEL] if CKA_LABEL in attrs else ''
return None
@commandlet("objmod")
class ObjMod(Command):
'''
Dumps and modifies objects.
'''
_type_map = {
'int' : 'do_int',
'str' : 'do_str',
'bool': 'do_bool',
'raw' : 'do_raw',
}
@staticmethod
def do_int(value):
return int(value, 0)
@staticmethod
def do_bool(value):
return str2bool(value)
@staticmethod
def do_str(value):
return binascii.hexlify(value.encode()).decode()
@staticmethod
def do_raw(value):
return value
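# Illustrative note (not from the original source): _type_map routes the
# command's --type option to one of the converters above, for example:
#   ObjMod.do_int('0x4')     -> 4
#   ObjMod.do_bool('yes')    -> True   (assuming str2bool accepts 'yes')
#   ObjMod.do_str('mykey')   -> '6d796b6579'  (hex of the UTF-8 bytes)
#   ObjMod.do_raw('6d79')    -> '6d79' (passed through unchanged)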
@classmethod
def mod(cls, path, tid, key, value, inattrs, vtype):
with Db(path) as db:
obj = db.getobject(tid)
if obj is None:
sys.exit('Not found, object with id: {}'.format(tid))
s = obj['attrs']
obj_attrs = yaml.safe_load(s)
# if we don't have any update data, just dump the attributes
if not key and not inattrs:
print(yaml.safe_dump(obj_attrs, default_flow_style=False))
sys.exit()
# if we have attributes YAML file, then we want to update all attributes
if inattrs:
with Db(path) as db:
y = yaml.safe_load(open(inattrs, "r"))
db.updatetertiary(obj['id'], y)
sys.exit()
# else we have --key and possibly --value
#
# look in the CKA_ globals from pkcs11t.py file for
# a mapping string or raw value map.
# filter(lambda x: x.startswith('CKA_'), globals().keys())
keys = []
for k in globals().keys():
if k.startswith('CKA_'):
keys.append(k)
keynames = {}
for k in keys:
keynames[globals()[k]] = k
keyname=None
if key in keys:
keyname=key
key=globals()[key]
else:
key = int(key, 0)
if key not in keynames:
sys.exit('Unknown key: %d' % key)
keyname = keynames[key]
if not value:
if key and key not in obj_attrs:
sys.exit("Key not found")
print(yaml.safe_dump({keyname : obj_attrs[key]}))
sys.exit()
if not vtype:
sys.exit("When specifying a value, type is required")
value = getattr(cls, ObjMod._type_map[vtype])(value)
obj_attrs[key] = value
with Db(path) as db:
    db.updatetertiary(obj['id'], obj_attrs)
"""Compilation of functions for analyzing maizsim output."""
import os
import collections
import pandas as pd
import numpy as np
from numpy import genfromtxt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from SALib.analyze import rbd_fast
from ideotype import DATA_PATH
from ideotype.init_params import params_sample
from ideotype.data_process import read_data, process_sims, agg_sims
def run_rbdfast(N_sample, run_name):
"""
Sensitivity analysis through RBD-FAST method.
Parameters
----------
N_sample : int
number of samples to generate.
run_name : str
run name for batch sims.
"""
problem, param_values = params_sample(run_name, N_sample)
# * param_values cannot be used directly for the rbd_fast analysis here,
# since its values change with each sampling;
# instead, read in the previously saved param .csv file as a matrix.
fpath_read = os.path.join(os.path.expanduser('~'),
'upscale', 'params',
f'param_{run_name}.csv'
)
X = genfromtxt(fpath_read, delimiter=',', skip_header=1)
# TODO: still need code that reads in Y here
Y = []
# Calculate sensitivity index
Si = rbd_fast.analyze(problem, X, Y, print_to_console=False)
return Si
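# Example (illustrative, tied to the TODO above): Y is expected to be the model
# response aligned row-for-row with X, i.e. a 1-D array with one entry per
# parameter sample, so rbd_fast can relate each column of X to the response.
# A hypothetical wiring (the dataframe and column name are placeholders):
#
#   X = genfromtxt(fpath_read, delimiter=',', skip_header=1)
#   Y = df_sims_summary['dm_ear'].to_numpy()   # hypothetical per-sample yield
#   Si = rbd_fast.analyze(problem, X, Y, print_to_console=False)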
def run_pca(df, n):
"""
Run PCA on dataset.
Parameters
----------
df : np.matrix or pd.DataFrame
Data for PCA.
n : int
Number of components.
Returns
-------
Dataframe with all PC components.
"""
x = df
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=n)
principalComponents = pca.fit_transform(x)
column_labels = [f'PC{comp+1}' for comp in np.arange(n)]
df_pca = pd.DataFrame(data=principalComponents,
columns=column_labels)
return pca, df_pca
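# Example (illustrative): run_pca() standardizes its input before projecting,
# so it can be pointed at any all-numeric dataframe; the input below is a
# placeholder, not a column set guaranteed to exist.
#
#   pca, df_pca = run_pca(df_params.select_dtypes('number'), n=3)
#   print(pca.explained_variance_ratio_)   # variance captured by PC1-PC3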
def linear_mod(df, features, target, test_size=0.33):
"""
Linear model that operates from DF based on features & target.
Parameters
----------
df : pd.DataFrame
Dataframe to draw data for linear model.
features : list
List of features as strings used to construct linear model.
target : list
List containing the target column name as a string.
test_size : float
Default 0.33 - fraction of data intended for testing (currently unused
within this function).
"""
X = df[features]
y = df[target]
mod = LinearRegression(fit_intercept=True)
mod.fit(X, y)
y_pred = mod.predict(X)
coefs = mod.coef_
mse = mean_squared_error(y, y_pred)
r2 = r2_score(y, y_pred)
return coefs, mse, r2
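# Example (illustrative): fit yield against a couple of parameter columns; the
# feature names here are placeholders, not guaranteed to exist in the data.
#
#   coefs, mse, r2 = linear_mod(df_all, features=['param_a', 'param_b'],
#                               target=['dm_ear'])
#   print(coefs, mse, r2)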
def identify_top_phenos(run_name, n_pheno=5, w_yield=1, w_disp=1):
"""
Identify top performing phenotypes.
Parameters
----------
n_pheno : int
Number of top phenotypes to identify.
w_yield : float
Weight on importance of yield.
Value between 0 and 1.
w_disp : float
Weight on importance of yield dispersion.
Value between 0 and 1.
Returns
-------
df_pheno : pd.DataFrame
Dataframe with phenotype performance and site info for mapping.
mx_pheno : np.array
Matrix with site, pheno, and phenotype performance info for heatmap.
"""
df_sims, df_sites, df_wea, df_params, df_all, df_matured = read_data(
os.path.join(DATA_PATH, 'files', f'filepaths_{run_name}.yml'))
sites = sorted(list(set(df_all.site)))
phenos = list(set(df_all.cvar))
list_top_phenos = [[] for item in np.arange(n_pheno)]
# Identify high performing combinations
for site in sites:
# Filter out data for specific site
df_sub = df_all.query(f'site=="{site}"')
# Calculate mean yield and yield dispersion across years
# for the specified site
yield_mean = df_sub.groupby('cvar').mean().dm_ear
yield_var = df_sub.groupby('cvar').var().dm_ear
yield_disp = yield_var/yield_mean
# Rescale yield_mean & yield_disp to values between 0 & 1
yield_mean_norm = (
yield_mean-yield_mean.min())/(yield_mean.max()-yield_mean.min())
yield_disp_norm = (
yield_disp-yield_disp.min())/(yield_disp.max()-yield_disp.min())
# Identify max yield and min dispersion
max_yield = yield_mean_norm.max()
min_disp = yield_disp_norm.min()
# Calculate distance to theoretical optimal
dist = [np.sqrt(
w_yield*(ymean - max_yield)**2 + w_disp*(ydisp - min_disp)**2)
for ymean, ydisp in zip(yield_mean_norm, yield_disp_norm)]
df_dist = pd.DataFrame(dist, columns=['dist'])
top_phenos = list(df_dist.nsmallest(n_pheno, 'dist').index)
for item in np.arange(len(list_top_phenos)):
top_pheno = top_phenos[item]
list_top_phenos[item].append(top_pheno)
# Set up dataframe with top performing pheno info
df_pheno = pd.DataFrame(list_top_phenos).transpose()
df_pheno.columns = [f'pheno{n+1}' for n in np.arange(n_pheno)]
df_pheno['sites'] = sites
df_pheno = pd.merge(df_pheno, df_sites, left_on='sites', right_on='site')
df_sites_sorted = pd.DataFrame(sites)
df_sites_sorted.columns = ['site']
df_sites_sorted['site_num'] = np.arange(len(sites))
df_pheno = pd.merge(
df_pheno, df_sites_sorted, left_on='sites', right_on='site')
# Initiate empty matrix
mx_pheno = np.empty(shape=[len(phenos), len(sites)])
mx_pheno[:] = np.nan
# Fill in matrix data
for item in np.arange(n_pheno):
mx_pheno[df_pheno[f'pheno{item+1}'], df_pheno['site_num']] = item + 1
return(df_pheno, mx_pheno)
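# Worked example (illustrative): with w_yield = w_disp = 1, a phenotype whose
# normalized mean yield is 0.9 and normalized dispersion is 0.2 at a site whose
# normalized optimum is (max_yield=1.0, min_disp=0.0) sits at distance
#   sqrt(1*(0.9 - 1.0)**2 + 1*(0.2 - 0.0)**2) ~= 0.224
# so the n_pheno phenotypes with the smallest such distances are kept per site.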
def top_pheno_prevalence(run_name, n_pheno, intervals):
"""
Identify the prevalence of top performing phenotypes.
Parameters
----------
run_name : str
Simulation run name.
n_pheno : int
Number of top phenotypes to identify.
intervals : int
Number of intervals to create for yield and dispersion weights.
Returns
-------
df_pheno_prevalence : pd.DataFrame
"""
pheno_prevalences = []
list_intervals = [round(item, 2) for item in np.arange(
0, 1.000001, 1/intervals)]
w_yields = list_intervals.copy()
w_disps = list_intervals.copy()
w_disps.reverse()
for item in np.arange(intervals):
df_pheno, mx = identify_top_phenos(
run_name=run_name,
n_pheno=n_pheno,
w_yield=w_yields[item],
w_disp=w_disps[item])
# convert matrix with site and ranking info into dataframe
df = pd.DataFrame(mx)
# count the number of times each phenotype
# made it into top rankings (n_pheno) across all locations
pheno_prevalence = list(df.count(axis=1))
pheno_prevalences.append(pheno_prevalence)
df_pheno_prevalence = pd.DataFrame(pheno_prevalences).transpose()
return(df_pheno_prevalence)
def prevalent_top_pheno(run_name, n_pheno, w_yield, w_disp, site_threshold):
"""
Identify top performing and prevalent phenotypes.
Parameters
----------
run_name : str
site_threshold : int
Minimum number of sites at which a phenotype must rank as a top
performer.
Returns
-------
list_top_pheno : list
List of top performing prevalent phenotypes.
"""
df_pheno, mx_pheno = identify_top_phenos(
run_name, n_pheno, w_yield, w_disp)
df_prevalence = pd.DataFrame(mx_pheno).notna().astype(int).sum(axis=1)
df_prevalence_sorted = df_prevalence.sort_values()
list_top_phenos = df_prevalence_sorted[
df_prevalence_sorted > site_threshold].index.tolist()
list_top_phenos.reverse()
return(list_top_phenos)
def rank_by_yield(df):
"""
Rank phenotypes by yield only.
Parameters
----------
df : pd.DataFrame
MAIZSIM yield output dataframe.
df_sims or df_mature
"""
# Prep data
groups = ['cvar', 'site']
how = 'mean'
sim = 'dm_ear'
mx_mean = agg_sims(df, groups, how, sim)
df_yield_means = pd.DataFrame(mx_mean)
# Sort data based on mean yield value
df_yield_means['mean'] = df_yield_means.mean(axis=1)
# Rank phenos by yield
phenos_ranked_by_yield = list(df_yield_means.sort_values(by=['mean'],
axis=0, ascending=False).index)
return phenos_ranked_by_yield
def rank_all_phenos(run_name, n_pheno, w_yield, w_disp):
"""
Rank performance for all phenotypes across all locations.
Parameters
----------
run_name : str
n_pheno : int
w_yield : float
w_disp : float
Returns
-------
phenos_ranked : list
"""
# Identify ranking for all phenotypes
df_pheno, mx = identify_top_phenos(
run_name, n_pheno=n_pheno, w_yield=w_yield, w_disp=w_disp)
# Rank general performance for all phenotypes across all sites
performance = []
for site in np.arange(df_pheno.shape[0]):
# Select phenotypes ranked by performance from df_pheno
phenos = df_pheno.iloc[site, :n_pheno].tolist()
# Assign each phenotype a rank value
# -- lower values mean better performance
pheno_ranks = np.arange(n_pheno)
# Compile phenotype and ranking info into dict
dict_rank = dict(zip(phenos, pheno_ranks))
# Sort dict to order by phenotype
dict_sorted = collections.OrderedDict(sorted(dict_rank.items()))
# Append ranking into list of performance
performance.append(list(dict_sorted.values()))
# Calculate performance
# -- phenotypes with lowest sum have best performance overall
df_rankings = pd.DataFrame(performance).transpose()
df_performance = df_rankings.sum(axis=1)
phenos_ranked = list(df_performance.sort_values(ascending=True).index)
return(df_rankings, phenos_ranked)
def rank_top_phenos(run_name, n_pheno, w_yield, w_disp):
"""
Rank phenotypes that rank within the top n at the simulated sites.
n_pheno : int
Minimum ranking a phenotype must achieve to be considered.
"""
df_pheno, mx = identify_top_phenos(run_name,
n_pheno=n_pheno,
w_yield=w_yield,
w_disp=w_disp)
top_phenos = []
for item in np.arange(n_pheno):
# Identify phenotypes in each ranking for each site
top_pheno = list(set(df_pheno.iloc[:, item]))
top_phenos.extend(top_pheno)
# Compile all phenotypes
list_top_phenos = list(set(top_phenos))
# Determine prevalence of phenotype occurrence
rank_sums = []
for item in list_top_phenos:
rank_list = list(mx[item])
rank_list_reversed = [(n_pheno + 1) - rank for rank in rank_list]
rank_sum = np.nansum(rank_list_reversed)
rank_sums.append(rank_sum)
df_ranksum = pd.DataFrame({'pheno': list_top_phenos,
'rank_sum': rank_sums})
top_pheno_ranks = list(df_ranksum.sort_values(
'rank_sum', ascending=False)['pheno'])
return(top_pheno_ranks)
def identify_improved_phenos(n_pheno, w_yield, w_disp,
future_run, rank_cutoff=20):
"""
Identify improved phenotypes.
Parameters
----------
n_pheno : int
w_yield : int
w_disp : int
future_run : str
run_name of future sim ('f2050', 'f2100')
rank_cutoff : int
Cut-off rank to be considered as 'top-ranking'.
Returns
-------
phenos_improved : list
All phenotypes that had positive rank change.
phenos_targeted : list
All phenotypes that had positive rank change and
also had final rank within rank_cutoff.
phenos_new : list
All phenotypes that ranked within rank_cutoff,
but were not originally among the top-ranked phenotypes.
"""
# Rank top phenos
top_phenos_present = rank_top_phenos('present', n_pheno, w_yield, w_disp)
top_phenos_future = rank_top_phenos(future_run, n_pheno, w_yield, w_disp)
# Calculate rank difference & identify new ranks
rank_diffs = []
new_ranks = []
for item, pheno in enumerate(top_phenos_present):
try:
new_rank = top_phenos_future.index(pheno)
new_ranks.append(new_rank)
rank_diffs.append(item-new_rank)
except ValueError:
new_ranks.append(np.nan)
rank_diffs.append(np.nan)
# Compile into dataframe
df_ranks = pd.DataFrame({'top_phenos_present': top_phenos_present,
'new_rank': new_ranks,
'rank_diff': rank_diffs})
df_ranks_sorted = df_ranks.sort_values('rank_diff', ascending=False)
# Improved & targeted phenos
phenos_improved = list(df_ranks_sorted.query(
'rank_diff>0')['top_phenos_present'])
phenos_targeted = list(df_ranks_sorted.query('rank_diff>0').query(
f'new_rank<{rank_cutoff}')['top_phenos_present'])
# New phenos
pheno_select = [
count for count, pheno in enumerate(rank_diffs) if pheno is np.nan]
phenos_new = []
for item in pheno_select:
if item < rank_cutoff:
try:
new_pheno = top_phenos_future[item]
if new_pheno not in top_phenos_present:
phenos_new.append(new_pheno)
except (ValueError, IndexError):
print('future top ranks less than rank cutoff')
# File: Lib/test/test_xml_etree.py
# IMPORTANT: the same tests are run from "test_xml_etree_c" in order
# to ensure consistency between the C implementation and the Python
# implementation.
#
# For this purpose, the module-level "ET" symbol is temporarily
# monkey-patched when running the "test_xml_etree_c" test suite.
import copy
import functools
import html
import io
import operator
import pickle
import sys
import types
import unittest
import warnings
import weakref
from itertools import product
from test import support
from test.support import TESTFN, findfile, import_fresh_module, gc_collect, swap_attr
# pyET is the pure-Python implementation.
#
# ET is pyET in test_xml_etree and is the C accelerated version in
# test_xml_etree_c.
pyET = None
ET = None
SIMPLE_XMLFILE = findfile("simple.xml", subdir="xmltestdata")
try:
SIMPLE_XMLFILE.encode("utf-8")
except UnicodeEncodeError:
raise unittest.SkipTest("filename is not encodable to utf8")
SIMPLE_NS_XMLFILE = findfile("simple-ns.xml", subdir="xmltestdata")
UTF8_BUG_XMLFILE = findfile("expat224_utf8_bug.xml", subdir="xmltestdata")
SAMPLE_XML = """\
<body>
<tag class='a'>text</tag>
<tag class='b' />
<section>
<tag class='b' id='inner'>subtext</tag>
</section>
</body>
"""
SAMPLE_SECTION = """\
<section>
<tag class='b' id='inner'>subtext</tag>
<nexttag />
<nextsection>
<tag />
</nextsection>
</section>
"""
SAMPLE_XML_NS = """
<body xmlns="http://effbot.org/ns">
<tag>text</tag>
<tag />
<section>
<tag>subtext</tag>
</section>
</body>
"""
SAMPLE_XML_NS_ELEMS = """
<root>
<h:table xmlns:h="hello">
<h:tr>
<h:td>Apples</h:td>
<h:td>Bananas</h:td>
</h:tr>
</h:table>
<f:table xmlns:f="foo">
<f:name>African Coffee Table</f:name>
<f:width>80</f:width>
<f:length>120</f:length>
</f:table>
</root>
"""
ENTITY_XML = """\
<!DOCTYPE points [
<!ENTITY % user-entities SYSTEM 'user-entities.xml'>
%user-entities;
]>
<document>&entity;</document>
"""
EXTERNAL_ENTITY_XML = """\
<!DOCTYPE points [
<!ENTITY entity SYSTEM "file:///non-existing-file.xml">
]>
<document>&entity;</document>
"""
def checkwarnings(*filters, quiet=False):
def decorator(test):
def newtest(*args, **kwargs):
with support.check_warnings(*filters, quiet=quiet):
test(*args, **kwargs)
functools.update_wrapper(newtest, test)
return newtest
return decorator
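# Example (illustrative): checkwarnings() is used further down to wrap tests
# that deliberately exercise deprecated APIs; the message pattern below is a
# hypothetical filter, not one taken from this file.
#
#   @checkwarnings(("This method will be removed.*", DeprecationWarning),
#                  quiet=True)
#   def test_uses_deprecated_api(self):
#       ...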
class ModuleTest(unittest.TestCase):
def test_sanity(self):
# Import sanity.
from xml.etree import ElementTree
from xml.etree import ElementInclude
from xml.etree import ElementPath
def test_all(self):
names = ("xml.etree.ElementTree", "_elementtree")
support.check__all__(self, ET, names, blacklist=("HTML_EMPTY",))
def serialize(elem, to_string=True, encoding='unicode', **options):
if encoding != 'unicode':
file = io.BytesIO()
else:
file = io.StringIO()
tree = ET.ElementTree(elem)
tree.write(file, encoding=encoding, **options)
if to_string:
return file.getvalue()
else:
file.seek(0)
return file
def summarize_list(seq):
return [elem.tag for elem in seq]
class ElementTestCase:
@classmethod
def setUpClass(cls):
cls.modules = {pyET, ET}
def pickleRoundTrip(self, obj, name, dumper, loader, proto):
save_m = sys.modules[name]
try:
sys.modules[name] = dumper
temp = pickle.dumps(obj, proto)
sys.modules[name] = loader
result = pickle.loads(temp)
except pickle.PicklingError as pe:
# pyET must be second, because pyET may be (equal to) ET.
human = dict([(ET, "cET"), (pyET, "pyET")])
raise support.TestFailed("Failed to round-trip %r from %r to %r"
% (obj,
human.get(dumper, dumper),
human.get(loader, loader))) from pe
finally:
sys.modules[name] = save_m
return result
def assertEqualElements(self, alice, bob):
self.assertIsInstance(alice, (ET.Element, pyET.Element))
self.assertIsInstance(bob, (ET.Element, pyET.Element))
self.assertEqual(len(list(alice)), len(list(bob)))
for x, y in zip(alice, bob):
self.assertEqualElements(x, y)
properties = operator.attrgetter('tag', 'tail', 'text', 'attrib')
self.assertEqual(properties(alice), properties(bob))
# --------------------------------------------------------------------
# element tree tests
class ElementTreeTest(unittest.TestCase):
def serialize_check(self, elem, expected):
self.assertEqual(serialize(elem), expected)
def test_interface(self):
# Test element tree interface.
def check_string(string):
len(string)
for char in string:
self.assertEqual(len(char), 1,
msg="expected one-character string, got %r" % char)
new_string = string + ""
new_string = string + " "
string[:0]
def check_mapping(mapping):
len(mapping)
keys = mapping.keys()
items = mapping.items()
for key in keys:
item = mapping[key]
mapping["key"] = "value"
self.assertEqual(mapping["key"], "value",
msg="expected value string, got %r" % mapping["key"])
def check_element(element):
self.assertTrue(ET.iselement(element), msg="not an element")
direlem = dir(element)
for attr in 'tag', 'attrib', 'text', 'tail':
self.assertTrue(hasattr(element, attr),
msg='no %s member' % attr)
self.assertIn(attr, direlem,
msg='no %s visible by dir' % attr)
check_string(element.tag)
check_mapping(element.attrib)
if element.text is not None:
check_string(element.text)
if element.tail is not None:
check_string(element.tail)
for elem in element:
check_element(elem)
element = ET.Element("tag")
check_element(element)
tree = ET.ElementTree(element)
check_element(tree.getroot())
element = ET.Element("t\xe4g", key="value")
tree = ET.ElementTree(element)
self.assertRegex(repr(element), r"^<Element 't\xe4g' at 0x.*>$")
element = ET.Element("tag", key="value")
# Make sure all standard element methods exist.
def check_method(method):
self.assertTrue(hasattr(method, '__call__'),
msg="%s not callable" % method)
check_method(element.append)
check_method(element.extend)
check_method(element.insert)
check_method(element.remove)
check_method(element.getchildren)
check_method(element.find)
check_method(element.iterfind)
check_method(element.findall)
check_method(element.findtext)
check_method(element.clear)
check_method(element.get)
check_method(element.set)
check_method(element.keys)
check_method(element.items)
check_method(element.iter)
check_method(element.itertext)
check_method(element.getiterator)
# These methods return an iterable. See bug 6472.
def check_iter(it):
check_method(it.__next__)
check_iter(element.iterfind("tag"))
check_iter(element.iterfind("*"))
check_iter(tree.iterfind("tag"))
check_iter(tree.iterfind("*"))
# These aliases are provided:
self.assertEqual(ET.XML, ET.fromstring)
self.assertEqual(ET.PI, ET.ProcessingInstruction)
def test_set_attribute(self):
element = ET.Element('tag')
self.assertEqual(element.tag, 'tag')
element.tag = 'Tag'
self.assertEqual(element.tag, 'Tag')
element.tag = 'TAG'
self.assertEqual(element.tag, 'TAG')
self.assertIsNone(element.text)
element.text = 'Text'
self.assertEqual(element.text, 'Text')
element.text = 'TEXT'
self.assertEqual(element.text, 'TEXT')
self.assertIsNone(element.tail)
element.tail = 'Tail'
self.assertEqual(element.tail, 'Tail')
element.tail = 'TAIL'
self.assertEqual(element.tail, 'TAIL')
self.assertEqual(element.attrib, {})
element.attrib = {'a': 'b', 'c': 'd'}
self.assertEqual(element.attrib, {'a': 'b', 'c': 'd'})
element.attrib = {'A': 'B', 'C': 'D'}
self.assertEqual(element.attrib, {'A': 'B', 'C': 'D'})
def test_simpleops(self):
# Basic method sanity checks.
elem = ET.XML("<body><tag/></body>")
self.serialize_check(elem, '<body><tag /></body>')
e = ET.Element("tag2")
elem.append(e)
self.serialize_check(elem, '<body><tag /><tag2 /></body>')
elem.remove(e)
self.serialize_check(elem, '<body><tag /></body>')
elem.insert(0, e)
self.serialize_check(elem, '<body><tag2 /><tag /></body>')
elem.remove(e)
elem.extend([e])
self.serialize_check(elem, '<body><tag /><tag2 /></body>')
elem.remove(e)
element = ET.Element("tag", key="value")
self.serialize_check(element, '<tag key="value" />') # 1
subelement = ET.Element("subtag")
element.append(subelement)
self.serialize_check(element, '<tag key="value"><subtag /></tag>') # 2
element.insert(0, subelement)
self.serialize_check(element,
'<tag key="value"><subtag /><subtag /></tag>') # 3
element.remove(subelement)
self.serialize_check(element, '<tag key="value"><subtag /></tag>') # 4
element.remove(subelement)
self.serialize_check(element, '<tag key="value" />') # 5
with self.assertRaises(ValueError) as cm:
element.remove(subelement)
self.assertEqual(str(cm.exception), 'list.remove(x): x not in list')
self.serialize_check(element, '<tag key="value" />') # 6
element[0:0] = [subelement, subelement, subelement]
self.serialize_check(element[1], '<subtag />')
self.assertEqual(element[1:9], [element[1], element[2]])
self.assertEqual(element[:9:2], [element[0], element[2]])
del element[1:2]
self.serialize_check(element,
'<tag key="value"><subtag /><subtag /></tag>')
def test_cdata(self):
# Test CDATA handling (etc).
self.serialize_check(ET.XML("<tag>hello</tag>"),
'<tag>hello</tag>')
self.serialize_check(ET.XML("<tag>hello</tag>"),
'<tag>hello</tag>')
self.serialize_check(ET.XML("<tag><![CDATA[hello]]></tag>"),
'<tag>hello</tag>')
def test_file_init(self):
stringfile = io.BytesIO(SAMPLE_XML.encode("utf-8"))
tree = ET.ElementTree(file=stringfile)
self.assertEqual(tree.find("tag").tag, 'tag')
self.assertEqual(tree.find("section/tag").tag, 'tag')
tree = ET.ElementTree(file=SIMPLE_XMLFILE)
self.assertEqual(tree.find("element").tag, 'element')
self.assertEqual(tree.find("element/../empty-element").tag,
'empty-element')
def test_path_cache(self):
# Check that the path cache behaves sanely.
from xml.etree import ElementPath
elem = ET.XML(SAMPLE_XML)
for i in range(10): ET.ElementTree(elem).find('./'+str(i))
cache_len_10 = len(ElementPath._cache)
for i in range(10): ET.ElementTree(elem).find('./'+str(i))
self.assertEqual(len(ElementPath._cache), cache_len_10)
for i in range(20): ET.ElementTree(elem).find('./'+str(i))
self.assertGreater(len(ElementPath._cache), cache_len_10)
for i in range(600): ET.ElementTree(elem).find('./'+str(i))
self.assertLess(len(ElementPath._cache), 500)
def test_copy(self):
# Test copy handling (etc).
import copy
e1 = ET.XML("<tag>hello<foo/></tag>")
e2 = copy.copy(e1)
e3 = copy.deepcopy(e1)
e1.find("foo").tag = "bar"
self.serialize_check(e1, '<tag>hello<bar /></tag>')
self.serialize_check(e2, '<tag>hello<bar /></tag>')
self.serialize_check(e3, '<tag>hello<foo /></tag>')
def test_attrib(self):
# Test attribute handling.
elem = ET.Element("tag")
elem.get("key") # 1.1
self.assertEqual(elem.get("key", "default"), 'default') # 1.2
elem.set("key", "value")
self.assertEqual(elem.get("key"), 'value') # 1.3
elem = ET.Element("tag", key="value")
self.assertEqual(elem.get("key"), 'value') # 2.1
self.assertEqual(elem.attrib, {'key': 'value'}) # 2.2
attrib = {"key": "value"}
elem = ET.Element("tag", attrib)
attrib.clear() # check for aliasing issues
self.assertEqual(elem.get("key"), 'value') # 3.1
self.assertEqual(elem.attrib, {'key': 'value'}) # 3.2
attrib = {"key": "value"}
elem = ET.Element("tag", **attrib)
attrib.clear() # check for aliasing issues
self.assertEqual(elem.get("key"), 'value') # 4.1
self.assertEqual(elem.attrib, {'key': 'value'}) # 4.2
elem = ET.Element("tag", {"key": "other"}, key="value")
self.assertEqual(elem.get("key"), 'value') # 5.1
self.assertEqual(elem.attrib, {'key': 'value'}) # 5.2
elem = ET.Element('test')
elem.text = "aa"
elem.set('testa', 'testval')
elem.set('testb', 'test2')
self.assertEqual(ET.tostring(elem),
b'<test testa="testval" testb="test2">aa</test>')
self.assertEqual(sorted(elem.keys()), ['testa', 'testb'])
self.assertEqual(sorted(elem.items()),
[('testa', 'testval'), ('testb', 'test2')])
self.assertEqual(elem.attrib['testb'], 'test2')
elem.attrib['testb'] = 'test1'
elem.attrib['testc'] = 'test2'
self.assertEqual(ET.tostring(elem),
b'<test testa="testval" testb="test1" testc="test2">aa</test>')
elem = ET.Element('test')
elem.set('a', '\r')
elem.set('b', '\r\n')
elem.set('c', '\t\n\r ')
elem.set('d', '\n\n')
self.assertEqual(ET.tostring(elem),
                b'<test a="&#13;" b="&#13;&#10;" c="&#09;&#10;&#13; " d="&#10;&#10;" />')
def test_makeelement(self):
# Test makeelement handling.
elem = ET.Element("tag")
attrib = {"key": "value"}
subelem = elem.makeelement("subtag", attrib)
self.assertIsNot(subelem.attrib, attrib, msg="attrib aliasing")
elem.append(subelem)
self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
elem.clear()
self.serialize_check(elem, '<tag />')
elem.append(subelem)
self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
elem.extend([subelem, subelem])
self.serialize_check(elem,
'<tag><subtag key="value" /><subtag key="value" /><subtag key="value" /></tag>')
elem[:] = [subelem]
self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
elem[:] = tuple([subelem])
self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
def test_parsefile(self):
# Test parsing from file.
tree = ET.parse(SIMPLE_XMLFILE)
stream = io.StringIO()
tree.write(stream, encoding='unicode')
self.assertEqual(stream.getvalue(),
'<root>\n'
' <element key="value">text</element>\n'
' <element>text</element>tail\n'
' <empty-element />\n'
'</root>')
tree = ET.parse(SIMPLE_NS_XMLFILE)
stream = io.StringIO()
tree.write(stream, encoding='unicode')
self.assertEqual(stream.getvalue(),
'<ns0:root xmlns:ns0="namespace">\n'
' <ns0:element key="value">text</ns0:element>\n'
' <ns0:element>text</ns0:element>tail\n'
' <ns0:empty-element />\n'
'</ns0:root>')
with open(SIMPLE_XMLFILE) as f:
data = f.read()
parser = ET.XMLParser()
self.assertRegex(parser.version, r'^Expat ')
parser.feed(data)
self.serialize_check(parser.close(),
'<root>\n'
' <element key="value">text</element>\n'
' <element>text</element>tail\n'
' <empty-element />\n'
'</root>')
target = ET.TreeBuilder()
parser = ET.XMLParser(target=target)
parser.feed(data)
self.serialize_check(parser.close(),
'<root>\n'
' <element key="value">text</element>\n'
' <element>text</element>tail\n'
' <empty-element />\n'
'</root>')
def test_parseliteral(self):
element = ET.XML("<html><body>text</body></html>")
self.assertEqual(ET.tostring(element, encoding='unicode'),
'<html><body>text</body></html>')
element = ET.fromstring("<html><body>text</body></html>")
self.assertEqual(ET.tostring(element, encoding='unicode'),
'<html><body>text</body></html>')
sequence = ["<html><body>", "text</bo", "dy></html>"]
element = ET.fromstringlist(sequence)
self.assertEqual(ET.tostring(element),
b'<html><body>text</body></html>')
self.assertEqual(b"".join(ET.tostringlist(element)),
b'<html><body>text</body></html>')
self.assertEqual(ET.tostring(element, "ascii"),
b"<?xml version='1.0' encoding='ascii'?>\n"
b"<html><body>text</body></html>")
_, ids = ET.XMLID("<html><body>text</body></html>")
self.assertEqual(len(ids), 0)
_, ids = ET.XMLID("<html><body id='body'>text</body></html>")
self.assertEqual(len(ids), 1)
self.assertEqual(ids["body"].tag, 'body')
def test_iterparse(self):
# Test iterparse interface.
iterparse = ET.iterparse
context = iterparse(SIMPLE_XMLFILE)
action, elem = next(context)
self.assertEqual((action, elem.tag), ('end', 'element'))
self.assertEqual([(action, elem.tag) for action, elem in context], [
('end', 'element'),
('end', 'empty-element'),
('end', 'root'),
])
self.assertEqual(context.root.tag, 'root')
context = iterparse(SIMPLE_NS_XMLFILE)
self.assertEqual([(action, elem.tag) for action, elem in context], [
('end', '{namespace}element'),
('end', '{namespace}element'),
('end', '{namespace}empty-element'),
('end', '{namespace}root'),
])
events = ()
context = iterparse(SIMPLE_XMLFILE, events)
self.assertEqual([(action, elem.tag) for action, elem in context], [])
events = ()
context = iterparse(SIMPLE_XMLFILE, events=events)
        self.assertEqual([(action, elem.tag) for action, elem in context], [])
# models.py
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F
import math
import os
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_pretrained_bert.modeling import BertConfig, WEIGHTS_NAME, CONFIG_NAME, BertPreTrainedModel, BertModel
def create_model(args, device, config_file='', weights_file=''):
''' create squad model from args '''
ModelClass = None
if args.squad_model == 'bert_base':
print('creating bert base model')
ModelClass = SquadModel
if args.squad_model == 'bert_linear':
print('creating bert linear model')
ModelClass = SquadLinearModel
if args.squad_model == 'bert_deep':
print('creating bert deep model')
ModelClass = SquadDeepModel
if args.squad_model == 'bert_qanet':
print('creating bert qanet model')
ModelClass = SquadModelQANet
if config_file == '' and weights_file == '':
print('creating an untrained model')
return ModelClass.from_pretrained(args.bert_model,
cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank)))
else:
print('loading a trained model')
config = BertConfig(config_file)
model = ModelClass(config)
model.load_state_dict(torch.load(weights_file, map_location=device))
return model
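# Hypothetical usage sketch (not from the original repo): driving create_model
# from a minimal argparse-style namespace. The attribute names mirror the ones
# read above; 'bert-base-uncased' is an assumed pretrained checkpoint name, and
# this helper is only defined here, never called at import time.
def _example_create_untrained_model():
    from argparse import Namespace
    args = Namespace(squad_model='bert_base',
                     bert_model='bert-base-uncased',  # assumption: any valid BERT name
                     local_rank=-1)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # With empty config/weights paths, create_model returns an untrained
    # SquadModel initialized from the pretrained BERT weights.
    return create_model(args, device)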
class SquadModel(BertPreTrainedModel):
"""BERT model for Question Answering (span extraction).
This module is composed of the BERT model with a linear layer on top of
the sequence output that computes start_logits and end_logits
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
Positions are clamped to the length of the sequence and position outside of the sequence are not taken
into account for computing the loss.
`end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
Positions are clamped to the length of the sequence and position outside of the sequence are not taken
into account for computing the loss.
Outputs:
if `start_positions` and `end_positions` are not `None`:
Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.
if `start_positions` or `end_positions` is `None`:
Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
position tokens of shape [batch_size, sequence_length].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = SquadModel(config)
start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(SquadModel, self).__init__(config)
self.bert = BertModel(config)
# TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return total_loss
else:
return start_logits, end_logits
class SquadLinearModel(BertPreTrainedModel):
"""BERT model for Question Answering (span extraction).
This module is composed of the BERT model with a linear layer on top of
the sequence output that computes start_logits and end_logits
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
Positions are clamped to the length of the sequence and position outside of the sequence are not taken
into account for computing the loss.
`end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
Positions are clamped to the length of the sequence and position outside of the sequence are not taken
into account for computing the loss.
Outputs:
if `start_positions` and `end_positions` are not `None`:
Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.
if `start_positions` or `end_positions` is `None`:
Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
position tokens of shape [batch_size, sequence_length].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = SquadLinearModel(config)
start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(SquadLinearModel, self).__init__(config)
self.bert = BertModel(config)
# TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.hidden1 = nn.Linear(config.hidden_size, config.hidden_size)
self.hidden2 = nn.Linear(config.hidden_size, config.hidden_size)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
#logits = self.qa_outputs(sequence_output)
logits = self.qa_outputs(self.hidden2(self.hidden1(sequence_output)))
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return total_loss
else:
return start_logits, end_logits
class SquadDeepModel(BertPreTrainedModel):
"""BERT model for Question Answering (span extraction).
This module is composed of the BERT model with a linear layer on top of
the sequence output that computes start_logits and end_logits
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
    `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
'''
Created on Apr 15, 2014
@author: <NAME>
'''
import argparse
import cv2
import numpy as np
import os
from obj_detect import SegmentedObject
from obj_baxter import BaxterObject
class BaxterExperiment(BaxterObject):
'''
    A BaxterExperiment is a BaxterObject with methods to facilitate the
    use of a BaxterObject's functions. It contains methods for importing images
and exporting results en masse, as well as displaying the result images
(along with segments and bounding rectangles) in a window.
A notable method is display_results(), which brings up result images of
the segmentation algorithm in a window. On Windows, it can also accept
keyboard input:
- Pressing ESC or 'q' closes the window.
- Pressing 'a' moves the slider a tick left, and 'A' 5 ticks.
- Pressing 'd' moves the slider a tick right, and 'D' 5 ticks.
- Pressing 's' toggles what segment of the image to display.
- Pressing 'r' toggles what bounding rectangle to display.
- Pressing TAB temporarily displays the background image, allowing
for quick comparison between the background and current image.
'''
def __init__(self, bg_file=None):
'''
        Initializes BaxterExperiment, optionally with a user-specified
        background image.
        Args:
            bg_file: file path to the background image.
'''
super(BaxterExperiment, self).__init__(bg_file)
self._name = "BaxterObject"
self._bar = "Image"
self._pos = 0
self._total = 1
self._seg = 0 # 0 = none, 1 = region, 2 = object
self._rect = 2 # 0 = none, 1 = upright, 2 = min area
return
def export_results(self, output_dir, segment=True, table=True):
'''
        Exports the BaxterExperiment's results to the given directory:
        segmented result images (if segment is True) and a CSV table of
        measured sizes (if table is True).
        Args:
            output_dir: directory path to write output images to.
            segment: whether to export the segmented result images.
            table: whether to export the sizes CSV table.
Returns:
True if the output directory is valid; false otherwise.
'''
if not os.path.isdir(output_dir):
return False
if not output_dir.endswith("/"):
output_dir += "/"
if segment:
self.export_measure_segment(output_dir+"reference-_seg.png")
self.export_arm_segment(output_dir+"arm-_seg.png")
self.export_uncompressed_segment(output_dir+"object-_seg.png")
self.export_compress_segment(output_dir+"compression-_seg.png")
if table:
self.export_sizes(output_dir + "sizes.csv")
return True
def print_results(self):
'''
Prints out various results of the BaxterExperiment's image processing:
the arm color range, the millimeter to pixel conversion factor, the
measurement object reference pixel size, the box object size,
the uncompressed object pixel size, and the (smallest area) compressed
object pixel size.
'''
print "Color range:", self._color_low, self._color_high
print "Millimeters / Pixel:", self.get_mm_per_px()
print "Measure object size (px):", self.get_measure_size()
print "Box object size (px):", self.get_box_size()
print "Object object size (px):", self.get_uncompressed_size()
print "Compressed object size (px):", self.get_compressed_size()
return
def import_images(self, path_dir): # Caution: very specific
'''
Loads images from a directory into the BaxterExperiment. The specific
naming convention for the images is as follows: the background image is
"background"/"bg", the reference object image is "reference"/"ref",
the arm image is "arm", the box image is "box", the uncompressed
object image is "object"/"obj", and the compressed object images
start with "compression". Images that are not named this way
are ignored.
The method only reads PNG or JPG image files. Also note that the
compression images are added in alphabetical order.
Args:
path_dir: directory path of the images to load.
Returns:
True if the input directory is valid; false otherwise.
'''
if not os.path.isdir(path_dir):
return False
if not path_dir.endswith("/"):
path_dir += "/"
for file in os.listdir(path_dir): # Must find background first
if file.endswith(".png") or file.endswith(".jpg"):
name = os.path.splitext(file)[0]
if name == "background" or name == "bg":
self.bg_path = path_dir + file
break
if not self.bg_path:
return False
for file in sorted(os.listdir(path_dir)):
if file.endswith(".png") or file.endswith(".jpg"):
name = os.path.splitext(file)[0]
if name == "reference" or name == "ref":
self.set_measure_image(path_dir + file, 100, 100)
elif name == "arm":
self.set_arm_image(path_dir + file)
elif name == "box":
self.set_box_image(path_dir + file)
elif name == "object" or name == "obj":
self.set_uncompressed_image(path_dir + file)
elif name.startswith("compression"):
self.set_compressed_image(path_dir + file)
return True
def set_roi(self, x, y, w, h, xy_type="absolute", dim_type="absolute"):
'''
Sets the rectangular region of interest for all images that are loaded
into BaxterExperiment.
Note that there is no check for validity.
Args:
x: integer x-value of top-left point of the ROI rectangle.
y: integer y-value of top-left point of the ROI rectangle.
w: integer width (x-dimension) of the ROI rectangle.
h: integer height (y-dimension) of the ROI rectangle.
xy_type: 'absolute' if (x,y) are to be interpreted as absolute
pixel values; 'relative' if (x,y) are percentages of
overall image from which to determine the top-left corner
pixel.
            dim_type: 'absolute' if (w,h) are to be interpreted as absolute
                pixel dimensions; 'relative' if (w,h) are percentages of the
                overall image from which to determine pixel dimensions.
'''
self.set_arm_roi(x, y, w, h, xy_type, dim_type)
self.set_uncompressed_roi(x, y, w, h, xy_type, dim_type)
self.set_compressed_roi(x, y, w, h, xy_type, dim_type)
self.set_measure_roi(x, y, w, h, xy_type, dim_type)
self.set_box_roi(x, y, w, h, xy_type, dim_type)
return
def display_results(self):
'''
Opens a window and displays the results of the BaxterExperiment's
segmentation of its object images. The window contains a slider
which the user can move to toggle between different image results.
It also accepts keyboard input:
- Pressing ESC or 'q' closes the window.
- Pressing 'a' moves the slider a tick left, and 'A' 5 ticks.
- Pressing 'd' moves the slider a tick right, and 'D' 5 ticks.
- Pressing 's' toggles what segment of the image to display.
- Pressing 'r' toggles what bounding rectangle to display.
- Pressing TAB temporarily displays the background image, allowing
for quick comparison between the background and current image.
This method does not terminate until the user closes the window. Note
also that the keyboard functions have been tested to only completely
work on Windows.
'''
self._total = 5 + len(self.compress_obj)
#cv2.namedWindow(self._name)
self._display_update(self._pos)
cv2.cv.CreateTrackbar(self._bar, self._name, 0,
self._total-1, self._display_update)
while True:
k = cv2.waitKey()
self._pos = cv2.getTrackbarPos(self._bar, self._name)
if k == 27 or k == ord('q') or k == -1: # ESC or no key press
break
if k == 9: # tab
self._display_update(0)
cv2.waitKey(500)
elif k == ord('a'): # left arrow
self._pos = (self._pos - 1) % self._total
cv2.setTrackbarPos(self._bar, self._name, self._pos)
elif k == ord('d'): # right arrow
self._pos = (self._pos + 1) % self._total
cv2.setTrackbarPos(self._bar, self._name, self._pos)
elif k == ord('A'): # left arrow * 5
self._pos = (self._pos - 5) % self._total
cv2.setTrackbarPos(self._bar, self._name, self._pos)
elif k == ord('D'): # right arrow * 5
self._pos = (self._pos + 5) % self._total
cv2.setTrackbarPos(self._bar, self._name, self._pos)
elif k == ord('s'):
self._seg = (self._seg + 1) % 3
elif k == ord('r'):
self._rect = (self._rect + 1) % 3
else:
continue
self._display_update(self._pos)
cv2.waitKey(-1) # for Linux
cv2.destroyWindow(self._name)
cv2.imshow(self._name, np.array([0])) # for Linux
return
def _display_update(self, index):
bg_img = cv2.imread(self.bg_path)
if index == 0:
# Apply the same blurring filter as in object_detect.py
cv2.imshow(self._name, cv2.bilateralFilter(bg_img, 5, 100, 100))
return
obj = None
if index == 1:
obj = self.measure_obj
elif index == 2:
obj = self.arm_obj
elif index == 3:
obj = self.box_obj
elif index == 4:
obj = self.uncompress_obj
elif index >= 5 and index-5 < len(self.compress_obj):
obj = self.compress_obj[index-5]
if obj is None:
black_img = np.zeros(bg_img.shape[:-1], np.uint8)
cv2.imshow(self._name, black_img)
return
if self._seg == 2:
obj_mask = obj.get_object_mask()
img = cv2.bitwise_and(obj.fg_img, obj.fg_img, mask=obj_mask)
elif self._seg == 1:
region_mask = cv2.bitwise_and(obj.rect_mask, obj.color_mask)
img = cv2.bitwise_and(obj.fg_img, obj.fg_img, mask=region_mask)
else:
img = obj.fg_img.copy()
if self._rect >= 1:
points = np.int0(obj.get_object_rectangle_points(self._rect == 2))
cv2.drawContours(img, [points], 0, (255,255,255), 2)
cv2.imshow(self._name, img)
return
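# Usage sketch (illustrative, not part of the original script): a typical
# end-to-end run loads a directory of suitably named images, prints and shows
# the segmentation results, and exports them. The directory paths below are
# placeholders.
def _example_baxter_run(image_dir="experiment_images/", out_dir="results/"):
    experiment = BaxterExperiment()
    if not experiment.import_images(image_dir):
        raise IOError("no background image found in " + image_dir)
    experiment.print_results()     # text summary of measured sizes
    experiment.display_results()   # interactive window (blocks until closed)
    experiment.export_results(out_dir, segment=True, table=True)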
# Test script for BaxterExperiment
def main():
parser = argparse.ArgumentParser(description="Process Baxter experiment images.")
parser.add_argument("-v", "--view", action="store_true",
help="display results in window")
parser.add_argument("-e", "--export", nargs=1, metavar="DIR",
help="export results to file directory")
parser.add_argument("-i", "--import", nargs=1, metavar="DIR", dest="dir",
help="load directory path of images to add")
parser.add_argument("-ie", nargs=1, metavar="DIR",
help="load directory path of images and export to same")
parser.add_argument("-b", "--bg", nargs=1, metavar="FILE",
help="add background image")
parser.add_argument("-m", "--measure", nargs=1, metavar="FILE",
help="add measure reference image")
parser.add_argument("-m-d", "--measure-dim", nargs=2, type=int,
metavar=("WIDTH", "HEIGHT"),
help="specify measure reference dimensions")
parser.add_argument("-x", "--box", nargs=1, metavar="FILE",
help="add box reference | |
import numpy as np
import math
def l2_regularization(W, reg_strength):
'''
Computes L2 regularization loss on weights and its gradient
Arguments:
W, np array - weights
reg_strength - float value
Returns:
loss, single value - l2 regularization loss
gradient, np.array same shape as W - gradient of weight by l2 loss
'''
# TODO: Copy from previous assignment
loss = reg_strength * np.trace(np.matmul(W.T, W)) # L2(W) = λ * tr(W.T * W)
grad = 2 * reg_strength * W # dL2(W)/dW = 2 * λ * W
return loss, grad
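# Quick numeric sanity check (illustrative, not part of the assignment): for
# L2(W) = reg * sum(W**2) the analytic gradient 2 * reg * W should agree with a
# central finite-difference estimate. Shapes and tolerances are arbitrary.
def _check_l2_gradient(reg_strength=0.1, eps=1e-6):
    W = np.random.randn(3, 4)
    _, analytic_grad = l2_regularization(W, reg_strength)
    numeric_grad = np.zeros_like(W)
    it = np.nditer(W, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        W_plus, W_minus = W.copy(), W.copy()
        W_plus[idx] += eps
        W_minus[idx] -= eps
        numeric_grad[idx] = (l2_regularization(W_plus, reg_strength)[0] -
                             l2_regularization(W_minus, reg_strength)[0]) / (2 * eps)
        it.iternext()
    assert np.allclose(analytic_grad, numeric_grad, atol=1e-5)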
def softmax(predictions):
'''
Computes probabilities from scores
Arguments:
predictions, np array, shape is either (N) or (batch_size, N) -
classifier output
Returns:
probs, np array of the same shape as predictions -
probability for every class, 0..1
'''
# TODO implement softmax
# Your final implementation shouldn't have any loops
single = (predictions.ndim == 1)
if single:
predictions = predictions.reshape(1, predictions.shape[0])
maximums = np.amax(predictions, axis=1).reshape(predictions.shape[0], 1)
predictions_ts = predictions - maximums
predictions_exp = np.exp(predictions_ts)
sums = np.sum(predictions_exp, axis=1).reshape(predictions_exp.shape[0], 1)
result = predictions_exp / sums
if single:
result = result.reshape(result.size)
return result
def cross_entropy_loss(probs, target_index):
'''
Computes cross-entropy loss
Arguments:
probs, np array, shape is either (N) or (batch_size, N) -
probabilities for every class
target_index: np array of int, shape is (1) or (batch_size) -
index of the true class for given sample(s)
Returns:
loss: single value
'''
# TODO implement cross-entropy
# Your final implementation shouldn't have any loops
new_loss = np.vectorize(lambda x: -math.log(x))
if len(probs.shape) == 1:
probs_target = probs[target_index]
size_target = 1
else:
batch_size = np.arange(target_index.shape[0])
probs_target = probs[batch_size,target_index.flatten()]
size_target = target_index.shape[0]
loss = np.sum(new_loss(probs_target)) / size_target
return loss
def softmax_with_cross_entropy(predictions, target_index):
'''
Computes softmax and cross-entropy loss for model predictions,
including the gradient
Arguments:
predictions, np array, shape is either (N) or (batch_size, N) -
classifier output
target_index: np array of int, shape is (1) or (batch_size) -
index of the true class for given sample(s)
Returns:
loss, single value - cross-entropy loss
dprediction, np array same shape as predictions - gradient of predictions by loss value
'''
# TODO copy from the previous assignment
prediction = predictions.copy()
probs = softmax(prediction)
loss = cross_entropy_loss(probs, target_index)
d_preds = probs
if len(predictions.shape)==1:
d_preds[target_index] -= 1
else:
batch_size = np.arange(target_index.shape[0])
d_preds[batch_size,target_index.flatten()] -= 1
d_preds = d_preds/target_index.shape[0]
return loss, d_preds
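# Illustrative check (not part of the assignment): for a batch, the combined
# gradient returned above should equal (softmax(predictions) - one_hot) / batch_size,
# and the loss should equal the mean negative log-probability of the targets.
def _demo_softmax_with_cross_entropy():
    predictions = np.array([[1.0, 2.0, 0.5],
                            [0.2, 0.1, 3.0]])
    target_index = np.array([1, 2])
    loss, dpreds = softmax_with_cross_entropy(predictions, target_index)
    probs = softmax(predictions)
    expected_grad = probs.copy()
    expected_grad[np.arange(2), target_index] -= 1
    expected_grad /= 2
    assert np.allclose(dpreds, expected_grad)
    assert np.isclose(loss, -np.log(probs[np.arange(2), target_index]).mean())
    return loss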
class Param:
'''
Trainable parameter of the model
Captures both parameter value and the gradient
'''
def __init__(self, value):
self.value = value
self.grad = np.zeros_like(value)
class ReLULayer:
def __init__(self):
pass
def forward(self, X):
# TODO copy from the previous assignment
self.d_out_result = np.greater(X, 0).astype(float) # dZ/dX
return np.maximum(X, 0) # Z
def backward(self, d_out):
# TODO copy from the previous assignment
d_result = np.multiply(d_out, self.d_out_result) # dL/dX = dL/dZ * dZ/dX
return d_result # dL/dX
def params(self):
return {}
def reset_grad(self):
pass
class FullyConnectedLayer:
def __init__(self, n_input, n_output):
self.W = Param(0.001 * np.random.randn(n_input, n_output))
self.B = Param(0.001 * np.random.randn(1, n_output))
self.X = None
self.dw = None
def forward(self, X):
# TODO copy from the previous assignment
self.X = X
return np.matmul(X, self.W.value) + self.B.value
def backward(self, d_out):
# TODO copy from the previous assignment
d_input = np.matmul(d_out, self.W.value.T) # dL/dX = dL/dZ * dZ/dX = dL/dZ * W.T
dLdW = np.matmul(self.X.T, d_out) # dL/dW = dL/dZ * dZ/dW = X.T * dL/dZ
        dLdB = np.sum(d_out, axis=0) # dL/dB = dL/dZ * dZ/dB = sum of dL/dZ over the batch
self.W.grad += dLdW
self.B.grad += dLdB
return d_input # dL/dX
def params(self):
return { 'W': self.W, 'B': self.B }
def reset_grad(self):
self.W.grad = np.zeros_like(self.W.value)
self.B.grad = np.zeros_like(self.B.value)
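# Shape sketch (illustrative): FullyConnectedLayer maps (batch, n_input) to
# (batch, n_output); backward returns a gradient with the shape of the forward
# input and accumulates gradients into W.grad and B.grad.
def _demo_fully_connected_shapes():
    layer = FullyConnectedLayer(n_input=5, n_output=3)
    X = np.random.randn(4, 5)
    out = layer.forward(X)
    d_in = layer.backward(np.ones_like(out))
    assert out.shape == (4, 3)
    assert d_in.shape == X.shape
    assert layer.W.grad.shape == layer.W.value.shape
    assert layer.B.grad.shape == layer.B.value.shape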
class ConvolutionalLayer:
def __init__(self, in_channels, out_channels,
filter_size, padding):
'''
Initializes the layer
Arguments:
in_channels, int - number of input channels
out_channels, int - number of output channels
filter_size, int - size of the conv filter
padding, int - number of 'pixels' to pad on each side
'''
self.filter_size = filter_size
self.in_channels = in_channels
self.out_channels = out_channels
self.W = Param(
np.random.randn(filter_size, filter_size,
in_channels, out_channels)
)
self.B = Param(np.zeros(out_channels))
self.padding = padding
def forward(self, X):
batch_size, height, width, channels = X.shape
self.X = X
if self.padding:
self.X = np.zeros((batch_size,
height + 2 * self.padding,
width + 2 * self.padding,
channels), dtype=X.dtype)
self.X[:, self.padding: -self.padding, self.padding: -self.padding, :] = X
_, height, width, channels = self.X.shape
out_height = height - self.filter_size + 1
out_width = width - self.filter_size + 1
output = []
# TODO: Implement forward pass
# Hint: setup variables that hold the result
# and one x/y location at a time in the loop below
# It's ok to use loops for going over width and height
# but try to avoid having any other loops
for y in range(out_height):
row = []
for x in range(out_width):
x_filter = self.X[:, y: y + self.filter_size, x: x + self.filter_size, :]
x_filter = np.transpose(x_filter, axes=[0, 3, 2, 1]).reshape((batch_size, self.filter_size * self.filter_size * channels))
W_filter = np.transpose(self.W.value, axes=[2, 0, 1, 3])
out = x_filter.dot(W_filter.reshape((self.filter_size * self.filter_size * self.in_channels, self.out_channels)))
# out has shape (batch_size, out_channel)
row.append(np.array([out], dtype=self.W.value.dtype).reshape((batch_size, 1, 1, self.out_channels)))
output.append(np.dstack(row))
output = np.hstack(output)
output += self.B.value
return output
def backward(self, d_out):
# Hint: Forward pass was reduced to matrix multiply
# You already know how to backprop through that
# when you implemented FullyConnectedLayer
# Just do it the same number of times and accumulate gradients
batch_size, height, width, channels = self.X.shape
_, out_height, out_width, out_channels = d_out.shape
# TODO: Implement backward pass
# Same as forward, setup variables of the right shape that
# aggregate input gradient and fill them for every location
# of the output
d_in = np.zeros(self.X.shape)
# Try to avoid having any other loops here too
for y in range(out_height):
for x in range(out_width):
# TODO: Implement backward pass for specific location
# Aggregate gradients for both the input and
# the parameters (W and B)
d_filter = d_out[:, y, x, :]
X_filter = self.X[:, y: y + self.filter_size, x: x + self.filter_size, :]
X_filter = np.transpose(X_filter, axes=[0, 3, 1, 2])
X_filter = X_filter.reshape((batch_size, self.filter_size * self.filter_size * channels))
X_transpose = X_filter.transpose()
W_filter = np.transpose(self.W.value, axes=[2, 0, 1, 3])
W_filter = W_filter.reshape((self.filter_size * self.filter_size * self.in_channels, self.out_channels))
W_transpose = W_filter.transpose()
d_W_filter = np.dot(X_transpose, d_filter)
d_W_filter = d_W_filter.reshape(self.in_channels, self.filter_size, self.filter_size, self.out_channels)
d_W_transpose = np.transpose(d_W_filter, axes=[2, 1, 0, 3])
self.W.grad += d_W_transpose
E = np.ones(shape=(1, batch_size))
B = np.dot(E, d_filter)
B = B.reshape((d_filter.shape[1]))
self.B.grad += B
d_in_xy = np.dot(d_filter, W_transpose)#d_filter.dot(w_filter.transpose())
d_in_xy = d_in_xy.reshape((batch_size, channels, self.filter_size, self.filter_size))
d_in_xy = np.transpose(d_in_xy, axes=[0, 3, 2, 1])
d_in[:, y: y + self.filter_size, x: x + self.filter_size, :] += d_in_xy
if self.padding:
d_in = d_in[:, self.padding: -self.padding, self.padding: -self.padding, :]
return d_in
def params(self):
return { 'W': self.W, 'B': self.B }
def reset_grad(self):
self.W.grad = np.zeros_like(self.W.value)
self.B.grad = np.zeros_like(self.B.value)
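# Output-size sketch (illustrative): with padding p and filter size f the layer
# above produces spatial dims (H + 2p - f + 1, W + 2p - f + 1), so padding=1
# with a 3x3 filter preserves height and width.
def _demo_convolutional_shapes():
    conv = ConvolutionalLayer(in_channels=3, out_channels=8, filter_size=3, padding=1)
    X = np.random.randn(2, 16, 16, 3)
    out = conv.forward(X)
    assert out.shape == (2, 16, 16, 8)   # 16 + 2*1 - 3 + 1 = 16
    d_in = conv.backward(np.ones_like(out))
    assert d_in.shape == X.shape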
class MaxPoolingLayer:
def __init__(self, pool_size, stride):
'''
Initializes the max pool
Arguments:
pool_size, int - area to pool
stride, int - step size between pooling windows
'''
self.pool_size = pool_size
self.stride = stride
self.X = None
def forward(self, X):
batch_size, height, width, channels = X.shape
# TODO: Implement maxpool forward pass
# Hint: Similarly to Conv layer, loop on
# output x/y dimension
self.X = X
output = []
for y in range(0, height, self.stride):
row = []
for x in range(0, width, self.stride):
X_filter = X[:, y: y + self.pool_size, x: x + self.pool_size, :]
row.append(X_filter.max(axis=1).max(axis=1).reshape(batch_size, 1, 1, channels))
row = np.dstack(row)
output.append(row)
output = np.hstack(output)
return output
def backward(self, d_out):
# TODO: Implement maxpool backward pass
batch_size, height, width, channels = self.X.shape
output = np.zeros(self.X.shape)
for y_num, y in enumerate(range(0, height, self.stride)):
for x_num, x in enumerate(range(0, width, self.stride)):
d_filter = d_out[:, y_num, x_num, :]
d_filter = d_filter.reshape(batch_size, 1, 1, channels)
X_filter = self.X[:, y: y + self.pool_size, x: x + self.pool_size, :]
d_filter_out = (X_filter == X_filter.max(axis=1).max(axis=1).reshape(batch_size, 1, 1, channels)) * d_filter
                output[:, y: y + self.pool_size, x: x + self.pool_size, :] += d_filter_out
        return output
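# Shape sketch (illustrative): with pool_size equal to stride, max pooling
# halves each spatial dimension, and backward routes gradients back to the
# positions of the maxima, restoring the input shape.
def _demo_maxpool_shapes():
    pool = MaxPoolingLayer(pool_size=2, stride=2)
    X = np.random.randn(2, 8, 8, 3)
    out = pool.forward(X)
    assert out.shape == (2, 4, 4, 3)
    d_in = pool.backward(np.ones_like(out))
    assert d_in.shape == X.shape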
import numpy as np
from cdlib.evaluation.internal import onmi
from cdlib.evaluation.internal.omega import Omega
from nf1 import NF1
from collections import namedtuple, defaultdict
__all__ = [
"MatchingResult",
"normalized_mutual_information",
"overlapping_normalized_mutual_information_LFK",
"overlapping_normalized_mutual_information_MGH",
"omega",
"f1",
"nf1",
"adjusted_rand_index",
"adjusted_mutual_information",
"variation_of_information",
"partition_closeness_simple",
]
# MatchingResult = namedtuple("MatchingResult", ['mean', 'std'])
MatchingResult = namedtuple("MatchingResult", "score std")
MatchingResult.__new__.__defaults__ = (None,) * len(MatchingResult._fields)
def __check_partition_coverage(first_partition: object, second_partition: object):
nodes_first = {
node: None for community in first_partition.communities for node in community
}
nodes_second = {
node: None for community in second_partition.communities for node in community
}
if len(set(nodes_first.keys()) ^ set(nodes_second.keys())) != 0:
raise ValueError("Both partitions should cover the same node set")
def __check_partition_overlap(first_partition: object, second_partition: object):
if first_partition.overlap or second_partition.overlap:
raise ValueError("Not defined for overlapping partitions")
def normalized_mutual_information(
first_partition: object, second_partition: object
) -> MatchingResult:
"""
Normalized Mutual Information between two clusterings.
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.normalized_mutual_information(louvain_communities,leiden_communities)
"""
__check_partition_coverage(first_partition, second_partition)
__check_partition_overlap(first_partition, second_partition)
first_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(first_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
second_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(second_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
from sklearn.metrics import normalized_mutual_info_score
return MatchingResult(
score=normalized_mutual_info_score(first_partition_c, second_partition_c)
)
def overlapping_normalized_mutual_information_LFK(
first_partition: object, second_partition: object
) -> MatchingResult:
"""
Overlapping Normalized Mutual Information between two clusterings.
Extension of the Normalized Mutual Information (NMI) score to cope with overlapping partitions.
This is the version proposed by Lancichinetti et al. (1)
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.overlapping_normalized_mutual_information_LFK(louvain_communities,leiden_communities)
:Reference:
1. <NAME>., <NAME>., & <NAME>. (2009). Detecting the overlapping and hierarchical community structure in complex networks. New Journal of Physics, 11(3), 033015.
"""
return MatchingResult(
score=onmi.onmi(
[set(x) for x in first_partition.communities],
[set(x) for x in second_partition.communities],
)
)
def overlapping_normalized_mutual_information_MGH(
first_partition: object, second_partition: object, normalization: str = "max"
) -> MatchingResult:
"""
Overlapping Normalized Mutual Information between two clusterings.
Extension of the Normalized Mutual Information (NMI) score to cope with overlapping partitions.
This is the version proposed by McDaid et al. using a different normalization than the original LFR one. See ref.
for more details.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:param normalization: one of "max" or "LFK". Default "max" (corresponds to the main method described in the article)
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.overlapping_normalized_mutual_information_MGH(louvain_communities,leiden_communities)
:Reference:
1. <NAME>., <NAME>., & <NAME>. (2011). Normalized mutual information to evaluate overlapping community finding algorithms. arXiv preprint arXiv:1110.2515. Chicago
"""
if normalization == "max":
variant = "MGH"
elif normalization == "LFK":
variant = "MGH_LFK"
else:
raise ValueError(
"Wrong 'normalization' value. Please specify one among [max, LFK]."
)
return MatchingResult(
score=onmi.onmi(
[set(x) for x in first_partition.communities],
[set(x) for x in second_partition.communities],
variant=variant,
)
)
def omega(first_partition: object, second_partition: object) -> MatchingResult:
"""
Index of resemblance for overlapping, complete coverage, network clusterings.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.omega(louvain_communities,leiden_communities)
:Reference:
    1. <NAME>, <NAME>, and <NAME>. 2012. `Using the omega index for evaluating abstractive community detection. <https://pdfs.semanticscholar.org/59d6/5d5aa09d789408fd9fd3c009a1b070ff5859.pdf/>`_ In Proceedings of Workshop on Evaluation Metrics and System Comparison for Automatic Summarization. Association for Computational Linguistics, Stroudsburg, PA, USA, 10-18.
"""
__check_partition_coverage(first_partition, second_partition)
first_partition = {k: v for k, v in enumerate(first_partition.communities)}
second_partition = {k: v for k, v in enumerate(second_partition.communities)}
om_idx = Omega(first_partition, second_partition)
return MatchingResult(score=om_idx.omega_score)
def f1(first_partition: object, second_partition: object) -> MatchingResult:
"""
    Compute the average F1 score of the optimal matches between the communities of the two input partitions.
Works on overlapping/non-overlapping complete/partial coverage partitions.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.f1(louvain_communities,leiden_communities)
:Reference:
    1. <NAME>., <NAME>., & <NAME>. (2016). `A novel approach to evaluate community detection algorithms on ground truth. <https://www.researchgate.net/publication/287204505_A_novel_approach_to_evaluate_community_detection_algorithms_on_ground_truth/>`_ In Complex Networks VII (pp. 133-144). Springer, Cham.
"""
nf = NF1(first_partition.communities, second_partition.communities)
results = nf.summary()
return MatchingResult(
score=results["details"]["F1 mean"][0], std=results["details"]["F1 std"][0]
)
def nf1(first_partition: object, second_partition: object) -> MatchingResult:
"""
    Compute the Normalized F1 score of the optimal matches between the communities of the two input partitions.
Works on overlapping/non-overlapping complete/partial coverage partitions.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.nf1(louvain_communities,leiden_communities)
:Reference:
    1. <NAME>., <NAME>., & <NAME>. (2016). `A novel approach to evaluate community detection algorithms on ground truth. <https://www.researchgate.net/publication/287204505_A_novel_approach_to_evaluate_community_detection_algorithms_on_ground_truth/>`_
    2. <NAME>. (2017). `RDyn: graph benchmark handling community dynamics. Journal of Complex Networks. <https://academic.oup.com/comnet/article-abstract/5/6/893/3925036?redirectedFrom=PDF/>`_ 5(6), 893-912.
"""
nf = NF1(first_partition.communities, second_partition.communities)
results = nf.summary()
return MatchingResult(score=results["scores"].loc["NF1"][0])
def adjusted_rand_index(
first_partition: object, second_partition: object
) -> MatchingResult:
"""Rand index adjusted for chance.
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_index(a, b) == adjusted_rand_index(b, a)
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.adjusted_rand_index(louvain_communities,leiden_communities)
:Reference:
1. <NAME>., & <NAME>. (1985). `Comparing partitions. <https://link.springer.com/article/10.1007/BF01908075/>`_ Journal of classification, 2(1), 193-218.
"""
__check_partition_coverage(first_partition, second_partition)
__check_partition_overlap(first_partition, second_partition)
first_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(first_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
second_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(second_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
from sklearn.metrics import adjusted_rand_score
return MatchingResult(
score=adjusted_rand_score(first_partition_c, second_partition_c)
)
def adjusted_mutual_information(
first_partition: object, second_partition: object
) -> MatchingResult:
"""Adjusted Mutual Information between two clusterings.
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
import time
import numpy as np
import numpy.random as rd
import gym
import torch
import torch.nn as nn
class EvaluateRewardSV:  # SV: Simplified Version. Only for the tutorial.
def __init__(self, env):
self.env = env
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_eva_reward__sv(self, act, max_step, action_max, is_discrete, is_render=False):
reward_sum = 0
state = self.env.reset()
for _ in range(max_step):
states = torch.tensor((state,), dtype=torch.float32, device=self.device)
actions = act(states)
if is_discrete:
actions = actions.argmax(dim=1) # discrete action space
action = actions.cpu().data.numpy()[0]
next_state, reward, done, _ = self.env.step(action * action_max)
reward_sum += reward
if is_render: # open a window and show this env
self.env.render()
if done:
break
state = next_state
return reward_sum
class QNet(nn.Module): # class AgentQLearning
def __init__(self, state_dim, action_dim, mid_dim):
super().__init__()
self.net = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),
nn.Linear(mid_dim, mid_dim), nn.ReLU(),
nn.Linear(mid_dim, action_dim), )
def forward(self, s):
q = self.net(s)
return q
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, mid_dim):
super().__init__()
self.net = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),
nn.Linear(mid_dim, mid_dim), nn.ReLU(),
nn.Linear(mid_dim, action_dim), nn.Tanh(), )
def forward(self, s):
a = self.net(s)
return a
class Critic(nn.Module): # 2020-05-05 fix bug
def __init__(self, state_dim, action_dim, mid_dim):
super().__init__()
self.net = nn.Sequential(nn.Linear(state_dim + action_dim, mid_dim), nn.ReLU(),
nn.Linear(mid_dim, mid_dim), nn.ReLU(),
nn.Linear(mid_dim, 1), )
def forward(self, s, a):
x = torch.cat((s, a), dim=1)
q = self.net(x)
return q
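# Shape sketch (illustrative): the three networks above map a state batch to
# q-values, a state batch to actions in [-1, 1], and a (state, action) batch to
# one scalar q-value per sample.
def _demo_network_shapes(state_dim=4, action_dim=2, mid_dim=64):
    states = torch.zeros((8, state_dim))
    actions = torch.zeros((8, action_dim))
    assert QNet(state_dim, action_dim, mid_dim)(states).shape == (8, action_dim)
    assert Actor(state_dim, action_dim, mid_dim)(states).shape == (8, action_dim)
    assert Critic(state_dim, action_dim, mid_dim)(states, actions).shape == (8, 1)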
class BufferList:
def __init__(self, memo_max_len):
self.memories = list()
self.max_len = memo_max_len
self.now_len = len(self.memories)
def add_memo(self, memory_tuple):
self.memories.append(memory_tuple)
def init_before_sample(self):
del_len = len(self.memories) - self.max_len
if del_len > 0:
del self.memories[:del_len]
# print('Length of Deleted Memories:', del_len)
self.now_len = len(self.memories)
def random_sample(self, batch_size, device):
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# indices = rd.choice(self.memo_len, batch_size, replace=False) # why perform worse?
# indices = rd.choice(self.memo_len, batch_size, replace=True) # why perform better?
# same as:
indices = rd.randint(self.now_len, size=batch_size)
'''convert list into array'''
arrays = [list()
for _ in range(5)] # len(self.memories[0]) == 5
for index in indices:
items = self.memories[index]
for item, array in zip(items, arrays):
array.append(item)
'''convert array into torch.tensor'''
tensors = [torch.tensor(np.array(ary), dtype=torch.float32, device=device)
for ary in arrays]
return tensors
class BufferArray: # 2020-05-20
def __init__(self, memo_max_len, state_dim, action_dim, ):
memo_dim = 1 + 1 + state_dim + action_dim + state_dim
self.memories = np.empty((memo_max_len, memo_dim), dtype=np.float32)
self.next_idx = 0
self.is_full = False
self.max_len = memo_max_len
self.now_len = self.max_len if self.is_full else self.next_idx
self.state_idx = 1 + 1 + state_dim # reward_dim==1, done_dim==1
self.action_idx = self.state_idx + action_dim
def add_memo(self, memo_tuple):
# memo_array == (reward, mask, state, action, next_state)
self.memories[self.next_idx] = np.hstack(memo_tuple)
self.next_idx = self.next_idx + 1
if self.next_idx >= self.max_len:
self.is_full = True
self.next_idx = 0
def extend_memo(self, memo_array): # 2020-07-07
# assert isinstance(memo_array, np.ndarray)
size = memo_array.shape[0]
next_idx = self.next_idx + size
if next_idx < self.max_len:
self.memories[self.next_idx:next_idx] = memo_array
if next_idx >= self.max_len:
if next_idx > self.max_len:
self.memories[self.next_idx:self.max_len] = memo_array[:self.max_len - self.next_idx]
self.is_full = True
next_idx = next_idx - self.max_len
self.memories[0:next_idx] = memo_array[-next_idx:]
else:
self.memories[self.next_idx:next_idx] = memo_array
self.next_idx = next_idx
def init_before_sample(self):
self.now_len = self.max_len if self.is_full else self.next_idx
def random_sample(self, batch_size, device):
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# indices = rd.choice(self.memo_len, batch_size, replace=False) # why perform worse?
# indices = rd.choice(self.memo_len, batch_size, replace=True) # why perform better?
# same as:
indices = rd.randint(self.now_len, size=batch_size)
memory = self.memories[indices]
if device:
memory = torch.tensor(memory, device=device)
'''convert array into torch.tensor'''
tensors = (
memory[:, 0:1], # rewards
memory[:, 1:2], # masks, mark == (1-float(done)) * gamma
memory[:, 2:self.state_idx], # states
memory[:, self.state_idx:self.action_idx], # actions
memory[:, self.action_idx:], # next_states
)
return tensors
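# Usage sketch (illustrative): storing a few transitions and sampling a batch.
# The tuple layout must match add_memo: (reward, mask, state, action, next_state);
# passing device=None keeps the sampled batch as numpy arrays.
def _demo_buffer_array():
    state_dim, action_dim = 4, 1
    buffer = BufferArray(memo_max_len=128, state_dim=state_dim, action_dim=action_dim)
    state = np.zeros(state_dim, dtype=np.float32)
    for _ in range(32):
        buffer.add_memo((1.0, 0.99, state, 0, state))
    buffer.init_before_sample()
    rewards, masks, states, actions, next_states = buffer.random_sample(
        batch_size=16, device=None)
    assert states.shape == (16, state_dim)
    assert actions.shape == (16, action_dim)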
def soft_target_update(target, online, tau=5e-3):
for target_param, param in zip(target.parameters(), online.parameters()):
target_param.data.copy_(tau * param.data + (1.0 - tau) * target_param.data)
def run__tutorial_discrete_action():
"""It is a DQN tutorial, we need 1min for training.
This simplify DQN can't work well on harder task.
Other RL algorithms can work well on harder task but complicated.
You can change this code and make the training finish in (10 sec, 10k step) as an execrise.
"""
env_name = 'CartPole-v0' # a tutorial RL env. We need 10s for training.
env = gym.make(env_name) # an OpenAI standard env
state_dim = 4
action_dim = 2
action_max = int(1)
target_reward = 195.0
is_discrete = True
# from AgentRun import get_env_info
# state_dim, action_dim, max_action, target_reward, is_discrete = get_env_info(env, is_print=True)
# assert is_discrete is True # DQN is for discrete action space.
""" You will see the following:
| env_name: <CartPoleEnv<CartPole-v0>>, action space: Discrete
| state_dim: 4, action_dim: 2, action_max: 1, target_reward: 195.0
"""
''' I copy the code from AgentDQN to the following for tutorial.'''
net_dim = 2 ** 7 # the dimension (or width) of network
learning_rate = 2e-4 # learning rate for Adam Optimizer (ADAM = RMSProp + Momentum)
max_buffer = 2 ** 12 # the max storage number of replay buffer.
max_epoch = 2 ** 12 # max number of training epochs (episodes)
max_step = 2 ** 9 # max steps the actor interacts with the env before the critic is trained
gamma = 0.99 # reward discount factor (gamma must be less than 1.0)
batch_size = 2 ** 6 # batch_size for network training
criterion = torch.nn.MSELoss() # criterion for critic's q_value estimate
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # choose GPU or CPU automatically
''' Is QNet an actor or a critic? DQN is not an Actor-Critic method.
AgentDQN chooses the action with the largest q value output by Q_Network, so Q_Network acts as an actor.
AgentDQN also estimates q values with Q_Network, so Q_Network acts as a critic.
'''
act = QNet(state_dim, action_dim, net_dim).to(device)
act.train()
act_optim = torch.optim.Adam(act.parameters(), lr=learning_rate)
act_target = QNet(state_dim, action_dim, net_dim).to(device)
act_target.load_state_dict(act.state_dict())
act_target.eval()
# from AgentRun import BufferList # simpler but slower
# buffer = BufferList(max_buffer, state_dim, action_dim=1) # experience replay buffer, discrete action is an int
# from AgentZoo import BufferArray # faster but a bit complicated
buffer = BufferArray(max_buffer, state_dim, action_dim=1) # experience replay buffer, discrete action is an int
'''training loop'''
self_state = env.reset()
self_steps = 0 # steps of an episode
self_r_sum = 0.0 # sum of rewards of an episode with exploration
total_step = 0 # total steps taken before training stops
evaluator = EvaluateRewardSV(env) # SV: Simplified Version for the tutorial
max_reward = evaluator.get_eva_reward__sv(act, max_step, action_max, is_discrete)
# the max r_sum without exploration
start_time = time.time()
for epoch in range(max_epoch):
'''update_buffer'''
explore_rate = 0.1 # explore rate when update_buffer(), epsilon-greedy
rewards = list()
steps = list()
for _ in range(max_step):
if rd.rand() < explore_rate: # epsilon-greedy exploration for DQN
action = rd.randint(action_dim)
else:
states = torch.tensor((self_state,), dtype=torch.float32, device=device)
actions = act_target(states).argmax(dim=1).cpu().data.numpy() # discrete action space
action = actions[0]
next_state, reward, done, _ = env.step(action)
self_r_sum += reward
self_steps += 1
mask = 0.0 if done else gamma
buffer.add_memo((reward, mask, self_state, action, next_state))
self_state = next_state
if done:
rewards.append(self_r_sum)
self_r_sum = 0.0
steps.append(self_steps)
self_steps = 0
self_state = env.reset()
total_step += sum(steps)
avg_reward = np.average(rewards)
print(end=f'Reward:{avg_reward:6.1f} Step:{total_step:8} ')
'''update_parameters'''
loss_c_sum = 0.0
update_times = max_step
buffer.init_before_sample() # update the buffer.now_len
for _ in range(update_times):
with torch.no_grad():
rewards, masks, states, actions, next_states = buffer.random_sample(batch_size, device)
next_q_target = act_target(next_states).max(dim=1, keepdim=True)[0]
q_target = rewards + masks * next_q_target
act.train()
actions = actions.type(torch.long)
q_eval = act(states).gather(1, actions)
critic_loss = criterion(q_eval, q_target)
loss_c_sum += critic_loss.item()
act_optim.zero_grad()
critic_loss.backward()
act_optim.step()
soft_target_update(act_target, act, tau=5e-2)
# soft_target_update(act_target, act, tau=5e-3)
''' A small tau can stabilize training in harder envs.
You can change tau to a smaller value such as 5e-3, but this env is too easy to need it.
You can try harder envs and other DRL algorithms via run__xx() in AgentRun.py.
'''
# loss_a_avg = 0.0
loss_c_avg = loss_c_sum / update_times
print(end=f'Loss:{loss_c_avg:6.1f} ')
# evaluate the true reward of this agent without exploration
eva_reward_list = [evaluator.get_eva_reward__sv(act, max_step, action_max, is_discrete)
for _ in range(3)]
eva_reward = np.average(eva_reward_list)
print(f'TrueReward:{eva_reward:6.1f}')
if eva_reward > max_reward:
max_reward = eva_reward
if max_reward > target_reward:
print(f"|\tReach target_reward: {max_reward:6.1f} > {target_reward:6.1f}")
break
used_time = int(time.time() - start_time)
print(f"|\tTraining UsedTime: {used_time}s")
'''open a window and show the env'''
for _ in range(4):
eva_reward = evaluator.get_eva_reward__sv(act, max_step, action_max, is_discrete, is_render=True)
print(f'|Evaluated reward is: {eva_reward}')
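# Illustrative sketch (added for clarity, not part of the original file): QNet is used by the
# tutorial above but defined elsewhere in this project, so it does not appear in this excerpt.
# The tutorial only assumes a small MLP that maps a state to one q value per discrete action;
# the real QNet's layer sizes and activations may differ from this guess.
class _QNetSketch(torch.nn.Module):
    def __init__(self, state_dim, action_dim, mid_dim):
        super().__init__()
        self.net = torch.nn.Sequential(
            torch.nn.Linear(state_dim, mid_dim), torch.nn.ReLU(),
            torch.nn.Linear(mid_dim, mid_dim), torch.nn.ReLU(),
            torch.nn.Linear(mid_dim, action_dim),  # one q value per discrete action
        )

    def forward(self, state):
        return self.net(state)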
def run__tutorial_continuous_action():
"""It is a DDPG | |
"""
Notebook management module.
"""
import os
import markdown
from copy import deepcopy
from PyQt5.QtCore import Qt
from PyQt5 import QtCore, QtGui, QtWidgets
"""
from PyQt4.QtCore import Qt, QDir, QFile, QSettings, QSize
from PyQt4.QtGui import (QAbstractItemDelegate, QAbstractItemView, QColor, QDialog, QDialogButtonBox,
QFileDialog, QFont, QGridLayout, QLabel, QLineEdit, QListWidget, QListWidgetItem,
QPen, QPushButton, QStyle, QVBoxLayout, QTabWidget, QWidget, QBrush, QTreeWidget,
QTreeWidgetItem, QSpinBox, QScrollArea, QCheckBox, QIcon, QPalette, QFont)
"""
import mikidown
## TODO look at using QColorDialog ?
try:
import slickpicker
BETTER_COLOR_PICKER = True
except ImportError as e:
print("Can't find slickpicker, falling back to QLineEdit for editing mikidown colors")
BETTER_COLOR_PICKER = False
from .utils import allMDExtensions
from .config import Setting, readListFromSettings, writeListToSettings, writeDictToSettings
from .fontbutton import QFontButton
class ListDelegate(QtWidgets.QAbstractItemDelegate):
"""Customize view and behavior of notebook list"""
def __init__(self, parent=None):
super(ListDelegate, self).__init__(parent)
def paint(self, painter, option, index):
r = option.rect
if option.state & QtWidgets.QStyle.State_Selected:
painter.fillRect(r, self.parent().palette().highlight())
fontPen = QtGui.QPen(self.parent().palette().highlightedText(), 1, Qt.SolidLine)
else:
painter.fillRect(r, self.parent().palette().base())
fontPen = QtGui.QPen(self.parent().palette().text(), 1, Qt.SolidLine)
painter.setPen(fontPen)
name = index.data(Qt.DisplayRole)
path = index.data(Qt.UserRole)
imageSpace = 10
# notebook name
r = option.rect.adjusted(imageSpace, 0, -10, -20)
name_font = QtGui.QFont(self.parent().font())
name_font.setPointSize(10)
name_font.setBold(True)
if index.flags() == Qt.NoItemFlags:
name_font.setStrikeOut(True)
painter.setFont(name_font)
painter.drawText(r.left(), r.top(), r.width(), r.height(),
Qt.AlignBottom|Qt.AlignLeft, name)
# notebook path
path_font = QtGui.QFont(self.parent().font())
path_font.setPointSize(8)
if index.flags() == Qt.NoItemFlags:
path_font.setStrikeOut(True)
r = option.rect.adjusted(imageSpace, 20, -10, 0)
painter.setFont(path_font)
painter.drawText(r.left(), r.top(), r.width(), r.height(),
Qt.AlignLeft, path)
def sizeHint(self, option, index):
return QtCore.QSize(200, 40)
class NotebookExtSettingsDialog(QtWidgets.QDialog):
def __init__(self, parent=None, cfg_list=[]):
super(NotebookExtSettingsDialog, self).__init__(parent)
self.extCfgEdit = QtWidgets.QTreeWidget()
self.extCfgEdit.setHeaderLabels(['Property', 'Value'])
self.addRow = QtWidgets.QPushButton('+')
self.removeRow = QtWidgets.QPushButton('-')
self.buttonBox = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok |
QtWidgets.QDialogButtonBox.Cancel)
layout = QtWidgets.QGridLayout(self)
layout.addWidget(self.extCfgEdit,0,0,1,2)
layout.addWidget(self.addRow,1,0,1,1)
layout.addWidget(self.removeRow,1,1,1,1)
layout.addWidget(self.buttonBox,2,0,1,2)
self.initCfgPanel(cfg_list)
self.addRow.clicked.connect(self.actionAdd)
self.removeRow.clicked.connect(self.actionRemove)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
def initCfgPanel(self, cfg_list):
for item in cfg_list:
self.actionAdd(prop_name=item[0], prop_val=item[1])
def actionRemove(self):
item = self.extCfgEdit.currentItem()
row = self.extCfgEdit.indexOfTopLevelItem(item)
self.extCfgEdit.takeTopLevelItem(row)
def actionAdd(self, checked=False, prop_name='', prop_val=''):
item = QtWidgets.QTreeWidgetItem(self.extCfgEdit, [prop_name, prop_val])
item.setFlags(item.flags()|Qt.ItemIsEditable)
#self.extCfgEdit.addTopLevelItem(item)
def configToList(self):
items = []
for i in range(self.extCfgEdit.topLevelItemCount()):
witem = self.extCfgEdit.topLevelItem(i)
items.append((witem.text(0), witem.text(1)))
return items
class NotebookSettingsDialog(QtWidgets.QDialog):
"""Dialog for adjusting notebook settings"""
def __init__(self, parent=None):
super(NotebookSettingsDialog, self).__init__(parent)
self.setWindowTitle(self.tr("Notebook settings - mikidown"))
# widgets for tab 1
self.mdExts = QtWidgets.QListWidget()
self.mjEdit = QtWidgets.QLineEdit()
self.moveUp = QtWidgets.QPushButton('<<')
self.moveDown = QtWidgets.QPushButton('>>')
self.configureExtension = QtWidgets.QPushButton(self.tr('Edit Settings for this extension'))
self.tmpdict = deepcopy(self.parent().settings.extcfg)
# widgets for tab 2
self.fExtEdit = QtWidgets.QLineEdit()
self.attImgEdit = QtWidgets.QLineEdit()
self.attDocEdit = QtWidgets.QLineEdit()
# mandatory button box
self.buttonBox = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok |
QtWidgets.QDialogButtonBox.Cancel)
# tab panels
tabs = QtWidgets.QTabWidget()
markupTab = QtWidgets.QWidget()
fileExtsTab = QtWidgets.QWidget()
tabs.addTab(markupTab, "Markdown")
tabs.addTab(fileExtsTab, self.tr("File extensions"))
# initialization functions
self.initExtList()
self.mdExts.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
self.mjEdit.setText(self.parent().settings.mathjax)
self.attImgEdit.setText(', '.join(self.parent().settings.attachmentImage))
self.attDocEdit.setText(', '.join(self.parent().settings.attachmentDocument))
self.fExtEdit.setText(self.parent().settings.fileExt)
# set up tab 1
layout = QtWidgets.QGridLayout(markupTab)
layout.addWidget(QtWidgets.QLabel(self.tr("Markdown extensions")),0,0,1,4)
layout.addWidget(self.mdExts,1,0,1,4)
layout.addWidget(self.moveUp,2,0,1,1)
layout.addWidget(self.moveDown,2,1,1,1)
layout.addWidget(self.configureExtension,2,2,1,2)
layout.addWidget(QtWidgets.QLabel(self.tr("MathJax Location")),3,0,1,1)
layout.addWidget(self.mjEdit,3,1,1,3)
# set up tab 2
layout = QtWidgets.QGridLayout(fileExtsTab)
layout.addWidget(QtWidgets.QLabel(self.tr("Note file extension")),0,0,1,1)
layout.addWidget(QtWidgets.QLabel(self.tr("Image file extension")),1,0,1,1)
layout.addWidget(QtWidgets.QLabel(self.tr("Document file extension")),2,0,1,1)
layout.addWidget(self.fExtEdit,0,1,1,1)
layout.addWidget(self.attImgEdit,1,1,1,1)
layout.addWidget(self.attDocEdit,2,1,1,1)
# put it together
vlayout = QtWidgets.QVBoxLayout(self)
vlayout.addWidget(tabs)
vlayout.addWidget(self.buttonBox)
# setup signal handlers
self.moveUp.clicked.connect(self.moveItemUp)
self.configureExtension.clicked.connect(self.configExt)
self.moveDown.clicked.connect(self.moveItemDown)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
def configExt(self, checked=False, ext=None):
if ext is None:
ext = self.mdExts.currentItem().text()
cfg = self.tmpdict.get(ext,[])
dialog = NotebookExtSettingsDialog(cfg_list=cfg)
done = dialog.exec()
if done:
self.tmpdict[ext] = dialog.configToList()
def initExtList(self):
extset = set(self.parent().settings.extensions)
# a set of the enabled extensions, for fast membership checks below
for ext in self.parent().settings.extensions:
item = QtWidgets.QListWidgetItem(ext, self.mdExts)
item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
item.setCheckState(Qt.Checked)
for ext in self.parent().settings.faulty_exts:
item = QtWidgets.QListWidgetItem(ext, self.mdExts)
item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
item.setBackground(QtGui.QBrush(QtGui.QColor('red')))
item.setForeground(QtGui.QBrush(QtGui.QColor('black')))
item.setCheckState(Qt.Checked)
for ext in allMDExtensions():
if ext in extset: continue
item = QtWidgets.QListWidgetItem(ext, self.mdExts)
item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
item.setCheckState(Qt.Unchecked)
#self.mdExts.addItem(item)
def moveItemUp(self):
item = self.mdExts.currentItem()
row = self.mdExts.currentRow()
if row != 0:
# self.mdExts.removeItemWidget(item)
self.mdExts.takeItem(row)
self.mdExts.insertItem(row-1, item)
self.mdExts.setCurrentRow(row-1)
def moveItemDown(self):
item = self.mdExts.currentItem()
row = self.mdExts.currentRow()
count = self.mdExts.count()
if row != count-1:
self.mdExts.takeItem(row)
self.mdExts.insertItem(row+1, item)
self.mdExts.setCurrentRow(row+1)
def accept(self):
# write to settings first
msettings = self.parent().settings
nbsettings = msettings.qsettings
nbsettings.setValue('mathJax', self.mjEdit.text())
extlist = []
for i in range(self.mdExts.count()):
item = self.mdExts.item(i)
if item.checkState() == Qt.Checked:
extlist.append(item.text())
writeListToSettings(nbsettings, 'extensions', extlist)
writeListToSettings(nbsettings, 'attachmentImage', self.attImgEdit.text().split(", "))
writeListToSettings(nbsettings, 'attachmentDocument', self.attDocEdit.text().split(", "))
writeDictToSettings(nbsettings, 'extensionsConfig', self.tmpdict)
# then to memory
msettings.extensions = extlist
msettings.mathjax = self.mjEdit.text()
msettings.attachmentDocument = readListFromSettings(nbsettings, 'attachmentDocument')
msettings.attachmentImage = readListFromSettings(nbsettings, 'attachmentImage')
msettings.extcfg.update(self.tmpdict)
msettings.md = markdown.Markdown(extensions=msettings.extensions, extension_configs=msettings.extcfg)
# then make mikidown use these settings NOW
curitem=self.parent().notesTree.currentItem()
self.parent().currentItemChangedWrapper(curitem, curitem)
QtWidgets.QDialog.accept(self)
class NotebookListDialog(QtWidgets.QDialog):
"""Display, create, remove, modify notebookList """
def __init__(self, parent=None):
super(NotebookListDialog, self).__init__(parent)
self.notebookList = QtWidgets.QListWidget()
self.moveUp = QtWidgets.QPushButton('<<')
self.moveDown = QtWidgets.QPushButton('>>')
self.add = QtWidgets.QPushButton(self.tr('Add'))
self.remove = QtWidgets.QPushButton(self.tr('Remove'))
self.buttonBox = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok |
QtWidgets.QDialogButtonBox.Cancel)
self.buttonBox.button(QtWidgets.QDialogButtonBox.Ok).setEnabled(False)
layout = QtWidgets.QGridLayout()
layout.addWidget(self.notebookList, 0, 0, 4, 6)
layout.addWidget(self.moveUp, 1, 6)
layout.addWidget(self.moveDown, 2, 6)
layout.addWidget(self.add, 4, 0)
layout.addWidget(self.remove, 4, 1)
layout.addWidget(self.buttonBox, 4, 5, 1, 2)
self.setLayout(layout)
self.notebookList.setItemDelegate(ListDelegate(self.notebookList))
self.notebookList.currentRowChanged.connect(self.updateUi)
self.add.clicked.connect(self.actionAdd)
self.remove.clicked.connect(self.actionRemove)
self.moveUp.clicked.connect(self.moveItemUp)
self.moveDown.clicked.connect(self.moveItemDown)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
self.initList()
def initList(self):
self.notebookList.clear()
notebooks = Mikibook.read()
for nb in notebooks:
item = QtWidgets.QListWidgetItem()
item.setData(Qt.DisplayRole, nb[0])
item.setData(Qt.UserRole, nb[1])
lockPath = os.path.join(nb[1], '.mikidown_lock')
if os.path.exists(lockPath):
item.setFlags(Qt.NoItemFlags)
self.notebookList.addItem(item)
self.updateUi(len(notebooks) != 0)
self.notebookList.setCurrentRow(0)
# QListWidgetItem(nb, self.notebookList) ???
def updateUi(self, row):
flag = (row != -1)
self.buttonBox.button(QtWidgets.QDialogButtonBox.Ok).setEnabled(flag)
self.remove.setEnabled(flag)
self.moveUp.setEnabled(flag)
self.moveDown.setEnabled(flag)
def actionAdd(self):
Mikibook.create()
self.initList()
count = self.notebookList.count()
self.notebookList.setCurrentRow(count-1)
def actionRemove(self):
item = self.notebookList.currentItem()
row = self.notebookList.currentRow()
name = item.data(Qt.DisplayRole)
path = item.data(Qt.UserRole)
self.notebookList.takeItem(row)
Mikibook.remove(name, path)
def moveItemUp(self):
item = self.notebookList.currentItem()
row = self.notebookList.currentRow()
if row != 0:
# self.notebookList.removeItemWidget(item)
self.notebookList.takeItem(row)
self.notebookList.insertItem(row-1, item)
self.notebookList.setCurrentRow(row-1)
def moveItemDown(self):
item = self.notebookList.currentItem()
row = self.notebookList.currentRow()
count = self.notebookList.count()
if row != count-1:
self.notebookList.takeItem(row)
self.notebookList.insertItem(row+1, item)
self.notebookList.setCurrentRow(row+1)
def accept(self):
notebookPath = self.notebookList.currentItem().data(Qt.UserRole)
notebookName = self.notebookList.currentItem().data(Qt.DisplayRole)
settings = Setting([[notebookName, notebookPath]])
window = mikidown.MikiWindow(settings)
window.show()
count = self.notebookList.count()
notebooks = []
for i in range(count):
name = self.notebookList.item(i).data(Qt.DisplayRole)
path = self.notebookList.item(i).data(Qt.UserRole)
notebooks.append([name, path])
Mikibook.write(notebooks)
QtWidgets.QDialog.accept(self)
class NewNotebookDlg(QtWidgets.QDialog):
def __init__(self, parent=None):
super(NewNotebookDlg, self).__init__(parent)
self.setWindowTitle(self.tr('Add Notebook - mikidown'))
tipLabel = QtWidgets.QLabel(self.tr('Choose a name and folder for your notebook.') +
self.tr('\nThe folder can be an existing notebook folder.'))
self.nameEditor = QtWidgets.QLineEdit()
self.nameEditor.setText(self.tr('Notes'))
nameLabel = QtWidgets.QLabel(self.tr('Name:'))
nameLabel.setBuddy(self.nameEditor)
self.pathEditor = QtWidgets.QLineEdit()
# self.pathEditor.setText('~/mikidown')
self.pathEditor.setText(os.path.expanduser('~').replace(os.sep,'/')+'/mikinotes')
pathLabel = QtWidgets.QLabel(self.tr('Path:'))
pathLabel.setBuddy(self.pathEditor)
browse = QtWidgets.QPushButton(self.tr('Browse'))
buttonBox = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok |
QtWidgets.QDialogButtonBox.Cancel)
grid = QtWidgets.QGridLayout()
grid.setRowMinimumHeight(1, 10)
grid.setRowMinimumHeight(4, 10)
grid.addWidget(tipLabel, 0, 0, 1, 4)
grid.addWidget(nameLabel, 2, 0)
grid.addWidget(self.nameEditor, 2, 1, 1, 4)
grid.addWidget(pathLabel, 3, 0)
grid.addWidget(self.pathEditor, 3, 1, 1, 4)
grid.addWidget(browse, 3, 5)
grid.addWidget(buttonBox, 5, 4, 1, 2)
self.setLayout(grid)
browse.clicked.connect(self.browse)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
def browse(self):
default = os.path.expanduser('~')
path = QtWidgets.QFileDialog.getExistingDirectory(self,
self.tr("Select Folder"),
default,
QtWidgets.QFileDialog.ShowDirsOnly)
self.pathEditor.setText(path)
def closeEvent(self, event):
event.accept()
class MikidownHighlightCfgWidget(QtWidgets.QWidget):
def __init__(self, parent=None):
super(MikidownHighlightCfgWidget, self).__init__(parent)
layout = QtWidgets.QGridLayout(self)
colors = Mikibook.highlighterColors()
for i in range(16):
layout.addWidget(QtWidgets.QLabel(Mikibook.highlighter_labels[i]),i,0,1,1)
if BETTER_COLOR_PICKER:
layout.addWidget(slickpicker.QColorEdit(colors[i]),i,1,1,1)
else:
layout.addWidget(QtWidgets.QLineEdit(colors[i]),i,1,1,1)
def configToList(self):
items=[]
for i in range(16):
if BETTER_COLOR_PICKER:
items.append(self.layout().itemAtPosition(i,1).widget().lineEdit.text())
else:
items.append(self.layout().itemAtPosition(i,1).widget().text())
return items
class MikidownCfgDialog(QtWidgets.QDialog):
def __init__(self, parent=None):
super(MikidownCfgDialog, self).__init__(parent)
#tab = QWidget()
#tab2 = QWidget()
self.setWindowTitle(self.tr("Settings - mikidown"))
self.recentNotesCount = QtWidgets.QSpinBox()
recent_notes_n = Mikibook.settings.value('recentNotesNumber',type=int, defaultValue=20)
self.recentNotesCount.setValue(recent_notes_n)
self.buttonBox = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok |
QtWidgets.QDialogButtonBox.Cancel)
self.hltCfg = MikidownHighlightCfgWidget(parent=self)
self.tabWidth = QtWidgets.QSpinBox(self)
self.tabWidth.setRange(2, 8)
self.tabWidth.setSingleStep(2)
self.iconTheme = QtWidgets.QLineEdit(self)
self.iconTheme.setText(Mikibook.settings.value('iconTheme', QtGui.QIcon.themeName()))
self.editorFont = QFontButton(parent=self)
fontval = QtGui.QFont()
fontfam = Mikibook.settings.value('editorFont', defaultValue=None)
fontsize = Mikibook.settings.value('editorFontSize', type=int, defaultValue=12)
if fontfam is not None:
fontval.setFamily(fontfam)
fontval.setPointSize(fontsize)
self.headerScalesFont = QtWidgets.QCheckBox(self)
if Mikibook.settings.value('headerScaleFont', type=bool, defaultValue=True):
self.headerScalesFont.setCheckState(Qt.Checked)
else:
self.headerScalesFont.setCheckState(Qt.Unchecked)
self.editorFont.font = fontval
self.tabWidth.setValue(Mikibook.settings.value('tabWidth', type=int, defaultValue=4))
self.tabToSpaces = QtWidgets.QCheckBox(self)
if Mikibook.settings.value('tabInsertsSpaces', type=bool, defaultValue=True):
self.tabToSpaces.setCheckState(Qt.Checked)
else:
self.tabToSpaces.setCheckState(Qt.Unchecked)
self.minimizeToTray = QtWidgets.QCheckBox(self)
if Mikibook.settings.value('minimizeToTray', type=bool, defaultValue=False):
self.minimizeToTray.setCheckState(Qt.Checked)
else:
self.minimizeToTray.setCheckState(Qt.Unchecked)
layout = QtWidgets.QGridLayout(self)
layout.addWidget(QtWidgets.QLabel(self.tr("Minimize to tray?")),0,0,1,1)
layout.addWidget(self.minimizeToTray,0,1,1,1)
layout.addWidget(QtWidgets.QLabel(self.tr("# of recently viewed notes to keep")),1,0,1,1)
layout.addWidget(self.recentNotesCount,1,1,1,1)
layout.addWidget(QtWidgets.QLabel(self.tr("Editor font")), 2, 0, 1, 1)
layout.addWidget(self.editorFont, 2, 1, 1, 1)
layout.addWidget(QtWidgets.QLabel(self.tr("Header rank scales editor font?")), 3, 0, 1, 1)
layout.addWidget(self.headerScalesFont, 3, 1, 1, 1)
qs = QtWidgets.QScrollArea(self)
qs.setWidget(self.hltCfg)
layout.addWidget(QtWidgets.QLabel(self.tr("Tabs expand to spaces?")), 4, 0, 1, 1)
layout.addWidget(self.tabToSpaces, 4, 1, 1, 1)
layout.addWidget(QtWidgets.QLabel(self.tr("Tab width")), 5, 0, 1, 1)
layout.addWidget(self.tabWidth, 5, 1, 1, 1)
layout.addWidget(QtWidgets.QLabel(self.tr("Icon Theme")),6,0,1,1)
layout.addWidget(self.iconTheme,6,1,1,1)
layout.addWidget(qs,7,0,1,2)
layout.addWidget(self.buttonBox,8,0,1,2)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
def accept(self):
Mikibook.settings.setValue('recentNotesNumber', self.recentNotesCount.value())
Mikibook.settings.setValue('editorFont', self.editorFont.font.family())
Mikibook.settings.setValue('editorFontSize', self.editorFont.font.pointSize())
if self.headerScalesFont.isChecked():
Mikibook.settings.setValue('headerScaleFont', True)
else:
Mikibook.settings.setValue('headerScaleFont', False)
Mikibook.settings.setValue('tabWidth', self.tabWidth.value())
Mikibook.settings.setValue('iconTheme', self.iconTheme.text())
if self.tabToSpaces.isChecked():
Mikibook.settings.setValue('tabInsertsSpaces', True)
else:
Mikibook.settings.setValue('tabInsertsSpaces', False)
Mikibook.settings.setValue(
'minimizeToTray',
self.minimizeToTray.isChecked()
)
Mikibook.setHighlighterColors(self.hltCfg.configToList())
QtGui.QIcon.setThemeName(self.iconTheme.text())
#then make mikidown use these settings NOW
self.parent().loadHighlighter()
QtWidgets.QDialog.accept(self)
class Mikibook():
# ~/.config/mikidown/mikidown.conf
settings = QtCore.QSettings(QtCore.QSettings.IniFormat, QtCore.QSettings.UserScope, 'mikidown', 'mikidown')
lockpath = os.path.join(os.path.dirname(settings.fileName()),'lock').replace(os.sep,'/')
highlighter_labels = [
'HTML Tags',
'1<sup>st</sup> LVL headers',
'2<sup>nd</sup> LVL headers',
'3<sup>rd</sup> LVL headers',
'4<sup>th</sup> and lower LVL headers',
'HTML Symbols',
'HTML comments',
'Strikethrough',
'Underline',
'Bold',
'Italics',
'Links',
'Links and images',
'Block Quotes',
'Fenced Code',
'Math'
]
@staticmethod
def highlighterColors():
items
local_var_params and
len(local_var_params['namespace']) > 1024):
raise ValueError("Invalid value for parameter `namespace` when calling `namespaces_namespace_jobs_get`, length must be less than or equal to `1024`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/namespaces/{namespace}/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Jobs', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def namespaces_namespace_jobs_job_get(self, namespace, job, **kwargs): # noqa: E501
"""Retrieve a job # noqa: E501
Retrieve a job. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.namespaces_namespace_jobs_job_get(namespace, job, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: The name of the namespace. (required)
:param str job: The name of the job. (required)
:return: Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.namespaces_namespace_jobs_job_get_with_http_info(namespace, job, **kwargs) # noqa: E501
else:
(data) = self.namespaces_namespace_jobs_job_get_with_http_info(namespace, job, **kwargs) # noqa: E501
return data
def namespaces_namespace_jobs_job_get_with_http_info(self, namespace, job, **kwargs): # noqa: E501
"""Retrieve a job # noqa: E501
Retrieve a job. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.namespaces_namespace_jobs_job_get_with_http_info(namespace, job, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: The name of the namespace. (required)
:param str job: The name of the job. (required)
:return: Job
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['namespace', 'job'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method namespaces_namespace_jobs_job_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in local_var_params or
local_var_params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `namespaces_namespace_jobs_job_get`") # noqa: E501
# verify the required parameter 'job' is set
if ('job' not in local_var_params or
local_var_params['job'] is None):
raise ValueError("Missing the required parameter `job` when calling `namespaces_namespace_jobs_job_get`") # noqa: E501
if ('namespace' in local_var_params and
len(local_var_params['namespace']) > 1024):
raise ValueError("Invalid value for parameter `namespace` when calling `namespaces_namespace_jobs_job_get`, length must be less than or equal to `1024`") # noqa: E501
if ('job' in local_var_params and
len(local_var_params['job']) > 1024):
raise ValueError("Invalid value for parameter `job` when calling `namespaces_namespace_jobs_job_get`, length must be less than or equal to `1024`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
if 'job' in local_var_params:
path_params['job'] = local_var_params['job'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/namespaces/{namespace}/jobs/{job}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Job', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def namespaces_namespace_jobs_job_put(self, namespace, job, **kwargs): # noqa: E501
"""Create a job # noqa: E501
Creates a new job object. All job objects are immutable and are uniquely identified by a generated ID. Marquez will create a version of a job each time the contents of the object are modified. For example, the `location` of a job may change over time resulting in new versions. The accumulated versions can be listed, used to rerun a specific job version or possibly help debug a failed job run. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.namespaces_namespace_jobs_job_put(namespace, job, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: The name of the namespace. (required)
:param str job: The name of the job. (required)
:param CreateJob create_job:
:return: Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.namespaces_namespace_jobs_job_put_with_http_info(namespace, job, **kwargs) # noqa: E501
else:
(data) = self.namespaces_namespace_jobs_job_put_with_http_info(namespace, job, **kwargs) # noqa: E501
return data
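    # Illustrative usage sketch (added for clarity; not part of the generated client). Per the
    # docstring above, a caller creates or updates a job roughly like this -- the `api` variable,
    # namespace/job names and the CreateJob payload are hypothetical:
    #
    #   body = CreateJob(...)  # fields per the Marquez API spec, not shown in this excerpt
    #   job = api.namespaces_namespace_jobs_job_put("my-namespace", "my-job", create_job=body)
    #   # async variant: returns the request thread, call .get() to obtain the Job
    #   thread = api.namespaces_namespace_jobs_job_put("my-namespace", "my-job",
    #                                                  create_job=body, async_req=True)
    #   job = thread.get()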
def namespaces_namespace_jobs_job_put_with_http_info(self, namespace, job, **kwargs): # noqa: E501
"""Create a job # noqa: E501
Creates a new job object. All job objects are immutable and are uniquely identified by a generated ID. Marquez will create a version of a job each time the contents of the object are modified. For example, the `location` of a job may change over time resulting in new versions. The accumulated versions can be listed, used to rerun a specific job version or possibly help debug a failed job run. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.namespaces_namespace_jobs_job_put_with_http_info(namespace, job, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: The name of the namespace. (required)
:param str job: The name of the job. (required)
:param CreateJob create_job:
:return: Job
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['namespace', 'job', 'create_job'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method namespaces_namespace_jobs_job_put" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in local_var_params or
local_var_params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `namespaces_namespace_jobs_job_put`") # noqa: E501
# verify the required parameter 'job' is set
if ('job' not in local_var_params or
local_var_params['job'] is None):
raise ValueError("Missing the required parameter `job` when calling `namespaces_namespace_jobs_job_put`") # noqa: E501
if ('namespace' in local_var_params and
len(local_var_params['namespace']) > 1024):
raise ValueError("Invalid value for parameter `namespace` when calling `namespaces_namespace_jobs_job_put`, length must be less than or equal to `1024`") # noqa: E501
if ('job' in local_var_params and
len(local_var_params['job']) > 1024):
raise ValueError("Invalid value for parameter `job` when calling `namespaces_namespace_jobs_job_put`, length must be less than or equal to `1024`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
if 'job' in local_var_params:
path_params['job'] = local_var_params['job'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'create_job' in local_var_params:
body_params = local_var_params['create_job']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/namespaces/{namespace}/jobs/{job}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Job', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def namespaces_namespace_jobs_job_runs_get(self, namespace, job, **kwargs): # noqa: E501
"""List all job runs # noqa: E501
Returns a list of job runs. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.namespaces_namespace_jobs_job_runs_get(namespace, job, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: The name of the namespace. (required)
:param str job: The name of the job. (required)
:return: JobRuns
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.namespaces_namespace_jobs_job_runs_get_with_http_info(namespace, job, **kwargs) # noqa: E501
else:
(data) = self.namespaces_namespace_jobs_job_runs_get_with_http_info(namespace, job, **kwargs) # noqa: E501
return data
def namespaces_namespace_jobs_job_runs_get_with_http_info(self, namespace, job, **kwargs): # noqa: E501
"""List all job runs # noqa: E501
Returns a list of job runs. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, <NAME>
#
# This module is part of oyProjectManager and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import os
import logging
logger = logging.getLogger(__name__)
class Config(object):
"""Config abstraction
Idea is coming from Sphinx config.
Holds system wide configuration variables. See
`configuring oyProjectManager`_ for more detail.
.. _configuring oyProjectManager: ../configure.html
"""
default_config_values = dict(
database_url="sqlite:///$OYPROJECTMANAGER_PATH/project_manager.db",
status_list=[
'WTS',
'WIP',
'REV',
'APP',
'CMP'
],
status_list_long_names=[
'Waiting To Start',
'Work In Progress',
'For Review',
'Approved',
'Completed'
],
status_bg_colors=[
(192, 80, 77), #WTS
(255, 192, 0), #WIP
( 89, 141, 213), #REV
(155, 187, 89), #APP
(155, 187, 89), #CMP
],
status_fg_colors=[
(255, 255, 255), #WTS
( 0, 0, 0), #WIP
( 0, 0, 0), #REV
( 0, 0, 0), #APP
( 0, 0, 0), #CMP
],
sequence_format="%h%p%t %R",
shot_number_prefix="SH",
shot_number_padding=3,
rev_number_prefix="r",
rev_number_padding=2,
ver_number_prefix="v",
ver_number_padding=3,
default_fps=25,
default_asset_type_name="Generic",
default_take_name="Main",
users_data=[{"name": "Administrator", "initials": "adm"}],
# just use one repository for now
repository_env_key="REPO",
repository={
"name": "Default",
"windows_path": "~/Projects",
"linux_path": "~/Projects",
"osx_path": "~/Projects"
},
file_size_format="%.2f MB",
time_format='%d.%m.%Y %H:%M',
environments=[
{
"name": "Maya",
"extensions": ["ma", "mb"]
},
{
"name": "Houdini",
"extensions": ["hip"]
},
{
"name": "Nuke",
"extensions": ["nk"],
},
{
"name": "Photoshop",
"extensions": ["psd", "pdd"],
"export_extensions": ["tif", "tga", "bmp", "jpg", "iff"],
},
{
"name": "3DEqualizer",
"extensions": ["3te"]
},
{
"name": "Fusion",
"extensions": ["comp"]
}
],
resolution_presets={
"PC Video": [640, 480, 1.0],
"NTSC": [720, 486, 0.91],
"NTSC 16:9": [720, 486, 1.21],
"PAL": [720, 576, 1.067],
"PAL 16:9": [720, 576, 1.46],
"HD 720": [1280, 720, 1.0],
"HD 1080": [1920, 1080, 1.0],
"1K Super 35": [1024, 778, 1.0],
"2K Super 35": [2048, 1556, 1.0],
"4K Super 35": [4096, 3112, 1.0],
"A4 Portrait": [2480, 3508, 1.0],
"A4 Landscape": [3508, 2480, 1.0],
"A3 Portrait": [3508, 4960, 1.0],
"A3 Landscape": [4960, 3508, 1.0],
"A2 Portrait": [4960, 7016, 1.0],
"A2 Landscape": [7016, 4960, 1.0],
"50x70cm Poster Portrait": [5905, 8268, 1.0],
"50x70cm Poster Landscape": [8268, 5905, 1.0],
"70x100cm Poster Portrait": [8268, 11810, 1.0],
"70x100cm Poster Landscape": [11810, 8268, 1.0],
"1k Square": [1024, 1024, 1.0],
"2k Square": [2048, 2048, 1.0],
"3k Square": [3072, 3072, 1.0],
"4k Square": [4096, 4096, 1.0],
},
default_resolution_preset="HD 1080",
project_structure="""{% for sequence in project.sequences %}
{% set seq_path = project.full_path + '/Sequences/' + sequence.code %}
{{seq_path}}/Edit/Offline
{{seq_path}}/Edit/Sound
{{seq_path}}/References/Artworks
{{seq_path}}/References/Text/Scenario
{{seq_path}}/References/Text/Brief
{{seq_path}}/References/Photos_Images
{{seq_path}}/References/Videos
{{seq_path}}/References/Others
{{seq_path}}/References/Storyboard
{% for shot in sequence.shots %}
{{seq_path}}/Shots/{{shot.code}}
{{seq_path}}/Shots/{{shot.code}}/Plate
{{seq_path}}/Shots/{{shot.code}}/Reference
{{seq_path}}/Shots/{{shot.code}}/Texture
{% endfor %}
{% endfor %}
{% for asset in project.assets%}
{% set asset_path = project.full_path + '/Assets/' + asset.type + '/' + asset.code %}
{{asset_path}}/Texture
{{asset_path}}/Reference
{% endfor %}
""",
asset_thumbnail_path="{{project.code}}/Assets/{{asset.type}}/{{asset.code}}/Thumbnail",
asset_thumbnail_filename="{{asset.code}}_thumbnail.{{extension}}",
shot_thumbnail_path="{{project.code}}/Sequences/{{sequence.code}}/Shots/{{shot.code}}/Thumbnail",
shot_thumbnail_filename="{{shot.code}}_thumbnail.{{extension}}",
thumbnail_format="jpg",
thumbnail_quality=70,
thumbnail_size=[320, 180],
version_types=[
{
"name": "Animation",
"code": "Anim",
"path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Maya", "Houdini"],
"type_for": "Shot"
},
{
"name": "Camera",
"code": "Cam",
"path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Maya", "Houdini"],
"type_for": "Shot"
},
{
"name": "Composition",
"code": "Comp",
"path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}/v{{'%03d'|format(version.version_number)}}",
"extra_folders": "{{version.path}}/Elements",
"environments": ["Nuke", "Fusion"],
"type_for": "Shot"
},
# {
# "name": "Edit",
# "code": "Edit",
# "path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
# "filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
# "output_path": "{{version._path}}/Output/{{version.take_name}}",
# "extra_folders": "",
# "environments": ["Nuke", "Fusion"],
# "type_for": "Shot"
# },
{
"name": "FX",
"code": "FX",
"path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": """{{version.path}}/anim
{{version.path}}/cache
{{version.path}}/exports""",
"environments": ["Maya", "Houdini"],
"type_for": "Shot"
},
{
"name": "Model",
"code": "Model",
"path": "{{project.code}}/Assets/{{asset.type}}/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Maya", "Houdini"],
"type_for": "Asset"
},
{
"name": "Other",
"code": "Other",
"path": "{{project.code}}/Assets/{{asset.type}}/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Maya", "Houdini", "Nuke", "Fusion",
"Photoshop"],
"type_for": "Asset"
},
{
"name": "Previs",
"code": "Previs",
"path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Maya", "Houdini"],
"type_for": "Shot"
},
{
"name": "Lighting",
"code": "Lighting",
"path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Maya", "Houdini"],
"type_for": "Shot"
},
{
"name": "Rig",
"code": "Rig",
"path": "{{project.code}}/Assets/{{asset.type}}/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Maya", "Houdini"],
"type_for": "Asset"
},
{
"name": "Roto",
"code": "Roto",
"path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Nuke", "Fusion"],
"type_for": "Shot"
},
{
"name": "Layout",
"code": "Layout",
"path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Maya", "Houdini"],
"type_for": "Shot"
},
{
"name": "Matte",
"code": "Matte",
"path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Photoshop"],
"type_for": "Shot"
},
{
"name": "Texture",
"code": "Texture",
"path": "{{project.code}}/Assets/{{asset.type}}/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Photoshop", "Nuke", "Fusion"],
"type_for": "Asset",
},
{
"name": "Illustration",
"code": "Illust",
"path": "{{project.code}}/Assets/{{asset.type}}/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Photoshop"],
"type_for": "Asset"
},
{
"name": "Look Development",
"code": "LookDev",
"path": "{{project.code}}/Assets/{{asset.type}}/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Maya", "Houdini"],
"type_for": "Asset"
},
{
"name": "Match Move",
"code": "MM",
"path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["3DEqualizer"],
"type_for": "Shot"
}
],
maya_workspace_file_content="""workspace -fr "3dPaintTextures" ".mayaFiles/sourceimages/3dPaintTextures/";
workspace -fr "Adobe(R) Illustrator(R)" ".mayaFiles/data/";
workspace -fr "aliasWire" ".mayaFiles/data/";
workspace -fr "animImport" ".mayaFiles/data/";
workspace -fr "animExport" ".mayaFiles/data/";
workspace -fr "audio" ".mayaFiles/sound/";
workspace -fr "autoSave" ".mayaFiles/autosave/";
workspace -fr "clips" ".mayaFiles/clips/";
workspace -fr "DAE_FBX" ".mayaFiles/data/";
workspace -fr "DAE_FBX export" ".mayaFiles/data/";
workspace -fr "depth" ".mayaFiles/renderData/depth/";
workspace -fr "diskCache" ".mayaFiles/cache/";
workspace -fr "DXF" ".mayaFiles/data/";
workspace -fr "DXF export" ".mayaFiles/data/";
workspace -fr "DXF_FBX" ".mayaFiles/data/";
workspace -fr "DXF_FBX export" ".mayaFiles/data/";
workspace -fr "eps" ".mayaFiles/data/";
workspace -fr "EPS" ".mayaFiles/data/";
workspace -fr "FBX" ".mayaFiles/data/";
workspace -fr "FBX export" ".mayaFiles/data/";
workspace -fr "fluidCache" ".mayaFiles/cache/fluid/";
workspace -fr "furAttrMap" ".mayaFiles/renderData/fur/furAttrMap/";
workspace -fr "furEqualMap" ".mayaFiles/renderData/fur/furEqualMap/";
workspace -fr "furFiles" ".mayaFiles/renderData/fur/furFiles/";
workspace -fr "furImages" ".mayaFiles/renderData/fur/furImages/";
workspace -fr "furShadowMap" ".mayaFiles/renderData/fur/furShadowMap/";
workspace -fr "IGES" ".mayaFiles/data/";
workspace -fr "IGESexport" ".mayaFiles/data/";
workspace -fr "illustrator" ".mayaFiles/data/";
workspace -fr "image" ".mayaFiles/images/";
workspace -fr "images" ".mayaFiles/images/";
workspace -fr "iprImages" ".mayaFiles/renderData/iprImages/";
workspace -fr "lights" ".mayaFiles/renderData/shaders/";
workspace -fr "mayaAscii" ".mayaFiles/scenes/";
workspace -fr "mayaBinary" ".mayaFiles/scenes/";
workspace -fr "mel" ".mayaFiles/scripts/";
workspace -fr "mentalray" ".mayaFiles/renderData/mentalray/";
workspace -fr "mentalRay" ".mayaFiles/renderData/mentalray";
workspace -fr "move" ".mayaFiles/data/";
workspace -fr "movie" ".mayaFiles/movies/";
workspace -fr "OBJ" ".mayaFiles/data/";
workspace -fr "OBJexport" ".mayaFiles/data/";
workspace -fr "offlineEdit" ".mayaFiles/scenes/edits/";
workspace -fr "particles" ".mayaFiles/particles/";
workspace -fr "renderData" ".mayaFiles/renderData/";
workspace -fr "renderScenes" ".mayaFiles/scenes/";
workspace -fr "RIB" ".mayaFiles/data/";
workspace -fr "RIBexport" ".mayaFiles/data/";
workspace -fr "scene" ".mayaFiles/scenes/";
workspace -fr "scripts" ".mayaFiles/scripts/";
workspace -fr "shaders" ".mayaFiles/renderData/shaders/";
workspace -fr "sound" ".mayaFiles/sound/";
workspace -fr "sourceImages" ".mayaFiles/sourceimages/";
workspace -fr "templates" ".mayaFiles/assets/";
workspace -fr "textures" ".mayaFiles/images/";
workspace -fr "translatorData" ".mayaFiles/data/";
"""
)
def __init__(self):
self.config_values = Config.default_config_values.copy()
self.user_config = {}
# the priority order is
# oyProjectManager.config
# config.py under .oyrc directory
# config.py under $OYPROJECTMANAGER_PATH
self._parse_settings()
def _parse_settings(self):
# for now just use $OYPROJECTMANAGER_PATH
ENV_KEY = "OYPROJECTMANAGER_PATH"
# try to get the environment variable
if not os.environ.has_key(ENV_KEY):
# don't do anything
logger.debug("no environment key found for user settings")
else:
logger.debug("environment key found")
resolved_path = os.path.expanduser(
os.path.join(
os.environ[ENV_KEY],
"config.py"
)
)
# expanding variables in a `while` loop is not safe, so do the
# expansion a fixed number of times, which should be deep enough;
# nobody is (hopefully) using environment variables nested more than
# five levels deep
resolved_path = os.path.expandvars(
os.path.expandvars(
os.path.expandvars(
os.path.expandvars(
resolved_path
)
)
)
)
try:
try:
logger.debug("importing user config")
execfile(resolved_path, self.user_config)
except SyntaxError, err:
raise RuntimeError("There is a syntax error in your "
"configuration file: " + str(err))
# append the data to the current settings
logger.debug("updating system config")
for key in self.user_config:
if key in self.config_values:
self.config_values[key] = self.user_config[key]
except IOError:
logger.warning("The $OYPROJETMANAGER_PATH:" + resolved_path + \
" doesn't exists! skipping user config")
def __getattr__(self, name):
return self.config_values[name]
def __getitem__(self, name):
return getattr(self, name)
def __setitem__(self, name, value):
return setattr(self, name, value)
def __delitem__(self, name):
delattr(self, name)
def __contains__(self, name):
return name in self.config_values
@property
def last_user_id(self):
"""returns the last user id
It is not very much related with the config.py and user settings, but
it seems the most appropriate place is this one to get information from
individual users.
This should work fairly fast, because it uses the local filesystem not
the network thus the fileserver.
"""
# TODO: This should be replaced with beaker.session
file_name = 'last_user_id'
file_path = os.path.expanduser("~/.oypmrc/")
file_full_path = os.path.join(file_path, file_name)
last_user_id = None
try:
last_user_file = open(file_full_path)
except IOError:
pass
else:
last_user_id = int(last_user_file.readline().strip())
last_user_file.close()
return last_user_id
@last_user_id.setter
def last_user_id(self, user_id):
"""sets the user id for the last user
"""
if not isinstance(user_id, int):
raise RuntimeWarning("user_id for last_user_id should be an int")
file_name = 'last_user_id'
file_path = os.path.expanduser("~/.oypmrc/")
file_full_path = os.path.join(file_path, file_name)
logger.debug("saving user id to %s" % file_full_path)
# create the folder first
try:
os.makedirs(file_path)
except OSError:
# already created
pass
try:
last_user_file
#This file is part of ElectricEye.
#SPDX-License-Identifier: Apache-2.0
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
import boto3
import datetime
from check_register import CheckRegister
registry = CheckRegister()
appstream = boto3.client("appstream")
def describe_users(cache):
response = cache.get("describe_users")
if response:
return response
cache["describe_users"] = appstream.describe_users(AuthenticationType="USERPOOL")
return cache["describe_users"]
@registry.register_check("appstream")
def default_internet_access_check(
cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str
) -> dict:
"""[AppStream.1] AppStream 2.0 fleets should not provide default internet access"""
# loop through AppStream 2.0 fleets
response = appstream.describe_fleets()
myAppstreamFleets = response["Fleets"]
for fleet in myAppstreamFleets:
iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()
fleetArn = str(fleet["Arn"])
fleetName = str(fleet["DisplayName"])
# find fleets that are configured to provide default internet access
defaultInternetAccessCheck = str(fleet["EnableDefaultInternetAccess"])
if defaultInternetAccessCheck == "True":
finding = {
"SchemaVersion": "2018-10-08",
"Id": fleetArn + "/appstream-default-internet-access",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": fleetArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[AppStream.1] AppStream 2.0 fleets should not provide default internet access",
"Description": "AppStream 2.0 fleet "
+ fleetName
+ " is configured to provide default internet access. If you use the Default Internet Access option for enabling internet access, the NAT configuration is not limited to 100 fleet instances. If your deployment must support more than 100 concurrent users, use this configuration. Refer to the remediation instructions if this configuration is not intended",
"Remediation": {
"Recommendation": {
"Text": "If your fleet should not have default internet access refer to the instructions in the Amazon AppStream 2.0 Administration Guide",
"Url": "https://docs.aws.amazon.com/appstream2/latest/developerguide/internet-access.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsAppStreamFleet",
"Id": fleetArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"fleetName": fleetName}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-5",
"NIST SP 800-53 AC-4",
"NIST SP 800-53 AC-10",
"NIST SP 800-53 SC-7",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.1.3",
"ISO 27001:2013 A.13.2.1",
"ISO 27001:2013 A.14.1.2",
"ISO 27001:2013 A.14.1.3",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
# create Sec Hub finding
finding = {
"SchemaVersion": "2018-10-08",
"Id": fleetArn + "/appstream-default-internet-access",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": fleetArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[AppStream.1] AppStream 2.0 fleets should not provide default internet access",
"Description": "AppStream 2.0 fleet "
+ fleetName
+ " is not configured to provide default internet access.",
"Remediation": {
"Recommendation": {
"Text": "If your fleet should not have default internet access refer to the instructions in the Amazon AppStream 2.0 Administration Guide",
"Url": "https://docs.aws.amazon.com/appstream2/latest/developerguide/internet-access.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsAppStreamFleet",
"Id": fleetArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"fleetName": fleetName}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.AC-5",
"NIST SP 800-53 AC-4",
"NIST SP 800-53 AC-10",
"NIST SP 800-53 SC-7",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.1.3",
"ISO 27001:2013 A.13.2.1",
"ISO 27001:2013 A.14.1.2",
"ISO 27001:2013 A.14.1.3",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
@registry.register_check("appstream")
def public_image_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[AppStream.2] AppStream 2.0 images you build should not be publicly accessible"""
#TODO: Right now, this check is returning all public images including what appear
#to be globally public images. My best guess right now is that we could look at
#the arn of public images that don't have an accountId in the arn and ignore those.
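    # A possible approach (hypothetical, not implemented here): AWS-managed public
    # base images have no account ID in their ARN (the fifth ":"-separated field
    # is empty), so account-owned images could be isolated with something like
    #   own_images = [i for i in myAppstreamImages if i["Arn"].split(":")[4] == awsAccountId]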
# loop through AppStream 2.0 images
response = appstream.describe_images(Type="PUBLIC", MaxResults=25)
myAppstreamImages = response["Images"]
for images in myAppstreamImages:
imageName = str(images["Name"])
imageArn = str(images["Arn"])
# ISO Time
iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()
# create Sec Hub finding
finding = {
"SchemaVersion": "2018-10-08",
"Id": imageArn + "/appstream-public-image",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": imageArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"Effects/Data Exposure",
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[AppStream.2] AppStream 2.0 images you build should not be publicly accessible",
"Description": "AppStream 2.0 image "
+ imageName
+ " is publicly accessible. Permissions set on images that are shared with you may limit what you can do with those images. Refer to the remediation instructions if this configuration is not intended. Note that AWS managed AppStream 2.0 images will always be publicly accessible",
"Remediation": {
"Recommendation": {
"Text": "If your image should not be publicly accessible refer to the instructions in the Amazon AppStream 2.0 Administration Guide",
"Url": "https://docs.aws.amazon.com/appstream2/latest/developerguide/administer-images.html#stop-sharing-image-with-all-accounts",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsAppStreamImage",
"Id": imageArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"Image Name": imageName}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-3",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-17",
"NIST SP 800-53 AC-19",
"NIST SP 800-53 AC-20",
"NIST SP 800-53 SC-15",
"AICPA TSC CC6.6",
"ISO 27001:2013 A.6.2.1",
"ISO 27001:2013 A.6.2.2",
"ISO 27001:2013 A.11.2.6",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
@registry.register_check("appstream")
def compromise_appstream_user_check(
cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str
) -> dict:
"""[AppStream.3] AppStream 2.0 users should be monitored for signs of compromise"""
# loop through AppStream 2.0 users
response = describe_users(cache)
myAppStreamUsers = response["Users"]
for users in myAppStreamUsers:
userArn = str(users["Arn"])
userName = str(users["UserName"])
userStatus = str(users["Status"])
iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()
if userStatus == "COMPROMISED":
# create Sec Hub finding
finding = {
"SchemaVersion": "2018-10-08",
"Id": userArn + "/appstream-compromised-user",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": userArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"Unusual Behaviors/User",
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "CRITICAL"},
"Confidence": 99,
"Title": "[AppStream.3] AppStream 2.0 users should be monitored for signs of compromise",
"Description": "AppStream 2.0 user "
+ userName
+ " is compromised. COMPROMISED – The user is disabled because of a potential security threat. Refer to the remediation instructions for information on how to remove them",
"Remediation": {
"Recommendation": {
"Text": "To disable and remove compromised users refer to the instructions in the User Pool Administration section of the Amazon AppStream 2.0 Administration Guide",
"Url": "https://docs.aws.amazon.com/appstream2/latest/developerguide/user-pool-admin.html#user-pool-admin-disabling",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsAppStreamUser",
"Id": userArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"UserName": userName}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF ID.RA-3",
"NIST CSF DE.CM-7",
"NIST SP 800-53 AU-12",
"NIST SP 800-53 CA-7",
"NIST SP 800-53 CM-3",
"NIST SP 800-53 CM-8",
"NIST SP 800-53 PE-3",
"NIST SP 800-53 PE-6",
"NIST SP 800-53 PE-20",
"NIST SP 800-53 PM-12",
"NIST SP 800-53 PM-16",
"NIST SP 800-53 RA-3",
"NIST SP 800-53 SI-4",
"NIST SP 800-53 SI-5",
"AICPA TSC CC3.2",
"AICPA TSC CC7.2",
"ISO 27001:2013 Clause 6.1.2",
"ISO 27001:2013 A.12.4.1",
"ISO 27001:2013 A.14.2.7",
"ISO 27001:2013 A.15.2.1",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": userArn + "/appstream-compromised-user",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": userArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"Unusual Behaviors/User",
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[AppStream.3] AppStream 2.0 users should be monitored for signs of compromise",
"Description": "AppStream 2.0 user " + userName + " is not compromised.",
"Remediation": {
"Recommendation": {
"Text": "To disable and remove compromised users refer to the instructions in the User Pool Administration section of the Amazon AppStream 2.0 Administration Guide",
"Url": "https://docs.aws.amazon.com/appstream2/latest/developerguide/user-pool-admin.html#user-pool-admin-disabling",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsAppStreamUser",
"Id": userArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"UserName": userName}},
}
],
"Compliance": | |
#!/usr/bin/python
# todo: preview doesn't work
# TODO: new types: pixel (rgba)
# todo: legacy? e.g. compute image of points, computation illustration etc.
# todo: doesn't detect wrong number of args
# todo: set color
# todo: filled julia with the number of it
# todo: implement missing rect and arg and line complex
# todo: filled julia: nb it as output also
# todo: copy, useless?
# todo: lyapunov
# todo: implement arg_density + option set_smooth_argument
# todo: "rect_cdensity" : 7, # real src, double density, bool, bool, real, bool
# todo: "set_color" : 5,
# todo: "set_edge_thickness" : 1,
# todo: #"set_arg_shading" : 1,
# todo: "set_rect_shading" : 1,
# todo: "set_imaginary_part" : 1,
# todo: "draw_rect_edge" : 2,
# todo: "draw_rect_cross" : 2,
# todo: "draw_shade_arg" : 2,
# todo: implement load/save
from ezInputConf import convert_string_to_boolean
from ezInputConf import convert_string_to_string
# todo: this should be external
instruction_descriptors = {
# name : (arg types, var types)
"abs" : (
("variable", "variable"),
("complex", "real")),
"add_test_enter_disk" : (
("complex", "real"),
()),
"add_test_leave_annulus" : (
("complex", "real", "real"),
()),
"add_test_leave_disk" : (
("complex", "real"),
()),
"add_test_leave_left_half_plane" : (
("real", ),
()),
"add_test_leave_right_half_plane" : (
("real", ),
()),
"annulus_index" : (
("complex", "real", "real", "variable", "variable"),
("complex", "integer")),
"arg" : (
("complex", "variable", "variable"),
("complex", "real")),
"arg_density" : (
("variable", "variable"),
("real", "real")),
"boundaries" : (
("variable", "variable"),
("integer", "real")),
"cauliflower_julia" : ( # nit, pts, stop
("integer", "variable", "variable"),
("complex", "integer")),
"coord_rect" : (
("complex", "complex", "variable", "variable"),
("complex", "complex")),
"draw_complex_density" : (
("variable", "string"),
("complex", )),
"draw_density" : (
("variable", "string"),
("real", )),
"draw_indexes" : (
("variable", "string"),
("integer", ),
),
"draw_integers" : (
("variable", "string"),
("integer", )),
"draw_reals" : (
("variable", "string"),
("real", )),
"eval_main_map" : (
("variable", "variable"),
("complex", "complex")),
"exit" : (
("string", ),
()),
"is_in_annulus" : (
("complex", "real", "real", "variable", "variable"),
("complex", "integer")),
"is_in_disk" : (
("complex", "real", "variable", "variable"),
("complex", "integer")),
"is_in_filled_julia_set" : (
("integer", "real", "variable", "variable"),
("complex", "integer")),
"is_nan" : (
("variable", "variable"),
("complex", "integer")),
"is_where_wrt_annulus" : (
("complex", "real", "real", "variable", "variable"),
("complex", "integer")),
"iterate_main_map" : ( # z, z_n, n=nb_it, stop_value
("integer", "variable", "variable", "variable", "variable"),
("complex", "complex", "integer", "integer")),
"load_complexes" : (
("string", "variable"),
("complex", )),
"load_integers" : (
("string", "variable"),
("integer", )),
"load_reals" : (
("string", "variable"),
("real", )),
"make_grid" : (
("variable", ),
("complex", )),
"name" : (
("string", ),
()),
"nop" : (
(),
()),
"print_complexes" : (
("variable", ),
("complex", )),
"replace_nans" : (
("complex", "variable", "variable"),
("complex", "complex")),
"save_complexes" : (
("variable", "string"),
("complex", )),
"save_integers" : (
("variable", "string"),
("integer", )),
"save_reals" : (
("variable", "string"),
("real", )),
"reset_tests" : (
(),
()),
"set_critical_value" : (
("complex", ),
()),
"set_drawing_threshold" : (
("real", ),
()),
"set_drawing_type" : (
("string", ),
()),
"set_grid_tlbr" : (
("real", "real", "real", "real"),
()),
"set_grid_width" : (
("integer", ),
()),
"set_main_map_type" : (
("string", ),
()),
"set_number_of_indexes" : (
("integer", ),
()),
"set_number_of_iterations" : (
("integer", ),
()),
"set_preview_parameters" : (
("boolean", "integer", "integer", "real", "string", "string"),
()),
"set_shade_enhance_power" : (
("real", ),
()),
"set_shade_max" : (
("string", ),
()),
"set_shade_type" : (
("string", ),
()),
"set_smooth_argument" : (
("boolean",),
()),
"setup_fatou_inverse_data" : (
("complex", "real"),
()),
"setup_linearizer_data" : (
("integer", "complex", "real", "integer"),
()),
"show_help" : (
("string", ),
()),
}
# -----------------------------------------------------------------------------
instruction_number_args = {
name : len (instruction_descriptors [name] [0])
for name in instruction_descriptors }
variable_types_in_arguments = {
name : instruction_descriptors [name] [1]
for name in instruction_descriptors }
instruction_argument_types = {
name : instruction_descriptors [name] [0]
for name in instruction_descriptors }
def make_variable_positions_in_arguments (instruction_descriptors):
positions = {}
for instr_name in instruction_descriptors:
pos = []
args = instruction_descriptors [instr_name] [0]
for pos_id in range (len (args)):
if (args [pos_id] == "variable"):
pos . append (pos_id)
positions [instr_name] = tuple (pos)
return positions
variable_positions_in_arguments = (
make_variable_positions_in_arguments (instruction_descriptors))
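# Illustrative sketch (not part of the original file): what the derived tables
# above contain for the "arg" instruction, whose descriptor is
# (("complex", "variable", "variable"), ("complex", "real")).
def _example_instruction_lookup():
    assert instruction_number_args["arg"] == 3
    assert instruction_argument_types["arg"] == ("complex", "variable", "variable")
    assert variable_types_in_arguments["arg"] == ("complex", "real")
    assert variable_positions_in_arguments["arg"] == (1, 2)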
def cat_dict (dict0, dict1):
return { ** dict0, ** dict1 }
class Processor:
def __init__ (self, program, external_variables):
self . program = program
self . split_program ()
self . cursor = 0
self . external_variables = external_variables
self . original_variables = {
"Results" : "complex",
"Iterations" : "integer", }
self . original_variable_names = (
list (self . original_variables . keys ()))
self . requested_variables = {}
#self . requested_variables_names = []
self . instructions = []
self . assemble ()
self . variables = cat_dict (self . original_variables,
self . requested_variables)
return
def assemble (self):
current_line = self . get_next_line ()
while (current_line != ""):
self . interpret_line (current_line)
current_line = self . get_next_line ()
return
def split_program (self):
prog_lines = self . program . splitlines ()
self . prog_lines = []
self . line_numbers = []
line_number = 0
for line in prog_lines:
line_number += 1
line = line . strip ()
if (line == ""):
continue
if (line [0] == "#"):
continue
comment_start = line . find ("#")
            # NOTE: this forbids "#" in arguments
if (comment_start > 0):
line = line [:comment_start]
self . prog_lines . append (line)
self . line_numbers . append (line_number)
self . prog_len = len (self . prog_lines)
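    # Illustrative example (hypothetical program text): given the two source lines
    #     "# full-line comment"
    #     "set_grid_width 512   # trailing comment"
    # split_program() keeps only "set_grid_width 512" (text up to the "#") in
    # prog_lines and records its 1-based source line number (2) in line_numbers.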
def get_next_line (self):
if (self . cursor == self . prog_len):
return ""
line = self . prog_lines [self . cursor]
self . cursor += 1
return line
def describe_position (self):
if (self . cursor == 0):
return "(nowhere)"
return "(@ " + str (self . line_numbers [self . cursor - 1]) + ")"
def add_requested_variables (self, arg_variables):
#print ("arg_variables="+str(arg_variables))
previous_variable_names = list (self . requested_variables . keys ())
for variable in arg_variables:
var_name = variable [0]
var_type = variable [1]
try:
expected_var_type = self . original_variables [var_name]
except (KeyError):
pass
else:
if (var_type == expected_var_type):
continue
else:
raise Exception (self . describe_position () + " Wrong var type")
try:
expected_var_type = self . requested_variables [var_name]
except (KeyError):
self . requested_variables [var_name] = var_type
else:
if (expected_var_type == "any"):
continue
elif (var_type != expected_var_type):
raise Exception (self . describe_position () + " Wrong var type")
return
def skip_spaces_in_line (self, cursor):
while ((cursor < len (self . instruction_text))
and (self . instruction_text [cursor] == " ")):
cursor += 1
return cursor
def find_end_of_argument_string (self, cursor):
line_len = len (self . instruction_text)
while (self . instruction_text [cursor] not in ['"']):
if (cursor == line_len - 1):
raise Exception ("Unended string "
+ self . describe_position ()
+ " in \""
+ self . instruction_text
+ "\"")
else:
if (self . instruction_text [cursor] == '\\'):
cursor += 2
if (cursor == line_len):
raise Exception ("Unended string "
+ self . describe_position ()
+ " in \""
+ self . instruction_text
+ "\"")
else:
cursor += 1
return cursor + 1
def find_argument_end (self, argument_start):
if (self . instruction_text [argument_start] == '"'):
argument_end = (
self . find_end_of_argument_string (argument_start + 1))
else:
c = argument_start
while ((c < len (self . instruction_text))
and (self . instruction_text [c] != ",")
and (self . instruction_text [c] != " ")):
c += 1
argument_end = c
return argument_end
def split_instuction_line (self):
raise Exception ("not here yet")
line = self . instruction_text
len_line = len (line)
next_space = line . find (" ")
if (next_space < 0):
first_word = line
arguments = []
else:
first_word = line [: next_space]
next_nonspace = self . skip_spaces_in_line (next_space)
if (next_nonspace == len_line):
arguments = []
else:
while (False):
#todo
pass
self . structured_line = [first_word] + arguments
def get_next_word (self):
cursor = self . instruction_cursor
line = self . instruction_text
len_line = len (line)
#print (cursor)
if (cursor == len_line):
return ""
word_start = cursor
#print ("line="+line)
#print ("start="+line [word_start:])
if (cursor == 0):
word_end = line . find (" ")
if (word_end < 0):
word_end = len_line
next_cursor = len_line
else:
next_cursor = self . skip_spaces_in_line (word_end)
else:
            # NOTE: this restricts the comma to separating arguments only
            # NOTE: that is, it forbids commas inside strings (= only file names)
# it's changing
#NOTE: no empty | |
################################
########### Imports ############
################################
import sys
import traceback
import numpy as np
import scipy.special as ss
import scipy.optimize as so
import scipy.integrate as si
import scipy.interpolate as inter
try:
import h5py as h5
h5py = 1
except ModuleNotFoundError:
h5py = 0
print("Could not find h5py. Datasets will not be able to be saved or loaded using NGC5533_functions.")
#-----------For path detection-----------
import subprocess
#def getGitRoot():
# return subprocess.Popen(['git', 'rev-parse', '--show-toplevel'], stdout=subprocess.PIPE).communicate()[0].rstrip().decode('utf-8')
# The above does not work on Windows machines and has therefore been removed.
defaultpath = '../'
################################
########## Constants ###########
################################
#---------Definitely Constant---------
G = 4.30091e-6 #gravitational constant (kpc/solar mass*(km/s)^2)
rhocrit = 9.3e-18 #critical density of the Universe (kg/km^3)
#---------Measured Directly-----------
L = 3.27e10 #luminosity (Solar Luminosities)
absmag = -22.02 #absolute magnitude
magsun = 4.42 #absolute magnitude of the sun
L0 = np.power(10, (0.4*(magsun-absmag))) #Absolute Magnitude to luminosity
#---------Measured Indirectly---------
ups = 2.8 #bulge mass-to-light ratio (Solar Mass/Solar Luminosity)???
q = 0.33 #intrinsic axis ratio
e2 = 1-(q**2) #eccentricity
i = 52*(np.pi/180) #inclination angle
h_rc = 1.4 #core radius (kpc)
c = 1e-12 #(what does this constant do?)
Mvir = 1e11*((c/(11.7))**(-40/3)) #virial mass (in solar mass) solved from eq(5)
Mbh_def = 2.7e9 #Black Hole mass (in solar mass)
#---------Definitely Variable---------
n_c = 2.7 #concentration parameter
h_c = 8.9 #radial scale-length (kpc)
hrho00_c = 0.31e9 #halo central surface density (solar mass/kpc^2)
drho00_c = 0.31e9 #disk central surface density (solar mass/kpc^2)
#---------Uncategorized-------------------
re_c = 2.6 #effective radius (kpc)
epsdisk = 5.0 #from Noordermeer's paper
rs = (1/c)*(((3*Mvir)/((4*np.pi*100*rhocrit)))**(1/3)) #scale radius (kpc)
rho_s = (100/3)*((c**3)/(np.log(1+c)-(c/(1+c))))*rhocrit #characteristic density
h_gamma = 0
################################
########### Saving #############
################################
def savedata(xvalues,yvalues,group,dataset,path=defaultpath,file='Inputs.hdf5'):
#this is a dummy filename to enforce ordering; try not to save here except for testing!
if h5py == 1:
saved = h5.File(path+'/'+file,'a')
if group in ['Disk', 'disc', 'Disc', 'd', 'D']:
group = 'disk'
print("Group name set to 'disk'.")
        if group in ['bh','Bh','BH','Black Hole','BlackHole','Blackhole','Black hole','black hole']:
group = 'blackhole'
print("Group name set to 'blackhole'.")
if group in ['dm','DM','Dm','Dark Matter','Dark matter','dark matter','h','H','Halo','darkmatter','Darkmatter','DarkMatter']:
group = 'halo'
print("Group name set to 'halo'.")
if group in ['b','B','Bulge']:
group = 'bulge'
print("Group name set to 'bulge'.")
if group in ['t','T','Total']:
group = 'total'
print("Group name set to 'total'.")
try:
grp = saved.create_group(group)
grp.create_dataset(dataset,data=[xvalues,yvalues])
except ValueError:
try:
grp = saved[group]
grp.create_dataset(dataset,data=[xvalues,yvalues])
except RuntimeError:
x = loaddata(group,dataset,path,file)[0]
x = np.append(x,xvalues)
y = loaddata(group,dataset,path,file)[1]
y = np.append(y,yvalues)
x, y = (list(a) for a in zip(*sorted(zip(x, y))))
i = 0
while i < len(x)-1:
if x[i+1] == x[i]:
x = np.delete(x,i+1)
y = np.delete(y,i+1)
else:
i += 1
del grp[dataset]
savedata(x,y,group,dataset,path,file)
return y
finally: #No matter what,
saved.close()
#print("Saved.") #Convenient for debugging but annoying for fitting.
elif h5py == 0:
print("ERROR: h5py was not loaded.")
return 1
def loaddata(group,dataset,path=defaultpath,file='Inputs.hdf5'):
if h5py == 1:
saved = h5.File(path+'/'+file,'r')
if group in ['Disk', 'disc', 'Disc', 'd', 'D']:
group = 'disk'
print("Group name set to 'disk'.")
        if group in ['bh','Bh','BH','Black Hole','BlackHole','Blackhole','Black hole','black hole']:
group = 'blackhole'
print("Group name set to 'blackhole'.")
        if group in ['dm','DM','Dm','Dark Matter','Dark matter','dark matter','h','H','Halo','darkmatter','Darkmatter','DarkMatter']:
group = 'halo'
print("Group name set to 'halo'.")
if group in ['b','B','Bulge']:
group = 'bulge'
print("Group name set to 'bulge'.")
if group in ['t','T','Total']:
group = 'total'
print("Group name set to 'total'.")
        grp = saved[group]
        dset = grp[dataset]
        a = dset[:]
        saved.close() #close the file once the data has been read into memory
        return a
        #Placeholder; I will design this to store information at a later date.
    elif h5py == 0:
        print("ERROR: h5py was not loaded.")
        return 1
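# Illustrative usage sketch (hypothetical group/dataset names; assumes h5py is
# available and the default ../Inputs.hdf5 is writable): a save/load round trip.
def _example_save_load_roundtrip():
    savedata(np.array([1.0, 2.0, 3.0]), np.array([10.0, 20.0, 30.0]), 'halo', 'demo')
    x, y = loaddata('halo', 'demo')
    return x, y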
def checkfile(group='all',path=defaultpath,file='Inputs.hdf5'):
if h5py ==1:
saved = h5.File(path+'/'+file,'r')
if group == 'all':
print('Groups:')
for n in saved:
print(saved[n])
print('')
print(' ---------------- ')
print('')
print('More information:')
for n in saved:
grp = saved[n]
print(grp)
for m in grp:
print(' '+str(grp[m]))
else:
print(group+':')
grp = saved[group]
for n in grp:
print(grp[n])
saved.close()
elif h5py ==0:
print("ERROR: h5py was not loaded.")
return 1
################################
######### Black Hole ###########
################################
def bh_v(r,M=Mbh_def,save=False,load=True,comp='blackhole',**kwargs): #M in solar masses, r in kpc
if isinstance(r,float) or isinstance(r,int):
r = np.asarray([r])
if isinstance(r,list):
r = np.asarray(r)
a = np.sqrt(G*M/r)
if save:
load = False
if load:
try: #Load existing prefactor if available
#file = comp+'.hdf5'
#print(file) #Troubleshooting
y = loaddata(comp,'Mbh'+str(M),file=comp+'.hdf5',**kwargs)[1]
x = loaddata(comp,'Mbh'+str(M),file=comp+'.hdf5',**kwargs)[0]
except KeyError: #If unable to load, save
#If unable to load, load default instead and apply a prefactor retroactively
# y = np.sqrt(M)*loaddata(comp,'Mbh1',file=comp+'.hdf5',**kwargs)[1]
# x = loaddata(comp,'Mbh1',file=comp+'.hdf5',**kwargs)[0]
# spline = inter.InterpolatedUnivariateSpline(x,y,k=3) #k is the order of the polynomial
# return spline(r)
save = True
# except: #Attempting to catch problem with spline having too few points
# print('An error has occured. Switching to save function. Error information below:')
# print(sys.exc_info()[0])
# print(sys.exc_info()[1])
# print()
# print('#--------------------')
# print()
# print()
# print(traceback.format_exc())
# print()
# print()
# print('#--------------------')
# print()
# save = True #Calculate since there aren't enough points
if save:
savedata(r,a,comp,'Mbh'+str(M),file=comp+'.hdf5',**kwargs)
return a
else:
return a
################################
########### Bulge ##############
################################
#I'm not sure how many of these we need to be defined -- I kept everything that was called outside of another function.
#We can condense the number of functions once we know for certain if there are things we don't need again.
def b_gammafunc(x,n=n_c):
return ss.gammainc(2*n,x)*ss.gamma(2*n)-0.5*ss.gamma(2*n)
b_root = so.brentq(b_gammafunc,0,500000,rtol=0.000001,maxiter=100) #come within 1% of exact root within 100 iterations
def b_I0(n=n_c,re=re_c):
return L*(b_root**(2*n))/(re**2*2*np.pi*n*ss.gamma(2*n))
def b_r0(n=n_c,re=re_c):
return re/np.power(b_root,n)
def b_innerintegral(m,n=n_c,re=re_c):
f = lambda x,m,n,re: np.exp(-np.power(x/b_r0(n,re), (1/n)))*np.power(x/b_r0(n,re), 1/n-1)/(np.sqrt(x**2-m**2)) #Inner function
return si.quad(f, m, np.inf,args=(m,n,re))[0]
b_innerintegralv = np.vectorize(b_innerintegral)
def b_vsquare(r,n=n_c,re=re_c):
    C = lambda n,re: (4*G*q*ups*b_I0(n,re))/(b_r0(n,re)*float(n))*(np.sqrt((np.sin(i)**2)+(1/(q**2))*(np.cos(i)**2)))
h = lambda m,r,n,re: C(n,re)*b_innerintegral(m,n,re)*(m**2)/(np.sqrt((r**2)-((m**2)*(e2)))) #integrate outer function
return si.quad(h, 0, r, args=(r,n,re))[0]
def b_vsquarev(r,n=n_c,re=re_c):
a = np.vectorize(b_vsquare)
return a(r,n,re)
def b_v(r,n=n_c,re=re_c,save=False,load=True,comp='bulge',**kwargs):
if isinstance(r,float) or isinstance(r,int):
r = np.asarray([r])
if load:
try: #load if exists
y = loaddata(comp,'n'+str(n)+'re'+str(re),file=comp+'.hdf5',**kwargs)[1]
x = loaddata(comp,'n'+str(n)+'re'+str(re),file=comp+'.hdf5',**kwargs)[0]
b = inter.InterpolatedUnivariateSpline(x,y,k=3) #k is the order of the polynomial
return b(r)
except KeyError: #if does not exist,
save = True #go to save function instead
except: #Attempting to catch problem with spline having too few points
print('An error has occured. Switching to save function. Error information below:')
print(sys.exc_info()[0])
print(sys.exc_info()[1])
print()
print('#--------------------')
print()
print()
print(traceback.format_exc())
print()
print()
print('#--------------------')
print()
save = True #Calculate since there aren't enough points
a = b_vsquarev(r,n,re)**(1/2)
a[np.isnan(a)] = 0
if save:
savedata(r,a,comp,'n'+str(n)+'re'+str(re),file=comp+'.hdf5',**kwargs)
return a
else:
return a
################################
############ Halo ##############
################################
def h_rhat(r,z): #r-hat from Casertano eq(9)
return np.sqrt((r**2)+(z**2))
def h_rho(r,rho00=hrho00_c,rc=h_rc): #Isothermal Density Profile
return rho00*((1+((r/rc)**2))**(-1))
def h_vcasertano(r,z,rc=h_rc,rho00=hrho00_c,gamma=h_gamma): #Velocity
v0h = lambda r,rho00,rc,z: np.sqrt(h_rho(r,rho00,rc)*4*np.pi*G*(h_rhat(r,z)**2)) #eq 9 casertano
return v0h(r,rho00,rc,z)*((r/rc)**gamma) #eq 10 casertano
def h_vjimenez(r,rc=h_rc,rho00=hrho00_c):
return np.sqrt(4*np.pi*G*rho00*(rc**2)*(1-((rc/r)*np.arctan(r/rc))))
def h_vNFW(r,save=True,load=False,comp='hNFW',**kwargs):
    rho = lambda r: rho_s/((r/rs)*((1+r/rs)**2)) #NFW Density Profile
    f = lambda R: 4*np.pi*rho(R)*(R**2) #mass integrand
    mdm = lambda r: si.quad(f, 0, r)[0] #M(r)
    vdm2 = lambda r: (G*mdm(r))/r #v^2: GM(r)/r
    vdm2v = np.vectorize(vdm2)
    a = np.sqrt(vdm2v(r))
    a[np.isnan(a)] = 0
    if save:
        savedata(r,a,comp,'n'+str('PLACEHOLDER'),file=comp+'.hdf5',**kwargs)
        return a
    elif load:
        return loaddata(comp,'n'+str('PLACEHOLDER'),file=comp+'.hdf5',**kwargs)
    else:
        return a
def h_viso(r,rc=h_rc,rho00=hrho00_c,load=True,save=False,comp='halo',**kwargs): #h_v iso
if isinstance(r,float) or isinstance(r,int):
r = np.asarray([r])
a = np.zeros(len(r))
i = 1
while i < len(r):
a[i] = np.sqrt(
4*np.pi*G*rho00*(rc**2)*(1-(
(rc/r[i])*np.arctan(r[i]/rc))
)
)
i += 1
a[np.isnan(a)] = 0
if load:
try: #Load if exists
y = loaddata(comp,'rc'+str(rc)+'rho00'+str(rho00),file=comp+'.hdf5',**kwargs)[1]
x = loaddata(comp,'rc'+str(rc)+'rho00'+str(rho00),file=comp+'.hdf5',**kwargs)[0]
b = inter.InterpolatedUnivariateSpline(x,y,k=3) #k is the order of the polynomial
return b(r)
except KeyError: #If does not exist,
save = True #Calculate and save
except: #Attempting to catch problem with spline having too few points
print('An error has occured. Switching to save function. Error information below:')
print(sys.exc_info()[0])
print(sys.exc_info()[1])
print()
print('#--------------------')
print()
print()
print(traceback.format_exc())
print()
print()
print('#--------------------')
print()
save = True #Calculate since there aren't enough points
if save:
savedata(r,a,comp,'rc'+str(rc)+'rho00'+str(rho00),file=comp+'.hdf5',**kwargs)
return a
else:
return a
h_v = h_viso
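# Illustrative sketch (an assumption, not taken from this file): component curves
# are commonly combined in quadrature to build a total rotation curve. The disk
# component is omitted here because its velocity functions are defined below.
def example_total_velocity(r, Mbh=Mbh_def, n=n_c, re=re_c, rc=h_rc, rho00=hrho00_c):
    vbh = bh_v(r, M=Mbh, save=False, load=False) #black hole
    vb = b_v(r, n=n, re=re, save=False, load=False) #bulge
    vh = h_v(r, rc=rc, rho00=rho00, save=False, load=False) #halo
    return np.sqrt(vbh**2 + vb**2 + vh**2)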
################################
############ Disk ##############
################################
#----- To Fit For --------
#h, rho00
#----- Multiples of h ----
z0 = lambda h: 0.2*h #half-thickness (kpc)
R = lambda h: 4*h #cut-off radius (kpc)
d = lambda h: 0.2*h #cut-off length upper limits (kpc)
#----- Functions ---------
def d_px(r,u,xi): #Initial Function
#Matches Casertano
x = lambda r,u,xi: (r**2+u**2+xi**2)/(2*r*u)
try:
return x(r,u,xi)-np.sqrt(x(r,u,xi)**2-1)
except ZeroDivisionError: #If dividing by zero, return infinity instead of error. (Mostly at 0)
return np.nan #This will allow nan handling later
def d_rho0(r, h=h_c, d_rho00=drho00_c): #density piecewise function
#Matches Casertano
conditions = [r <= R(h),
(r > R(h)) & (r <= R(h)+d(h)),
r > R(h)+d(h)]
functions = [lambda r,h,d_rho00: d_rho00*np.exp(-r/h),
lambda r,h,d_rho00: d_rho00*np.exp(-R(h)/h)*(1-(r-R(h))/d(h)),
lambda r,h,d_rho00: 0]
return np.piecewise(r, conditions, functions, h, d_rho00)
def d_durho0(r, h=h_c, d_rho00=drho00_c): #partial derivative of rho(u,xi)
#Doesn't seem to be written explicitly, but should match Casertano (derivative should be accurate | |
# -*- coding: utf-8 -*-
# Copyright 2020 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
"""
The Bgp_af parser templates file. This contains
a list of parser definitions and associated functions that
facilitate both facts gathering and native command generation for
the given network resource.
"""
import re
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network_template import (
NetworkTemplate,
)
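# Illustrative sketch (an assumption, not part of the collection): each PARSERS
# entry below pairs a "getval" regex (device output -> structured facts) with a
# "setval" renderer (structured config -> CLI line). For example, the dictionary
# used here would be rendered back to "neighbor 10.1.1.1 route-map RM-IN in".
def _example_render_neighbor_route_map():
    cfg = {
        "neighbor": {
            "peer": "10.1.1.1",
            "route_map": {"name": "RM-IN", "direction": "in"},
        }
    }
    return _tmplt_bgp_neighbor(cfg)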
def _tmplt_router_bgp_cmd(config_data):
command = "router bgp {as_number}".format(**config_data)
return command
def _tmplt_bgp_address_family(config_data):
command = ""
if config_data.get("vrf"):
command = "vrf {vrf}\n".format(**config_data)
command += "address-family {afi}".format(**config_data)
if config_data.get("safi"):
command += " {safi}".format(**config_data)
return command
def _tmplt_bgp_params(config_data):
command = "bgp"
if config_data["bgp_params"].get("additional_paths"):
command += " additional-paths {additional_paths}".format(
**config_data["bgp_params"]
)
if config_data["bgp_params"]["additional_paths"] == "send":
command += " any"
elif config_data["bgp_params"].get("next_hop_address_family"):
command += " next-hop address-family ipv6"
elif config_data["bgp_params"].get("next_hop_unchanged"):
command += " next-hop-unchanged"
elif config_data["bgp_params"].get("redistribute_internal"):
command += " redistribute-internal"
elif config_data["bgp_params"].get("route"):
command += " route install-map {route}".format(
**config_data["bgp_params"]
)
return command
def _tmplt_bgp_graceful_restart(config_data):
command = "graceful-restart"
return command
def _tmplt_bgp_neighbor(config_data):
command = "neighbor {peer}".format(**config_data["neighbor"])
if config_data["neighbor"].get("additional_paths"):
command += " additional-paths {additional_paths}".format(
**config_data["neighbor"]
)
if config_data["neighbor"]["additional_paths"] == "send":
command += "any"
elif config_data["neighbor"].get("activate"):
command += " activate"
elif config_data["neighbor"].get("default_originate"):
command += " default-originate"
if config_data["neighbor"]["default_originate"].get("route_map"):
command += " route-map {route_map}".format(
**config_data["neighbor"]["default_originate"]
)
if config_data["neighbor"]["default_originate"].get("always"):
command += " always"
elif config_data["neighbor"].get("graceful_restart"):
command += " graceful-restart"
elif config_data["neighbor"].get("next_hop_unchanged"):
command += " next-hop-unchanged"
elif config_data["neighbor"].get("next_hop_address_family"):
command += " next-hop addres-family ipv6"
elif config_data["neighbor"].get("prefix_list"):
command += " prefix-list {name} {direction}".format(
**config_data["neighbor"]["prefix_list"]
)
elif config_data["neighbor"].get("route_map"):
command += " route-map {name} {direction}".format(
**config_data["neighbor"]["route_map"]
)
elif config_data["neighbor"].get("weight"):
command += " weight {weight}".format(**config_data["neighbor"])
elif config_data["neighbor"].get("encapsulation"):
command += " encapsulation {transport}".format(
**config_data["neighbor"]
)
if config_data["neighbor"]["encapsulation"].get("source_interface"):
command += (
" next-hop-self source-interface {source_interface}".format(
**config_data["neighbor"]
)
)
return command
def _tmplt_bgp_network(config_data):
command = "network {address}".format(**config_data)
if config_data.get("route_map"):
command += " route-map {route_map}".format(**config_data)
return command
def _tmplt_bgp_redistribute(config_data):
command = "redistribute {protocol}".format(**config_data)
if config_data.get("isis_level"):
command += " {isis_level}".format(**config_data)
if config_data.get("ospf_route"):
command += " match {ospf_route}".format(**config_data)
if config_data.get("route_map"):
command += " route-map {route_map}".format(**config_data)
return command
def _tmplt_bgp_route_target(config_data):
command = "route-target {mode} {target}".format(
**config_data["route_target"]
)
return command
class Bgp_afTemplate(NetworkTemplate):
def __init__(self, lines=None):
super(Bgp_afTemplate, self).__init__(lines=lines, tmplt=self)
# fmt: off
PARSERS = [
{
"name": "router",
"getval": re.compile(
r"""
^router\s
bgp
\s(?P<as_num>\S+)
$""",
re.VERBOSE,
),
"setval": _tmplt_router_bgp_cmd,
"compval": "as_number",
"result": {"as_number": "{{ as_num }}"},
"shared": True
},
{
"name": "address_family",
"getval": re.compile(
r"""
\s*(?P<vrf>vrf\s\S+)*
\s*address-family
\s(?P<afi>ipv4|ipv6|evpn)
\s*(?P<type>\S+)*
$""",
re.VERBOSE,
),
"setval": _tmplt_bgp_address_family,
"result": {
"address_family": {
'{{ afi + "_" + vrf|d() }}': {
"afi": "{{ afi }}",
"safi": "{{ type }}",
"vrf": "{{ vrf.split(" ")[1] }}"
}
}
},
"shared": True,
},
{
"name": "bgp_params_additional_paths",
"getval": re.compile(
r"""
\s*bgp
\s+additional-paths
\s+(?P<action>\S+)
*$""",
re.VERBOSE,
),
"setval": _tmplt_bgp_params,
"compval": "bgp_params.additional_paths",
"result": {
"address_family": {
'{{ afi + "_" + vrf|d() }}': {
"bgp_params": {
"additional_paths": "{{ action }}"
}
}
}
},
},
{
"name": "bgp_params.nexthop_address_family",
"getval": re.compile(
r"""
\s*bgp
\s+next-hop
\s+address-family
\s+ipv6
*$""",
re.VERBOSE,
),
"setval": _tmplt_bgp_params,
"compval": "bgp_params.next_hop_address_family",
"result": {
"address_family": {
'{{ afi + "_" + vrf|d() }}': {
"bgp_params": {
"next_hop_unchanged": "{{ 'ipv6' }}"
}
}
}
},
},
{
"name": "bgp_params.nexthop_unchanged",
"getval": re.compile(
r"""
\s*bgp
\s+next-hop-unchanged
*$""",
re.VERBOSE,
),
"setval": _tmplt_bgp_params,
"compval": "bgp_params.next_hop_unchanged",
"result": {
"address_family": {
'{{ afi + "_" + vrf|d() }}': {
"bgp_params": {
"next_hop_unchanged": "{{ True }}"
}
}
}
},
},
{
"name": "bgp_params.redistribute_internal",
"getval": re.compile(
r"""
\s*bgp
\s+redistribute-internal
*$""",
re.VERBOSE,
),
"setval": _tmplt_bgp_params,
"compval": "bgp_params.redistribute_internal",
"result": {
"address_family": {
'{{ afi + "_" + vrf|d() }}': {
"bgp_params": {
"redistribute_internal": "{{ True }}"
}
}
}
},
},
{
"name": "bgp_params.route",
"getval": re.compile(
r"""
\s*bgp
\s+route
\s+install-map
\s+(?P<route>\S+)
*$""",
re.VERBOSE,
),
"setval": _tmplt_bgp_params,
"compval": "bgp_params.route",
"result": {
"address_family": {
'{{ afi + "_" + vrf|d() }}': {
"bgp_params": {
"route": "{{ route }}"
}
}
}
},
},
{
"name": "graceful_restart",
"getval": re.compile(
r"""
\s*graceful-restart
*$""",
re.VERBOSE,
),
"setval": _tmplt_bgp_graceful_restart,
"result": {
"address_family": {
'{{ afi + "_" + vrf|d() }}': {
"graceful_restart": "{{ True }}"
}
}
},
},
{
"name": "neighbor.activate",
"getval": re.compile(
r"""
\s*neighbor
\s+(?P<peer>\S+)
\s+activate
*$""",
re.VERBOSE,
),
"setval": _tmplt_bgp_neighbor,
"compval": "neighbor.activate",
"result": {
"address_family": {
'{{ afi + "_" + vrf|d() }}': {
"neighbor": {
"{{ peer }}": {
"peer": "{{ peer }}",
"activate": "{{ True }}",
}
}
}
}
},
},
{
"name": "neighbor.additional_paths",
"getval": re.compile(
r"""
\s*neighbor
\s+(?P<peer>\S+)
\s+additional-paths
\s+(?P<action>\S+)
*$""",
re.VERBOSE,
),
"setval": _tmplt_bgp_neighbor,
"compval": "neighbor.additional_paths",
"result": {
"address_family": {
'{{ afi + "_" + vrf|d() }}': {
"neighbor": {
"{{ peer }}": {
"peer": "{{ peer }}",
"additional_paths": "{{ action }}",
}
}
}
}
},
},
{
"name": "neighbor.default_originate",
"getval": re.compile(
r"""
\s*neighbor
\s+(?P<peer>\S+)
\s+default-originate
\s*(?P<route_map>route-map\s\S+)*
\s*(?P<always>always)*
$""",
re.VERBOSE,
),
"setval": _tmplt_bgp_neighbor,
"compval": "neighbor.default_originate",
"result": {
"address_family": {
'{{ afi + "_" + vrf|d() }}': {
"neighbor": {
"{{ peer }}": {
"peer": "{{ peer }}",
"default_originate": {
"route_map": "{{ route_map.split(" ")[1] }}",
"always": "{{ True if always is defined }}"
}
}
}
}
}
},
},
{
"name": "neighbor.graceful_restart",
"getval": re.compile(
r"""
\s*neighbor
\s+(?P<peer>\S+)
\s+graceful-restart
*$""",
re.VERBOSE,
),
"setval": _tmplt_bgp_neighbor,
"compval": "neighbor.graceful_restart",
"result": {
"address_family": {
'{{ afi + "_" + vrf|d() }}': {
"neighbor": {
"{{ peer }}": {
"peer": "{{ peer }}",
"graceful_restart": "{{ True }}",
}
}
}
}
},
},
{
"name": "neighbor.next_hop_unchanged",
"getval": re.compile(
r"""
\s*neighbor
\s+(?P<peer>\S+)
\s+next-hop-unchanged
*$""",
re.VERBOSE,
),
"setval": _tmplt_bgp_neighbor,
"compval": "neighbor.next_hop_unchanged",
"result": {
"address_family": {
'{{ afi + "_" + vrf|d() }}': {
"neighbor": {
"{{ peer }}": {
"peer": "{{ peer }}",
"next_hop_unchanged": "{{ True }}",
}
}
}
}
},
},
{
"name": "neighbor.next_hop_address_family",
"getval": re.compile(
r"""
\s*neighbor
\s+(?P<peer>\S+)
\s+next-hop
\s+address-family
\s+ipv6
*$""",
re.VERBOSE,
),
"setval": _tmplt_bgp_neighbor,
"compval": "neighbor.next_hop_address_family",
"result": {
"address_family": {
'{{ afi + "_" + vrf|d() }}': {
"neighbor": {
"{{ peer }}": {
"peer": "{{ peer }}",
"next_hop_address_family": "{{ 'ipv6' }}",
}
}
}
}
},
},
{
"name": "neighbor.prefix_list",
"getval": re.compile(
r"""
\s*neighbor
\s+(?P<peer>\S+)
\s+prefix-list
\s+(?P<name>\S+)
\s+(?P<dir>in|out)
*$""",
re.VERBOSE,
),
"setval": _tmplt_bgp_neighbor,
"compval": "neighbor.prefix_list",
"result": {
"address_family": {
'{{ afi + "_" + vrf|d() }}': {
"neighbor": {
"{{ peer }}": {
"peer": "{{ peer }}",
"prefix_list": {
"name": "{{ name }}",
"direction": "{{ dir }}"
}
}
}
}
}
},
},
{
"name": "neighbor.route_map",
"getval": re.compile(
r"""
\s*neighbor
\s+(?P<peer>\S+)
\s+route-map
\s+(?P<name>\S+)
\s+(?P<dir>in|out)
*$""",
re.VERBOSE,
),
"setval": _tmplt_bgp_neighbor,
"compval": "neighbor.route_map",
"result": {
"address_family": {
'{{ afi + "_" + vrf|d() }}': {
"neighbor": {
"{{ peer }}": {
"peer": "{{ peer }}",
"route_map": {
"name": "{{ name }}",
"direction": "{{ dir }}"
}
}
}
}
}
},
},
{
"name": "neighbor.weight",
"getval": re.compile(
r"""
\s*neighbor
\s+(?P<peer>\S+)
\s+weight
\s+(?P<weight>\d+)
*$""",
re.VERBOSE,
),
"setval": _tmplt_bgp_neighbor,
"compval": "neighbor.weight",
"result": {
"address_family": {
'{{ afi + "_" + vrf|d() }}': {
"neighbor": {
"{{ peer }}": {
"peer": "{{ peer }}",
"weight": "{{ weight }}",
}
}
}
}
},
},
{
"name": "neighbor.encapsulation",
"getval": re.compile(
r"""
\s*neighbor
\s+default
\s+encapsulation
\s+(?P<type>mpls|vxlan)
\s*(next-hop-self)*
\s*(source-interface)*
\s*(?P<interface>\S+\s\S+)*
$""",
re.VERBOSE,
),
"setval": _tmplt_bgp_neighbor,
"compval": "neighbor.encapsulation",
"result": {
"address_family": {
'{{ afi + "_" + vrf|d() }}': {
"neighbor": {
"{{ peer }}": {
"peer": "{{ peer }}",
"encapsulation": {
"transport": "{{ type }}",
"source_interface": "{{ interface }}"
}
}
}
}
}
},
},
{
"name": "network",
"getval": re.compile(
r"""
\s*network
\s+(?P<address>\S+)
\s*(route-map)*
\s*(?P<route_map>\S+)*
$""",
re.VERBOSE,
),
"setval": _tmplt_bgp_network,
"compval": "network",
"result": {
"address_family": {
'{{ afi + "_" + vrf|d() }}': {
"network": {
"{{ address }}": {
"address": "{{ address }}",
"route_map": "{{ route_map }}",
}
}
}
}
},
},
{
"name": "redistribute",
"getval": re.compile(
r"""
\s*redistribute
\s+(?P<route>\S+)
\s*(?P<level>level-1|level-2|level-1-2)*
\s*(?P<match>match\s\S+)*
\s*(?P<route_map>route-map\s\S+)*
$""",
re.VERBOSE,
),
"setval": _tmplt_bgp_redistribute,
"compval": "redistribute",
"result": {
"address_family": {
'{{ afi + "_" + vrf|d() }}': {
"redistribute": [
{
"protocol": "{{ route }}",
"route_map": "{{ route_map.split(" ")[1] }}",
"isis_level": "{{ level }}",
"ospf_route": "{{ match.split(" ")[1] }}"
}
]
}
}
},
},
{
"name": "route_target",
"getval": re.compile(
r"""
\s*route-target
\s+(?P<mode>both|import|export)
\s+(?P<target>\S+)
*$""",
re.VERBOSE,
),
"setval": _tmplt_bgp_route_target,
"compval": "route_target",
"result": {
"address_family": {
'{{ afi + "_" | |
# visnav/dataset-gen.py
import argparse
import csv
import sys
import math
import re
import cv2
import numpy as np
import quaternion
from tqdm import tqdm
from visnav.algo import tools
from visnav.algo.keypoint import KeypointAlgo
from visnav.algo.tools import PositioningException
from visnav.batch1 import get_system_model
from visnav.iotools import cnn_export
from visnav.testloop import TestLoop
from visnav.settings import *
# TODO:
# - Include image rendered at cropped pose (for autoencoder target)
# - Save a config file about parameters used for dataset generation
# - Support usage of real navcam images for generating a validation/testing dataset
# (- Upload dataset to triton?)
def parse_arguments():
missions = ('rose', 'orex', 'didy1n', 'didy1w', 'didy2n', 'didy2w')
parser = argparse.ArgumentParser(description='Asteroid Image Data Generator')
parser.add_argument('--relative', '-R', action='store_true',
help='generate pairs of images with a pose change instead of a single image')
parser.add_argument('--cache', '-c', metavar='DIR', default=CACHE_DIR,
help='path to cache dir (default: %s), ./[mission]/[id] is added to the path' % CACHE_DIR)
parser.add_argument('--output', '-o', metavar='DIR', default=None,
help='path to output dir, default: %s/[mission]/final-[id]' % CACHE_DIR)
parser.add_argument('--mission', '-m', metavar='M', default='rose', choices=missions,
help='mission: %s (default: rose)' % (' | '.join(missions)))
parser.add_argument('--count', '-n', default='10', type=str, metavar='N',
help='number of images to be generated, accepts also format [start:end]')
parser.add_argument('--crop', default=0, type=int,
help='detect object and crop exported images, only when "--relative" not set')
parser.add_argument('--id', '-i', default=None, type=str, metavar='N',
help='a unique dataset id, defaults to a hash calculated from image generation parameters')
parser.add_argument('--max-rot-err', default=10, type=float, metavar='A',
help='Max rotation error (in deg) allowed when determining pose with AKAZE-PnP-RANSAC (default: %f)' % 10)
    parser.add_argument('--res-mult', type=float, default=1.0, help="scale camera resolution, default=1.0")
parser.add_argument('--sm-lores', default=False, help="use low resolution shape model", action='store_true')
parser.add_argument('--sm-noise', default=0, type=float, metavar='SD',
help='Shape model noise level (default: %f)' % 0)
parser.add_argument('--sm-noise-len-sc', default=SHAPE_MODEL_NOISE_LEN_SC, type=float, metavar='SC',
help='Shape model noise length scale (default: %f)' % SHAPE_MODEL_NOISE_LEN_SC)
parser.add_argument('--tx-noise', default=0, type=float, metavar='SD',
help='Texture noise level (default: %f)' % 0)
parser.add_argument('--tx-noise-len-sc', default=SHAPE_MODEL_NOISE_LEN_SC, type=float, metavar='SC',
help='Texture noise length scale (default: %f)' % SHAPE_MODEL_NOISE_LEN_SC)
parser.add_argument('--haze', default=0.0, type=float, metavar='HZ',
help='Max haze brightness (uniform-dist) (default: %f)' % 0.0)
parser.add_argument('--jets', default=0.0, type=float, metavar='JN',
help='Average jet count (exp-distr) (default: %f)' % 0.0)
parser.add_argument('--jet-int-mode', '--jm', default=0.001, type=float, metavar='JM',
help='Jet intensity mode [0, 1], beta-distributed, (default: %f)' % 0.001)
parser.add_argument('--jet-int-conc', '--jc', default=10, type=float, metavar='JC',
help='Jet intensity concentration [1, 1000] (default: %f)' % 10)
parser.add_argument('--max-phase-angle', default=100, type=float, metavar='A',
help='Max phase angle allowed when generating system state (default: %f)' % 100)
parser.add_argument('--max-sc-distance', default=1.0, type=float, metavar='A',
help='Max spacecraft distance as min_dist+A*(max_dist-min_dist) (default: %f)' % 1.0)
parser.add_argument('--max-sc-lateral-disp', default=1.0, type=float, metavar='A',
help='Max spacecraft lateral displacement [0-1] where 0 is always centered (default: %f)' % 1.0)
parser.add_argument('--hapke-noise', '--hn', default=0.0, type=float, metavar='SD',
help=('Randomize all Hapke reflection model parameters by multiplying with log normally'
' distributed random variable with given SD (default: %f)') % 0.0)
parser.add_argument('--hapke-th-sd', '--h1', default=None, type=float, metavar='SD',
help='Override Hapke effective roughness, th_p [deg] param noise sd (default: %f)' % 0.0)
parser.add_argument('--hapke-w-sd', '--h2', default=None, type=float, metavar='SD',
help='Override Hapke single scattering albedo, w [0, 1] param noise sd (default: %f)' % 0.0)
parser.add_argument('--hapke-b-sd', '--h3', default=None, type=float, metavar='SD',
help='Override Hapke SPPF asymmetry parameter, b [-1, 1] param noise sd (default: %f)' % 0.0)
parser.add_argument('--hapke-c-sd', '--h4', default=None, type=float, metavar='SD',
help='Override Hapke SPPF asymmetry parameter, b [0, 1] param noise sd (default: %f)' % 0.0)
parser.add_argument('--hapke-shoe', '--h5', default=None, type=float, metavar='SD',
help='Override Hapke amplitude of shadow-hiding opposition effect (SHOE), B_SH0 [>=0] param noise sd (default: %f)' % 0.0)
parser.add_argument('--hapke-shoe-w', '--h6', default=None, type=float, metavar='SD',
help='Override Hapke angular half width of SHOE [rad] param noise sd (default: %f)' % 0.0)
parser.add_argument('--hapke-cboe', '--h7', default=None, type=float, metavar='SD',
help='Override Hapke amplitude of coherent backscatter opposition effect (CBOE), B_CB0 [>=0] param noise sd (default: %f)' % 0.0)
parser.add_argument('--hapke-cboe-w', '--h8', default=None, type=float, metavar='SD',
help='Override Hapke angular half width of CBOE [rad] param noise sd (default: %f)' % 0.0)
# only related to "relative" data generation mode
parser.add_argument('--noise-time', '--nt', default=0, type=float, metavar='SD',
help='time noise when generating second frame, noise sd [s] (default: %f)' % 0.0)
parser.add_argument('--noise-ast-rot-axis', '--nar', default=0, type=float, metavar='SD',
help='asteroid axis rotation noise when generating second frame, noise sd [deg] (default: %f)' % 0.0)
parser.add_argument('--noise-ast-phase-shift', '--nap', default=0, type=float, metavar='SD',
help='asteroid rotation phase shift noise when generating second frame, noise sd [deg] (default: %f)' % 0.0)
parser.add_argument('--noise-sco-lat', '--nsa', default=0, type=float, metavar='SD',
help='spacecraft latitudinal orientation noise when generating second frame, noise sd [deg] (default: %f)' % 0.0)
parser.add_argument('--noise-sco-lon', '--nso', default=0, type=float, metavar='SD',
help='spacecraft longitudinal orientation noise when generating second frame, noise sd [deg] (default: %f)' % 0.0)
parser.add_argument('--noise-sco-rot', '--nsr', default=0, type=float, metavar='SD',
help='spacecraft roll orientation noise when generating second frame, noise sd [deg] (default: %f)' % 0.0)
parser.add_argument('--noise-lateral', '--nl', default=0, type=float, metavar='SD',
help='spacecraft lateral translational noise when generating second frame, noise sd [m/m, ratio] (default: %f)' % 0.0)
parser.add_argument('--noise-altitude', '--na', default=0, type=float, metavar='SD',
help='spacecraft radial translational noise when generating second frame, noise sd [m/m, ratio] (default: %f)' % 0.0)
parser.add_argument('--noise-phase-angle', '--npa', default=0, type=float, metavar='SD',
help='change in phase angle when generating second frame, noise sd [deg] (default: %f)' % 0.0)
parser.add_argument('--noise-light-dir', '--nld', default=0, type=float, metavar='SD',
help='change in light direction when generating second frame, noise sd [deg] (default: %f)' % 0.0)
args = parser.parse_args()
return args
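# Illustrative invocation (the id, output path and parameter values are
# assumptions, not taken from this file):
#   python visnav/dataset-gen.py --mission rose --id demo01 --count 0:100 \
#       --output ./cache/rose/final-demo01 --res-mult 0.5 --max-rot-err 10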
def main():
args = parse_arguments()
sm = get_system_model(args.mission, hi_res_shape_model=not args.sm_lores, res_mult=args.res_mult)
file_prefix_mod = ''
img_file_prefix = 'cm' if args.sm_noise > 0 else ''
log_prefix = ('r-' if args.relative else '')+'dsg-'+args.mission+'-'+args.id+'-'
cache_path = os.path.join(args.cache, args.mission, args.id)
# operational zone only
sm.min_distance = sm.min_med_distance
sm.max_distance = sm.min_distance + args.max_sc_distance * (sm.max_med_distance - sm.min_distance)
sm.min_elong = 180 - args.max_phase_angle # max phase angle = 100 deg
traj_len = 2 if args.relative else 1
state_gen = lambda sm: sm.random_state(uniform_distance=False, opzone_only=True,
max_sc_lateral_disp=args.max_sc_lateral_disp)
print('starting to generate images')
tl = TestLoop(sm, file_prefix_mod=file_prefix_mod, est_real_ast_orient=False,
state_generator=state_gen, uniform_distance_gen=False, operation_zone_only=True,
cache_path=cache_path,
sm_noise=0, sm_noise_len_sc=SHAPE_MODEL_NOISE_LEN_SC,
navcam_cache_id=img_file_prefix, save_depth=True, save_coords=True,
traj_len=traj_len, traj_prop_dt=60,
real_sm_noise=args.sm_noise, real_sm_noise_len_sc=args.sm_noise_len_sc,
real_tx_noise=args.tx_noise, real_tx_noise_len_sc=args.tx_noise_len_sc,
haze=args.haze,
jets=args.jets, jet_int_mode=args.jet_int_mode, jet_int_conc=args.jet_int_conc,
hapke_noise=args.hapke_noise,
hapke_th_sd=args.hapke_th_sd, hapke_w_sd=args.hapke_w_sd,
hapke_b_sd=args.hapke_b_sd, hapke_c_sd=args.hapke_c_sd,
hapke_shoe=args.hapke_shoe, hapke_shoe_w=args.hapke_shoe_w,
hapke_cboe=args.hapke_cboe, hapke_cboe_w=args.hapke_cboe_w,
noise_time=args.noise_time, noise_ast_rot_axis=args.noise_ast_rot_axis,
noise_ast_phase_shift=args.noise_ast_phase_shift, noise_sco_lat=args.noise_sco_lat,
noise_sco_lon=args.noise_sco_lon, noise_sco_rot=args.noise_sco_rot,
noise_lateral=args.noise_lateral, noise_altitude=args.noise_altitude,
noise_phase_angle=args.noise_phase_angle, noise_light_dir=args.noise_light_dir,
ext_noise_dist=True
)
# check if can skip testloop iterations in case the execution died during previous execution
log_entries = read_logfiles(sm, log_prefix, args.max_rot_err, traj_len=traj_len)
if args.relative:
entry_exists = [i for i, fs in log_entries if np.all(
[os.path.exists(f) and f == tl.cache_file(i, postfix='%d.png' % j)
for j, f in enumerate(fs)])
]
else:
entry_exists = [i for i, f in log_entries if os.path.exists(f) and f == tl.cache_file(i)+'.png']
row_range = get_range(args.count, entry_exists)
if row_range is not None:
tl.run(row_range, log_prefix=log_prefix,
constant_sm_noise=True, smn_cache_id='lo',
method='keypoint' if args.max_rot_err > 0 else 'centroid', feat=1, verbose=0)
# export
print('starting to export images')
img_file_prefix = (img_file_prefix + '_') if img_file_prefix else ''
os.makedirs(args.output, exist_ok=True) # make sure output folder exists
# skip export of already existing images, process only images in given iteration range
if ':' in args.count:
start, end = map(int, args.count.split(':'))
else:
start, end = 0, int(args.count)
imgfiles = read_logfiles(sm, log_prefix, args.max_rot_err, traj_len=traj_len)
if args.relative:
imgfiles = [(f, os.path.join(args.output, os.path.basename(f))) for i, fs in imgfiles if start <= i < end for f in fs]
else:
imgfiles = [(f, os.path.join(args.output, os.path.basename(f))) for i, f in imgfiles if start <= i < end]
ok_files = cnn_export.get_files_with_metadata(args.output, traj_len=traj_len)
imgfiles = [sf for sf, df in imgfiles if not os.path.exists(df) or os.path.basename(df) not in ok_files]
if args.relative:
# Copies images as they are, saves system states in format "traj id, frame id, sc_ast_r (3), sc_ast_q (4), sc_light_u (3)"
# Data about keypoints and their matches generated at nn data loader, could be something like
# "traj id, frame id1, frame id2, ix1, iy1, x1, y1, z1, ix2p, iy2p, ix2, iy2, x2, y2, z2, d_2d, d_3d"
cnn_export.export_relative(sm, args.output, src_imgs=imgfiles, img_prefix=img_file_prefix,
title="Synthetic Image Set, mission=%s, id=%s" % (args.mission, args.id), debug=0)
else:
cnn_export.export(sm, args.output, src_imgs=imgfiles, trg_shape=(224, 224), img_prefix=img_file_prefix,
title="Synthetic Image Set, mission=%s, id=%s" % (args.mission, args.id),
crop=args.crop, debug=0)
def sample_mosaic():
import cv2
args = parse_arguments()
files = cnn_export.get_files_with_metadata(args.output)
s = 56
r, c = 6, 24
comp = np.ones(((s+1)*r-1, (s+1)*c-1), dtype=np.uint8)*255
for i, file in enumerate([f for f in files if f][0:r*c]):
img = cv2.imread(os.path.join(args.output, file), cv2.IMREAD_GRAYSCALE)
k, j = i // c, i % c
comp[k*(s+1):(k+1)*(s+1)-1, j*(s+1):(j+1)*(s+1)-1] = cv2.resize(img, (s, s))
cv2.imwrite('mosaic.png', comp)
cv2.imshow('mosaic.png', comp)
cv2.waitKey()
def get_range(org_range, exists):
if ':' in org_range:
start, end = map(int, org_range.split(':'))
else:
start, end = 0, int(org_range)
if len(exists) > 0:
start = max(start, np.max(exists)+1)
| |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
from IPython import embed
from multiprocessing import Pool, cpu_count
#import mega_nn
import numpy as np
import scipy as sc
import scipy.stats as stats
import pandas as pd
from itertools import product, chain
import pickle
import os
import sys
import time
networks_path = os.path.abspath(os.path.join((os.path.abspath(__file__)), '../../networks'))
NNDB_path = os.path.abspath(os.path.join((os.path.abspath(__file__)), '../../NNDB'))
training_path = os.path.abspath(os.path.join((os.path.abspath(__file__)), '../../training'))
qlk4D_path = os.path.abspath(os.path.join((os.path.abspath(__file__)), '../../../QLK4DNN'))
sys.path.append(networks_path)
sys.path.append(NNDB_path)
sys.path.append(training_path)
sys.path.append(qlk4D_path)
from model import Network, NetworkJSON, PostprocessSlice, ComboNetwork, MultiNetwork, no_elements_in_list, any_element_in_list, db
from run_model import QuaLiKizNDNN, QuaLiKizDuoNN
from train_NDNN import shuffle_panda
from functools import partial
if __name__ == '__main__':
import matplotlib as mpl
mpl.use('pdf')
import matplotlib.pyplot as plt
from matplotlib import gridspec, cycler
pretty = False
from load_data import nameconvert
from load_data import load_data, load_nn, prettify_df
from collections import OrderedDict
from peewee import AsIs, fn, SQL
import re
import gc
def mode_to_settings(mode):
settings = {}
if mode == 'debug':
settings['plot'] = True
settings['plot_pop'] = True
settings['plot_nns'] = True
settings['plot_slice'] = True
settings['plot_poplines'] = True
settings['plot_threshlines'] = True
settings['plot_zerocolors'] = False
settings['plot_thresh1line'] = False
settings['calc_thresh1'] = False
settings['hide_qualikiz'] = False
settings['debug'] = True
settings['parallel'] = False
settings['plot_threshslope'] = False
elif mode == 'quick':
settings['plot'] = False
settings['plot_pop'] = False
settings['plot_nns'] = False
settings['plot_slice'] = False
settings['plot_poplines'] = False
settings['plot_threshlines'] = False
settings['plot_zerocolors'] = False
settings['plot_thresh1line'] = False
settings['calc_thresh1'] = False
settings['hide_qualikiz'] = False
settings['debug'] = False
settings['parallel'] = True
settings['plot_threshslope'] = False
elif mode == 'pretty':
settings['plot'] = True
settings['plot_pop'] = False
settings['plot_nns'] = True
settings['plot_slice'] = False
settings['plot_poplines'] = False
settings['plot_threshlines'] = False
settings['plot_zerocolors'] = False
settings['plot_thresh1line'] = False
settings['calc_thresh1'] = False
settings['hide_qualikiz'] = False
settings['debug'] = True
settings['parallel'] = False
settings['plot_threshslope'] = True
return settings
def get_similar_not_in_table(table, max=20, only_dim=None, only_sep=False, no_particle=False, no_divsum=False,
no_mixed=True):
for cls, field_name in [(Network, 'network'),
(ComboNetwork, 'combo_network'),
(MultiNetwork, 'multi_network')
]:
non_sliced = (cls
.select()
.where(~fn.EXISTS(table.select().where(getattr(table, field_name) == cls.id)))
)
if only_dim is not None:
non_sliced &= cls.select().where(SQL("array_length(feature_names, 1)=" + str(only_dim)))
if no_mixed:
non_sliced &= cls.select().where(~(SQL("(array_to_string(target_names, ',') like %s)", ['%pf%']) &
(SQL("(array_to_string(target_names, ',') like %s)", ['%ef%'])))
)
tags = []
if no_divsum is True:
tags.extend(["div", "plus"])
if no_particle is True:
tags.append('pf')
if len(tags) != 0:
non_sliced &= no_elements_in_list(cls, 'target_names', tags)
if only_sep is True:
non_sliced &= any_element_in_list(cls, 'target_names', ['TEM', 'ITG', 'ETG'])
if non_sliced.count() > 0:
network = non_sliced.get()
break
non_sliced &= (cls.select()
.where(cls.target_names == AsIs(network.target_names))
.where(cls.feature_names == AsIs(network.feature_names))
)
non_sliced = non_sliced.limit(max)
return non_sliced
def nns_from_NNDB(max=20, only_dim=None):
db.connect()
non_sliced = get_similar_not_in_table(PostprocessSlice, max=max, only_sep=True, no_particle=False, no_divsum=True, only_dim=only_dim)
network = non_sliced.get()
style = 'mono'
if len(network.target_names) == 2:
match_0 = re.compile('^(.f)(.)(ITG|ETG|TEM)_GB').findall(network.target_names[0])
match_1 = re.compile('^(.f)(.)(ITG|ETG|TEM)_GB').findall(network.target_names[1])
if len(match_0) == 1 and len(match_1) == 1:
group_0 = match_0[0]
group_1 = match_1[0]
if ((group_0[1] == 'e' and group_1[1] == 'i') or
(group_0[1] == 'i' and group_1[1] == 'e')):
style='duo'
else:
                raise Exception('non-matching target_names. Not sure what to do.. {!s}'
.format(network.target_names))
matches = []
for target_name in network.target_names:
matches.extend(re.compile('^.f.(ITG|ETG|TEM)_GB').findall(target_name))
if matches[1:] == matches[:-1]:
if matches[0] == 'ITG':
slicedim = 'Ati'
elif matches[0] == 'TEM' or matches[0] == 'ETG':
slicedim = 'Ate'
else:
raise Exception('Unequal stability regime. Cannot determine slicedim')
nn_list = {network.id: str(network.id) for network in non_sliced}
print('Found {:d} {!s} with target {!s}'.format(non_sliced.count(), network.__class__, network.target_names))
nns = OrderedDict()
for dbnn in non_sliced:
nn = dbnn.to_QuaLiKizNN()
nn.label = '_'.join([str(el) for el in [dbnn.__class__.__name__ , dbnn.id]])
nns[nn.label] = nn
db.close()
return slicedim, style, nns
def populate_nn_list(nn_set):
if nn_set == 'c_L2':
nn_list = OrderedDict([(61, '$c_{L2} = 0.0$'),
# (48, '$c_{L2} = 0.05$'),
(37, '$c_{L2} = 0.1$'),
# (50, '$c_{L2} = 0.2$'),
# (51, '$c_{L2} = 0.35$'),
(49, '$c_{L2} = 0.5$'),
# (52, '$c_{L2} = 1.0$'),
(53, '$c_{L2} = 2.0$')])
slicedim = 'Ate'
style = 'mono'
elif nn_set == 'topo':
nn_list = OrderedDict([(65, 'neurons = $(10, 10)$'),
(64, 'neurons = $(30, 30)$'),
(73, 'neurons = $(30, 30, 30)$'),
(83, 'neurons = $(45, 45)$'),
(34, 'neurons = $(60, 60)$'),
(38, 'neurons = $(80, 80)$'),
(66, 'neurons = $(120, 120)$')])
slicedim = 'Ate'
style = 'mono'
elif nn_set == 'filter':
#nn_list = OrderedDict([(37, 'filter = 3'),
# (58, 'filter = 4'),
# (60, 'filter = 5')])
nn_list = OrderedDict([(37, '$max(\chi_{ETG,e}) = 60$'),
(60, '$max(\chi_{ETG,e}) = 100$')])
slicedim = 'Ate'
style = 'mono'
elif nn_set == 'goodness':
nn_list = OrderedDict([(62, 'goodness = mabse'),
(37, 'goodness = mse')])
slicedim = 'Ate'
style = 'mono'
elif nn_set == 'early_stop':
nn_list = OrderedDict([(37, 'stop measure = loss'),
#(11, '$early_stop = mse'),
(18, 'stop measure = MSE')])
slicedim = 'Ate'
style = 'mono'
elif nn_set == 'similar':
nn_list = OrderedDict([
(37, '37'),
(67, '67'),
(68, '68'),
(69, '69'),
(70, '70'),
(71, '71'),
(72, '72'),
(73, '73'),
(74, '74'),
])
slicedim = 'Ate'
style = 'mono'
elif nn_set == 'best':
nn_list = OrderedDict([(46, '')]) #efeETG
nn_list = OrderedDict([(88, '')]) #efiITG
slicedim = 'Ate'
style = 'mono'
elif nn_set == 'duo':
nn_list = OrderedDict([
(205, 'es_20'),
(204, 'es_5'),
(203, 'es_wrong')
])
slicedim = 'Ati'
style = 'duo'
return slicedim, style, nn_list
def nns_from_nn_list(nn_list, slicedim, labels=True):
nns = OrderedDict()
for nn_index, nn_label in nn_list.items():
nn = nns[nn_index] = load_nn(nn_index)
if labels:
nn.label = nn_label
else:
nn.label = ''
return nns
def nns_from_manual():
nns = OrderedDict()
#div_nn = load_nn(405)
#sum_nn = load_nn(406)
#nn = QuaLiKizDuoNN(['efiITG_GB', 'efeITG_GB'], div_nn, sum_nn, [lambda x, y: x * y/(x + 1), lambda x, y: y/(x + 1)])
#nn.label = 'div_style'
#nns[nn.label] = nn
#nn_efi = load_nn(88)
#nn_efe = load_nn(89)
#nn = QuaLiKizDuoNN(['efiITG_GB', 'efeITG_GB'], nn_efi, nn_efe, [lambda x, y: x, lambda x, y: y])
#nn.label = 'sep_style'
#nns[nn.label] = nn
#nn = load_nn(205)
#nn.label = 'combo_style'
#nns[nn.label] = nn
#subnn = (ComboNetwork.select()
# .where(ComboNetwork.id == 78)
# ).get()
#nn = subnn.to_QuaLiKizComboNN()
#nn.label = 'bla'
#nns[nn.label] = nn
#dbnn = Network.by_id(135).get()
dbnns = []
#dbnns.append(MultiNetwork.by_id(119).get())
dbnns.append(ComboNetwork.by_id(3333).get())
#dbnns.append(ComboNetwork.by_id(1050).get())
#dbnns.append(MultiNetwork.by_id(102).get())
for dbnn in dbnns:
nn = dbnn.to_QuaLiKizNN()
nn.label = '_'.join([str(el) for el in [dbnn.__class__.__name__ , dbnn.id]])
nns[nn.label] = nn
#nns[nn.label] = QuaLiKizNDNN.from_json('nn.json')
slicedim = 'Ati'
style='duo'
style='mono'
#from qlkANNk import QuaLiKiz4DNN
#nns['4D'] = QuaLiKiz4DNN()
#nns['4D'].label = '4D'
#nns['4D']._target_names = ['efeITG_GB', 'efiITG_GB']
db.close()
return slicedim, style, nns
def prep_df(store, nns, unstack, filter_less=np.inf, filter_geq=-np.inf, shuffle=True, calc_maxgam=False, clip=False, slice=None, frac=1):
nn0 = list(nns.values())[0]
target_names = nn0._target_names
feature_names = nn0._feature_names
input = store['megarun1/input']
try:
input['logNustar'] = np.log10(input['Nustar'])
del input['Nustar']
except KeyError:
print('No Nustar in dataset')
if ('Zeffx' == feature_names).any() and not ('Zeffx' in input.columns):
print('WARNING! creating Zeffx. You should use a 9D dataset')
input['Zeffx'] = np.full_like(input['Ati'], 1.)
raise Exception
if ('logNustar' == feature_names).any() and not ('logNustar' in input.columns):
print('WARNING! creating logNustar. You should use a 9D dataset')
input['logNustar'] = np.full_like(input['Ati'], np.log10(0.009995))
if len(feature_names) == 4:
print('WARNING! Slicing 7D to 4D dataset. You should use a 4D dataset')
idx = input.index[(
np.isclose(input['Ate'], 5.75, atol=1e-5, rtol=1e-3) &
np.isclose(input['An'], 2, atol=1e-5, rtol=1e-3) &
np.isclose(input['x'], .45, atol=1e-5, rtol=1e-3)
)]
else:
idx = input.index
input = input[feature_names]
data = store.select('megarun1/flattened', columns=target_names)
input = input.loc[idx]
data = data.loc[input.index]
df = input.join(data[target_names])
if calc_maxgam is True:
df_gam = store.select('/megarun1/flattened', columns=['gam_leq_GB', 'gam_great_GB'])
df_gam = (df_gam.max(axis=1)
.to_frame('maxgam')
)
df = df.join(df_gam)
#itor = zip(['An', 'Ate', 'Ti_Te', 'qx', 'smag', 'x'], ['0.00', '10.00', '1.00', '5.00', '0.40', '0.45'])
#itor = zip(['Zeffx', 'Ate', 'An', 'qx', 'smag', 'x', 'Ti_Te', 'logNustar'], [1.0, 5.75, 2.5, 2.0, 0.10000000149011612, 0.33000001311302185, 1.0, -2.000217201545864])
if slice is not None:
for name, val in slice:
df = df[np.isclose(df[name], float(val), atol=1e-5, rtol=1e-3)]
if clip is True:
        df[target_names] = df[target_names].clip(filter_geq, filter_less, axis=1)  # clip(lower, upper)
else:
# filter
df = df[(df[target_names] < filter_less).all(axis=1)]
df = df[(df[target_names] >= filter_geq).all(axis=1)]
#print(np.sum(df['target'] < 0)/len(df), ' frac < 0')
#print(np.sum(df['target'] == 0)/len(df), ' frac == 0')
#print(np.sum(df['target'] > 0)/len(df), ' frac > 0')
#uni = {col: input[col].unique() for col in input}
#uni_len = {key: len(value) for key, value in uni.items()}
#input['index'] = input.index
df.set_index([col for col in input], inplace=True)
df = df.astype('float64')
df = df.sort_index(level=unstack)
df = df.unstack(unstack)
if shuffle:
df = shuffle_panda(df)
#df.sort_values('smag', inplace=True)
#input, data = prettify_df(input, data)
#input = input.astype('float64')
# Filter
if frac < 1:
idx = int(frac * len(df))
df = df.iloc[:idx, :]
#df = df.iloc[1040:2040,:]
print('dataset loaded!')
return df, target_names
def is_unsafe(df, nns, slicedim):
unsafe = True
for nn in nns.values():
slicedim_idx = nn._feature_names[nn._feature_names == slicedim].index[0]
varlist = list(df.index.names)
varlist.insert(slicedim_idx, slicedim)
try:
if ~np.all(varlist == nn._feature_names):
unsafe = False
except ValueError:
            raise Exception('Dataset has features {!s} but NN has features {!s}'.format(varlist, nn._feature_names))
    return unsafe
# miner/strategies/kubernetes/dynamic/k8sDynamicMiner.py
from miner.generic.dynamic.dynamicMiner import DynamicMiner
from topology.node import Node, Direction
from topology.communication import Communication
from topology.communicationFactory import CommunicationFactory
from topology.concreteCommunicationFactory import ConcreteCommunicationFactory
from topology.microToscaTypes import NodeType
from topology.errors import EdgeExistsError, EdgeNotExistsError
from .errors import WrongFolderError, DeploymentError, MonitoringError, TestError
from os.path import isdir, isfile, join, exists
from kubernetes import client, config, utils
from kubernetes.client import configuration
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_container import V1Container
from kubernetes.stream import stream
from os import listdir
from ruamel import yaml
from ruamel.yaml import YAML
from pathlib import Path
from typing import Type
from pprint import pprint
import logging
import copy
import shutil
import importlib
import ijson.backends.yajl2_c as ijson
import hashlib
import json
import time
import os
import re
class K8sDynamicMiner(DynamicMiner):
log = logging.getLogger(__name__)
config = {}
packetToPodHosts = {}
controllers = []
def __init__(self):
pass
@classmethod
def updateTopology(cls, source: str, info: dict, nodes: dict):
cls.config = info
config.load_kube_config()
configuration.assert_hostname = False
k8sClient = client.ApiClient()
loader = YAML(typ='safe')
files = cls._listFiles(source)
newNodes = []
try:
#Search ingress controller already deployed
cls.controllers = cls._searchIngressControllers()
for controller in cls.controllers:
newNode = cls._addControllerToTopology(controller, nodes)
if newNode:
newNodes.append(newNode)
else:
cls.controllers.remove(controller)
#Deployment of the application
print(' Deploying the application...')
i = 0
for k8sFile in files:
yamls = re.split('^---\n', cls._readFile(k8sFile), flags=re.MULTILINE)
for contentStr in yamls:
contentDict = loader.load(contentStr)
if not contentDict:
continue
cls._prepareYaml(contentStr, contentDict)
with open(join(cls.config['modDeploymentFiles'], str(i) + '.yml'), 'w') as f:
try:
f.write(yaml.dump(contentDict))
utils.create_from_dict(k8sClient, contentDict)
except utils.FailToCreateError:
cls._cleanEnvironment()
raise DeploymentError('Error deploying ' + k8sFile)
i = i + 1
#Wait until the deployment is completed
v1 = client.CoreV1Api()
deploymentCompleted = False
while not deploymentCompleted:
pods = v1.list_pod_for_all_namespaces(watch = False)
deploymentCompleted = True
for pod in pods.items:
if pod.spec.hostname in nodes:
if pod.status.phase != 'Running' and pod.status.phase != 'Succeeded':
deploymentCompleted = False
break
for containerStatus in pod.status.container_statuses:
if not containerStatus.ready:
deploymentCompleted = False
break
if not deploymentCompleted:
break
if not deploymentCompleted:
time.sleep(3)
print(' Deployment completed')
#Start monitoring
print(' Monitoring in progress...')
pods = v1.list_pod_for_all_namespaces(watch = False)
containerName = ''.join(c for c in cls.config['monitoringContainer'] if c.isalnum())
for pod in pods.items:
if pod.spec.hostname in nodes or (pod.metadata.annotations and 'archMinerName' in pod.metadata.annotations and pod.metadata.annotations['archMinerName'] in nodes) and pod.status.phase == 'Running':
fileName = pod.spec.hostname if pod.spec.hostname in nodes else pod.metadata.annotations['archMinerName']
filePath = join('/home/dump', fileName + '.json')
command = [
'./bin/sh',
'-c',
'tshark -i eth0 -a duration:' + str(info['time'] + 3) + ' -N nNdt -T json > ' + filePath + ' 2>/dev/null &']
try:
resp = stream(v1.connect_get_namespaced_pod_exec,
pod.metadata.name,
pod.metadata.namespace,
command = command,
container = containerName,
stderr=False, stdin=False,
stdout=True, tty=False)
except ApiException as e:
cls._cleanEnvironment()
raise MonitoringError(pod.metadata.name)
#Start tests
time.sleep(3)
if info['test']:
try:
testModule = importlib.import_module(info['test'])
testModule.runTest()
except:
cls._cleanEnvironment()
raise TestError('')
#Wait until monitoring is finished
time.sleep(info['time']+5)
print(' Monitoring completed')
#Save on local host the packets
pods = v1.list_pod_for_all_namespaces(watch = False)
for pod in pods.items:
if pod.spec.hostname in nodes or (pod.metadata.annotations and 'archMinerName' in pod.metadata.annotations and pod.metadata.annotations['archMinerName'] in nodes) and pod.status.phase == 'Running':
fileName = pod.spec.hostname if pod.spec.hostname in nodes else pod.metadata.annotations['archMinerName']
remoteFilePath = join('home/dump', fileName + '.json')
localFilePath = join(cls.config['monitoringFiles'], fileName + '.json')
os.system('kubectl cp -c ' + containerName + ' ' + pod.metadata.namespace + '/' + pod.metadata.name + ':' + remoteFilePath + ' ' + localFilePath)
#Create edges
print(' Analyzing packets...')
try:
files = cls._listFiles(cls.config['monitoringFiles'])
except WrongFolderError:
cls._cleanEnvironment()
raise
for monitoringFilePath in files:
if os.path.getsize(monitoringFilePath) == 0:
continue
srcNodeName = monitoringFilePath.split('/')[-1].replace('.json', '')
with open(monitoringFilePath, 'rb') as monitoringFile:
for packet in ijson.items(monitoringFile, 'item'):
if cls._isOutgoingPacket(packet, nodes, srcNodeName):
cls._createEdge(packet, nodes, srcNodeName)
#Create communications
commFactory = ConcreteCommunicationFactory()
for monitoringFilePath in files:
if os.path.getsize(monitoringFilePath) == 0:
continue
srcNodeName = monitoringFilePath.split('/')[-1].replace('.json', '')
with open(monitoringFilePath, 'rb') as monitoringFile:
for packet in ijson.items(monitoringFile, 'item'):
if cls._isOutgoingPacket(packet, nodes, srcNodeName):
cls._createCommunication(packet, nodes, commFactory, srcNodeName)
for newNode in newNodes:
edges = nodes[newNode['controller']].getEdges(Direction.OUTGOING)
if not edges:
nodes.pop(newNode['controller'], None)
for service in newNode['services']:
nodes.pop(service, None)
finally:
cls._cleanEnvironment()
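    # Hedged usage sketch (not from the original project): the `info` keys below are
    # the ones this class reads above; every concrete value (paths, image name,
    # module name) is illustrative only.
    #
    #   nodes = {...}  # topology.node.Node objects keyed by pod hostname / archMinerName
    #   K8sDynamicMiner.updateTopology(
    #       source='deployment/',                           # folder containing the k8s YAML files
    #       info={'modDeploymentFiles': 'mod-deploy/',      # where patched YAMLs are written
    #             'monitoringFiles': 'dumps/',              # where the tshark captures are copied back
    #             'monitoringContainer': 'example/tshark',  # sidecar image injected into each pod
    #             'controllerImages': 'controllers.yml',    # YAML listing known ingress-controller images
    #             'time': 60,                               # capture duration in seconds
    #             'test': 'mytests'},                       # importable module exposing runTest()
    #       nodes=nodes)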
@classmethod
def _readFile(cls, path: str) -> str:
with open(path) as f:
text = f.read()
return text
@classmethod
def _listFiles(cls, folderPath: str) -> []:
if not exists(Path(folderPath)) or not isdir(Path(folderPath)):
raise WrongFolderError('')
return [join(folderPath, f) for f in listdir(folderPath) if isfile(join(folderPath, f))]
@classmethod
def _prepareYaml(cls, contentStr: str, contentDict: dict):
workloads = ['Deployment', 'ReplicaSet', 'DaemonSet', 'ReplicationController', 'StatefulSet', 'Job']
podSpec = {}
if contentDict['kind'] in workloads and 'template' in contentDict['spec']:
podSpec = contentDict['spec']['template']['spec']
elif contentDict['kind'] == 'CronJob' and 'template' in (jobSpec := contentDict['spec']['jobTemplate']['spec']):
podSpec = jobSpec['template']['spec']
elif contentDict['kind'] == 'Pod':
podSpec = contentDict['spec']
if podSpec:
if not 'hostname' in podSpec:
podSpec['hostname'] = hashlib.sha1(contentStr.encode('utf-8')).hexdigest()
cls.log.info(contentDict['metadata']['name'] + ':' + podSpec['hostname'])
podSpec['containers'].append({'name': ''.join(c for c in cls.config['monitoringContainer'] if c.isalnum()), 'image': cls.config['monitoringContainer']})
@classmethod
def _isOutgoingPacket(cls, packet: dict, nodes: dict, srcNodeName: str) -> bool:
packetLayers = packet['_source']['layers']
if not 'ip' in packetLayers:
return False
packetSrc = packetLayers['ip']['ip.src_host']
if 'svc' in packetSrc.split('.'):
return False
if packetSrc != srcNodeName:
packetSrc = cls._getPodAnnotation(packetLayers['ip']['ip.src'])
if packetSrc != srcNodeName:
return False
return True
@classmethod
def _getPodAnnotation(cls, ip: str) -> str:
v1 = client.CoreV1Api()
pods = v1.list_pod_for_all_namespaces(watch = False)
for pod in pods.items:
if pod.status.pod_ip == ip:
if pod.metadata.annotations and 'archMinerName' in pod.metadata.annotations:
return pod.metadata.annotations['archMinerName']
return ''
@classmethod
def _getPodHostname(cls, ip: str, host: str) -> str:
if host in cls.packetToPodHosts:
return cls.packetToPodHosts[host]
v1 = client.CoreV1Api()
pods = v1.list_pod_for_all_namespaces(watch = False)
for pod in pods.items:
if pod.status.pod_ip == ip:
cls.packetToPodHosts[host] = pod.spec.hostname
return pod.spec.hostname
return ''
@classmethod
def _getDstNode(cls, packet: dict, nodes: dict) -> (str, Node):
packetLayers = packet['_source']['layers']
if not 'ip' in packetLayers:
return ('', None)
dstNodeName = packetLayers['ip']['ip.dst_host']
if 'svc' in dstNodeName.split('.'):
dstNodeName = dstNodeName.split('.svc.')[0] + '.svc'
if not dstNodeName in nodes:
dstNodeName = cls._getPodHostname(packetLayers['ip']['ip.dst'], dstNodeName)
if not dstNodeName in nodes:
dstNodeName = cls._getPodAnnotation(packetLayers['ip']['ip.dst'])
if not dstNodeName in nodes:
return ('', None)
return (dstNodeName, nodes[dstNodeName])
@classmethod
def _createEdge(cls, packet: dict, nodes: dict, srcNodeName: str):
packetLayers = packet['_source']['layers']
srcNode = nodes[srcNodeName]
dst = cls._getDstNode(packet, nodes)
dstNodeName = dst[0]
dstNode = dst[1]
if not dstNodeName:
return
dstIsService = '.svc.' in dstNodeName
if ('tcp' in packetLayers and packetLayers['tcp']['tcp.flags_tree']['tcp.flags.syn'] == '1' and packetLayers['tcp']['tcp.flags_tree']['tcp.flags.ack'] == '0') or (dstIsService):
try:
srcNode.addEdge(dstNodeName, Direction.OUTGOING)
dstNode.addEdge(srcNodeName, Direction.INCOMING)
except EdgeExistsError:
srcNode.setIsMicroToscaEdge(dstNodeName, True)
if dstIsService:
edges = dstNode.getEdges(Direction.OUTGOING)
for adjacentName in edges.keys():
try:
nodes[adjacentName].addEdge(srcNodeName, Direction.OUTGOING, isMicroToscaEdge = False)
srcNode.addEdge(adjacentName, Direction.INCOMING)
except EdgeExistsError:
pass
@classmethod
def _createCommunication(cls, packet: dict, nodes: dict, commFactory: Type[CommunicationFactory], srcNodeName: str):
srcNode = nodes[srcNodeName]
dst = cls._getDstNode(packet, nodes)
dstNodeName = dst[0]
dstNode = dst[1]
if not dstNodeName:
return
packet['_source']['layers']['ip']['ip.dst_host'] = dstNodeName
dstIsService = '.svc.' in dstNodeName
communication = commFactory.build(copy.deepcopy(packet))
if communication:
try:
srcNode.addCommunication(dstNodeName, communication)
dstNode.addCommunication(srcNodeName, communication)
except EdgeNotExistsError:
                cls.log.warn('Edge (' + srcNodeName + ', ' + dstNodeName + ') does not exist')
pass
if dstIsService:
packet['_source']['layers']['ip']['ip.src_host'] = dstNodeName
edges = dstNode.getEdges(Direction.OUTGOING)
for adjacentName in edges.keys():
packet['_source']['layers']['ip']['ip.dst_host'] = adjacentName
communication = commFactory.build(copy.deepcopy(packet))
if communication:
dstNode.addCommunication(adjacentName, communication)
nodes[adjacentName].addCommunication(dstNodeName, communication)
@classmethod
def _cleanEnvironment(cls):
print('Cleaning environment')
files = cls._listFiles(cls.config['modDeploymentFiles'])
for yamlFile in files:
os.system('kubectl delete -f ' + yamlFile + ' 1>/dev/null 2>/dev/null')
os.remove(yamlFile)
files = cls._listFiles(cls.config['monitoringFiles'])
for monitoringFile in files:
os.remove(monitoringFile)
for controller in cls.controllers:
os.system('kubectl delete pod -n ' + controller.metadata.namespace + ' ' + controller.metadata.name + ' 1>/dev/null 2>/dev/null &')
@classmethod
def _searchIngressControllers(cls):
loader = YAML(typ='safe')
controllerImages = loader.load(Path(cls.config['controllerImages']))['INGRESS-CONTROLLERS']
v1 = client.CoreV1Api()
pods = v1.list_pod_for_all_namespaces(watch = False)
controllers = []
for pod in pods.items:
if pod.status.phase != 'Running':
continue
for container in pod.spec.containers:
containerImage = re.sub(':.*', '', container.image)
found = False
for controllerImage in controllerImages:
if controllerImage == containerImage:
found = True
controllers.append(pod)
break
if found:
break
return controllers
@classmethod
def _addControllerToTopology(cls, controller, nodes: dict) -> []:
isController = False
serviceNodes = []
if controller.spec.host_network:
isController = True
if not isController:
labels = controller.metadata.labels
v1 = client.CoreV1Api()
services = v1.list_service_for_all_namespaces()
for service in services.items:
if service.spec.type == 'NodePort' or service.spec.type == 'LoadBalancer':
for key1, value1 in service.spec.selector.items():
found = False
for key2, value2 in labels.items():
if key1 == key2 and value1 == value2:
found = True
break
if not found:
break
if found:
serviceNodes.append(service)
isController = True
newNodes = {}
if isController:
hostname = cls._prepareController(controller)
namespace = controller.metadata.namespace if controller.metadata.namespace else 'default'
controllerNode = Node(controller.metadata.name + '.' + namespace, {})
nodes[hostname] = controllerNode
controllerNode.setType(NodeType.MICROTOSCA_NODES_MESSAGE_ROUTER)
controllerNode.setIsEdge(True)
newNodes['services'] = []
            for serviceNode in
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# -*- mode: Python -*-
'''
LJ Laser Server v0.8.2
Inspiration for a future WebUI icon menu :
https://codepen.io/AlbertFeynman/pen/mjXeMV
Laser server + webUI servers (ws + OSC)
- get point list to draw : /pl/lasernumber
- for report /lstt/lasernumber /lack/lasernumber /cap/lasernumber
- a nice ws debug tool : websocat
- a "plugin" is a generator that sends points to LJ. Plugins that expose an OSC port can be health-checked and restarted, provided they run on the same computer.
All used ports:
8002 OSC incoming
9001 Websocket communication with WebGUI
Plugins OSC Ports (see LJ.conf)
'''
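# Hedged debug sketch (not part of the server): the WebUI websocket on port 9001
# can be watched with websocat, and OSC messages can be pushed to port 8002 with
# any OSC client. The snippet assumes libs3.OSC3 keeps the classic pyOSC client
# API (OSCClient/OSCMessage); adjust if the bundled module differs.
#
#   websocat ws://127.0.0.1:9001
#
#   from libs3 import OSC3
#   client = OSC3.OSCClient()
#   client.connect(("127.0.0.1", 8002))
#   msg = OSC3.OSCMessage("/ping")   # any handled path; see handler() below
#   msg.append(0)
#   client.send(msg)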
#import pdb
from libs3 import log
print("")
print("")
log.infog("LJ Laser Server")
log.infog("v0.8.2")
print("")
print("-h will display help")
print("")
import redis
import os
ljpath = r'%s' % os.getcwd().replace('\\','/')
import sys
#sys.path.append('libs3/')
from libs3 import gstt, settings
gstt.ljpath= ljpath
log.info("Reading " + gstt.ConfigName + " setup file...")
settings.Read()
# Arguments may alter .conf file so import settings first then cli
from libs3 import cli
settings.Write()
from multiprocessing import Process, set_start_method
import random, ast
from libs3 import plugins
from libs3 import tracer3 as tracer
from libs3 import homographyp, commands, font1
#from webui import build
#import subprocess
import os
#import midi
from libs3 import OSC3
from websocket_server import WebsocketServer
#import socket
import types, _thread, time
r = redis.StrictRedis(host=gstt.LjayServerIP , port=6379, db=0)
# r = redis.StrictRedis(host=gstt.LjayServerIP , port=6379, db=0, password='-+<PASSWORD>+-')
args =[0,0]
def dac_process(number, pl):
import sys
from libs3 import gstt
print("Starting dac process", number)
while True:
try:
d = tracer.DAC(number,pl)
d.play_stream()
except Exception as e:
import sys
import traceback
if gstt.debug > 0:
log.err('\n---------------------')
log.err('Exception: %s' % e)
log.err('- - - - - - - - - - -')
traceback.print_tb(sys.exc_info()[2])
print("\n")
pass
except KeyboardInterrupt:
sys.exit(0)
#
# Servers init variables
#
print("Start Scene number :",gstt.SceneNumber)
print("WebUI connect to :", gstt.wwwIP)
serverIP = gstt.LjayServerIP
print("Redis IP :", serverIP)
oscserverIP = gstt.oscIPin
print("OSCserver IP :", oscserverIP)
nozoscIP = gstt.nozoscip
print("Nozosc IP :", nozoscIP)
debug = gstt.debug
print("Debug :", debug)
# Websocket listening port
wsPORT = 9001
# oscserver
# OSC Server : accept OSC message on port 8002
#oscIPin = "192.168.1.10"
oscserverIPin = serverIP
print("oscserverIPin", oscserverIPin)
oscserverPORTin = 8002
# OSC Client : to send OSC message to an IP port 8001
oscserverIPout = oscserverIP
oscserverPORTout = 8001
# Nozoid OSC Client : to send OSC message to Nozoid inport 8003
NozoscIPout = nozoscIP
NozoscPORTout = plugins.Port("nozoid")
# Planetarium OSC Client : to send OSC message to planetarium inport 8005
planetIPout = nozoscIP
planetPORTout = plugins.Port("planet")
'''
# Bank0 OSC Client : to send OSC message to bank0 inport 8010
bank0IPout = nozoscIP
bank0PORTout = plugins.Port("bank0")
'''
#
# DACs available checks ?
#
import socket
#retry = 1
#delay = 1
#
# OSC
#
oscserver = OSC3.OSCServer( (oscserverIPin, oscserverPORTin) )
oscserver.timeout = 0
OSCRunning = True
def handle_timeout(self):
self.timed_out = True
oscserver.handle_timeout = types.MethodType(handle_timeout, oscserver)
# OSC default path handler : send incoming OSC message to UI via websocket 9001
def handler(path, tags, args, source):
oscpath = path.split("/")
if gstt.debug > 0:
print("")
print("OSC handler in main said : path", path," oscpath ", oscpath," args", args)
if oscpath[1] != "pong":
sendWSall(path + " " + str(args[0]))
commands.handler(oscpath,args)
# RAW OSC Frame available ?
def osc_frame():
#print 'oscframe'
# clear timed_out flag
oscserver.timed_out = False
# handle all pending requests then return
while not oscserver.timed_out:
oscserver.handle_request()
def PingAll():
if gstt.debug > 0:
print("Pinging all plugins...")
for plugin in list(gstt.plugins.keys()):
if gstt.debug > 0:
print("pinging", plugin)
#sendWSall("/"+ plugin + "/start 0")
plugins.Ping(plugin)
# OSC server Thread : handler, dacs reports and simulator points sender to UI.
def osc_thread():
#while True:
try:
while True:
time.sleep(0.1)
osc_frame()
for laserid in range(0,gstt.LaserNumber): # Laser not used -> led is not lit
lstate = {'0': 'IDLE', '1': 'PREPARE', '2': "PLAYING", '64': "NOCONNECTION ?" }
lstt = r.get('/lstt/'+ str(laserid)).decode('ascii')
#print ("laserid", laserid,"lstt",lstt, type(lstt))
if gstt.debug >1:
print("DAC", laserid, "is in (lstt) :", lstt , lstate[str(lstt)])
if lstt == "0": # Dac IDLE state(0) -> led is blue (3)
sendWSall("/lstt/" + str(laserid) + " 3")
if lstt == "1": # Dac PREPARE state (1) -> led is cyan (2)
sendWSall("/lstt/" + str(laserid) + " 2")
if lstt == "2": # Dac PLAYING (2) -> led is green (1)
sendWSall("/lstt/" + str(laserid) + " 1")
ackstate = {'61': 'ACK', '46': 'FULL', '49': "INVALID", '21': 'STOP', '64': "NOCONNECTION ?", '35': "NOCONNECTION ?" , '97': 'ACK', '70': 'FULL', '73': "INVALID", '33': 'STOP', '100': "NOCONNECTION", '48': "NOCONNECTION", 'a': 'ACK', 'F': 'FULL', 'I': "INVALID", '!': 'STOP', 'd': "NOCONNECTION", '0': "NOCONNECTION"}
lack= r.get('/lack/'+str(laserid)).decode('ascii')
if gstt.debug >1:
print("DAC", laserid, "answered (lack):", lack, chr(int(lack)), ackstate[str(lack)])
if chr(int(lack)) == 'a': # Dac sent ACK ("a") -> led is green (1)
sendWSall("/lack/" + str(laserid) +" 1")
if chr(int(lack)) == 'F': # Dac sent FULL ("F") -> led is orange (5)
sendWSall("/lack/" + str(laserid) +" 5")
if chr(int(lack)) == 'I': # Dac sent INVALID ("I") -> led is yellow (4)
sendWSall("/lack/" + str(laserid)+" 4")
#print lack
if lack == "64" or lack =="35": # no connection to dac -> leds are red (6)
sendWSall("/lack/" + str(laserid) + " 6")
sendWSall("/lstt/" + str(laserid) + " 6")
#sendWSall("/lstt/" + str(laserid) + " 0")
sendWSall("/points/" + str(laserid) + " 6")
else:
# last number of points sent to etherdream buffer
sendWSall("/points/" + str(laserid) + " " + str(r.get('/cap/'+str(laserid)).decode('ascii')))
#print "Sending simu frame from",'/pl/'+str(gstt.SceneNumber)+'/'+str(gstt.Laser)
#print r.get('/pl/'+str(gstt.SceneNumber)+'/'+str(gstt.Laser))
sendWSall("/simul" +" "+ str(r.get('/pl/'+str(gstt.SceneNumber)+'/'+str(gstt.Laser)).decode('ascii')))
except Exception as e:
import sys, traceback
print('\n---------------------')
print('Exception: %s' % e)
print('- - - - - - - - - - -')
traceback.print_tb(sys.exc_info()[2])
print("\n")
#
# Websocket part
#
# Called for every WS client connecting (after handshake)
def new_client(client, wserver):
print("New WS client connected and was given id %d" % client['id'])
sendWSall("/status Hello " + str(client['id']))
for laserid in range(0,gstt.LaserNumber):
sendWSall("/ip/" + str(laserid) + " " + str(gstt.lasersIPS[laserid]))
sendWSall("/kpps/" + str(laserid)+ " " + str(gstt.kpps[laserid]))
#sendWSall("/laser"+str(laserid)+"/start 1")
sendWSall("/laser "+str(laserid))
#print("/laser "+str(laserid))
sendWSall("/lack/" + str(laserid) + " 6")
#print("/lack/" + str(laserid) + " 6")
sendWSall("/lstt/" + str(laserid) + " 6")
#print("/lstt/" + str(laserid) + " 6")
sendWSall("/points/" + str(laserid) + " 0")
#print("/points/" + str(laserid) + " 0")
if gstt.swapX[laserid] == 1:
sendWSall("/swap/X/" + str(laserid)+ " 1")
else:
sendWSall("/swap/X/" + str(laserid)+ " 0")
if gstt.swapY[laserid] == 1:
sendWSall("/swap/Y/" + str(laserid)+ " 1")
else:
sendWSall("/swap/Y/" + str(laserid)+ " 0")
# Called for every WS client disconnecting
def client_left(client, wserver):
print("WS Client(%d) disconnected" % client['id'])
# Called for each WS received message.
def message_received(client, wserver, message):
#if len(message) > 200:
# message = message[:200]+'..'
#if gstt.debug >0:
# print ("")
# print("WS Client(%d) said: %s" % (client['id'], message))
oscpath = message.split(" ")
#print "WS Client", client['id'], "said :", message, "splitted in an oscpath :", oscpath
if gstt.debug > 0:
print("WS Client", client['id'], "said :", message, "splitted in an oscpath :", oscpath)
PingAll()
message4plugin = False
# WS received Message is for a plugin ?
for plugin in list(gstt.plugins.keys()):
if oscpath[0].find(plugin) != -1:
message4plugin = True
#print(oscpath)
if plugins.Send(plugin, oscpath):
print("plugins sent incoming WS correctly to", plugin)
else:
print("plugins detected", plugin, "offline.")
# WS received message is an LJ command
if message4plugin == False:
if len(oscpath) == 1:
args[0] = "noargs"
#print "noargs command"
elif len(oscpath) > 1:
args[0] = str(oscpath[1])
#print "arg",oscpath[1]
commands.handler(oscpath[0].split("/"),args)
# if needed a loop back : WS Client -> server -> WS Client
#sendWSall("ws"+message)
def handle_timeout(self):
self.timed_out = True
def sendWSall(message):
#if gstt.debug >0:
#print("WS sending %s" % (message))
wserver.send_message_to_all(message)
'''
print ""
print "Midi Configuration"
midi.InConfig()
midi.OutConfig()
'''
# Creating a startup point list for each laser : 0,1,2,...
print("")
log.info("Creating startup point lists...")
if r.set("/clientkey","/pl/"+str(gstt.SceneNumber)+"/")==True:
print("sent clientkey : /pl/"+str(gstt.SceneNumber)+"/")
#pdb.set_trace()
for sceneid in range(0,gstt.MaxScenes+1):
print("Scene "+ str(sceneid))
#digit_points = font1.DigitsDots(sceneid,65280)
# Order all lasers to show the laser client number at startup -> tell all 4 laser process to USER PLs
for laserid in range(0,gstt.LaserNumber):
digit_points = font1.DigitsDots(laserid,65280)
if r.set('/pl/'+str(sceneid)+'/'+str(laserid), str(digit_points)) == True:
pass
#print( ast.literal_eval(r.get('/pl/'+str(sceneid)+'/'+str(laserid)).decode('ascii')))
#print("/pl/"+str(sceneid)+"/"+str(laserid)+" "+str(ast.literal_eval(r.get('/pl/'+str(sceneid)+'/'+str(laserid)).decode('ascii'))))
r.set('/order/'+str(laserid), 0)
#
# Starts one DAC process per requested Laser
#
def fff(name):
print()
print('HELLO', name ) #indent
print()
if __name__ == '__main__':
# Bug in 3.8.4 MacOS default multiprocessing start method is spawn. Spawn doesn't work properly
set_start_method('fork')
print("")
if gstt.LaserNumber == -1:
log.infog("Autodetected DACs mode")
commands.DAChecks()
print("dacs", gstt.dacs)
else:
        log.infog("Requested DACs mode")
lasernumber = gstt.LaserNumber -1
print("LaserNumber = ", gstt.LaserNumber)
log.info("Starting "+str(gstt.LaserNumber) + " DACs process...")
# Launch one process (a newdacp instance) by etherdream
dac_worker0= Process(target=dac_process, args=(0,0,))
dac_worker0.start()
    print("Tracer
def enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="externalTrafficPolicy")
def external_traffic_policy(self) -> Optional[pulumi.Input[str]]:
"""
Set external traffic policy to: "Local" to preserve source IP on providers supporting it. Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
"""
return pulumi.get(self, "external_traffic_policy")
@external_traffic_policy.setter
def external_traffic_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "external_traffic_policy", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter(name="loadBalancerIPs")
def load_balancer_ips(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "load_balancer_ips")
@load_balancer_ips.setter
def load_balancer_ips(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "load_balancer_ips", value)
@property
@pulumi.getter(name="loadBalancerSourceRanges")
def load_balancer_source_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0.
"""
return pulumi.get(self, "load_balancer_source_ranges")
@load_balancer_source_ranges.setter
def load_balancer_source_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "load_balancer_source_ranges", value)
@pulumi.input_type
class ControllerServiceNodePortsArgs:
def __init__(__self__, *,
http: Optional[pulumi.Input[str]] = None,
https: Optional[pulumi.Input[str]] = None,
tcp: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
udp: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None):
if http is not None:
pulumi.set(__self__, "http", http)
if https is not None:
pulumi.set(__self__, "https", https)
if tcp is not None:
pulumi.set(__self__, "tcp", tcp)
if udp is not None:
pulumi.set(__self__, "udp", udp)
@property
@pulumi.getter
def http(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "http")
@http.setter
def http(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "http", value)
@property
@pulumi.getter
def https(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "https")
@https.setter
def https(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "https", value)
@property
@pulumi.getter
def tcp(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
return pulumi.get(self, "tcp")
@tcp.setter
def tcp(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
pulumi.set(self, "tcp", value)
@property
@pulumi.getter
def udp(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
return pulumi.get(self, "udp")
@udp.setter
def udp(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
pulumi.set(self, "udp", value)
@pulumi.input_type
class ControllerServiceArgs:
def __init__(__self__, *,
annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
cluster_ip: Optional[pulumi.Input[str]] = None,
enable_http: Optional[pulumi.Input[bool]] = None,
enable_https: Optional[pulumi.Input[bool]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
external_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
external_traffic_policy: Optional[pulumi.Input[str]] = None,
health_check_node_port: Optional[pulumi.Input[int]] = None,
internal: Optional[pulumi.Input['ControllerServiceInternalArgs']] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
load_balancer_ips: Optional[pulumi.Input[str]] = None,
load_balancer_source_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
node_ports: Optional[pulumi.Input['ControllerServiceNodePortsArgs']] = None,
ports: Optional[pulumi.Input['ControllerPortArgs']] = None,
session_affinity: Optional[pulumi.Input[str]] = None,
target_ports: Optional[pulumi.Input['ControllerPortArgs']] = None,
type: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input[str]]] external_ips: List of IP addresses at which the controller services are available Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
:param pulumi.Input[str] external_traffic_policy: Set external traffic policy to: "Local" to preserve source IP on providers supporting it. Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
:param pulumi.Input[int] health_check_node_port: specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified, the service controller allocates a port from your cluster’s NodePort range. Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
:param pulumi.Input['ControllerServiceInternalArgs'] internal: Enables an additional internal load balancer (besides the external one). Annotations are mandatory for the load balancer to come up. Varies with the cloud service.
:param pulumi.Input[str] session_affinity: Must be either "None" or "ClientIP" if set. Kubernetes will default to "None". Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
"""
if annotations is not None:
pulumi.set(__self__, "annotations", annotations)
if cluster_ip is not None:
pulumi.set(__self__, "cluster_ip", cluster_ip)
if enable_http is not None:
pulumi.set(__self__, "enable_http", enable_http)
if enable_https is not None:
pulumi.set(__self__, "enable_https", enable_https)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if external_ips is not None:
pulumi.set(__self__, "external_ips", external_ips)
if external_traffic_policy is not None:
pulumi.set(__self__, "external_traffic_policy", external_traffic_policy)
if health_check_node_port is not None:
pulumi.set(__self__, "health_check_node_port", health_check_node_port)
if internal is not None:
pulumi.set(__self__, "internal", internal)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if load_balancer_ips is not None:
pulumi.set(__self__, "load_balancer_ips", load_balancer_ips)
if load_balancer_source_ranges is not None:
pulumi.set(__self__, "load_balancer_source_ranges", load_balancer_source_ranges)
if node_ports is not None:
pulumi.set(__self__, "node_ports", node_ports)
if ports is not None:
pulumi.set(__self__, "ports", ports)
if session_affinity is not None:
pulumi.set(__self__, "session_affinity", session_affinity)
if target_ports is not None:
pulumi.set(__self__, "target_ports", target_ports)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
return pulumi.get(self, "annotations")
@annotations.setter
def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
pulumi.set(self, "annotations", value)
@property
@pulumi.getter(name="clusterIP")
def cluster_ip(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "cluster_ip")
@cluster_ip.setter
def cluster_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_ip", value)
@property
@pulumi.getter(name="enableHttp")
def enable_http(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enable_http")
@enable_http.setter
def enable_http(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_http", value)
@property
@pulumi.getter(name="enableHttps")
def enable_https(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enable_https")
@enable_https.setter
def enable_https(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_https", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="externalIPs")
def external_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of IP addresses at which the controller services are available Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
"""
return pulumi.get(self, "external_ips")
@external_ips.setter
def external_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "external_ips", value)
@property
@pulumi.getter(name="externalTrafficPolicy")
def external_traffic_policy(self) -> Optional[pulumi.Input[str]]:
"""
Set external traffic policy to: "Local" to preserve source IP on providers supporting it. Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
"""
return pulumi.get(self, "external_traffic_policy")
@external_traffic_policy.setter
def external_traffic_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "external_traffic_policy", value)
@property
@pulumi.getter(name="healthCheckNodePort")
def health_check_node_port(self) -> Optional[pulumi.Input[int]]:
"""
specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified, the service controller allocates a port from your cluster’s NodePort range. Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
"""
return pulumi.get(self, "health_check_node_port")
@health_check_node_port.setter
def health_check_node_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "health_check_node_port", value)
@property
@pulumi.getter
def internal(self) -> Optional[pulumi.Input['ControllerServiceInternalArgs']]:
"""
Enables an additional internal load balancer (besides the external one). Annotations are mandatory for the load balancer to come up. Varies with the cloud service.
"""
return pulumi.get(self, "internal")
@internal.setter
def internal(self, value: Optional[pulumi.Input['ControllerServiceInternalArgs']]):
pulumi.set(self, "internal", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter(name="loadBalancerIPs")
def load_balancer_ips(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "load_balancer_ips")
@load_balancer_ips.setter
def load_balancer_ips(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "load_balancer_ips", value)
@property
@pulumi.getter(name="loadBalancerSourceRanges")
def load_balancer_source_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "load_balancer_source_ranges")
@load_balancer_source_ranges.setter
def load_balancer_source_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "load_balancer_source_ranges", value)
@property
@pulumi.getter(name="nodePorts")
def node_ports(self) -> Optional[pulumi.Input['ControllerServiceNodePortsArgs']]:
return pulumi.get(self, "node_ports")
@node_ports.setter
def node_ports(self, value: Optional[pulumi.Input['ControllerServiceNodePortsArgs']]):
pulumi.set(self, "node_ports", value)
@property
@pulumi.getter
def ports(self) -> Optional[pulumi.Input['ControllerPortArgs']]:
return pulumi.get(self, "ports")
@ports.setter
def ports(self, value: Optional[pulumi.Input['ControllerPortArgs']]):
pulumi.set(self, "ports", value)
@property
@pulumi.getter(name="sessionAffinity")
def session_affinity(self) -> Optional[pulumi.Input[str]]:
"""
Must be either "None" or "ClientIP" if set. Kubernetes will default to "None". Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
"""
return pulumi.get(self, "session_affinity")
@session_affinity.setter
def session_affinity(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "session_affinity", value)
@property
@pulumi.getter(name="targetPorts")
def target_ports(self) -> Optional[pulumi.Input['ControllerPortArgs']]:
return pulumi.get(self, "target_ports")
@target_ports.setter
def target_ports(self, value: Optional[pulumi.Input['ControllerPortArgs']]):
pulumi.set(self, "target_ports", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
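# Hedged usage sketch (values are illustrative): ControllerServiceArgs above appears
# to mirror the ingress-nginx Helm service values, so a service that preserves client
# source IPs and restricts LoadBalancer access could be declared like this.
#
#   service_args = ControllerServiceArgs(
#       type="LoadBalancer",
#       external_traffic_policy="Local",
#       load_balancer_source_ranges=["10.0.0.0/8"],
#       session_affinity="None",
#   )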
@pulumi.input_type
class ControllerTcpArgs:
def __init__(__self__, *,
annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
config_map_namespace: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] annotations: Annotations to be added to the tcp config configmap.
"""
if annotations is not None:
pulumi.set(__self__, "annotations", annotations)
if config_map_namespace is not None:
pulumi.set(__self__, "config_map_namespace", config_map_namespace)
@property
@pulumi.getter
def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Annotations to be added to the tcp config configmap.
"""
return pulumi.get(self, "annotations")
@annotations.setter
def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "annotations", value)
@property
@pulumi.getter(name="configMapNamespace")
def config_map_namespace(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "config_map_namespace")
@config_map_namespace.setter
def config_map_namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "config_map_namespace", value)
@pulumi.input_type
class ControllerUdpArgs:
def __init__(__self__, *,
annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
config_map_namespace: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] annotations: Annotations to be added to the udp config configmap.
"""
if annotations is not None:
pulumi.set(__self__, "annotations", annotations)
if config_map_namespace is not None:
pulumi.set(__self__, "config_map_namespace", config_map_namespace)
@property
@pulumi.getter
def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Annotations to be added to the udp config configmap.
"""
return pulumi.get(self, "annotations")
@annotations.setter
def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "annotations", value)
@property
@pulumi.getter(name="configMapNamespace")
def config_map_namespace(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "config_map_namespace")
@config_map_namespace.setter
def config_map_namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "config_map_namespace", value)
@pulumi.input_type
class ControllerUpdateStrategyArgs:
def __init__(__self__, *,
rolling_update: Optional[pulumi.Input['ControllerRollingUpdateArgs']] = None,
type: Optional[pulumi.Input[str]] = None):
if rolling_update is not None:
pulumi.set(__self__, "rolling_update", rolling_update)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="rollingUpdate")
def rolling_update(self) -> Optional[pulumi.Input['ControllerRollingUpdateArgs']]:
return pulumi.get(self, "rolling_update")
@rolling_update.setter
def rolling_update(self, value: Optional[pulumi.Input['ControllerRollingUpdateArgs']]):
pulumi.set(self, "rolling_update", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class ControllerArgs:
def __init__(__self__, *,
add_headers: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
admission_webhooks: Optional[pulumi.Input['ContollerAdmissionWebhooksArgs']] = None,
affinity: Optional[pulumi.Input['pulumi_kubernetes.core.v1.AffinityArgs']] = None,
allow_snippet_annotations: Optional[pulumi.Input[bool]] = None,
annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
autoscaling: Optional[pulumi.Input['AutoscalingArgs']] = None,
autoscaling_template: Optional[pulumi.Input[Sequence[pulumi.Input['AutoscalingTemplateArgs']]]] = None,
config: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
config_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
config_map_namespace: Optional[pulumi.Input[str]] = None,
container_name: Optional[pulumi.Input[str]] = None,
                 container_port: Optional[pulumi.Input['ControllerPortArgs']]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
============================
Multi-source Raw Audio Mixer
============================
A component that mixes raw audio data from an unknown number of sources, that
can change at any time. Audio data from each source is buffered until a minimum
threshold amount, before it is included in the mix. The mixing operation is a
simple addition. Values are not scaled down.
Example Usage
-------------
Mixing up to 3 sources of audio (sometimes a source is active, sometimes it
isn't)::
Graphline(
MIXER = RawAudioMixer( sample_rate=8000,
channels=1,
format="S16_LE",
readThreshold=1.0,
bufferingLimit=2.0,
readInterval=0.1),
),
A = pipeline( SometimesOn_RawAudioSource(), Entuple(prefix="A") ),
B = pipeline( SometimesOn_RawAudioSource(), Entuple(prefix="B") ),
C = pipeline( SometimesOn_RawAudioSource(), Entuple(prefix="C") ),
OUTPUT = RawSoundOutput( sample_rate=8000,
                                 channels=1,
format="S16_LE",
),
linkages = {
(A, "outbox") : (MIXER, "inbox"),
(B, "outbox") : (MIXER, "inbox"),
(C, "outbox") : (MIXER, "inbox"),
(MIXER, "outbox") : (OUTPUT, "inbox"),
},
).run()
Each source is buffered for 1 second before it is output. If more than 2 seconds
of audio are buffered, then samples are dropped.
How does it work?
-----------------
Send (id, raw-audio) tuples to RawAudioMixer's inbox. Where 'id' is any value
that uniquely distinguishes each source of audio.
RawAudioMixer buffers each source of audio, and mixes them together additively,
outputting the resulting stream of audio data.
Constructor arguments:
* sample_rate, channels, format
The format of audio to be mixed. The only format understood at the moment
is "S16_LE"
* readThreshold
number of seconds of audio that will be buffered before RawAudioMixer
starts mixing it into its output.
* bufferingLimit
maximum number of seconds of audio that will be buffered. If more piles up
then some audio will be lost.
* readInterval
number of seconds between each time RawAudioMixer outputs a chunk of audio
data.
RawAudioMixer buffers each source of audio separately. If the amount of audio in
any buffer exceeds the 'buffering limit' then the oldest samples buffered will
be lost.
When one or more buffered sources reaches the 'read threshold' then they are
mixed together and output. How often audio is output is determined by setting
the 'read Interval'.
Mixing is done additively and is *not* scaled down (ie. it is a sum() function,
not an average() ). Therefore, ensure that the sum of the sources being mixed
does not exceed the range of values that samples can take.
Why the buffering, thresholds, and read intervals? It is done this way so that
RawAudioMixer can mix without needing to know what sources of audio there are,
and whether they are running or stopped. It also enables RawAudioMixer to cope
with audio data arriving from different sources at different times.
You may introduce new audio sources at any time - simply send audio data tagged
with a new, unique identifier.
You may stop an audio source at any time too - simply stop sending audio data.
The existing buffered data will be output, until there is none left.
If there is not enough audio in any of the buffers (or perhaps there are no
sources of audio) then RawAudioMixer will not output anything, not even
'silence'.
If a shutdownMicroprocess or producerFinished message is received on this
component's "control" inbox this component will cease reading in data from any
audio sources. If it is currently outputting audio from any of its buffers, it
will continue to do so until these are empty. The component will then forward
on the shutdown message it was sent, out of its "signal" outbox and immediately
terminate.
TODO:
* Needs a timeout mechanism to discard very old data (otherwise this is
effectively a memory leak!)
    - If an audio source sends less than the readThreshold amount of audio
      data and then stops, that data never gets flushed out.
"""
from Axon.Ipc import shutdownMicroprocess, producerFinished
import time as _time
# want pausing capability in threadedcomponent
import sys
from Axon.ThreadedComponent import threadedcomponent
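# Hedged illustration (not used by the component): for "S16_LE" audio the additive
# mix described in the module docstring corresponds to the stdlib audioop.add(),
# summing two equal-length fragments sample by sample without scaling down.
#
#   import audioop
#   a = b"\x10\x00\x20\x00"          # two little-endian 16-bit samples: 16, 32
#   b = b"\x01\x00\x02\x00"          # two samples: 1, 2
#   mixed = audioop.add(a, b, 2)     # -> b"\x11\x00\x22\x00"  (17, 34)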
class AudioBuffer(object):
"""\
AudioBuffer(activationThreshold, sizeLimit) -> new AudioBuffer component.
Doesn't 'activate' until threshold amount of data arrives. Until it does,
attempts to read data will just return nothing.
Keyword arguments:
-- activationThreshold - Point at which the buffer is deemed activated
-- sizeLimit - Filling the buffer beyond this causes samples to be dropped
"""
def __init__(self, activationThreshold, sizeLimit, silence):
super(AudioBuffer,self).__init__()
self.size = 0
self.sizeLimit = sizeLimit
self.activationThreshold = activationThreshold
self.buffer = []
self.active = False
self.silence = silence
def __len__(self):
# return how much data there is
return self.size
def append(self, newdata):
# add new data to the buffer, if there is too much, drop the oldest data
self.buffer.append(newdata)
self.size += len(newdata)
if self.size >= self.activationThreshold:
self.active = True
if self.size > self.sizeLimit:
self.drop(self.size - self.sizeLimit)
def drop(self,amount):
self.size -= amount
while amount > 0:
fragment = self.buffer[0]
if len(fragment) <= amount:
amount -= len(fragment)
del self.buffer[0]
else:
self.buffer[0] = fragment[amount:]
amount = 0
self.size -= amount
def pop(self, amount):
if not self.active:
return ""
data = []
padding_silence = ""
if amount > self.size:
            padding_silence = self.silence * ((amount-self.size)//len(self.silence))  # whole number of silence frames
amount = self.size
self.size -= amount
while amount > 0:
fragment = self.buffer[0]
if len(fragment) <= amount:
data.append(fragment)
amount -= len(fragment)
del self.buffer[0]
else:
data.append(fragment[:amount])
self.buffer[0] = fragment[amount:]
amount = 0
data.append(padding_silence)
if self.size==0:
self.active = False
return "".join(data)
class RawAudioMixer(threadedcomponent):
"""\
RawAudioMixer([sample_rate][,channels][,format][,readThreshold][,bufferingLimit][,readInterval]) -> new RawAudioMixer component.
    Mixes raw audio data from an unknown number of sources, which can change at
    any time. Audio data from each source is buffered until a minimum threshold
    amount has accumulated, before it is included in the mix. The mixing
    operation is a simple addition. Values are not scaled down.
Send (uniqueSourceIdentifier, audioData) tuples to the "inbox" inbox and
mixed audio data will be sent out of the "outbox" outbox.
Keyword arguments:
- sample_rate -- The sample rate of the audio in Hz (default=8000)
- channels -- Number of channels in the audio (default=1)
- format -- Sample format of the audio (default="S16_LE")
    - readThreshold -- Duration (in seconds) to buffer audio before it starts being used (default=1.0)
    - bufferingLimit -- Maximum buffer size (in seconds) for each audio source (default=2.0)
    - readInterval -- Time (in seconds) between each output chunk (default=0.1)
"""
def __init__(self, sample_rate=8000, channels=1, format="S16_LE",
readThreshold=1.0, bufferingLimit=2.0, readInterval=0.1):
super(RawAudioMixer,self).__init__()
self.sample_rate = sample_rate
self.bufferingLimit = bufferingLimit
self.readThreshold = readThreshold
self.readInterval = readInterval
if format=="S16_LE":
self.mix = self.mix_S16_LE
self.quanta = channels*2 # bytes per sample
self.silence = "\0\0"
else:
raise ValueError("Format '"+str(format)+"' not (yet) supported. Sorry!")
def checkForShutdown(self):
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, (producerFinished,shutdownMicroprocess)):
return msg
else:
self.send(msg,"signal")
return False
def main(self):
buffers = {}
self.MAXBUFSIZE = int(self.sample_rate*self.bufferingLimit*self.quanta)
self.BUFTHRESHOLD = int(self.sample_rate*self.readThreshold*self.quanta)
READCHUNKSIZE = int(self.sample_rate*self.readInterval)*self.quanta
shutdown = False
while not shutdown:
# whilst none of the buffers are active (ie. full enough to start reading out data)
anyActive=False
while not anyActive and not shutdown:
while self.dataReady("inbox") and not anyActive:
activated = self.fillBuffer(buffers, self.recv("inbox"))
anyActive = anyActive or activated
shutdown = shutdown or self.checkForShutdown()
if shutdown:
break
if not anyActive:
self.pause()
# switch to reading from buffers (active) mode
nextReadTime = _time.time()
# dump out audio until all buffers are empty
while len(buffers) and not shutdown:
# if we're not shutting down, and its not yet time to output audio
# then read in more data into the buffers
while not shutdown and self.dataReady("inbox") and _time.time() < nextReadTime:
reading = self.fillBuffer(buffers, self.recv("inbox"))
now = _time.time()
                    if now
import math
import os
import pickle
import typing
from concurrent.futures.thread import ThreadPoolExecutor
import torch
from sklearn import mixture
from sklearn.exceptions import NotFittedError
from torch import nn as nn
from torch.nn import functional as F
from config import WAVENET_LAYERS, WAVENET_BLOCKS, WAVENET_DILATION_CHANNELS, WAVENET_RESIDUAL_CHANNELS, \
WAVENET_SKIP_CHANNELS, \
WAVENET_END_CHANNELS, WAVENET_CLASSES, WAVENET_OUTPUT_LENGTH, WAVENET_KERNEL_SIZE, LSTM_HIDDEN_SIZE, \
LSTM_NUM_LAYERS, LSTM_DROPOUT_PROB, WAVEFORM_RANDOM_CROP_SEQUENCE_LENGTH, \
WNTF_TRANSFORMER_D_MODEL, WNTF_TRANSFORMER_N_HEAD, WNTF_TRANSFORMER_N_LAYERS, FEATURES_DATA_PATH, \
GMM_COMPONENT_NUMBER, \
GMM_FIT_FRAME_LIMIT, WNTF_WAVENET_LAYERS, WNTF_WAVENET_BLOCKS, \
WNLSTM_WAVENET_LAYERS, WNLSTM_WAVENET_BLOCKS, CPU_NUM_WORKERS, CONV1D_FEATURE_DIM, RNN1D_DOWNSAMPLER_OUT_CHANNELS, \
RNN1D_DROPOUT_PROB, RNN1D_HIDDEN_SIZE, RNN1D_LSTM_LAYERS, RNN1D_BIDIRECTIONAL, RNN1D_DOWNSAMPLER_STRIDE, \
RNN1D_DOWNSAMPLER_KERNEL_SIZE, RNN1D_DOWNSAMPLER_DILATION, CONV1D_KERNEL_SIZE, CONV1D_STRIDE, CONV1D_DILATION, \
LSTM_FC1_OUTPUT_DIM, LSTM_FC2_OUTPUT_DIM, CONV1D_FC1_OUTPUT_DIM, CONV1D_FC2_OUTPUT_DIM, WNTF_FC1_OUTPUT_DIM, \
WNTF_FC2_OUTPUT_DIM, WNTF_TRANSFORMER_DIM_FEEDFORWARD, LSTM_BIDIRECTIONALITY, WAVENET_FC1_OUTPUT_DIM, \
WAVENET_FC2_OUTPUT_DIM, RNN1D_MAX_SIZE, RNN1D_FC1_INPUT_SIZE, RNN1D_FC1_OUTPUT_SIZE, RNN1D_FC2_OUTPUT_SIZE, \
CONV1D_MAX_SIZE, WNTF_MAX_SIZE, WNLSTM_MAX_SIZE, WN_MAX_SIZE
from util.wavenet.wavenet_model import WaveNetModel
class GMMClassifier(nn.Module):
def __call__(self, *input, **kwargs) -> typing.Any:
"""
Hack to fix '(input: (Any, ...), kwargs: dict) -> Any' warning in PyCharm auto-complete.
:param input:
:param kwargs:
:return:
"""
return super().__call__(*input, **kwargs)
def __init__(self, num_classes, n_components=GMM_COMPONENT_NUMBER, fit_frame_limit=GMM_FIT_FRAME_LIMIT):
super(GMMClassifier, self).__init__()
self.fit_frame_limit = fit_frame_limit
self.gmm_list = []
for _ in range(num_classes):
            # one GMM per singer, as in the works of Tsai, Fujihara, Mesaros et al. on SID
self.gmm_list.append(
mixture.GaussianMixture(n_components=n_components)
)
def forward(self, x):
"""
        Do a forward pass obtaining the score for each element.
If all elements are of the same length (same n_frames), the process is optimized by doing a flatten.
:param x: torch.Tensor MFCC of a track with shape (n_element, n_features, n_frames, )
:return: torch.Tensor The prediction for each track calculated as:
Singer_id = arg max_i [1/T sum^T_t=1 [log p(X_t / P_i)]]
where t is time frame and
i is the singer GMM
shape is (batch_size, n_classes)
"""
        # assume that all the samples have an equal number of frames
x = x.permute(0, 2, 1) # shape (batch_size, n_features, n_frames) to (batch_size, n_frames, n_features)
batch_size = x.size(0)
n_frames = x.size(1)
n_features = x.size(2)
x = x.reshape(batch_size * n_frames,
n_features) # flatten 2 first dimensions into one (n_total_frames, n_features)
x = self.forward_score(x) # output shape is (n_classes, n_total_frames)
n_classes = x.size(0)
x = x.view(n_classes, batch_size, n_frames) # un_flatten to recover elementwise frames
x = torch.sum(x, dim=2) # sum the probability of each element's frames; shape is (n_classes, batch_size)
x = x.permute(1, 0) # swap axes to match signature
return x
def fit(self, x, y):
"""
        Fit a sequence of frames of the same class into the corresponding GMM.
        :param x: Training data (batch_element, n_features, n_frames)
        :param y: singleton tensor holding the integer class id
:return:
"""
# sklearn GMM expects (n, n_features)
debug = True
x = x.permute(0, 2, 1)
x = x.reshape(-1, 20)
# print('Debug: y = {}'.format(y)) if debug else None
# print('Debug: x = {}'.format(x)) if debug else None
# print('Debug: gmm_list = {}'.format(self.gmm_list)) if debug else None
print('info: Fitting GMM...')
data = x[:self.fit_frame_limit, :]
print('Debug: Training data have shape {}'.format(data.shape)) if debug else None
self.gmm_list[y[0].item()].fit(data)
print('info: Done!')
def save_gmm(self, gmm_idx: int, path):
"""
        Save the indexed GMM to storage.
:param gmm_idx: Index of the GMM corresponding to the nominal label trained to predict.
:param path: Absolute path to the storage file to open.
:return:
"""
assert not os.path.isfile(path), 'error: Saving GMM instance noted that {} already exists'.format(path)
pickle.dump(self.gmm_list[gmm_idx], open(path, 'wb'))
def load_gmm(self, gmm_idx: int, path):
"""
        Load a GMM from storage into this instance at the given index.
        May raise FileNotFoundError if the path doesn't exist.
:param gmm_idx: Index of the GMM representing the corresponding nominal label trained to predict.
:param path: Absolute path to the storage file to open.
:return:
"""
self.gmm_list[gmm_idx] = pickle.load(open(path, 'rb'))
def forward_score(self, x):
"""
:param x: MFCC of a track with shape (frames, coefficients, )
:return: The Log Likelihood for each track and frame tested on every GMM (one per singer / class) as:
log likelihood = log p(X_t / P_i)]
where t is time frame and
i is the singer GMM
with shape: (n_classes/n_gmm, n_frames)
"""
def get_scores_from_gmm(gmm):
try:
return torch.from_numpy(gmm.score_samples(x)) # output is a (n_frames)
except NotFittedError:
return torch.zeros(n_frames, dtype=torch.double) + float('-inf')
n_frames = x.size(0)
# n_features = x.size(1)
with ThreadPoolExecutor(CPU_NUM_WORKERS) as e:
framewise_scores = e.map(get_scores_from_gmm, self.gmm_list)
y = torch.stack(list(framewise_scores)) # reshape to tensor (n_classes, n_frames)
return y
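# A minimal usage sketch of GMMClassifier (not part of the original module).
# The component count, frame counts and random data below are assumptions made
# to keep the example fast; real usage would feed MFCC features from tracks.
def _demo_gmm_classifier():
    torch.manual_seed(0)
    n_classes, n_features = 2, 20  # 20 matches the MFCC dimension hard-coded in fit()
    clf = GMMClassifier(num_classes=n_classes, n_components=4, fit_frame_limit=1000)
    for class_id in range(n_classes):
        x_train = torch.randn(1, n_features, 200)   # (batch, n_features, n_frames)
        clf.fit(x_train, torch.tensor([class_id]))  # fit this class's GMM
    x_test = torch.randn(3, n_features, 50)         # 3 elements, 50 frames each
    scores = clf(x_test)                            # summed log-likelihood per class
    print(scores.shape)                             # torch.Size([3, 2])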
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
self.d_model = d_model
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
x = x * math.sqrt(self.d_model)
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
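# A minimal usage sketch of PositionalEncoding (not part of the original
# module). The shapes are assumptions: the module expects (seq_len, batch,
# d_model) input, as in the standard PyTorch transformer layout.
def _demo_positional_encoding():
    pos_enc = PositionalEncoding(d_model=16, dropout=0.0)
    x = torch.zeros(10, 2, 16)   # (seq_len=10, batch=2, d_model=16)
    y = pos_enc(x)               # scaled by sqrt(d_model) and offset by sin/cos terms
    print(y.shape)               # torch.Size([10, 2, 16])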
class Conv1DClassifier(nn.Module):
def __call__(self, *input, **kwargs) -> typing.Any:
"""
Hack to fix '(input: (Any, ...), kwargs: dict) -> Any' warning in PyCharm auto-complete.
:param input:
:param kwargs:
:return:
"""
return super().__call__(*input, **kwargs)
def __init__(self, num_classes):
super(Conv1DClassifier, self).__init__()
# first encoder
# neural audio embeddings
# captures local representations through convolutions
# note: x.shape is (bs, 1, ~80000)
n_layers = int(math.log2(CONV1D_FEATURE_DIM))
self.conv_layers = nn.ModuleList()
for layer_idx in range(n_layers):
self.conv_layers.append(
nn.Conv1d(
in_channels=2 ** layer_idx,
out_channels=2 ** (layer_idx + 1),
kernel_size=CONV1D_KERNEL_SIZE,
stride=CONV1D_STRIDE,
dilation=CONV1D_DILATION,
)
)
self.max_pool = nn.AdaptiveAvgPool1d(CONV1D_MAX_SIZE)
self.avg_pool = nn.AvgPool1d(CONV1D_MAX_SIZE)
# or
# x = torch.flatten(x, N) # shape n_data, encoder_out_dim, N to shape n_data, encoder_out_dim * N
conv_1d_input_dim = CONV1D_FEATURE_DIM
self.fc1 = nn.Linear(conv_1d_input_dim, CONV1D_FC1_OUTPUT_DIM)
self.fc2 = nn.Linear(CONV1D_FC1_OUTPUT_DIM, CONV1D_FC2_OUTPUT_DIM)
self.fc3 = nn.Linear(CONV1D_FC2_OUTPUT_DIM, num_classes)
def forward(self, x):
# assert x.shape is (BS, In_CHNL, ~80000) --> it is!
# assert In_CHNL is 1 or 2 --> it is 1.
# nn.Conv1D: (N, Cin, Lin) -> (N, Cout, Lout)
for conv_layer in self.conv_layers:
x = conv_layer(x)
        # Adaptive average pooling down to CONV1D_MAX_SIZE, then average pooling to length 1
        # Pooling expects input of shape (N, Cout, Lout)
x = self.max_pool(x)
x = self.avg_pool(x)
x = x.squeeze(2)
# Classification
# Expects shape (N_data, fc1_input_size)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
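# Shape walk-through of Conv1DClassifier.forward (descriptive note, assuming a
# monaural input of length L):
#   (N, 1, L) --conv stack--> (N, CONV1D_FEATURE_DIM, L')
#   --adaptive avg pool--> (N, CONV1D_FEATURE_DIM, CONV1D_MAX_SIZE)
#   --avg pool + squeeze--> (N, CONV1D_FEATURE_DIM)
#   --fc1/fc2/fc3--> (N, num_classes)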
class RNNClassifier(nn.Module):
def __call__(self, *input, **kwargs) -> typing.Any:
"""
Hack to fix '(input: (Any, ...), kwargs: dict) -> Any' warning in PyCharm auto-complete.
:param input:
:param kwargs:
:return:
"""
return super().__call__(*input, **kwargs)
def __init__(self, num_classes):
super(RNNClassifier, self).__init__()
# first encoder
# neural audio embeddings
# captures local representations through convolutions
# note: x.shape is (bs, 1, ~80000)
n_layers = int(math.log2(RNN1D_DOWNSAMPLER_OUT_CHANNELS))
self.conv_layers = nn.ModuleList()
for layer_idx in range(n_layers):
            # assumes input is monaural, i.e. shape is (bs, 1, len)
self.conv_layers.append(
nn.Conv1d(
in_channels=2 ** layer_idx,
out_channels=2 ** (layer_idx + 1),
kernel_size=RNN1D_DOWNSAMPLER_KERNEL_SIZE,
stride=RNN1D_DOWNSAMPLER_STRIDE,
dilation=RNN1D_DOWNSAMPLER_DILATION
)
)
self.rnn = nn.LSTM(
input_size=RNN1D_DOWNSAMPLER_OUT_CHANNELS,
hidden_size=RNN1D_HIDDEN_SIZE,
num_layers=RNN1D_LSTM_LAYERS,
dropout=RNN1D_DROPOUT_PROB,
bidirectional=RNN1D_BIDIRECTIONAL
)
self.max_pool = nn.AdaptiveAvgPool1d(RNN1D_MAX_SIZE)
self.avg_pool = nn.AvgPool1d(RNN1D_MAX_SIZE)
self.fc1 = nn.Linear(RNN1D_FC1_INPUT_SIZE, RNN1D_FC1_OUTPUT_SIZE)
self.fc2 = nn.Linear(RNN1D_FC1_OUTPUT_SIZE, RNN1D_FC2_OUTPUT_SIZE)
self.fc3 = nn.Linear(RNN1D_FC2_OUTPUT_SIZE, num_classes)
def forward(self, x):
# assert x.shape is (BS, In_CHNL, ~80000) --> it is!
# assert In_CHNL is 1 or 2 --> it is 1.
# nn.Conv1D: (N, Cin, Lin) -> (N, Cout, Lout)
for conv_layer in self.conv_layers:
x = conv_layer(x)
# question for the reader:
        # Why does PyTorch use a different input shape for CNNs (N, Cin, Lin) than for RNNs (Lin, N, Cin)?
x = x.transpose(0, 2).transpose(1, 2) # (N, Cout, Lout) -> (Lout, Cout, N) -> (Lout, N, Cout)
self.rnn.flatten_parameters()
x, _ = self.rnn(x) # shape n_sequence, n_data, lstm_hidden_size (dropped _ is (h_n, c_n))
x = x.transpose(1, 0) # (Lout, N, Cout) -> (N, Lout, Cout)
x = x.transpose(1, 2) # (N, Lout, Cout) -> (N, Cout, Lout)
        # Adaptive average pooling down to RNN1D_MAX_SIZE, then average pooling to length 1
        # Pooling expects input of shape (N, Cout, Lout)
x = self.max_pool(x)
x = self.avg_pool(x)
x = x.squeeze(2)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
class WaveNetLSTMClassifier(nn.Module):
def __call__(self, *input, **kwargs) -> typing.Any:
"""
Hack to fix '(input: (Any, ...), kwargs: dict) -> Any' warning in PyCharm auto-complete.
:param input:
:param kwargs:
:return:
"""
return super().__call__(*input, **kwargs)
def __init__(self, num_classes):
super(WaveNetLSTMClassifier, self).__init__()
# first encoder
# neural audio embeddings
# captures local representations through convolutions
self.wavenet = WaveNetModel(
WNLSTM_WAVENET_LAYERS,
WNLSTM_WAVENET_BLOCKS,
WAVENET_DILATION_CHANNELS,
WAVENET_RESIDUAL_CHANNELS,
WAVENET_SKIP_CHANNELS,
WAVENET_END_CHANNELS,
WAVENET_CLASSES,
WAVENET_OUTPUT_LENGTH,
WAVENET_KERNEL_SIZE
)
# Conv1d to reduce sequence length from 180k to 2k
stride = 64
self.conv1d_1 = nn.Conv1d(
in_channels=WAVENET_END_CHANNELS,
out_channels=256,
kernel_size=4,
stride=stride,
dilation=16
)
self.conv1d_list = [self.conv1d_1, ]
self.enc_lstm = nn.LSTM(
256,
LSTM_HIDDEN_SIZE,
LSTM_NUM_LAYERS,
bidirectional=LSTM_BIDIRECTIONALITY,
dropout=LSTM_DROPOUT_PROB)
self.max_pool = nn.AdaptiveAvgPool1d(WNLSTM_MAX_SIZE)
self.avg_pool = nn.AvgPool1d(WNLSTM_MAX_SIZE)
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: atts_assign.py
#
# Tests: Behavior of assignment for attribute objects. Ensures good cases
# succeed and bad cases fail with specific python exceptions. Tests variety
# of types present in members of VisIt attribute objects. Tests both
# assignment usage (e.g. atts.memberName=...) and setter function usage
# (e.g. atts.SetMemberName(...))
#
# <NAME>, Tue Jun 8 15:51:59 PDT 2021
#
# Modifications:
# <NAME>, Tue July 27, 2021
# Assigning Max32BitInt+1 to int on Windows causes TypeError, not
# ValueError, so change expected results in those cases.
#
# ----------------------------------------------------------------------------
import copy, io, sys
# Some useful global variables
X = [2,4,6]
Max32BitInt = 2147483647
Max32BitInt1 = Max32BitInt+1
MaxIntAs32BitFloat = 16777216
MaxIntAs32BitFloat1 = MaxIntAs32BitFloat+1
MaxIntAs64BitFloat = 9007199254740992
MaxIntAs64BitFloat1 = MaxIntAs64BitFloat+1
Max32BitFloat = 3.402823E+38
Max32BitFloatA = 3.402820E+37 # One order mag down from Max
Max32BitFloatB = 3.402823E+39 # One order mag up from Max
Min32BitFloat = 1.175494E-38
# version of repr that strips the enclosing parentheses from a tuple
def repr2(s):
return repr(s).lstrip('(').rstrip(')')
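# e.g. repr2((1, 2, 3)) -> '1, 2, 3'  (repr gives '(1, 2, 3)'; the enclosing
# parentheses are stripped so the result can be embedded in test labels)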
def TestAssignmentToTuple():
TestSection('Assignment to tuple, "point1", member (of CylinderAttributes())')
ca = CylinderAttributes()
# Non-existent member name 'point'
try:
ca.point = 1,2,3
TestFOA('ca.point=1,2,3', LINE())
except NameError:
TestPOA('ca.point=1,2,3')
pass
except:
TestFOA('ca.point=1,2,3', LINE())
pass
# Non-existent member name 'point'
try:
ca.SetPoint(1,2,3)
TestFOA('ca.SetPoint(1,2,3)', LINE())
except ValueError:
TestPOA('ca.SetPoint(1,2,3)')
pass
except:
TestFOA('ca.SetPoint(1,2,3)', LINE())
pass
# CSV too short
try:
ca.point1 = 1,2
TestFOA('ca.point1=1,2', LINE())
except TypeError:
TestPOA('ca.point1=1,2')
pass
except:
TestFOA('ca.point1=1,2', LINE())
pass
# CSV too long
try:
ca.point1 = 1,2,3,4
TestFOA('ca.point1=1,2,3,4', LINE())
except TypeError:
TestPOA('ca.point1=1,2,3,4')
pass
except:
TestFOA('ca.point1=1,2,3,4', LINE())
pass
# The above cases can't be put in a loop. Put remaining cases in a loop
fails = [(1,2), (1,2,3,4), '123', (1,1+2j,3), (1,X,3), (1,'b',3), (1,None,3)]
for i in range(len(fails)):
try:
ca.point1 = fails[i]
TestFOA('ca.point1=%s'%repr2(fails[i]), LINE())
except TypeError:
TestPOA('ca.point1=%s'%repr2(fails[i]))
pass
except:
TestFOA('ca.point1=%s'%repr2(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
ca.SetPoint1(fails[i])
TestFOA('ca.SetPoint1(%s)'%repr2(fails[i]), LINE())
except TypeError:
TestPOA('ca.SetPoint1(%s)'%repr2(fails[i]))
pass
except:
TestFOA('ca.SetPoint1(%s)'%repr2(fails[i]), LINE())
pass
try:
ca.point1 = 1,2,3
TestPOA('ca.point1=1,2,3')
except:
TestFOA('ca.point1=1,2,3', LINE())
pass
works = [(1,2,3), (1.1,2.2,3.3), tuple(X)]
for i in range(len(works)):
try:
ca.point1 = works[i]
TestPOA('ca.point1=%s'%repr2(works[i]))
except:
TestFOA('ca.point1=%s'%repr2(works[i]), LINE())
pass
for i in range(len(works)):
try:
ca.SetPoint1(*works[i])
TestPOA('ca.SetPoint1(%s)'%repr2(works[i]))
except:
TestFOA('ca.SetPoint1(%s)'%repr2(works[i]), LINE())
pass
def TestAssignmentToBool():
TestSection('Assignment to bool member, "inverse", (of CylinderAttributes())')
ca = CylinderAttributes()
try:
ca.inverse = 1,2
TestFOA('ca.inverse=1,2', LINE())
except TypeError:
TestPOA('ca.inverse=1,2')
pass
except:
TestFOA('ca.inverse=1,2', LINE())
pass
fails = [ '123', 1+2j, X, None, 5]
excpts = [TypeError, TypeError, TypeError, TypeError, ValueError]
for i in range(len(fails)):
try:
ca.inverse = fails[i]
TestFOA('ca.inverse=%s'%repr(fails[i]), LINE())
except excpts[i]:
TestPOA('ca.inverse=%s'%repr(fails[i]))
pass
except:
TestFOA('ca.inverse=%s'%repr(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
ca.SetInverse(fails[i])
TestFOA('ca.SetInverse(%s)'%repr(fails[i]), LINE())
except excpts[i]:
TestPOA('ca.SetInverse(%s)'%repr(fails[i]))
pass
except:
TestFOA('ca.SetInverse(%s)'%repr(fails[i]), LINE())
pass
works = [0, 1, True, False]
for i in range(len(works)):
try:
ca.inverse = works[i]
TestPOA('ca.inverse=%s'%repr(works[i]))
except:
TestFOA('ca.inverse=%s'%repr(works[i]), LINE())
for i in range(len(works)):
try:
ca.SetInverse(works[i])
TestPOA('ca.SetInverse(%s)'%repr(works[i]))
except:
TestFOA('ca.SetInverse(%s)'%repr(works[i]), LINE())
def TestAssignmentToInt():
TestSection('Assignment to int member, "samplesPerRay", (of VolumeAttributes())')
va = VolumeAttributes()
try:
va.samplesPerRay = 1,2
TestFOA('va.samplesPerRay=1,2', LINE())
except TypeError:
TestPOA('va.samplesPerRay=1,2')
pass
except:
TestFOA('va.samplesPerRay=1,2', LINE())
pass
fails = [ '123', 1+2j, None, X, Max32BitInt1]
if sys.platform.startswith("win"):
excpts = [TypeError, TypeError, TypeError, TypeError, TypeError]
else:
excpts = [TypeError, TypeError, TypeError, TypeError, ValueError]
for i in range(len(fails)):
try:
va.samplesPerRay = fails[i]
TestFOA('va.samplesPerRay=%s'%repr(fails[i]), LINE())
except excpts[i]:
TestPOA('va.samplesPerRay=%s'%repr(fails[i]))
pass
except:
TestFOA('va.samplesPerRay=%s'%repr(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
va.SetSamplesPerRay(fails[i])
TestFOA('va.SetSamplesPerRay(%s)'%repr(fails[i]), LINE())
except excpts[i]:
TestPOA('va.SetSamplesPerRay(%s)'%repr(fails[i]))
pass
except:
TestFOA('va.SetSamplesPerRay(%s)'%repr(fails[i]), LINE())
pass
works = [0, 1, -1, 5, True, False, Max32BitInt]
for i in range(len(works)):
try:
va.samplesPerRay = works[i]
TestPOA('va.samplesPerRay=%s'%repr(works[i]))
except:
TestFOA('va.samplesPerRay=%s'%repr(works[i]), LINE())
for i in range(len(works)):
try:
va.SetSamplesPerRay(works[i])
TestPOA('va.SetSamplesPerRay(%s)'%repr(works[i]))
except:
TestFOA('va.SetSamplesPerRay(%s)'%repr(works[i]), LINE())
def TestAssignmentToFloat():
TestSection('Assignment to float member, "opacityAttenuation", (of VolumeAttributes())')
va = VolumeAttributes()
try:
va.opacityAttenuation = 1,2
TestFOA('va.opacityAttenuation=1,2', LINE())
except TypeError:
TestPOA('va.opacityAttenuation=1,2')
pass
except:
TestFOA('va.opacityAttenuation=1,2', LINE())
pass
fails = [ '123', 1+2j, None, X, Max32BitFloatB]
excpts = [TypeError, TypeError, TypeError, TypeError, ValueError]
for i in range(len(fails)):
try:
va.opacityAttenuation = fails[i]
TestFOA('va.opacityAttenuation=%s'%repr(fails[i]), LINE())
except excpts[i]:
TestPOA('va.opacityAttenuation=%s'%repr(fails[i]))
pass
except:
TestFOA('va.opacityAttenuation=%s'%repr(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
va.SetOpacityAttenuation(fails[i])
TestFOA('va.SetOpacityAttenuation(%s)'%repr(fails[i]), LINE())
except excpts[i]:
TestPOA('va.SetOpacityAttenuation(%s)'%repr(fails[i]))
pass
except:
TestFOA('va.SetOpacityAttenuation(%s)'%repr(fails[i]), LINE())
pass
works = [0, 1, -1, 0.3, Max32BitFloatA, True, False]
for i in range(len(works)):
try:
va.opacityAttenuation = works[i]
TestPOA('va.opacityAttenuation=%s'%repr(works[i]))
except:
TestFOA('va.opacityAttenuation=%s'%repr(works[i]), LINE())
for i in range(len(works)):
try:
va.SetOpacityAttenuation(works[i])
TestPOA('va.SetOpacityAttenuation(%s)'%repr(works[i]))
except:
TestFOA('va.SetOpacityAttenuation(%s)'%repr(works[i]), LINE())
def TestAssignmentToDouble():
TestSection('Assignment to double member, "radius", (of CylinderAttributes())')
ca = CylinderAttributes()
try:
ca.radius = 1,2
TestFOA('ca.radius=1,2', LINE())
except TypeError:
TestPOA('ca.radius=1,2')
pass
except:
TestFOA('ca.radius=1,2', LINE())
pass
fails = ['123', 1+2j, None, X]
for i in range(len(fails)):
try:
ca.radius = fails[i]
TestFOA('ca.radius=%s'%repr(fails[i]), LINE())
except TypeError:
TestPOA('ca.radius=%s'%repr(fails[i]))
pass
except:
TestFOA('ca.radius=%s'%repr(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
ca.SetRadius(fails[i])
TestFOA('ca.SetRadius(%s)'%repr(fails[i]), LINE())
except TypeError:
TestPOA('ca.SetRadius(%s)'%repr(fails[i]))
pass
except:
TestFOA('ca.SetRadius(%s)'%repr(fails[i]), LINE())
pass
works = [0, 1, -1, 5.5, 1.1E-479, 1.1E+479, True, False]
for i in range(len(works)):
try:
ca.radius = works[i]
TestPOA('ca.radius=%s'%repr(works[i]))
except:
TestFOA('ca.radius=%s'%repr(works[i]), LINE())
for i in range(len(works)):
try:
ca.SetRadius(works[i])
TestPOA('ca.SetRadius(%s)'%repr(works[i]))
except:
TestFOA('ca.SetRadius(%s)'%repr(works[i]), LINE())
def TestAssignmentToString():
TestSection('Assignment to string member, "designator", (of CurveAttributes())')
ca = CurveAttributes()
try:
ca.designator = "123","abc"
TestFOA('ca.designator="123","abc"', LINE())
except TypeError:
TestPOA('ca.designator="123","abc"')
pass
except:
TestFOA('ca.designator="123","abc"', LINE())
pass
fails = [0, 1, 1.1, 1+2j, None, X]
for i in range(len(fails)):
try:
ca.designator = fails[i]
TestFOA('ca.designator=%s'%repr(fails[i]), LINE())
except TypeError:
TestPOA('ca.designator=%s'%repr(fails[i]))
pass
except:
TestFOA('ca.designator=%s'%repr(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
ca.SetDesignator(fails[i])
TestFOA('ca.SetDesignator(%s)'%repr(fails[i]), LINE())
except TypeError:
TestPOA('ca.SetDesignator(%s)'%repr(fails[i]))
pass
except:
TestFOA('ca.SetDesignator(%s)'%repr(fails[i]), LINE())
pass
works = ['123', 'abc', '']
for i in range(len(works)):
try:
ca.designator = works[i]
TestPOA('ca.designator=%s'%repr(works[i]))
except:
TestFOA('ca.designator=%s'%repr(works[i]), LINE())
for i in range(len(works)):
try:
ca.SetDesignator(works[i])
TestPOA('ca.SetDesignator(%s)'%repr(works[i]))
except:
TestFOA('ca.SetDesignator(%s)'%repr(works[i]), LINE())
def TestAssignmentToGlyphType():
TestSection('Assignment to GlyphType member, "pointType", (of MeshAttributes())')
ma = MeshAttributes()
# Test direct assignment with = operator
try:
ma.pointType = 1
TestPOA('ma.pointType=1')
except:
TestFOA('ma.pointType=1', LINE())
pass
fails = [ '123', 1+2j, None, X, -1, 123123123123123123123123123123]
excpts = [TypeError, TypeError, TypeError, TypeError, ValueError, TypeError]
for i in range(len(fails)):
try:
ma.pointType = fails[i]
TestFOA('ma.pointType=%s'%repr(fails[i]), LINE())
except excpts[i]:
TestPOA('ma.pointType=%s'%repr(fails[i]))
pass
except:
TestFOA('ma.pointType=%s'%repr(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
ma.SetPointType(fails[i])
TestFOA('ma.SetPointType(%s)'%repr(fails[i]), LINE())
except excpts[i]:
TestPOA('ma.SetPointType(%s)'%repr(fails[i]))
pass
except:
TestFOA('ma.SetPointType(%s)'%repr(fails[i]), LINE())
pass
works = [0, 1, 5, True, False, ma.Point]
for i in range(len(works)):
try:
ma.pointType = works[i]
TestPOA('ma.pointType=%s'%repr(works[i]))
except:
TestFOA('ma.pointType=%s'%repr(works[i]), LINE())
for i in range(len(works)):
try:
ma.SetPointType(works[i])
TestPOA('ma.SetPointType(%s)'%repr(works[i]))
except:
TestFOA('ma.SetPointType(%s)'%repr(works[i]), LINE())
def TestAssignmentToEnum():
TestSection('Assignment to Enum member, "smoothingLevel", (of MeshAttributes())')
ma = MeshAttributes()
# Test direct assignment with = operator
try:
ma.smoothingLevel = 1
TestPOA('ma.smoothingLevel=1')
except:
TestFOA('ma.smoothingLevel=1', LINE())
pass
fails = [ '123', 1+2j, None, X, -1, 123123123, 123123123123123123123123123123]
excpts = [TypeError, TypeError, TypeError, TypeError, ValueError, ValueError, TypeError]
for i in range(len(fails)):
try:
ma.smoothingLevel = fails[i]
TestFOA('ma.smoothingLevel=%s'%repr(fails[i]), LINE())
except excpts[i]:
TestPOA('ma.smoothingLevel=%s'%repr(fails[i]))
pass
except:
TestFOA('ma.smoothingLevel=%s'%repr(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
ma.SetSmoothingLevel(fails[i])
TestFOA('ma.SetSmoothingLevel(%s)'%repr(fails[i]), LINE())
except excpts[i]:
TestPOA('ma.SetSmoothingLevel(%s)'%repr(fails[i]))
pass
except:
TestFOA('ma.SetSmoothingLevel(%s)'%repr(fails[i]), LINE())
pass
works = [0, 1, 2, True, False, ma.Fast]
for i in range(len(works)):
try:
ma.smoothingLevel = works[i]
TestPOA('ma.smoothingLevel=%s'%repr(works[i]))
except:
TestFOA('ma.smoothingLevel=%s'%repr(works[i]), LINE())
for i in range(len(works)):
try:
ma.SetSmoothingLevel(works[i])
TestPOA('ma.SmoothingLevel(%s)'%repr(works[i]))
except:
TestFOA('ma.SetSmoothingLevel(%s)'%repr(works[i]), LINE())
def TestAssignmentToUCharVector():
TestSection('Assignment to ucharVector member, "changedColors", (of MultiCurveAttributes())')
mca = MultiCurveAttributes()
# Test direct assignment with = operator
try:
mca.changedColors = 1,2,3
TestPOA('mca.changedColors=1,2,3')
except:
TestFOA('mca.changedColors=1,2,3', LINE())
pass
fails = [(1,123123123123123123123123123123,3), (1,1+2j,3), (1,X,3), (1,'b',3), (1,None,3), ('123',)]
for i in range(len(fails)):
try:
mca.changedColors = fails[i]
TestFOA('mca.changedColors=%s'%repr2(fails[i]), LINE())
except TypeError:
TestPOA('mca.changedColors=%s'%repr2(fails[i]))
pass
except:
TestFOA('mca.changedColors=%s'%repr2(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
mca.SetChangedColors(*fails[i])
TestFOA('mca.SetChangedColors(%s)'%repr2(fails[i]), LINE())
except TypeError:
TestPOA('mca.SetChangedColors(%s)'%repr2(fails[i]))
pass
except:
TestFOA('mca.SetChangedColors(%s)'%repr2(fails[i]), LINE())
pass
works = [(1,2,3), tuple(X), (1,True,3), (1,False,3)]
for i in range(len(works)):
try:
mca.changedColors = works[i]
TestPOA('mca.changedColors=%s'%repr2(works[i]))
except:
TestFOA('mca.changedColors=%s'%repr2(works[i]), LINE())
for i in range(len(works)):
try:
mca.SetChangedColors(*works[i])
TestPOA('mca.SetChangedColors(%s)'%repr2(works[i]))
except:
TestFOA('mca.SetChangedColors(%s)'%repr2(works[i]), LINE())
def TestAssignmentToIntVector():
TestSection('Assignment to intVector member, "index", (of OnionPeelAttributes())')
opa = OnionPeelAttributes()
# Test direct assignment with = operator
try:
opa.index = 1,2,3
TestPOA('opa.index=1,2,3')
except:
TestFOA('opa.index=1,2,3', LINE())
pass
fails = [(Max32BitInt1,), (1+2j,), ('b',), (None,), (1,Max32BitInt1,3),
(1,1+2j,3), (1,X,3), (1,'b',3), (1,None,3)]
if sys.platform.startswith("win"):
excpts = [TypeError, TypeError, TypeError, TypeError, TypeError,
TypeError, TypeError, TypeError, TypeError]
else:
excpts = [ValueError, TypeError, TypeError, TypeError, ValueError,
TypeError, TypeError, TypeError, TypeError]
for i in range(len(fails)):
try:
opa.index = fails[i]
TestFOA('opa.index=%s'%repr2(fails[i]), LINE())
except excpts[i]:
TestPOA('opa.index=%s'%repr2(fails[i]))
pass
except:
TestFOA('opa.index=%s'%repr2(fails[i]), LINE())
pass
for i in range(len(fails)):
try:
opa.SetIndex(*fails[i])
TestFOA('opa.SetIndex(%s)'%repr2(fails[i]), LINE())
except excpts[i]:
TestPOA('opa.SetIndex(%s)'%repr2(fails[i]))
pass
except:
TestFOA('opa.SetIndex(%s)'%repr2(fails[i]), LINE())
pass
works = [(1,2,3), X, tuple(X), (1,True,3), (1,False,3), (1,Max32BitInt,3)]
for i in range(len(works)):
try:
opa.index = works[i]
TestPOA('opa.index=%s'%repr2(works[i]))
except:
TestFOA('opa.index=%s'%repr2(works[i]), LINE())
    for
# test/unit_tests/braket/aws/test_aws_quantum_job.py
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import datetime
import json
import logging
import os
import tarfile
import tempfile
from unittest.mock import Mock, patch
import pytest
from botocore.exceptions import ClientError
from braket.aws import AwsQuantumJob, AwsSession
@pytest.fixture
def aws_session(quantum_job_arn, job_region):
_aws_session = Mock(spec=AwsSession)
_aws_session.create_job.return_value = quantum_job_arn
_aws_session.default_bucket.return_value = "default-bucket-name"
_aws_session.get_default_jobs_role.return_value = "default-role-arn"
_aws_session.construct_s3_uri.side_effect = (
lambda bucket, *dirs: f"s3://{bucket}/{'/'.join(dirs)}"
)
def fake_copy_session(region):
_aws_session.region = region
return _aws_session
_aws_session.copy_session.side_effect = fake_copy_session
_aws_session.list_keys.return_value = ["job-path/output/model.tar.gz"]
_aws_session.region = "us-test-1"
_braket_client_mock = Mock(meta=Mock(region_name=job_region))
_aws_session.braket_client = _braket_client_mock
return _aws_session
@pytest.fixture
def generate_get_job_response():
def _get_job_response(**kwargs):
response = {
"ResponseMetadata": {
"RequestId": "d223b1a0-ee5c-4c75-afa7-3c29d5338b62",
"HTTPStatusCode": 200,
},
"algorithmSpecification": {
"scriptModeConfig": {
"entryPoint": "my_file:start_here",
"s3Uri": "s3://amazon-braket-jobs/job-path/my_file.py",
}
},
"checkpointConfig": {
"localPath": "/opt/omega/checkpoints",
"s3Uri": "s3://amazon-braket-jobs/job-path/checkpoints",
},
"createdAt": datetime.datetime(2021, 6, 28, 21, 4, 51),
"deviceConfig": {
"device": "arn:aws:braket:::device/qpu/rigetti/Aspen-10",
},
"hyperParameters": {
"foo": "bar",
},
"inputDataConfig": [
{
"channelName": "training_input",
"dataSource": {
"s3DataSource": {
"s3Uri": "s3://amazon-braket-jobs/job-path/input",
}
},
}
],
"instanceConfig": {
"instanceCount": 1,
"instanceType": "ml.m5.large",
"volumeSizeInGb": 1,
},
"jobArn": "arn:aws:braket:us-west-2:875981177017:job/job-test-20210628140446",
"jobName": "job-test-20210628140446",
"outputDataConfig": {"s3Path": "s3://amazon-braket-jobs/job-path/data"},
"roleArn": "arn:aws:iam::875981177017:role/AmazonBraketJobRole",
"status": "RUNNING",
"stoppingCondition": {"maxRuntimeInSeconds": 1200},
}
response.update(kwargs)
return response
return _get_job_response
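# Note (descriptive, not part of the original tests): the fixture injects the
# inner factory, so tests call e.g. generate_get_job_response(status="COMPLETED")
# to get the canned GetJob response with selected fields overridden via **kwargs.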
@pytest.fixture
def generate_cancel_job_response():
def _cancel_job_response(**kwargs):
response = {
"ResponseMetadata": {
"RequestId": "857b0893-2073-4ad6-b828-744af8400dfe",
"HTTPStatusCode": 200,
},
"cancellationStatus": "CANCELLING",
"jobArn": "arn:aws:braket:us-west-2:875981177017:job/job-test-20210628140446",
}
response.update(kwargs)
return response
return _cancel_job_response
@pytest.fixture
def quantum_job_name():
return "job-test-20210628140446"
@pytest.fixture
def job_region():
return "us-west-2"
@pytest.fixture
def quantum_job_arn(quantum_job_name, job_region):
return f"arn:aws:braket:{job_region}:875981177017:job/{quantum_job_name}"
@pytest.fixture
def quantum_job(quantum_job_arn, aws_session):
return AwsQuantumJob(quantum_job_arn, aws_session)
def test_equality(quantum_job_arn, aws_session, job_region):
new_aws_session = Mock(braket_client=Mock(meta=Mock(region_name=job_region)))
quantum_job_1 = AwsQuantumJob(quantum_job_arn, aws_session)
quantum_job_2 = AwsQuantumJob(quantum_job_arn, aws_session)
quantum_job_3 = AwsQuantumJob(quantum_job_arn, new_aws_session)
other_quantum_job = AwsQuantumJob(
"arn:aws:braket:us-west-2:875981177017:job/other-job", aws_session
)
non_quantum_job = quantum_job_1.arn
assert quantum_job_1 == quantum_job_2
assert quantum_job_1 == quantum_job_3
assert quantum_job_1 is not quantum_job_2
assert quantum_job_1 is not quantum_job_3
assert quantum_job_1 is quantum_job_1
assert quantum_job_1 != other_quantum_job
assert quantum_job_1 != non_quantum_job
def test_hash(quantum_job):
assert hash(quantum_job) == hash(quantum_job.arn)
@pytest.mark.parametrize(
"arn, expected_region",
[
("arn:aws:braket:us-west-2:875981177017:job/job-name", "us-west-2"),
("arn:aws:braket:us-west-1:1234567890:job/job-name", "us-west-1"),
],
)
@patch("braket.aws.aws_quantum_job.boto3.Session")
@patch("braket.aws.aws_quantum_job.AwsSession")
def test_quantum_job_constructor_default_session(
aws_session_mock, mock_session, arn, expected_region
):
mock_boto_session = Mock()
aws_session_mock.return_value = Mock()
mock_session.return_value = mock_boto_session
job = AwsQuantumJob(arn)
mock_session.assert_called_with(region_name=expected_region)
aws_session_mock.assert_called_with(boto_session=mock_boto_session)
assert job.arn == arn
assert job._aws_session == aws_session_mock.return_value
@pytest.mark.xfail(raises=ValueError)
def test_quantum_job_constructor_invalid_region(aws_session):
arn = "arn:aws:braket:unknown-region:875981177017:job/quantum_job_name"
AwsQuantumJob(arn, aws_session)
@patch("braket.aws.aws_quantum_job.boto3.Session")
def test_quantum_job_constructor_explicit_session(mock_session, quantum_job_arn, job_region):
aws_session_mock = Mock(braket_client=Mock(meta=Mock(region_name=job_region)))
job = AwsQuantumJob(quantum_job_arn, aws_session_mock)
assert job._aws_session == aws_session_mock
assert job.arn == quantum_job_arn
mock_session.assert_not_called()
def test_metadata(quantum_job, aws_session, generate_get_job_response, quantum_job_arn):
get_job_response_running = generate_get_job_response(status="RUNNING")
aws_session.get_job.return_value = get_job_response_running
assert quantum_job.metadata() == get_job_response_running
aws_session.get_job.assert_called_with(quantum_job_arn)
get_job_response_completed = generate_get_job_response(status="COMPLETED")
aws_session.get_job.return_value = get_job_response_completed
assert quantum_job.metadata() == get_job_response_completed
aws_session.get_job.assert_called_with(quantum_job_arn)
assert aws_session.get_job.call_count == 2
def test_metadata_caching(quantum_job, aws_session, generate_get_job_response, quantum_job_arn):
get_job_response_running = generate_get_job_response(status="RUNNING")
aws_session.get_job.return_value = get_job_response_running
assert quantum_job.metadata(True) == get_job_response_running
get_job_response_completed = generate_get_job_response(status="COMPLETED")
aws_session.get_job.return_value = get_job_response_completed
assert quantum_job.metadata(True) == get_job_response_running
aws_session.get_job.assert_called_with(quantum_job_arn)
assert aws_session.get_job.call_count == 1
def test_state(quantum_job, aws_session, generate_get_job_response, quantum_job_arn):
state_1 = "RUNNING"
get_job_response_running = generate_get_job_response(status=state_1)
aws_session.get_job.return_value = get_job_response_running
assert quantum_job.state() == state_1
aws_session.get_job.assert_called_with(quantum_job_arn)
state_2 = "COMPLETED"
get_job_response_completed = generate_get_job_response(status=state_2)
aws_session.get_job.return_value = get_job_response_completed
assert quantum_job.state() == state_2
aws_session.get_job.assert_called_with(quantum_job_arn)
assert aws_session.get_job.call_count == 2
def test_state_caching(quantum_job, aws_session, generate_get_job_response, quantum_job_arn):
state_1 = "RUNNING"
get_job_response_running = generate_get_job_response(status=state_1)
aws_session.get_job.return_value = get_job_response_running
assert quantum_job.state(True) == state_1
state_2 = "COMPLETED"
get_job_response_completed = generate_get_job_response(status=state_2)
aws_session.get_job.return_value = get_job_response_completed
assert quantum_job.state(True) == state_1
aws_session.get_job.assert_called_with(quantum_job_arn)
assert aws_session.get_job.call_count == 1
@pytest.fixture()
def result_setup(quantum_job_name):
with tempfile.TemporaryDirectory() as temp_dir:
os.chdir(temp_dir)
file_path = "results.json"
with open(file_path, "w") as write_file:
write_file.write(
json.dumps(
{
"braketSchemaHeader": {
"name": "braket.jobs_data.persisted_job_data",
"version": "1",
},
"dataDictionary": {"converged": True, "energy": -0.2},
"dataFormat": "plaintext",
}
)
)
with tarfile.open("model.tar.gz", "w:gz") as tar:
tar.add(file_path, arcname=os.path.basename(file_path))
yield
result_dir = f"{os.getcwd()}/{quantum_job_name}"
if os.path.exists(result_dir):
os.remove(f"{result_dir}/results.json")
os.rmdir(f"{result_dir}/")
if os.path.isfile("model.tar.gz"):
os.remove("model.tar.gz")
os.chdir("..")
@pytest.mark.parametrize("state", AwsQuantumJob.TERMINAL_STATES)
def test_results_when_job_is_completed(
quantum_job, aws_session, generate_get_job_response, result_setup, state
):
expected_saved_data = {"converged": True, "energy": -0.2}
get_job_response_completed = generate_get_job_response(status=state)
quantum_job._aws_session.get_job.return_value = get_job_response_completed
actual_data = quantum_job.result()
job_metadata = quantum_job.metadata(True)
s3_path = job_metadata["outputDataConfig"]["s3Path"]
output_bucket_uri = f"{s3_path}/output/model.tar.gz"
quantum_job._aws_session.download_from_s3.assert_called_with(
s3_uri=output_bucket_uri, filename="model.tar.gz"
)
assert actual_data == expected_saved_data
def test_download_result_when_job_is_running(
quantum_job, aws_session, generate_get_job_response, result_setup
):
poll_timeout_seconds, poll_interval_seconds, state = 1, 0.5, "RUNNING"
get_job_response_completed = generate_get_job_response(status=state)
aws_session.get_job.return_value = get_job_response_completed
job_metadata = quantum_job.metadata(True)
with pytest.raises(
TimeoutError,
match=f"{job_metadata['jobName']}: Polling for job completion "
f"timed out after {poll_timeout_seconds} seconds.",
):
quantum_job.download_result(
poll_timeout_seconds=poll_timeout_seconds, poll_interval_seconds=poll_interval_seconds
)
def test_download_result_when_extract_path_not_provided(
quantum_job, generate_get_job_response, aws_session, result_setup
):
state = "COMPLETED"
expected_saved_data = {"converged": True, "energy": -0.2}
get_job_response_completed = generate_get_job_response(status=state)
quantum_job._aws_session.get_job.return_value = get_job_response_completed
job_metadata = quantum_job.metadata(True)
job_name = job_metadata["jobName"]
quantum_job.download_result()
with open(f"{job_name}/results.json", "r") as file:
actual_data = json.loads(file.read())["dataDictionary"]
assert expected_saved_data == actual_data
def test_download_result_when_extract_path_provided(
quantum_job, generate_get_job_response, aws_session, result_setup
):
expected_saved_data = {"converged": True, "energy": -0.2}
state = "COMPLETED"
get_job_response_completed = generate_get_job_response(status=state)
aws_session.get_job.return_value = get_job_response_completed
job_metadata = quantum_job.metadata(True)
job_name = job_metadata["jobName"]
with tempfile.TemporaryDirectory() as temp_dir:
quantum_job.download_result(temp_dir)
with open(f"{temp_dir}/{job_name}/results.json", "r") as file:
actual_data = json.loads(file.read())["dataDictionary"]
assert expected_saved_data == actual_data
def test_empty_dict_returned_when_result_not_saved(
quantum_job, generate_get_job_response, aws_session
):
state = "COMPLETED"
get_job_response_completed = generate_get_job_response(status=state)
aws_session.get_job.return_value = get_job_response_completed
exception_response = {
"Error": {
"Code": "404",
"Message": "Not Found",
}
}
quantum_job._aws_session.download_from_s3 = Mock(
side_effect=ClientError(exception_response, "HeadObject")
)
assert quantum_job.result() == {}
def test_results_not_in_s3_for_download(quantum_job, generate_get_job_response, aws_session):
state = "COMPLETED"
get_job_response_completed = generate_get_job_response(status=state)
aws_session.get_job.return_value = get_job_response_completed
job_metadata = quantum_job.metadata(True)
output_s3_path = job_metadata["outputDataConfig"]["s3Path"]
error_message = f"Error retrieving results, could not find results at '{output_s3_path}"
exception_response = {
"Error": {
"Code": "404",
"Message": "Not Found",
}
}
quantum_job._aws_session.download_from_s3 = Mock(
side_effect=ClientError(exception_response, "HeadObject")
)
with pytest.raises(ClientError, match=error_message):
quantum_job.download_result()
def test_results_raises_error_for_non_404_errors(
quantum_job, generate_get_job_response, aws_session
):
state = "COMPLETED"
get_job_response_completed = generate_get_job_response(status=state)
aws_session.get_job.return_value = get_job_response_completed
error = "An error occurred \\(402\\) when calling the SomeObject operation: Something"
exception_response = {
"Error": {
"Code": "402",
"Message": "Something",
}
}
quantum_job._aws_session.download_from_s3 = Mock(
side_effect=ClientError(exception_response, "SomeObject")
)
with pytest.raises(ClientError, match=error):
quantum_job.result()
@patch("braket.aws.aws_quantum_job.AwsQuantumJob.download_result")
def test_results_json_file_not_in_tar(
result_download, quantum_job, aws_session, generate_get_job_response
):
state = "COMPLETED"
get_job_response_completed = generate_get_job_response(status=state)
quantum_job._aws_session.get_job.return_value = get_job_response_completed
assert quantum_job.result() == {}
@pytest.fixture
def entry_point():
return "test-source-module.entry_point:func"
@pytest.fixture
def bucket():
return "braket-region-id"
@pytest.fixture(
params=[
None,
"aws.location/custom-jobs:tag.1.2.3",
"other.uri/custom-name:tag",
"other-custom-format.com",
]
)
def image_uri(request):
return request.param
@pytest.fixture(params=["given_job_name", "default_job_name"])
def job_name(request):
if request.param == "given_job_name":
return "test-job-name"
@pytest.fixture
def s3_prefix(job_name):
return f"{job_name}/non-default"
@pytest.fixture(params=["local_source", "s3_source"])
def source_module(request, bucket, s3_prefix):
if request.param == "local_source":
return "test-source-module"
elif request.param == "s3_source":
return AwsSession.construct_s3_uri(bucket, "test-source-prefix", "source.tar.gz")
@pytest.fixture
def role_arn():
return "arn:aws:iam::0000000000:role/AmazonBraketInternalSLR"
@pytest.fixture(
params=[
"arn:aws:braket:us-test-1::device/qpu/test/device-name",
"arn:aws:braket:::device/qpu/test/device-name",
]
)
def device_arn(request):
return request.param
@pytest.fixture
def prepare_job_args(aws_session, device_arn):
return {
"device": device_arn,
"source_module": Mock(),
"entry_point": Mock(),
"image_uri": Mock(),
"job_name": Mock(),
"code_location": Mock(),
"role_arn": Mock(),
"hyperparameters": Mock(),
"input_data": Mock(),
"instance_config": Mock(),
"stopping_condition": Mock(),
"output_data_config": Mock(),
"copy_checkpoints_from_job": Mock(),
"checkpoint_config": Mock(),
"aws_session": aws_session,
"tags": Mock(),
}
def test_str(quantum_job):
expected = f"AwsQuantumJob('arn':'{quantum_job.arn}')"
assert str(quantum_job) == expected
def test_arn(quantum_job_arn, aws_session):
quantum_job = AwsQuantumJob(quantum_job_arn, aws_session)
assert quantum_job.arn == quantum_job_arn
def test_name(quantum_job_arn, quantum_job_name, aws_session):
quantum_job = AwsQuantumJob(quantum_job_arn, aws_session)
assert quantum_job.name == quantum_job_name
@pytest.mark.xfail(raises=AttributeError)
def test_no_arn_setter(quantum_job):
quantum_job.arn = 123
@pytest.mark.parametrize("wait_until_complete", [True, False])
@patch("braket.aws.aws_quantum_job.AwsQuantumJob.logs")
@patch("braket.aws.aws_quantum_job.prepare_quantum_job")
def test_create_job(
mock_prepare_quantum_job,
mock_logs,
aws_session,
prepare_job_args,
quantum_job_arn,
wait_until_complete,
):
test_response_args = {"testArgs": "MyTestArg"}
mock_prepare_quantum_job.return_value = test_response_args
job = AwsQuantumJob.create(wait_until_complete=wait_until_complete, **prepare_job_args)
mock_prepare_quantum_job.assert_called_with(**prepare_job_args)
aws_session.create_job.assert_called_with(**test_response_args)
if wait_until_complete:
mock_logs.assert_called_once()
else:
mock_logs.assert_not_called()
assert job.arn == quantum_job_arn
def test_create_fake_arg():
unexpected_kwarg = "create\\(\\) got an unexpected keyword argument 'fake_arg'"
with pytest.raises(TypeError, match=unexpected_kwarg):
AwsQuantumJob.create(
device="device",
source_module="source",
fake_arg="fake_value",
)
def test_cancel_job(quantum_job_arn, aws_session, generate_cancel_job_response):
cancellation_status = "CANCELLING"
aws_session.cancel_job.return_value = generate_cancel_job_response(
cancellationStatus=cancellation_status
)
quantum_job = AwsQuantumJob(quantum_job_arn, aws_session)
status = quantum_job.cancel()
aws_session.cancel_job.assert_called_with(quantum_job_arn)
assert status == cancellation_status
@pytest.mark.xfail(raises=ClientError)
def test_cancel_job_surfaces_exception(quantum_job, aws_session):
exception_response = {
"Error": {
"Code": "ValidationException",
"Message": "unit-test-error",
}
}
aws_session.cancel_job.side_effect = ClientError(exception_response, "cancel_job")
quantum_job.cancel()
@pytest.mark.parametrize(
"generate_get_job_response_kwargs",
[
{
"status": "RUNNING",
},
{
"status": "COMPLETED",
},
{
"status": "COMPLETED",
"startedAt": datetime.datetime(2021, 1, 1, 1, 0, 0, 0),
},
{"status": "COMPLETED", "endedAt": datetime.datetime(2021, 1, 1, 1, 0, 0, 0)},
{
"status": "COMPLETED",
"startedAt": datetime.datetime(2021, 1, 1, 1, 0, 0, 0),
"endedAt": datetime.datetime(2021, 1, 1, 1, 0, 0, 0),
},
],
)
@patch(
"braket.jobs.metrics_data.cwl_insights_metrics_fetcher."
"CwlInsightsMetricsFetcher.get_metrics_for_job"
)
def test_metrics(
metrics_fetcher_mock,
quantum_job,
aws_session,
generate_get_job_response,
generate_get_job_response_kwargs,
):
get_job_response_running = generate_get_job_response(**generate_get_job_response_kwargs)
aws_session.get_job.return_value = get_job_response_running
expected_metrics = {"Test": [1]}
metrics_fetcher_mock.return_value = expected_metrics
metrics = quantum_job.metrics()
assert metrics == expected_metrics
@pytest.fixture
def log_stream_responses():
return (
ClientError(
{
"Error": {
"Code": "ResourceNotFoundException",
"Message": "This shouldn't get raised...",
}
},
"DescribeLogStreams",
),
{"logStreams": []},
{"logStreams": [{"logStreamName": "stream-1"}]},
)
@pytest.fixture
def log_events_responses():
return (
{"nextForwardToken": None, "events": [{"timestamp": 1, "message": "hi there #1"}]},
{"nextForwardToken": None, "events": []},
{
"nextForwardToken": None,
"events": [
{"timestamp": 1, "message": "hi there #1"},
{"timestamp": 2, "message": "hi there #2"},
],
},
{"nextForwardToken": None, "events": []},
{
"nextForwardToken": None,
"events": [
{"timestamp": 2, "message": "hi there #2"},
{"timestamp": 2, "message": "hi there #2a"},
{"timestamp": 3, "message": "hi there #3"},
],
},
{"nextForwardToken": None, "events": []},
)
def test_logs(
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2020 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.nsx_policy.infra.tier_0s.locale_services.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class Bgp(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx_policy.infra.tier_0s.locale_services.bgp'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _BgpStub)
self._VAPI_OPERATION_IDS = {}
def get(self,
tier0_id,
locale_service_id,
):
"""
Read BGP routing config
:type tier0_id: :class:`str`
:param tier0_id: (required)
:type locale_service_id: :class:`str`
:param locale_service_id: (required)
:rtype: :class:`com.vmware.nsx_policy.model_client.BgpRoutingConfig`
:return: com.vmware.nsx_policy.model.BgpRoutingConfig
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
})
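# A minimal usage sketch (not part of the generated stub). `stub_config` is
# assumed to be an already-authenticated vmware.vapi StubConfiguration, and the
# tier-0 / locale-service ids below are placeholders.
def _example_read_bgp_config(stub_config):
    bgp = Bgp(stub_config)
    # Returns a com.vmware.nsx_policy.model.BgpRoutingConfig
    return bgp.get('my-tier0', 'default')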
class ByodServiceInstances(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx_policy.infra.tier_0s.locale_services.byod_service_instances'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _ByodServiceInstancesStub)
self._VAPI_OPERATION_IDS = {}
def delete(self,
tier0_id,
locale_service_id,
service_instance_id,
):
"""
Delete BYOD policy service instance
:type tier0_id: :class:`str`
:param tier0_id: Tier-0 id (required)
:type locale_service_id: :class:`str`
:param locale_service_id: Locale service id (required)
:type service_instance_id: :class:`str`
:param service_instance_id: Service instance id (required)
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('delete',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
'service_instance_id': service_instance_id,
})
def get(self,
tier0_id,
locale_service_id,
service_instance_id,
):
"""
Read byod service instance
:type tier0_id: :class:`str`
:param tier0_id: Tier-0 id (required)
:type locale_service_id: :class:`str`
:param locale_service_id: Locale service id (required)
:type service_instance_id: :class:`str`
:param service_instance_id: Service instance id (required)
:rtype: :class:`com.vmware.nsx_policy.model_client.ByodPolicyServiceInstance`
:return: com.vmware.nsx_policy.model.ByodPolicyServiceInstance
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
'service_instance_id': service_instance_id,
})
def list(self,
tier0_id,
locale_service_id,
cursor=None,
include_mark_for_delete_objects=None,
included_fields=None,
page_size=None,
sort_ascending=None,
sort_by=None,
):
"""
Read all BYOD service instance objects under a tier-0
:type tier0_id: :class:`str`
:param tier0_id: Tier-0 id (required)
:type locale_service_id: :class:`str`
:param locale_service_id: Locale service id (required)
:type cursor: :class:`str` or ``None``
:param cursor: Opaque cursor to be used for getting next page of records (supplied
by current result page) (optional)
:type include_mark_for_delete_objects: :class:`bool` or ``None``
:param include_mark_for_delete_objects: Include objects that are marked for deletion in results (optional,
default to false)
:type included_fields: :class:`str` or ``None``
:param included_fields: Comma separated list of fields that should be included in query
result (optional)
:type page_size: :class:`long` or ``None``
:param page_size: Maximum number of results to return in this page (server may return
fewer) (optional, default to 1000)
:type sort_ascending: :class:`bool` or ``None``
:param sort_ascending: (optional)
:type sort_by: :class:`str` or ``None``
:param sort_by: Field by which records are sorted (optional)
:rtype: :class:`com.vmware.nsx_policy.model_client.ByodPolicyServiceInstanceListResult`
:return: com.vmware.nsx_policy.model.ByodPolicyServiceInstanceListResult
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('list',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
'cursor': cursor,
'include_mark_for_delete_objects': include_mark_for_delete_objects,
'included_fields': included_fields,
'page_size': page_size,
'sort_ascending': sort_ascending,
'sort_by': sort_by,
})
def patch(self,
tier0_id,
locale_service_id,
service_instance_id,
byod_policy_service_instance,
):
"""
Create BYOD Service Instance which represent instance of service
definition created on manager.
:type tier0_id: :class:`str`
:param tier0_id: Tier-0 id (required)
:type locale_service_id: :class:`str`
:param locale_service_id: Locale service id (required)
:type service_instance_id: :class:`str`
:param service_instance_id: Service instance id (required)
:type byod_policy_service_instance: :class:`com.vmware.nsx_policy.model_client.ByodPolicyServiceInstance`
:param byod_policy_service_instance: (required)
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('patch',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
'service_instance_id': service_instance_id,
'byod_policy_service_instance': byod_policy_service_instance,
})
def update(self,
tier0_id,
locale_service_id,
service_instance_id,
byod_policy_service_instance,
):
"""
        Create a BYOD service instance, which represents an instance of a
        service definition created on the manager.
:type tier0_id: :class:`str`
:param tier0_id: Tier-0 id (required)
:type locale_service_id: :class:`str`
:param locale_service_id: Locale service id (required)
:type service_instance_id: :class:`str`
:param service_instance_id: Byod service instance id (required)
:type byod_policy_service_instance: :class:`com.vmware.nsx_policy.model_client.ByodPolicyServiceInstance`
:param byod_policy_service_instance: (required)
:rtype: :class:`com.vmware.nsx_policy.model_client.ByodPolicyServiceInstance`
:return: com.vmware.nsx_policy.model.ByodPolicyServiceInstance
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('update',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
'service_instance_id': service_instance_id,
'byod_policy_service_instance': byod_policy_service_instance,
})
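# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated bindings): paging
# through a cursor-based 'list' call such as the one defined in the class
# above. It assumes the returned ListResult exposes 'results' and 'cursor'
# fields, which is the usual shape of NSX policy list results; adjust the
# attribute names if your SDK version differs.
def _iter_paginated(list_call, tier0_id, locale_service_id):
    """Yield every item from a paginated NSX 'list' call, following the cursor."""
    cursor = None
    while True:
        page = list_call(tier0_id, locale_service_id, cursor=cursor)
        for item in page.results or []:
            yield item
        cursor = page.cursor
        if not cursor:  # an empty cursor means the last page has been read
            break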
class IpsecVpnServices(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx_policy.infra.tier_0s.locale_services.ipsec_vpn_services'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _IpsecVpnServicesStub)
self._VAPI_OPERATION_IDS = {}
def delete(self,
tier0_id,
locale_service_id,
service_id,
):
"""
Delete IPSec VPN service for given locale service under Tier-0.
:type tier0_id: :class:`str`
:param tier0_id: (required)
:type locale_service_id: :class:`str`
:param locale_service_id: (required)
:type service_id: :class:`str`
:param service_id: (required)
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('delete',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
'service_id': service_id,
})
def get(self,
tier0_id,
locale_service_id,
service_id,
):
"""
Get IPSec VPN service for given locale service under Tier-0.
:type tier0_id: :class:`str`
:param tier0_id: (required)
:type locale_service_id: :class:`str`
:param locale_service_id: (required)
:type service_id: :class:`str`
:param service_id: (required)
:rtype: :class:`com.vmware.nsx_policy.model_client.IPSecVpnService`
:return: com.vmware.nsx_policy.model.IPSecVpnService
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
'service_id': service_id,
})
def list(self,
tier0_id,
locale_service_id,
cursor=None,
include_mark_for_delete_objects=None,
included_fields=None,
page_size=None,
sort_ascending=None,
sort_by=None,
):
"""
Get paginated list of all IPSec VPN services for given locale service
under Tier-0.
:type tier0_id: :class:`str`
:param tier0_id: (required)
:type locale_service_id: :class:`str`
:param locale_service_id: (required)
:type cursor: :class:`str` or ``None``
:param cursor: Opaque cursor to be used for getting next page of records (supplied
by current result page) (optional)
:type include_mark_for_delete_objects: :class:`bool` or ``None``
:param include_mark_for_delete_objects: Include objects that are marked for deletion in results (optional,
default to false)
:type included_fields: :class:`str` or ``None``
:param included_fields: Comma separated list of fields that should be included in query
result (optional)
:type page_size: :class:`long` or ``None``
:param page_size: Maximum number of results to return in this page (server may return
fewer) (optional, default to 1000)
:type sort_ascending: :class:`bool` or ``None``
:param sort_ascending: (optional)
:type sort_by: :class:`str` or ``None``
:param sort_by: Field by which records are sorted (optional)
:rtype: :class:`com.vmware.nsx_policy.model_client.IPSecVpnServiceListResult`
:return: com.vmware.nsx_policy.model.IPSecVpnServiceListResult
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('list',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
'cursor': cursor,
'include_mark_for_delete_objects': include_mark_for_delete_objects,
'included_fields': included_fields,
'page_size': page_size,
'sort_ascending': sort_ascending,
'sort_by': sort_by,
})
def patch(self,
tier0_id,
locale_service_id,
service_id,
ip_sec_vpn_service,
):
"""
Create or patch IPSec VPN service for given locale service under
Tier-0.
:type tier0_id: :class:`str`
:param tier0_id: (required)
:type locale_service_id: :class:`str`
:param locale_service_id: (required)
:type service_id: :class:`str`
:param service_id: (required)
:type ip_sec_vpn_service: :class:`com.vmware.nsx_policy.model_client.IPSecVpnService`
:param ip_sec_vpn_service: (required)
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('patch',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
'service_id': service_id,
'ip_sec_vpn_service': ip_sec_vpn_service,
})
def update(self,
tier0_id,
locale_service_id,
service_id,
ip_sec_vpn_service,
):
"""
Create or fully replace IPSec VPN service for given locale service
under Tier-0. Revision is optional for creation and required for
update.
:type tier0_id: :class:`str`
:param tier0_id: (required)
:type locale_service_id: :class:`str`
:param locale_service_id: (required)
:type service_id: :class:`str`
:param service_id: (required)
        :type ip_sec_vpn_service: :class:`com.vmware.nsx_policy.model_client.IPSecVpnService`
        :param ip_sec_vpn_service: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.IPSecVpnService`
        :return: com.vmware.nsx_policy.model.IPSecVpnService
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        return self._invoke('update',
                            {
                            'tier0_id': tier0_id,
                            'locale_service_id': locale_service_id,
                            'service_id': service_id,
                            'ip_sec_vpn_service': ip_sec_vpn_service,
                            })
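# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated bindings): creating or
# patching an IPSec VPN service on a Tier-0 locale service and reading it
# back. The keyword arguments passed to IPSecVpnService ('display_name',
# 'enabled') are assumptions based on the docstrings above; check the
# model_client module of your SDK version, and supply a StubConfiguration
# built from an authenticated vAPI connection.
def _ensure_ipsec_vpn_service(stub_config, tier0_id, locale_service_id, service_id):
    """Create (or patch) an IPSec VPN service, then return the stored object."""
    from com.vmware.nsx_policy.model_client import IPSecVpnService
    client = IpsecVpnServices(stub_config)
    client.patch(tier0_id, locale_service_id, service_id,
                 IPSecVpnService(display_name=service_id, enabled=True))
    return client.get(tier0_id, locale_service_id, service_id)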
from __future__ import absolute_import, unicode_literals, print_function, division
import datetime
import decimal
import json
import logging
import re
from dateutil.parser import parse as date_parse
from django.contrib.gis.geos import Point
from future.utils import raise_with_traceback
from tableschema import Field as TableField
from tableschema import Schema as TableSchema
from main.constants import MODEL_SRID, SUPPORTED_DATUMS, get_datum_srid, is_supported_datum, get_australian_zone_srid, \
is_projected_srid, get_datum_and_zone
YYYY_MM_DD_REGEX = re.compile(r'^\d{4}-\d{2}-\d{2}')
logger = logging.getLogger(__name__)
def is_blank_value(value):
return value is None or is_empty_string(value)
def is_empty_string(value):
return isinstance(value, str) and len(value.strip()) == 0
class InvalidDateType(Exception):
pass
class ObservationSchemaError(Exception):
# don't extend InvalidSchemaError (problem with message not showing in the str method)
pass
class SpeciesObservationSchemaError(Exception):
pass
class GeometrySchemaError(Exception):
pass
class InvalidDatumError(Exception):
pass
class FieldSchemaError(Exception):
pass
def parse_datetime_day_first(value):
"""
    Use dateutil's parse() to parse a date/datetime, treating an ambiguous date as day-first (dd/mm/yyyy)
    rather than month-first (mm/dd/yyyy).
:param value:
:return:
"""
# there's a 'bug' in dateutil.parser.parse (2.5.3). If you are using
# dayfirst=True. It will parse YYYY-MM-DD as YYYY-DD-MM !!
# https://github.com/dateutil/dateutil/issues/268
dayfirst = not YYYY_MM_DD_REGEX.match(value)
return date_parse(value, dayfirst=dayfirst)
def cast_date_any_format(value):
if isinstance(value, datetime.date):
return value
try:
return parse_datetime_day_first(value).date()
except (TypeError, ValueError) as e:
raise_with_traceback(InvalidDateType(e))
def cast_datetime_any_format(value):
if isinstance(value, datetime.datetime):
return value
try:
return parse_datetime_day_first(value)
except (TypeError, ValueError) as e:
raise_with_traceback(InvalidDateType(e))
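# A small illustrative check of the day-first rule implemented above (not part
# of the original module): an ambiguous dd/mm string is read day-first, while
# an ISO yyyy-mm-dd string keeps its usual meaning. The sample values are made up.
def _demo_day_first_parsing():
    assert parse_datetime_day_first('01/02/2017').date() == datetime.date(2017, 2, 1)  # 1 February
    assert parse_datetime_day_first('2017-02-01').date() == datetime.date(2017, 2, 1)  # ISO order preserved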
def find_unique_field(schema, biosys_type, column_name):
"""
Precedence Rules:
1- Look for field with biosys.type = biosys_type
2- Look for a field with name column_name case insensitive
:return: (field, errors). field = None if not found. The only error is if the field is not unique.
"""
if not isinstance(schema, GenericSchema):
schema = GenericSchema(schema)
fields = [f for f in schema.fields if f.biosys.type == biosys_type]
if len(fields) > 1:
msg = "More than one Biosys type {} field found: {}".format(biosys_type, [f.name for f in fields])
return None, [msg]
if len(fields) == 1:
return fields[0], None
# no Biosys type field found. Search for column name
fields = [f for f in schema.fields if f.name.lower() == column_name.lower()]
if len(fields) > 1:
msg = "More than one field named {} found.".format(column_name)
return None, [msg]
if len(fields) == 1:
return fields[0], None
return None, None
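# Illustrative sketch (not part of the original module) of the precedence rules
# documented in find_unique_field: a field tagged with the requested biosys type
# wins over a field that merely matches the column name. The descriptor below is
# made up; GenericSchema is defined further down in this module.
def _demo_find_unique_field():
    descriptor = {
        'fields': [
            {'name': 'Latitude', 'type': 'number', 'biosys': {'type': 'latitude'}},
            {'name': 'lat', 'type': 'number'},
        ]
    }
    field, errors = find_unique_field(GenericSchema(descriptor), 'latitude', 'lat')
    return field.name, errors  # -> ('Latitude', None)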
class BiosysSchema:
"""
The utility class for the biosys data within a schema field
{
name: "...."
constraints: ....
biosys: {
type: observationDate|latitude|longitude|...
}
}
"""
BIOSYS_KEY_NAME = 'biosys'
OBSERVATION_DATE_TYPE_NAME = 'observationDate'
LATITUDE_TYPE_NAME = 'latitude'
LONGITUDE_TYPE_NAME = 'longitude'
EASTING_TYPE_NAME = 'easting'
NORTHING_TYPE_NAME = 'northing'
DATUM_TYPE_NAME = 'datum'
ZONE_TYPE_NAME = 'zone'
SITE_CODE_TYPE_NAME = 'siteCode'
SPECIES_NAME_TYPE_NAME = 'speciesName'
SPECIES_NAME_ID_TYPE_NAME = 'speciesNameId'
GENUS_TYPE_NAME = 'genus'
SPECIES_TYPE_NAME = 'species'
INFRA_SPECIFIC_RANK_TYPE_NAME = 'infraspecificRank'
INFRA_SPECIFIC_NAME_TYPE_NAME = 'infraspecificName'
def __init__(self, descriptor):
self.descriptor = descriptor or {}
# implement some dict like methods
def __getitem__(self, item):
return self.descriptor.__getitem__(item)
def __str__(self):
return "BiosysSchema: {}".format(self.descriptor)
@property
def type(self):
return self.get('type')
def get(self, k, d=None):
return self.descriptor.get(k, d)
def is_observation_date(self):
return self.type == self.OBSERVATION_DATE_TYPE_NAME
def is_latitude(self):
return self.type == self.LATITUDE_TYPE_NAME
def is_longitude(self):
return self.type == self.LONGITUDE_TYPE_NAME
def is_easting(self):
return self.type == self.EASTING_TYPE_NAME
def is_northing(self):
return self.type == self.NORTHING_TYPE_NAME
def is_datum(self):
return self.type == self.DATUM_TYPE_NAME
def is_zone(self):
return self.type == self.ZONE_TYPE_NAME
def is_species_name(self):
return self.type == self.SPECIES_NAME_TYPE_NAME
def is_species_name_id(self):
return self.type == self.SPECIES_NAME_ID_TYPE_NAME
def is_genus(self):
return self.type == self.GENUS_TYPE_NAME
def is_species(self):
return self.type == self.SPECIES_TYPE_NAME
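# Minimal illustrative sketch (not part of the original module): reading the
# biosys block of a field descriptor through BiosysSchema. The descriptor
# content is made up.
def _demo_biosys_schema():
    biosys = BiosysSchema({'type': 'latitude'})
    return biosys.is_latitude(), biosys.is_datum()  # -> (True, False)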
class SchemaField:
"""
Utility class for a field in a schema.
Uses a tableschema.Field (https://github.com/frictionlessdata/tableschema-py/blob/master/tableschema/field.py)
for help. It doesn't extend this class but compose with it, mostly for the use of the cast_value method.
"""
DATETIME_TYPES = ['date', 'datetime']
    TRUE_VALUES = ['TRUE', 'true', 'True', 'YES', 'yes', 'y', 'Y', 'Yes']
FALSE_VALUES = ['FALSE', 'false', 'False', 'NO', 'no', 'n', 'N', 'No']
def __init__(self, descriptor):
self.descriptor = self.__curate_descriptor(descriptor)
self.name = self.descriptor.get('name')
# We want to throw an exception if there is no name
if not self.name:
raise FieldSchemaError("A field without a name: {}".format(json.dumps(descriptor)))
# the tableschema field.
self.tableschema_field = TableField(self.descriptor)
# biosys specific
self.biosys = BiosysSchema(self.descriptor.get(BiosysSchema.BIOSYS_KEY_NAME))
self.constraints = SchemaConstraints(self.descriptor.get('constraints', {}))
# implement some dict like methods
def __getitem__(self, item):
return self.descriptor.__getitem__(item)
def get(self, k, d=None):
return self.descriptor.get(k, d)
@property
def title(self):
return self.descriptor.get('title')
@property
def type(self):
return self.descriptor.get('type')
@property
def column_name(self):
return self.name
@property
def required(self):
return self.constraints.required
@property
def aliases(self):
return self.descriptor['aliases'] if 'aliases' in self.descriptor else []
@property
def is_datetime_types(self):
return self.type in self.DATETIME_TYPES
@property
def is_date_type(self):
return self.type == 'date'
@property
def is_numeric(self):
return self.type in ['number', 'integer']
@property
def format(self):
return self.descriptor['format']
def has_alias(self, name, icase=False):
for alias in self.aliases:
if (alias == name) or (icase and alias.lower() == name.lower()):
return True
return False
def has_name_or_alias(self, name, alias, icase=False):
"""
        Test if the field has the name `name` or the alias `alias`.
:param name:
:param alias:
:param icase:
:return:
"""
has_name = (self.name == name) or (icase and self.name.lower() == name.lower())
return has_name or self.has_alias(alias, icase=icase)
def cast(self, value):
"""
        Returns a native Python object of the expected type. Will throw an exception
        if the value doesn't comply with the constraints.
This method delegates most of the cast to the tableschema.Field.cast_value. Except for
- date and dateTime with format='any'. This because the tableschema.Field.cast_value interprets an ambiguous
day/month/year date as month/day/year (american way)
:param value:
:return:
"""
# we want to strip strings
if isinstance(value, str):
value = value.strip()
        # date or datetime with format='any'
if self.is_datetime_types and self.format == 'any' and value:
return cast_date_any_format(value) if self.is_date_type else cast_datetime_any_format(value)
# delegates to tableschema.Field.cast_value
return self.tableschema_field.cast_value(value, constraints=True)
def validation_error(self, value):
"""
Return an error message if the value is not valid according to the schema.
        It relies on the exception thrown by the 'cast' method.
:param value:
:return: None if value is valid or an error message string
"""
error = None
# override the integer validation. The default message is a bit cryptic if there's an error casting a string
# like '1.2' into an int.
if self.type == 'integer':
if not is_blank_value(value):
not_integer = False
try:
casted = self.cast(value)
                    # there's also the case where a float like 1.2 is successfully cast to 1
                    # (e.g. int(1.2) == 1)
if str(casted) != str(value):
not_integer = True
except Exception:
not_integer = True
if not_integer:
return 'The field "{}" must be a whole number.'.format(self.name)
try:
self.cast(value)
except Exception as e:
error = "{}".format(e)
# Override the default enum exception message to include all possible values
            if 'enum array' in error and self.constraints.enum:
                values = [str(v) for v in self.constraints.enum]
                error = "The value must be one of the following: {}".format(values)
return error
def __curate_descriptor(self, descriptor):
"""
Apply some changes to the descriptor:
- Change default values for boolean (adding 'yes' and 'no')
Since TableSchema V1.0 the default true values are [ "true", "True", "TRUE", "1" ]
We want to be sure that 'yes' and 'no' (and variations) are included by default.
        The schema specification allows overriding the true and false values with 'trueValues' and 'falseValues'
(see https://frictionlessdata.io/specs/table-schema/)
"""
if descriptor.get('type') == 'boolean':
descriptor['trueValues'] = descriptor.get('trueValues', self.TRUE_VALUES)
descriptor['falseValues'] = descriptor.get('falseValues', self.FALSE_VALUES)
return descriptor
def __str__(self):
return '{}'.format(self.name)
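# Illustrative sketch (not part of the original module): casting and validating
# values through SchemaField, including the curated boolean true/false values.
# The field descriptors are made up; the exact cast behaviour is delegated to
# the tableschema library, so results may vary with its version.
def _demo_schema_field():
    alive = SchemaField({'name': 'Alive', 'type': 'boolean'})
    assert alive.cast('yes') is True   # 'yes' is added by the boolean curation
    assert alive.cast('NO') is False
    count = SchemaField({'name': 'Count', 'type': 'integer'})
    return count.validation_error('1.2')  # -> 'The field "Count" must be a whole number.'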
class SchemaConstraints:
"""
A helper class for a schema field constraints
"""
def __init__(self, descriptor):
self.descriptor = descriptor or {}
# implement some dict like methods
def __getitem__(self, item):
return self.descriptor.__getitem__(item)
def get(self, k, d=None):
return self.descriptor.get(k, d)
@property
def required(self):
return self.get('required', False)
@property
def enum(self):
return self.get('enum')
class SchemaForeignKey:
"""
A utility class for foreign key in schema
"""
def __init__(self, descriptor):
self.descriptor = descriptor
def __str__(self):
return 'Foreign Key {}'.format(self.descriptor)
# implement some dict like methods
def __getitem__(self, item):
return self.descriptor.__getitem__(item)
def get(self, k, d=None):
return self.descriptor.get(k, d)
@staticmethod
def _as_list(value):
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value]
else:
return list(value)
@property
def fields(self):
return self._as_list(self.descriptor.get('fields', []))
@property
def data_field(self):
return self.fields[0] if self.fields else None
@property
def parent_data_field_name(self):
# TODO: only one FK supported for now.
return self.reference_fields[0] if self.reference_fields else None
@property
def reference(self):
return self.descriptor.get('reference', {})
@property
def reference_fields(self):
return self._as_list(self.reference.get('fields', []))
@property
def reference_resource(self):
return self.reference.get('resource')
@property
def model(self):
return self.reference_resource
@property
def model_field(self):
return self.reference_fields[0] if self.reference_fields else None
class GenericSchema(object):
"""
A utility class for schema.
It uses internally an instance of the frictionless tableschema.Schema for help.
https://github.com/frictionlessdata/tableschema-py/blob/master/tableschema/schema.py
Will throw an exception | |
import logging
import sys
import numpy as np
import pandas as pd
import pyprind
import six
import warnings
from joblib import Parallel, delayed
from collections import deque, OrderedDict
import heapq
from itertools import chain
import py_entitymatching.catalog.catalog_manager as cm
from py_entitymatching.blocker.blocker import Blocker
from py_entitymatching.utils.catalog_helper import log_info, get_name_for_key, add_key_column
from py_entitymatching.utils.generic_helper import rem_nan
logger = logging.getLogger(__name__)
class SortedNeighborhoodBlocker(Blocker):
"""
WARNING: THIS IS AN EXPERIMENTAL CLASS. THIS CLASS IS NOT TESTED.
USE AT YOUR OWN RISK.
Blocks based on the sorted neighborhood blocking method
"""
def __init__(self):
# display warning message upon object initialization
print("WARNING: THIS IS AN EXPERIMENTAL COMMAND. THIS COMMAND IS NOT TESTED. USE AT YOUR OWN RISK.")
def block_tables(self, ltable, rtable, l_block_attr, r_block_attr, window_size=2,
l_output_attrs=None, r_output_attrs=None,
l_output_prefix='ltable_', r_output_prefix='rtable_',
allow_missing=False, verbose=False, n_jobs=1):
"""
WARNING: THIS IS AN EXPERIMENTAL COMMAND. THIS COMMAND IS NOT TESTED.
USE AT YOUR OWN RISK.
Blocks two tables based on sorted neighborhood.
        Finds tuple pairs from the left and right tables such that, when each table
        is sorted on its blocking attribute, the two tuples of a pair lie within a
        window of size `window_size` of each other. The blocking attribute must be
        created before calling this function.
Args:
ltable (DataFrame): The left input table.
rtable (DataFrame): The right input table.
l_block_attr (string): The blocking attribute for left table.
r_block_attr (string): The blocking attribute for right table.
window_size (int): size of sliding window. Defaults to 2
l_output_attrs (list): A list of attribute names from the left
table to be included in the
output candidate set (defaults to None).
r_output_attrs (list): A list of attribute names from the right
table to be included in the
output candidate set (defaults to None).
l_output_prefix (string): The prefix to be used for the attribute names
coming from the left table in the output
candidate set (defaults to 'ltable\_').
r_output_prefix (string): The prefix to be used for the attribute names
coming from the right table in the output
candidate set (defaults to 'rtable\_').
allow_missing (boolean): A flag to indicate whether tuple pairs
with missing value in at least one of the
blocking attributes should be included in
the output candidate set (defaults to
False). If this flag is set to True, a
tuple in ltable with missing value in the
blocking attribute will be matched with
every tuple in rtable and vice versa.
verbose (boolean): A flag to indicate whether the debug information
should be logged (defaults to False).
n_jobs (int): The number of parallel jobs to be used for computation
(defaults to 1). If -1 all CPUs are used. If 0 or 1,
no parallel computation is used at all, which is useful for
debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used (where n_cpus is the total number of CPUs in the
machine). Thus, for n_jobs = -2, all CPUs but one are used.
If (n_cpus + 1 + n_jobs) is less than 1, then no parallel
computation is used (i.e., equivalent to the default).
Returns:
A candidate set of tuple pairs that survived blocking (DataFrame).
Raises:
AssertionError: If `ltable` is not of type pandas
DataFrame.
AssertionError: If `rtable` is not of type pandas
DataFrame.
AssertionError: If `l_block_attr` is not of type string.
AssertionError: If `r_block_attr` is not of type string.
AssertionError: If `window_size` is not of type of
int or if window_size < 2.
AssertionError: If the values in `l_output_attrs` is not of type
string.
AssertionError: If the values in `r_output_attrs` is not of type
string.
AssertionError: If `l_output_prefix` is not of type
string.
AssertionError: If `r_output_prefix` is not of type
string.
AssertionError: If `verbose` is not of type
boolean.
AssertionError: If `allow_missing` is not of type boolean.
AssertionError: If `n_jobs` is not of type
int.
AssertionError: If `l_block_attr` is not in the ltable columns.
AssertionError: If `r_block_attr` is not in the rtable columns.
AssertionError: If `l_out_attrs` are not in the ltable.
AssertionError: If `r_out_attrs` are not in the rtable.
"""
# Warning that this code is still in alpha stage
# display warning message
print("WARNING: THIS IS AN EXPERIMENTAL COMMAND. THIS COMMAND IS NOT TESTED. USE AT YOUR OWN RISK.")
# validate data types of input parameters
self.validate_types_params_tables(ltable, rtable,
l_output_attrs, r_output_attrs,
l_output_prefix,
r_output_prefix, verbose, n_jobs)
# validate data types of input blocking attributes
self.validate_types_block_attrs(l_block_attr, r_block_attr)
# validate data type of allow_missing
self.validate_allow_missing(allow_missing)
# validate input parameters
self.validate_block_attrs(ltable, rtable, l_block_attr, r_block_attr)
self.validate_output_attrs(ltable, rtable, l_output_attrs,
r_output_attrs)
# get and validate required metadata
log_info(logger, 'Required metadata: ltable key, rtable key', verbose)
# check if ltable or rtable are empty.
if ltable.empty:
raise AssertionError('Left table is empty')
if rtable.empty:
raise AssertionError('Right table is empty')
# check if window_size < 2
if window_size < 2:
raise AssertionError(
'window_size is < 2')
# # get metadata
l_key, r_key = cm.get_keys_for_ltable_rtable(ltable, rtable, logger,
verbose)
# # validate metadata
cm._validate_metadata_for_table(ltable, l_key, 'ltable', logger,
verbose)
cm._validate_metadata_for_table(rtable, r_key, 'rtable', logger,
verbose)
# do blocking
# # determine number of processes to launch parallely
n_procs = self.get_num_procs(n_jobs, min(len(ltable), len(rtable)))
# handle potential missing values
c_missing = pd.DataFrame()
if n_procs <= 1:
# single process
c_splits, c_missing = _sn_block_tables_split(ltable, rtable, l_key, r_key,
l_block_attr, r_block_attr,
l_output_attrs, r_output_attrs,
allow_missing)
else:
# multiprocessing
# Split l and r into n_procs chunks.
# each core will get an l and an r, merge them, sort them.
            l_splits = np.array_split(ltable, n_procs)
            r_splits = np.array_split(rtable, n_procs)
p_answer = Parallel(n_jobs=n_procs)(
delayed(_sn_block_tables_split)(l_splits[i], r_splits[i], l_key, r_key,
l_block_attr, r_block_attr,
l_output_attrs, r_output_attrs,
allow_missing)
for i in range(n_procs))
c_splits, c_missing = zip(*p_answer)
c_splits = list(c_splits)
c_missing = pd.concat(c_missing)
# make a deque for the sliding window
sliding_window = deque()
result = []
c_missing = c_missing.to_dict(orient='records')
# Use generator function to merge sorted runs.
# If single core, generator is trivial (see fn below)
for row in _gen_iter_merge(c_splits):
row = row._asdict()
# if the sliding window is full, remove the largest. The new tuple will be
# compared against the (window_size-1) previously seen tuples.
# (if at the beginning just compare with whatever we have)
if len(sliding_window) >= window_size:
sliding_window.popleft()
# Now, iterate over the sliding window (plus any tuples missing BKV's,
# if that was called for):
for window_element in chain(sliding_window, c_missing):
                # Use local names for the two row dicts so that the ltable and
                # rtable DataFrames (still needed for the catalog update below)
                # are not shadowed inside this loop.
                l_tuple = window_element
                r_tuple = row
                # SN blocking is often implemented on a single table.
                # In this implementation, we are only considering tuples that have
                # one tuple from the left table and one tuple from the right table.
                # Thus, only keep candidates that span both tables.
                # However, the restriction is that matches need to be (left, right) so
                # if we end up with (right, left) flip it.
                if l_tuple["source"] != r_tuple["source"]:  # Span both tables
                    if l_tuple["source"] == 'r':  # Left is right, so flip it to make it sane again
                        l_tuple, r_tuple = r_tuple, l_tuple
                    merged = OrderedDict()
                    merged[l_output_prefix+"ID"] = l_tuple[l_key]
                    merged[r_output_prefix+"ID"] = r_tuple[r_key]
                    merged[l_output_prefix+l_key] = l_tuple[l_key]
                    merged[r_output_prefix+r_key] = r_tuple[r_key]
                    # # add l/r output attributes to the ordered dictionary
                    if l_output_attrs is not None:
                        for attr in l_output_attrs:
                            merged[l_output_prefix + attr] = l_tuple[attr]
                    if r_output_attrs is not None:
                        for attr in r_output_attrs:
                            merged[r_output_prefix + attr] = r_tuple[attr]
# # add the ordered dict to the list
result.append(merged)
sliding_window.append(row)
candset = pd.DataFrame(result, columns=result[0].keys())
# update catalog
key = get_name_for_key(candset.columns)
candset = add_key_column(candset, key)
cm.set_candset_properties(candset, key, l_output_prefix + l_key,
r_output_prefix + r_key, ltable, rtable)
return candset
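    # Illustrative usage sketch (not part of the original class): blocking two
    # tiny tables on a zip-code attribute. It relies on module helpers defined
    # elsewhere in this file (_sn_block_tables_split, _gen_iter_merge) and
    # assumes cm.set_key() is available in the catalog manager to register the
    # key columns; the table contents are made up. Remember that the class
    # itself is flagged as experimental.
    @staticmethod
    def _demo_block_tables():
        ltable = pd.DataFrame({'lid': [1, 2], 'zip': ['6000', '6163'], 'name': ['Ann', 'Bob']})
        rtable = pd.DataFrame({'rid': [7, 8], 'zip': ['6001', '6163'], 'name': ['Anne', 'Rob']})
        cm.set_key(ltable, 'lid')
        cm.set_key(rtable, 'rid')
        blocker = SortedNeighborhoodBlocker()
        # pairs whose sorted positions on 'zip' fall within a window of 3 survive
        return blocker.block_tables(ltable, rtable, 'zip', 'zip', window_size=3,
                                    l_output_attrs=['name'], r_output_attrs=['name'])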
@staticmethod
def block_candset(*args, **kwargs):
"""block_candset does not apply to sn_blocker, return unimplemented"""
# It isn't clear what SN on a candidate set would mean, throw an Assersion error
raise AssertionError('unimplemented')
@staticmethod
def block_tuples(*args, **kwargs):
"""block_tuples does not apply to sn_blocker, return unimplemented"""
# It also isn't clear what SN on a tuple pair would mean, throw an Assersion error
raise AssertionError('unimplemented')
# ------------------------------------------------------------
# utility functions specific to sorted neighborhood blocking
# validate the data types of the blocking attributes
@staticmethod
def validate_types_block_attrs(l_block_attr, r_block_attr):
"""validate the data types of the blocking attributes"""
if not isinstance(l_block_attr, six.string_types):
logger.error(
'Blocking attribute name of left table is not of type | |
{POS: ADP},
"<first-cjt>|PRP|@<OA": {POS: ADP},
"<first-cjt>|PRP|@<OC": {POS: ADP},
"<first-cjt>|PRP|@<PIV": {POS: ADP},
"<first-cjt>|PRP|@<PRED": {POS: ADP},
"<first-cjt>|PRP|@<SA": {POS: ADP},
"<first-cjt>|PRP|@<SC": {POS: ADP},
"<first-cjt>|PRP|@A<": {POS: ADP},
"<first-cjt>|PRP|@A<ARG": {POS: ADP},
"<first-cjt>|PRP|@ADVL": {POS: ADP},
"<first-cjt>|PRP|@ADVL>": {POS: ADP},
"<first-cjt>|PRP|@FS-APP": {POS: ADP},
"<first-cjt>|PRP|@FS-N<PRED": {POS: ADP},
"<first-cjt>|PRP|@H": {POS: ADP},
"<first-cjt>|PRP|@KOMP<": {POS: ADP},
"<first-cjt>|PRP|@N<": {POS: ADP},
"<first-cjt>|PRP|@N<ARG": {POS: ADP},
"<first-cjt>|PRP|@N<PRED": {POS: ADP},
"<first-cjt>|PRP|@OA>": {POS: ADP},
"<first-cjt>|PRP|@P<": {POS: ADP},
"<first-cjt>|PRP|@PASS": {POS: ADP},
"<first-cjt>|PRP|@PIV>": {POS: ADP},
"<first-cjt>|PRP|@PRED>": {POS: ADP},
"<first-cjt>|PRP|@QUE": {POS: ADP},
"<first-cjt>|PRP|@SC>": {POS: ADP},
"<first-cjt>|PRP|@STA": {POS: ADP},
"<first-cjt>|PRP|@UTT": {POS: ADP},
"<first-cjt>|PU|@PU": {POS: PUNCT},
"<first-cjt>|V|PCP|F|S|@N<": {POS: ADJ},
"<first-cjt>|V|PS|3S|IND|@P<": {POS: VERB},
"<foc>|ADV|@<ADVL": {POS: ADV},
"<foc>|ADV|@<FOC": {POS: ADV},
"<foc>|ADV|@>S": {POS: ADV},
"<foc>|ADV|@ADVL>": {POS: ADV},
"<foc>|ADV|@FOC>": {POS: ADV},
"<foc>|ADV|@PU": {POS: ADV},
"<hyphen>|<np-idf>|N|M|P|@<ACC": {POS: NOUN},
"<hyphen>|<obj>|PERS|M|3S|ACC|@<ACC": {POS: PRON},
"<hyphen>|<refl>|PERS|M/F|1S|DAT|@<DAT": {POS: PRON},
"<hyphen>|EC|@>N": {POS: PART},
"<hyphen>|PERS|F|3S|ACC|@<ACC": {POS: PRON},
"<hyphen>|PERS|F|3S|ACC|@<ACC-PASS": {POS: PRON},
"<hyphen>|PERS|M/F|3S/P|ACC|@<SUBJ": {POS: PRON},
"<hyphen>|PERS|M|3S|ACC|@<ACC-PASS": {POS: PRON},
"<hyphen>|PERS|M|3S|DAT|@<DAT": {POS: PRON},
"<hyphen>|PROP|F|P|@P<": {POS: PROPN},
"<ident>|<np-idf>|DET|F|S|@N<PRED": {POS: DET},
"<ident>|DET|F|P|@>N": {POS: DET},
"<ident>|DET|F|S|@>N": {POS: DET},
"<ident>|DET|F|S|@N<": {POS: DET},
"<ident>|DET|M|P|@>N": {POS: DET},
"<ident>|DET|M|P|@N<": {POS: DET},
"<ident>|DET|M|S|@>A": {POS: ADJ},
"<ident>|DET|M|S|@>N": {POS: DET},
"<ident>|DET|M|S|@N<": {POS: DET},
"<interr>|<quant>|ADV|@P<": {POS: ADV},
"<interr>|<quant>|DET|F|P|@>N": {POS: DET},
"<interr>|<quant>|DET|M|P|@>N": {POS: DET},
"<interr>|<quant>|DET|M|S|@>N": {POS: DET},
"<interr>|<quant>|DET|M|S|@ACC>": {POS: PRON},
"<interr>|<quant>|DET|M|S|@ADVL>": {POS: DET},
"<interr>|<quant>|DET|M|S|@P<": {POS: PRON},
"<interr>|ADV|@<ACC": {POS: ADV},
"<interr>|ADV|@<ADVL": {POS: ADV},
"<interr>|ADV|@>N": {POS: ADV},
"<interr>|ADV|@ADVL": {POS: ADV},
"<interr>|ADV|@ADVL>": {POS: ADV},
"<interr>|ADV|@N<PRED": {POS: ADV},
"<interr>|ADV|@P<": {POS: ADV},
"<interr>|ADV|@SA>": {POS: ADV},
"<interr>|ADV|@SC>": {POS: ADV},
"<interr>|ADV|@SUB": {POS: ADV},
"<interr>|DET|F|P|@>N": {POS: DET},
"<interr>|DET|F|P|@SC>": {POS: PRON},
"<interr>|DET|F|P|@SUBJ>": {POS: PRON},
"<interr>|DET|F|S|@<SC": {POS: PRON},
"<interr>|DET|F|S|@>N": {POS: DET},
"<interr>|DET|F|S|@SC>": {POS: PRON},
"<interr>|DET|M/F|S/P|@>A": {POS: DET},
"<interr>|DET|M/F|S|@SC>": {POS: PRON},
"<interr>|DET|M|P|@>N": {POS: DET},
"<interr>|DET|M|P|@SC>": {POS: PRON},
"<interr>|DET|M|S|@>N": {POS: DET},
"<interr>|DET|M|S|@SC>": {POS: PRON},
"<interr>|INDP|F|P|@ACC>": {POS: PRON},
"<interr>|INDP|F|S|@SC>": {POS: PRON},
"<interr>|INDP|M/F|P|@SUBJ>": {POS: PRON},
"<interr>|INDP|M/F|S/P|@P<": {POS: PRON},
"<interr>|INDP|M/F|S/P|@SUBJ>": {POS: PRON},
"<interr>|INDP|M/F|S|@<ACC": {POS: PRON},
"<interr>|INDP|M/F|S|@SC>": {POS: PRON},
"<interr>|INDP|M/F|S|@SUBJ>": {POS: PRON},
"<interr>|INDP|M|P|@SC>": {POS: PRON},
"<interr>|INDP|M|P|@SUBJ>": {POS: PRON},
"<interr>|INDP|M|S|@ACC>": {POS: PRON},
"<interr>|INDP|M|S|@P<": {POS: PRON},
"<interr>|INDP|M|S|@SC>": {POS: PRON},
"<interr>|INDP|M|S|@SUBJ>": {POS: PRON},
"<kc>|<-sam>|ADV|@P<": {POS: ADV},
"<kc>|<co-icl>|ADV|@CO": {POS: ADV},
"<kc>|<parkc-1>|<KOMP>|ADV|@CO": {POS: ADV},
"<kc>|ADV|@<ACC": {POS: ADV},
"<kc>|ADV|@<ADVL": {POS: ADV},
"<kc>|ADV|@<SA": {POS: ADV},
"<kc>|ADV|@<SC": {POS: ADV},
"<kc>|ADV|@>A": {POS: ADV},
"<kc>|ADV|@>N": {POS: ADV},
"<kc>|ADV|@>P": {POS: ADV},
"<kc>|ADV|@ADVL>": {POS: ADV},
"<kc>|ADV|@CO": {POS: ADV},
"<kc>|ADV|@P<": {POS: ADV},
"<kc>|ADV|@SA>": {POS: ADV},
"<kc>|ADV|@SUB": {POS: ADV},
"<kc>|KS|@ADVL>": {POS: SCONJ},
"<ks>|<rel>|ADV|@ADVL>": {POS: ADV},
"<ks>|<rel>|PRP|@ADVL>": {POS: ADP},
"<mente>|<NUM-ord>|ADJ|F|S|@>A": {POS: ADJ},
"<mente>|<NUM-ord>|ADJ|M/F|S|@ADVL>": {POS: ADJ},
"<mente>|ADJ|F|S|@<ADVL": {POS: ADJ},
"<meta>|<cjt>|<np-idf>|N|M|P|@<ACC": {POS: NOUN},
"<meta>|<first-cjt>|<np-def>|N|F|S|@P<": {POS: NOUN},
"<meta>|<np-def>|N|F|P|@<ACC": {POS: NOUN},
"<meta>|<np-def>|N|F|P|@P<": {POS: NOUN},
"<meta>|<np-def>|N|F|P|@SUBJ>": {POS: NOUN},
"<meta>|<np-def>|N|F|S|@<ACC": {POS: NOUN},
"<meta>|<np-def>|N|F|S|@<SC": {POS: NOUN},
"<meta>|<np-def>|N|F|S|@<SUBJ": {POS: NOUN},
"<meta>|<np-def>|N|F|S|@P<": {POS: NOUN},
"<meta>|<np-def>|N|F|S|@SUBJ>": {POS: NOUN},
"<meta>|<np-def>|N|M|P|@<ACC": {POS: NOUN},
"<meta>|<np-def>|N|M|P|@<SC": {POS: NOUN},
"<meta>|<np-def>|N|M|P|@<SUBJ": {POS: NOUN},
"<meta>|<np-def>|N|M|P|@P<": {POS: NOUN},
"<meta>|<np-def>|N|M|P|@SUBJ>": {POS: NOUN},
"<meta>|<np-def>|N|M|S|@<ACC": {POS: NOUN},
"<meta>|<np-def>|N|M|S|@<SC": {POS: NOUN},
"<meta>|<np-def>|N|M|S|@P<": {POS: NOUN},
"<meta>|<np-def>|N|M|S|@SUBJ>": {POS: NOUN},
"<meta>|<np-idf>|N|F|P|@P<": {POS: NOUN},
"<meta>|<np-idf>|N|F|S|@<ACC": {POS: NOUN},
"<meta>|<np-idf>|N|F|S|@<OC": {POS: NOUN},
"<meta>|<np-idf>|N|F|S|@<SC": {POS: NOUN},
"<meta>|<np-idf>|N|F|S|@<SUBJ": {POS: NOUN},
"<meta>|<np-idf>|N|F|S|@N<PRED": {POS: NOUN},
"<meta>|<np-idf>|N|F|S|@P<": {POS: NOUN},
"<meta>|<np-idf>|N|M|S|@<ACC": {POS: NOUN},
"<meta>|<np-idf>|N|M|S|@<SC": {POS: NOUN},
"<meta>|<np-idf>|N|M|S|@ACC>": {POS: NOUN},
"<meta>|<np-idf>|N|M|S|@NPHR": {POS: NOUN},
"<meta>|<np-idf>|N|M|S|@P<": {POS: NOUN},
"<meta>|<np-idf>|N|M|S|@SUBJ>": {POS: NOUN},
"<meta>|ADV|@>S": {POS: ADV},
"<meta>|ADV|@ADVL>": {POS: ADV},
"<mv>|<cjt>|<first-cjt>|V|FUT|3P|IND|@FS-N<": {POS: VERB},
"<mv>|<cjt>|<first-cjt>|V|FUT|3S|SUBJ|@FS-ADVL>": {POS: VERB},
"<mv>|<cjt>|<first-cjt>|V|INF|@ICL-P<": {POS: VERB},
"<mv>|<cjt>|<first-cjt>|V|PCP|@ICL-AUX<": {POS: VERB},
"<mv>|<cjt>|<first-cjt>|V|PR|3P|IND|@FS-P<": {POS: VERB},
"<mv>|<cjt>|<first-cjt>|V|PR|3P|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|<first-cjt>|V|PS|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|<hyphen>|<se-passive>|V|GER|@ICL-<ADVL": {POS: VERB},
"<mv>|<cjt>|<hyphen>|<se-passive>|V|IMPF|3S|IND|@FS-ACC>": {POS: VERB},
"<mv>|<cjt>|<hyphen>|<se-passive>|V|IMPF|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|<hyphen>|<se-passive>|V|INF|3P|@ICL-P<": {POS: VERB},
"<mv>|<cjt>|<hyphen>|<se-passive>|V|INF|@ICL-<ACC": {POS: VERB},
"<mv>|<cjt>|<hyphen>|<se-passive>|V|PR|3P|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|<hyphen>|<se-passive>|V|PR|3S|IND|@FS-<ACC": {POS: VERB},
"<mv>|<cjt>|<hyphen>|<se-passive>|V|PR|3S|IND|@FS-N<PRED": {POS: VERB},
"<mv>|<cjt>|<hyphen>|<se-passive>|V|PR|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|<hyphen>|<se-passive>|V|PS|3P|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|<hyphen>|<se-passive>|V|PS|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|<hyphen>|V|COND|3S|@FS-<ACC": {POS: VERB},
"<mv>|<cjt>|<hyphen>|V|GER|@ICL-<ADVL": {POS: VERB},
"<mv>|<cjt>|<hyphen>|V|GER|@ICL-ADVL>": {POS: VERB},
"<mv>|<cjt>|<hyphen>|V|IMPF|1S|IND|@FS-KOMP<": {POS: VERB},
"<mv>|<cjt>|<hyphen>|V|IMPF|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|<hyphen>|V|INF|@ICL-<SC": {POS: VERB},
"<mv>|<cjt>|<hyphen>|V|INF|@ICL-AUX<": {POS: VERB},
"<mv>|<cjt>|<hyphen>|V|INF|@ICL-P<": {POS: VERB},
"<mv>|<cjt>|<hyphen>|V|PR|1S|IND|@FS-ACC>": {POS: VERB},
"<mv>|<cjt>|<hyphen>|V|PR|3P|IND|@FS-ACC>": {POS: VERB},
"<mv>|<cjt>|<hyphen>|V|PR|3P|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|<hyphen>|V|PR|3S|IND|@FS-<ADVL": {POS: VERB},
"<mv>|<cjt>|<hyphen>|V|PR|3S|IND|@FS-N<PRED": {POS: VERB},
"<mv>|<cjt>|<hyphen>|V|PR|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|<hyphen>|V|PS|3P|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|<hyphen>|V|PS|3S|IND|@FS-<ACC": {POS: VERB},
"<mv>|<cjt>|<hyphen>|V|PS|3S|IND|@FS-ACC>": {POS: VERB},
"<mv>|<cjt>|<hyphen>|V|PS|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|<n>|V|PCP|M|P|@ICL-P<": {POS: VERB},
"<mv>|<cjt>|<n>|V|PCP|M|S|@ICL-P<": {POS: VERB},
"<mv>|<cjt>|<nil>|V|PS|3P|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|<sam->|V|PR|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|<se-passive>|V|PR|3P|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|<se-passive>|V|PR|3P|IND|@NPHR": {POS: VERB},
"<mv>|<cjt>|<se-passive>|V|PR|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|<se-passive>|V|PS|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|COND|1S|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|COND|3S|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|FUT|1S|IND|@FS-<ACC": {POS: VERB},
"<mv>|<cjt>|V|FUT|3P|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|FUT|3S|IND|@<ACC": {POS: VERB},
"<mv>|<cjt>|V|FUT|3S|IND|@FS-N<": {POS: VERB},
"<mv>|<cjt>|V|FUT|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|FUT|3S|SUBJ|@FS-ADVL>": {POS: VERB},
"<mv>|<cjt>|V|GER|@ADVL>": {POS: VERB},
"<mv>|<cjt>|V|GER|@ICL-<ADVL": {POS: VERB},
"<mv>|<cjt>|V|GER|@ICL-<SC": {POS: VERB},
"<mv>|<cjt>|V|GER|@ICL-A<": {POS: VERB},
"<mv>|<cjt>|V|GER|@ICL-ADVL>": {POS: VERB},
"<mv>|<cjt>|V|GER|@ICL-AUX<": {POS: VERB},
"<mv>|<cjt>|V|GER|@ICL-N<": {POS: VERB},
"<mv>|<cjt>|V|GER|@ICL-N<PRED": {POS: VERB},
"<mv>|<cjt>|V|GER|@ICL-PRED>": {POS: VERB},
"<mv>|<cjt>|V|GER|@ICL-STA": {POS: VERB},
"<mv>|<cjt>|V|GER|@N<": {POS: VERB},
"<mv>|<cjt>|V|GER|@PRED>": {POS: VERB},
"<mv>|<cjt>|V|IMPF|1P|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|IMPF|1S|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|IMPF|3P|IND|@FS-ADVL>": {POS: VERB},
"<mv>|<cjt>|V|IMPF|3P|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|IMPF|3S|IND|@<ACC": {POS: VERB},
"<mv>|<cjt>|V|IMPF|3S|IND|@FS-ADVL>": {POS: VERB},
"<mv>|<cjt>|V|IMPF|3S|IND|@FS-N<": {POS: VERB},
"<mv>|<cjt>|V|IMPF|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|IMPF|3S|IND|@ICL-<ACC": {POS: VERB},
"<mv>|<cjt>|V|IMPF|3S|SUBJ|@FS-<ACC": {POS: VERB},
"<mv>|<cjt>|V|INF|3P|@ICL-AUX<": {POS: VERB},
"<mv>|<cjt>|V|INF|3P|@ICL-P<": {POS: VERB},
"<mv>|<cjt>|V|INF|@<ADVL": {POS: VERB},
"<mv>|<cjt>|V|INF|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|INF|@ICL-<ACC": {POS: VERB},
"<mv>|<cjt>|V|INF|@ICL-<SC": {POS: VERB},
"<mv>|<cjt>|V|INF|@ICL-<SUBJ": {POS: VERB},
"<mv>|<cjt>|V|INF|@ICL-APP": {POS: VERB},
"<mv>|<cjt>|V|INF|@ICL-AUX<": {POS: VERB},
"<mv>|<cjt>|V|INF|@ICL-N<PRED": {POS: VERB},
"<mv>|<cjt>|V|INF|@ICL-P<": {POS: VERB},
"<mv>|<cjt>|V|INF|@ICL-SC>": {POS: VERB},
"<mv>|<cjt>|V|INF|@ICL-STA": {POS: VERB},
"<mv>|<cjt>|V|INF|@ICL-SUBJ>": {POS: VERB},
"<mv>|<cjt>|V|MQP|1/3S|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|MQP|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|PCP|@ICL-AUX<": {POS: VERB},
"<mv>|<cjt>|V|PCP|F|P|@ICL-<OC": {POS: VERB},
"<mv>|<cjt>|V|PCP|F|P|@ICL-AUX<": {POS: VERB},
"<mv>|<cjt>|V|PCP|F|P|@ICL-N<": {POS: VERB},
"<mv>|<cjt>|V|PCP|F|P|@ICL-N<PRED": {POS: ADJ},
"<mv>|<cjt>|V|PCP|F|S|@<SC": {POS: VERB},
"<mv>|<cjt>|V|PCP|F|S|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|PCP|F|S|@ICL-AUX<": {POS: VERB},
"<mv>|<cjt>|V|PCP|F|S|@ICL-N<": {POS: VERB},
"<mv>|<cjt>|V|PCP|F|S|@ICL-UTT": {POS: VERB},
"<mv>|<cjt>|V|PCP|F|S|@N<PRED": {POS: VERB},
"<mv>|<cjt>|V|PCP|M|P|@ICL-<OC": {POS: VERB},
"<mv>|<cjt>|V|PCP|M|P|@ICL-<PRED": {POS: ADJ},
"<mv>|<cjt>|V|PCP|M|P|@ICL-<SC": {POS: VERB},
"<mv>|<cjt>|V|PCP|M|P|@ICL-AUX<": {POS: VERB},
"<mv>|<cjt>|V|PCP|M|P|@ICL-N<": {POS: VERB},
"<mv>|<cjt>|V|PCP|M|P|@N<": {POS: VERB},
"<mv>|<cjt>|V|PCP|M|S|@ICL-<OC": {POS: VERB},
"<mv>|<cjt>|V|PCP|M|S|@ICL-<SC": {POS: VERB},
"<mv>|<cjt>|V|PCP|M|S|@ICL-AUX<": {POS: VERB},
"<mv>|<cjt>|V|PCP|M|S|@ICL-N<": {POS: VERB},
"<mv>|<cjt>|V|PCP|M|S|@ICL-N<PRED": {POS: VERB},
"<mv>|<cjt>|V|PCP|M|S|@ICL-PRED>": {POS: ADJ},
"<mv>|<cjt>|V|PCP|M|S|@N<": {POS: VERB},
"<mv>|<cjt>|V|PCP|M|S|@N<PRED": {POS: VERB},
"<mv>|<cjt>|V|PCP|M|S|@PRED>": {POS: VERB},
"<mv>|<cjt>|V|PR|1/3S|SUBJ|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|PR|1P|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|PR|1S|IND|@FS-<ADVL": {POS: AUX},
"<mv>|<cjt>|V|PR|1S|IND|@FS-ACC>": {POS: VERB},
"<mv>|<cjt>|V|PR|1S|IND|@FS-QUE": {POS: VERB},
"<mv>|<cjt>|V|PR|1S|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|PR|1|SUBJ|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|PR|3P|IND|@FS-<ACC": {POS: VERB},
"<mv>|<cjt>|V|PR|3P|IND|@FS-ACC>": {POS: VERB},
"<mv>|<cjt>|V|PR|3P|IND|@FS-N<": {POS: VERB},
"<mv>|<cjt>|V|PR|3P|IND|@FS-N<PRED": {POS: VERB},
"<mv>|<cjt>|V|PR|3P|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|PR|3P|SUBJ|@FS-<ACC": {POS: VERB},
"<mv>|<cjt>|V|PR|3P|SUBJ|@FS-<ADVL": {POS: VERB},
"<mv>|<cjt>|V|PR|3P|SUBJ|@FS-N<": {POS: AUX},
"<mv>|<cjt>|V|PR|3S|IND|@ADVL>": {POS: VERB},
"<mv>|<cjt>|V|PR|3S|IND|@FS-<ACC": {POS: VERB},
"<mv>|<cjt>|V|PR|3S|IND|@FS-<ADVL": {POS: VERB},
"<mv>|<cjt>|V|PR|3S|IND|@FS-<SUBJ": {POS: VERB},
"<mv>|<cjt>|V|PR|3S|IND|@FS-ACC>": {POS: VERB},
"<mv>|<cjt>|V|PR|3S|IND|@FS-EXC": {POS: VERB},
"<mv>|<cjt>|V|PR|3S|IND|@FS-KOMP<": {POS: VERB},
"<mv>|<cjt>|V|PR|3S|IND|@FS-N<": {POS: VERB},
"<mv>|<cjt>|V|PR|3S|IND|@FS-N<PRED": {POS: VERB},
"<mv>|<cjt>|V|PR|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|PR|3S|IND|@N<": {POS: VERB},
"<mv>|<cjt>|V|PR|3S|IND|@NPHR": {POS: VERB},
"<mv>|<cjt>|V|PR|3S|SUBJ|@FS-ACC>": {POS: VERB},
"<mv>|<cjt>|V|PR|3S|SUBJ|@FS-N<PRED": {POS: VERB},
"<mv>|<cjt>|V|PR|3S|SUBJ|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|PS/MQP|3P|IND|@P<": {POS: VERB},
"<mv>|<cjt>|V|PS|1P|IND|@FS-<SUBJ": {POS: VERB},
"<mv>|<cjt>|V|PS|1P|IND|@FS-ACC>": {POS: VERB},
"<mv>|<cjt>|V|PS|1P|IND|@FS-N<PRED": {POS: VERB},
"<mv>|<cjt>|V|PS|1P|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|PS|1S|IND|@FS-N<PRED": {POS: VERB},
"<mv>|<cjt>|V|PS|1S|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|PS|3P|IND|@FS-STA": {POS: VERB},
"<mv>|<cjt>|V|PS|3P|IND|@N<": {POS: VERB},
"<mv>|<cjt>|V|PS|3S|IND|@FS-<ACC": {POS: VERB},
"<mv>|<cjt>|V|PS|3S|IND|@FS-<ADVL": {POS: VERB},
"<mv>|<cjt>|V|PS|3S|IND|@FS-<SUBJ": {POS: VERB},
"<mv>|<cjt>|V|PS|3S|IND|@FS-ACC>": {POS: VERB},
"<mv>|<cjt>|V|PS|3S|IND|@FS-N<": {POS: VERB},
"<mv>|<cjt>|V|PS|3S|IND|@FS-N<PRED": {POS: VERB},
"<mv>|<cjt>|V|PS|3S|IND|@FS-P<": {POS: VERB},
"<mv>|<cjt>|V|PS|3S|IND|@FS-QUE": {POS: VERB},
"<mv>|<cjt>|V|PS|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|<cjt>|PERS|M/F|3S|ACC|@<ACC-PASS": {POS: PRON},
"<mv>|<first-cjt>|<cjt>|V|INF|@ICL-N<PRED": {POS: VERB},
"<mv>|<first-cjt>|<cjt>|V|PCP|M|S|@ICL-P<": {POS: VERB},
"<mv>|<first-cjt>|<cjt>|V|PR|1S|IND|@FS-QUE": {POS: VERB},
"<mv>|<first-cjt>|<cjt>|V|PR|3S|IND|@FS-KOMP<": {POS: VERB},
"<mv>|<first-cjt>|<cjt>|V|PR|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|<se-passive>|<cjt>|V|IMPF|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|<se-passive>|V|GER|@ICL-<ADVL": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|<se-passive>|V|INF|@ICL-<ACC": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|<se-passive>|V|INF|@ICL-P<": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|<se-passive>|V|PR|3P|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|<se-passive>|V|PR|3S|IND|@FS-ACC>": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|<se-passive>|V|PR|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|<se-passive>|V|PS/MQP|3P|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|<se-passive>|V|PS|3P|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|<se-passive>|V|PS|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|V|GER|@ICL-<ADVL": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|V|IMPF|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|V|INF|1S|@ICL-P<": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|V|INF|@ICL-<SUBJ": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|V|INF|@ICL-P<": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|V|PR|3P|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|V|PR|3S|IND|@FS-ACC>": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|V|PR|3S|IND|@FS-N<PRED": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|V|PR|3S|IND|@FS-P<": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|V|PR|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|V|PR|3S|SUBJ|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|V|PS|2S|IND|@FS-QUE": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|V|PS|3P|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|V|PS|3S|IND|@FS-<ACC": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|V|PS|3S|IND|@FS-ACC>": {POS: VERB},
"<mv>|<first-cjt>|<hyphen>|V|PS|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|<n>|V|INF|3S|@ICL-P<": {POS: VERB},
"<mv>|<first-cjt>|<n>|V|INF|@ICL-<ACC": {POS: VERB},
"<mv>|<first-cjt>|<n>|V|PCP|F|P|@ICL-<ACC": {POS: VERB},
"<mv>|<first-cjt>|<n>|V|PCP|F|S|@ICL-APP": {POS: VERB},
"<mv>|<first-cjt>|<n>|V|PCP|M|P|@ICL-<ACC": {POS: VERB},
"<mv>|<first-cjt>|<n>|V|PCP|M|P|@ICL-KOMP<": {POS: VERB},
"<mv>|<first-cjt>|<n>|V|PCP|M|P|@ICL-P<": {POS: VERB},
"<mv>|<first-cjt>|<n>|V|PCP|M|S|@ICL-KOMP<": {POS: VERB},
"<mv>|<first-cjt>|<nil>|V|PCP|F|S|@ICL-N<PRED": {POS: VERB},
"<mv>|<first-cjt>|<pc-ind>|V|PCP|@ICL-AUX<": {POS: VERB},
"<mv>|<first-cjt>|<se-passive>|V|INF|3S|@ICL-P<": {POS: VERB},
"<mv>|<first-cjt>|<se-passive>|V|PR|3S|IND|@FS-<ACC": {POS: VERB},
"<mv>|<first-cjt>|<se-passive>|V|PR|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|V|COND|3S|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|V|FUT|1/3S|SUBJ|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|V|FUT|3P|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|V|FUT|3S|IND|@FS-N<": {POS: VERB},
"<mv>|<first-cjt>|V|FUT|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|V|FUT|3S|IND|@FS-UTT": {POS: VERB},
"<mv>|<first-cjt>|V|FUT|3S|SUBJ|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|V|FUT|3S|SUBJ|@FS-UTT": {POS: VERB},
"<mv>|<first-cjt>|V|GER|@ICL-<ACC": {POS: VERB},
"<mv>|<first-cjt>|V|GER|@ICL-<ADVL": {POS: VERB},
"<mv>|<first-cjt>|V|GER|@ICL-ADVL>": {POS: VERB},
"<mv>|<first-cjt>|V|GER|@ICL-AUX<": {POS: VERB},
"<mv>|<first-cjt>|V|GER|@ICL-N<": {POS: VERB},
"<mv>|<first-cjt>|V|GER|@ICL-PRED>": {POS: VERB},
"<mv>|<first-cjt>|V|GER|@ICL-STA": {POS: VERB},
"<mv>|<first-cjt>|V|GER|@ICL-UTT": {POS: VERB},
"<mv>|<first-cjt>|V|IMPF|1P|IND|@FS-ACC>": {POS: VERB},
"<mv>|<first-cjt>|V|IMPF|1P|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|V|IMPF|1S|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|V|IMPF|3P|IND|@FS-P<": {POS: VERB},
"<mv>|<first-cjt>|V|IMPF|3P|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|V|IMPF|3S|IND|@FS-<ACC": {POS: AUX},
"<mv>|<first-cjt>|V|IMPF|3S|IND|@FS-APP": {POS: VERB},
"<mv>|<first-cjt>|V|IMPF|3S|IND|@FS-N<": {POS: VERB},
"<mv>|<first-cjt>|V|IMPF|3S|IND|@FS-N<PRED": {POS: VERB},
"<mv>|<first-cjt>|V|IMPF|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|V|IMPF|3S|SUBJ|@FS-<ACC": {POS: VERB},
"<mv>|<first-cjt>|V|INF|@ICL-<ACC": {POS: VERB},
"<mv>|<first-cjt>|V|INF|@ICL-<PIV": {POS: VERB},
"<mv>|<first-cjt>|V|INF|@ICL-<SC": {POS: VERB},
"<mv>|<first-cjt>|V|INF|@ICL-<SUBJ": {POS: VERB},
"<mv>|<first-cjt>|V|INF|@ICL-APP": {POS: VERB},
"<mv>|<first-cjt>|V|INF|@ICL-AUX<": {POS: VERB},
"<mv>|<first-cjt>|V|INF|@ICL-N<PRED": {POS: VERB},
"<mv>|<first-cjt>|V|INF|@ICL-P<": {POS: VERB},
"<mv>|<first-cjt>|V|INF|@ICL-QUE": {POS: VERB},
"<mv>|<first-cjt>|V|INF|@ICL-SC>": {POS: VERB},
"<mv>|<first-cjt>|V|INF|@ICL-SUBJ>": {POS: VERB},
"<mv>|<first-cjt>|V|INF|@ICL-UTT": {POS: VERB},
"<mv>|<first-cjt>|V|MQP|1/3S|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|V|MQP|1S|IND|@FS-STA": {POS: AUX},
"<mv>|<first-cjt>|V|MQP|3S|IND|@FS-STA": {POS: VERB},
"<mv>|<first-cjt>|V|PCP|@ICL-AUX<": {POS: VERB},
"<mv>|<first-cjt>|V|PCP|F|P|@ICL-<SC": {POS: VERB},
"<mv>|<first-cjt>|V|PCP|F|P|@ICL-AUX<": {POS: VERB},
"<mv>|<first-cjt>|V|PCP|F|P|@ICL-N<": {POS: VERB},
"<mv>|<first-cjt>|V|PCP|F|P|@ICL-N<PRED": {POS: VERB},
"<mv>|<first-cjt>|V|PCP|F|S|@ICL-<SC": {POS: VERB},
"<mv>|<first-cjt>|V|PCP|F|S|@ICL->N": {POS: ADJ},
"<mv>|<first-cjt>|V|PCP|F|S|@ICL-AUX<": {POS: VERB},
"<mv>|<first-cjt>|V|PCP|F|S|@ICL-N<": {POS: VERB},
"<mv>|<first-cjt>|V|PCP|F|S|@ICL-N<PRED": {POS: VERB},
"<mv>|<first-cjt>|V|PCP|F|S|@ICL-PRED>": {POS: VERB},
"<mv>|<first-cjt>|V|PCP|F|S|@ICL-UTT": {POS: VERB},
"<mv>|<first-cjt>|V|PCP|M|P|@ICL-<SC": {POS: ADJ},
"<mv>|<first-cjt>|V|PCP|M|P|@ICL-AUX<": {POS: VERB},
"<mv>|<first-cjt>|V|PCP|M|P|@ICL-N<": {POS: VERB},
"<mv>|<first-cjt>|V|PCP|M|P|@ICL-N<PRED": {POS: VERB},
"<mv>|<first-cjt>|V|PCP|M|P|@ICL-PRED>": {POS: VERB},
"<mv>|<first-cjt>|V|PCP|M|S|@ICL-<OC": {POS: VERB},
"<mv>|<first-cjt>|V|PCP|M|S|@ICL-<SC": {POS: ADJ},
"<mv>|<first-cjt>|V|PCP|M|S|@ICL-AUX<": {POS: VERB},
"<mv>|<first-cjt>|V|PCP|M|S|@ICL-N<": {POS: VERB},
"<mv>|<first-cjt>|V|PCP|M|S|@ICL-N<PRED": {POS: VERB},
"<mv>|<first-cjt>|V|PCP|M|S|@ICL-PRED>": {POS: VERB},
| |
from collections import deque
import pickle
class EpicTree:
"""Epic Tree module"""
# Constructor
def __init__(self, filename=''):
self.tree = {}
self.materialised_paths = []
self.garbage = []
# Load data file if provided
if filename != '':
self.tree = pickle.load(open(filename, "rb"))
# region Trees
def add_tree(self, tree_id):
"""Add Tree"""
if tree_id in self.tree:
raise KeyError('Tree ' + str(tree_id) + ' already exists')
self.tree[tree_id] = {}
return
def remove_tree(self, tree_id):
"""Remove Tree"""
# Non-atomic function, so we use try..except
try:
del self.tree[tree_id]
except KeyError:
raise KeyError('Tree ' + str(tree_id) + ' does not exist')
# No GC as we killed the entire structure for the org
# Materialise
self.materialised_paths = [x for x in self.materialised_paths if not x.startswith(str(tree_id) + '/')]
return
def get_trees(self):
"""Get list of tree IDs"""
return [a for a, data in iter(self.tree.items())]
# endregion
# region Segments
def get_segments(self, tree_id):
"""Get the segments that belong to a tree"""
if tree_id not in self.tree:
raise KeyError('Tree ' + str(tree_id) + ' doesn\'t exist')
return [a for a, data in iter(self.tree[tree_id].items())]
def add_segment(self, tree_id, segment_id, root_node_id):
"""Segment adding"""
if tree_id not in self.tree:
raise KeyError('Tree ' + str(tree_id) + ' doesn\'t exist')
if segment_id in self.tree[tree_id]:
raise KeyError('Segment ' + str(segment_id) + ' already exists')
self.tree[tree_id][segment_id] = {root_node_id: (None, 'root', None, 1, None)}
self.materialised_paths.append(str(tree_id) + '/' + str(segment_id))
self.materialised_paths.append(str(tree_id) + '/' + str(segment_id) + '/' + str(root_node_id))
return
def remove_segment(self, tree_id, segment_id):
"""Segment removal"""
# Non-atomic function, so we use try..except
try:
root_node_id = self.get_segment_root_node(tree_id, segment_id)
del self.tree[tree_id][segment_id]
# GC (from root node)
self.garbage.append((tree_id, segment_id, root_node_id))
except KeyError:
raise KeyError('Segment ' + str(segment_id) + ' does not exist')
# Materialise
self.materialised_paths = [
x for x in self.materialised_paths
if not x.startswith(str(tree_id) + '/' + str(segment_id) + '/')
and x != str(tree_id) + '/' + str(segment_id)
]
return
def duplicate_segment(self, tree_id, from_segment_id, to_segment_id, segment_structure):
"""Segment duplication"""
if from_segment_id not in self.tree[tree_id]:
            raise KeyError('Segment ' + str(from_segment_id) + ' not found in tree when trying to duplicate it')
old_segment = self.tree[tree_id][from_segment_id]
self.tree[tree_id][to_segment_id] = old_segment
# TODO: Copy children! (segment_structure is a dict with hierarchical tree of new node ids)
# TODO: materialised path
return
def get_segment_root_node(self, tree_id, segment_id):
"""Find root node ID in segment"""
# TODO: Search materialised path first
# Does the segment exist?
if tree_id not in self.tree:
raise KeyError('Tree ' + str(tree_id) + ' doesn\'t exist')
if segment_id not in self.tree[tree_id]:
raise KeyError('Segment ' + str(segment_id) + ' not found')
# Search for the root
root_node_id = None
nodes = self.tree[tree_id][segment_id]
for node_id, node in iter(nodes.items()):
if node[1] == 'root':
root_node_id = node_id
break
return root_node_id
# endregion
# region Retrieval
def get_level(self, tree_id, segment_id, parent_node_id):
"""Get Level (children of a parent node)"""
# TODO: Search materialised path first
# Does the segment exist?
if tree_id not in self.tree:
raise KeyError('Tree ' + str(tree_id) + ' doesn\'t exist')
if segment_id not in self.tree[tree_id]:
raise KeyError('Segment ' + str(segment_id) + ' doesn\'t exist')
if parent_node_id not in self.tree[tree_id][segment_id]:
raise KeyError('Parent node ' + str(parent_node_id) + ' doesn\'t exist')
# Get the children IDs
parent_node = self.tree[tree_id][segment_id][parent_node_id]
children_ids = parent_node[4]
# Iterate through the list and build a nice array
results = []
if children_ids is not None:
for child_id in children_ids:
results.append({'id': child_id, 'child': self.tree[tree_id][segment_id][child_id]})
return results
def get_breadcrumbs(self, tree_id, segment_id, node_id):
"""Get Breadcrumbs (find ancestors)"""
# TODO: Search materialised path first
# Does the segment exist?
if tree_id not in self.tree:
raise KeyError('Tree ' + str(tree_id) + ' doesn\'t exist')
if segment_id not in self.tree[tree_id]:
raise KeyError('Segment ' + str(segment_id) + ' doesn\'t exist')
if node_id not in self.tree[tree_id][segment_id]:
raise KeyError('Node ' + str(node_id) + ' doesn\'t exist')
# Find node by backtracing
node = self.tree[tree_id][segment_id][node_id]
parent_node_id = node[0]
path = deque([node_id])
if parent_node_id is not None:
path.extendleft(reversed(self.get_breadcrumbs(tree_id, segment_id, parent_node_id)))
return list(path)
def get_tree_from_node(self, tree_id, segment_id, parent_node_id):
"""
Get tree (starting from a node) - sorted!
:param tree_id: int
:param segment_id: int
:param parent_node_id: int
:return: []
"""
# TODO: A lot of pressure is on you my little friend :) Too many functions depend on you!
return
def get_tree_from_segment(self, tree_id, segment_id):
"""
Get tree (full segment) - sorted!
:param tree_id: int
:param segment_id: int
:return: []
"""
# Does the segment exist?
if tree_id not in self.tree:
raise KeyError('Tree ' + str(tree_id) + ' doesn\'t exist')
if segment_id not in self.tree[tree_id]:
raise KeyError('Segment ' + str(segment_id) + ' doesn\'t exist')
root_node_id = self.get_segment_root_node(tree_id, segment_id)
return self.get_tree_from_node(tree_id, segment_id, root_node_id)
def get_tree_from_segments(self, tree_id, segment_ids):
"""Get tree (set of segments) - sorted!"""
# Does the segment exist?
if tree_id not in self.tree:
raise KeyError('Tree ' + str(tree_id) + ' doesn\'t exist')
for segment_id in segment_ids:
if segment_id not in self.tree[tree_id]:
raise KeyError('Segment ' + str(segment_id) + ' doesn\'t exist')
# Build structure (array containing trees representing each segment)
results = {}
for segment_id in segment_ids:
results[segment_id] = self.get_tree_from_segment(tree_id, segment_id)
return results
def get_tree(self, tree_id):
"""Get tree"""
# Does the segment exist?
if tree_id not in self.tree:
raise KeyError('Tree ' + str(tree_id) + ' doesn\'t exist')
segment_ids = self.get_segments(tree_id)
return self.get_tree_from_segments(tree_id, segment_ids)
def get_everything(self):
"""Get tree (everything)"""
tree_ids = self.get_trees()
results = {}
for tree_id in tree_ids:
results[tree_id] = self.get_tree(tree_id)
return results
# endregion
# region Directories
def add_directory(self, tree_id, segment_id, parent_node_id, node_id, sort, children):
"""Directory adding (optional sort which causes re-sorting, otherwise placed at end)"""
self.add_node(tree_id, segment_id, parent_node_id, node_id, sort, children, 'dir', None)
return
def remove_directory(self, tree_id, segment_id, node_id):
"""Directory removal"""
self.remove_node(tree_id, segment_id, node_id)
return
def duplicate_directory(self, tree_id, segment_id, node_id):
"""Directory duplication (re-sort only if duplicating in same level + materialise + copy all children)"""
# TODO
return
def move_directory(self, tree_id, segment_id, node_id, target_parent_id, sort):
"""Move directory to child, or another segment (add existing folder in another)"""
# Moving to another segment causes all children to be transported (with new ids)
# TODO
return
# endregion
# region Nodes
def add_node(self, tree_id, segment_id, parent_node_id, node_id, sort, children, node_type, payload):
"""Node adding (optional sort which causes re-sorting, otherwise placed at end)"""
# Does the segment exist?
if tree_id not in self.tree:
raise KeyError('Tree ' + str(tree_id) + ' doesn\'t exist')
if segment_id not in self.tree[tree_id]:
            raise KeyError('Segment ' + str(segment_id) + ' doesn\'t exist')
# Get parent and level
re_sort = True
parent_node = self.tree[tree_id][segment_id][parent_node_id]
level_nodes = parent_node[4]
# If parent is not dir or root, don't allow addition
if parent_node[1] != 'root' and parent_node[1] != 'dir':
raise Exception('Can\'t add child node to non-directory or non-root node')
# If sort is set, but this is the first child in level, always use 1
if sort is not None:
if level_nodes is None or len(level_nodes) == 0:
sort = 1
re_sort = False
# If sort is set, but exceeds maximum current sort in level by more than 1 (or is equal), set to max + 1
if sort is not None:
if level_nodes is not None and len(level_nodes) > 1:
max_sort = self._get_max_sort_at_level(tree_id, segment_id, level_nodes)
if sort > (max_sort + 1) or sort == max_sort:
sort = max_sort + 1
re_sort = False
elif level_nodes is not None and len(level_nodes) == 1:
# New node is about to be added, we can assume first one has sort of 1
sort = 2
re_sort = False
# If sort is None, find greatest sort so far, add 1
if sort is None:
if level_nodes is None or len(level_nodes) == 0:
sort = 1
else:
max_sort = self._get_max_sort_at_level(tree_id, segment_id, level_nodes)
if max_sort is not None:
sort = max_sort + 1
else:
sort = 1
re_sort = False
# Add child
self.tree[tree_id][segment_id][node_id] = (parent_node_id, node_type, payload, sort, children)
if re_sort is True:
self._increment_sort_after_item(tree_id, segment_id, sort, parent_node_id, node_id)
# Add child to parent's list of children
parent_node = self.tree[tree_id][segment_id][parent_node_id]
new_children = parent_node[4]
if new_children is None:
new_children = []
new_children.append(node_id)
parent_node = (parent_node[0], parent_node[1], parent_node[2], parent_node[3], new_children)
self.tree[tree_id][segment_id][parent_node_id] = parent_node
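    # Hedged usage sketch of the sort handling above (tree/segment/node ids are
    # invented; `root_id` stands for the id of the segment's root node):
    #
    #   self.add_node(1, 1, root_id, 100, None, None, 'dir', None)   # empty level -> sort 1
    #   self.add_node(1, 1, root_id, 101, None, None, 'dir', None)   # appended -> sort 2
    #   self.add_node(1, 1, root_id, 102, 1, None, 'file', 'data')   # explicit sort 1 keeps
    #                                                                # re_sort=True, so existing
    #                                                                # siblings are shifted down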
# Saves distribution and dependency for particular dimension
distributions.append(distribution)
dependencies.append(dependency)
# Add fit inspection data for current dimension
self.multiple_fit_inspection_data.append(fit_inspection_data)
# Save the used number of intervals
for dep_index, dep in enumerate(dependency):
if dep is not None:
self.dist_descriptions[dep]['used_number_of_intervals'] = \
used_number_of_intervals[dep_index]
# Add used number of intervals for dimensions with no dependency
for fit_inspection_data in self.multiple_fit_inspection_data:
if not fit_inspection_data.used_number_of_intervals:
fit_inspection_data.used_number_of_intervals = 1
# Save multivariate distribution
self.mul_var_dist = MultivariateDistribution(distributions, dependencies)
@staticmethod
def _fit_distribution(sample, name, fixed_parameters=(None, None, None, None)):
"""
Fits the distribution and returns the parameters.
Parameters
----------
sample : list of float
Raw data the distribution is fitted on.
name : str
Name of the distribution ("Weibull_2p", "Weibull_3p", "Lognormal" or
"Lognormal_SigmaMu", "Normal", "KernelDensity"). They keyword list
is defined in settings.py.
fixed_parameters : tuple of float
Specifies which value parameters are fixed and thus are not
fitted. None means that it is not fixed, but shall be estimated.
Returns
-------
tuple of ConstantParam
The computed parameters in the order of (shape, loc, scale, shape2).
Raises
------
ValueError
If the distribution is unknown.
"""
if fixed_parameters != (None, None, None, None) and \
name != WEIBULL_EXP_KEYWORD:
err_msg = "Fixing parameters is not implemented for the " \
"distribution {} yet.".format(name)
raise NotImplementedError(err_msg)
if name == WEIBULL_2P_KEYWORD:
# Do not fit the location parameter because it is 0 for a 2-p. dist.
params = sts.weibull_min.fit(sample, floc=0)
elif name == WEIBULL_3P_KEYWORD or \
name == WEIBULL_3P_KEYWORD_ALTERNATIVE:
params = sts.weibull_min.fit(sample)
if params[1] < 0:
warnings.warn('The estimated location parameter of a translated '
'Weibull distribution was negative ({}). However, '
'as this is likely unphysical and could lead to '
                          'problems with conditional variables, the '
'location parameter is set to 0.'.format(params[1]),
RuntimeWarning, stacklevel=2)
params = (params[0], 0, params[2])
elif name == WEIBULL_EXP_KEYWORD:
dist = ExponentiatedWeibullDistribution()
params = dist.fit(sample, shape=fixed_parameters[0],
scale=fixed_parameters[1],
loc=fixed_parameters[2],
shape2=fixed_parameters[3])
elif name == NORMAL_KEYWORD:
params = list(sts.norm.fit(sample))
# Shape doesn't exist for normal
params.insert(0, 0)
elif name == LOGNORMAL_EXPMU_PARAMETER_KEYWORD or \
name == LOGNORMAL_MU_PARAMETER_KEYWORD:
# For lognormal loc is set to 0
params = sts.lognorm.fit(sample, floc=0)
elif name == 'KernelDensity':
dens = sm.nonparametric.KDEUnivariate(sample)
dens.fit(gridsize=2000)
# Kernel density doesn't have shape, loc, scale
return (dens.cdf, dens.icdf)
else:
err_msg = "Distribution '{}' is unknown.".format(name)
raise ValueError(err_msg)
if len(params) == 3:
constant_params = (ConstantParam(params[0]),
ConstantParam(params[1]),
ConstantParam(params[2]),
ConstantParam(None))
elif len(params) == 4:
constant_params = (ConstantParam(params[0]),
ConstantParam(params[1]),
ConstantParam(params[2]),
ConstantParam(params[3]))
else:
err_msg = "params must have a length of 4, but was '{}'."\
.format(len(params))
raise ValueError(err_msg)
return constant_params
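    # Hedged usage sketch. It assumes scipy.stats is importable as `sts` (as the
    # calls above imply) and that this method lives on a class named `Fit`, which
    # the Fit._fit_distribution calls further down suggest:
    #
    #   sample = sts.weibull_min.rvs(1.5, loc=0, scale=3, size=1000).tolist()
    #   shape, loc, scale, shape2 = Fit._fit_distribution(sample, 'Weibull_2p')
    #   # shape, loc and scale are ConstantParam objects; shape2 is
    #   # ConstantParam(None) because a 2-parameter Weibull fit yields only
    #   # three parameters.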
@staticmethod
def _get_function(function_name):
"""
Returns the function.
Parameters
----------
function_name : str
Options are 'power3', 'exp3', 'lnsquare2', 'powerdecrease3',
'asymdecrease3', 'logistics4', 'alpha3'.
Returns
-------
func
The actual function named function_name.
Raises
------
ValueError
If the function is unknown.
"""
if function_name == 'power3':
return _power3
elif function_name == 'exp3':
return _exp3
elif function_name == 'lnsquare2':
return _lnsquare2
elif function_name == 'powerdecrease3':
return _powerdecrease3
elif function_name == 'asymdecrease3':
return _asymdecrease3
elif function_name == 'logistics4':
return _logistics4
elif function_name == 'alpha3':
return _alpha3
elif function_name is None:
return None
else:
err_msg = "Function '{}' is unknown.".format(function_name)
raise ValueError(err_msg)
@staticmethod
def _append_params(name, param_values, dependency, index, sample, fixed_parameters=(None, None, None, None)):
"""
Distributions are being fitted and the results are appended to param_points.
Parameters
----------
name : str
Name of distribution (e.g. 'Weibull_2p' or 'Lognormal').
param_values : list of list,
Contains lists that contain values for each param : order (shape, loc, scale).
dependency : list of int
Length of 3 in the order (shape, loc, scale) contains :
None -> no dependency
int -> depends on particular dimension
index : int
The current parameter as int in the order of (shape, loc, scale) (i.e. 0 -> shape).
sample : list of float
Values that are used to fit the distribution.
fixed_parameters : tuple of float
Specifies which value parameters are fixed and thus are not
fitted. None means that it is not fixed, but shall be estimated.
Returns
-------
BasicFit
The information of this single fit.
"""
# Fit distribution
current_params = Fit._fit_distribution(sample, name, fixed_parameters=fixed_parameters)
# Create basic fit object
basic_fit = BasicFit(*current_params, sample)
for i in range(index, len(dependency)):
# Check if there is a dependency and whether it is the right one
if dependency[i] is not None and \
dependency[i] == dependency[index]:
# Calculated parameter is appended to param_values
param_values[i].append(current_params[i])
return basic_fit
@staticmethod
def _get_fitting_values(sample, samples, name, dependency, index,
number_of_intervals=None, bin_width=None,
min_datapoints_for_fit=20,
fixed_parameters=(None, None, None, None)):
"""
Returns values for fitting.
Parameters
----------
sample : list of float
The current sample to fit.
samples : list of list
List that contains data to be fitted : samples[0] -> first variable (i.e. wave height)
samples[1] -> second variable
...
name : str
Name of distribution (e.g. 'Weibull_2p' or 'Lognormal').
dependency : list of int
Length of 3 in the order (shape, loc, scale) contains :
None -> no dependency
int -> depends on particular dimension
index : int
Order : (shape, loc, scale) (i.e. 0 -> shape).
        number_of_intervals : int
            Number of intervals the dependent variable is split into; one
            distribution is fitted per interval.
        bin_width : float
            Alternative to number_of_intervals: width of the intervals the
            dependent variable is split into.
min_datapoints_for_fit : int
Minimum number of datapoints required to perform the fit.
fixed_parameters : tuple of float
Specifies which value parameters are fixed and thus are not
fitted. None means that it is not fixed, but shall be estimated.
Notes
-----
        If both number_of_intervals and bin_width are given, number_of_intervals
        takes precedence.
Returns
-------
interval_centers : ndarray
Array with length of the number of bins that contains the centers of the
calculated bins.
dist_values : list of list
List with length of the number of intervals that contains for each bin center
the used samples for the current fit.
param_values : list of list
List with length of three that contains for each parameter (shape, loc, scale)
a list with length of the number of bins that contains the calculated parameters.
multiple_basic_fit : list of BasicFit
Contains information for each fit.
Raises
------
RuntimeError
If the parameter number_of_intervals or bin_width was not specified.
RuntimeError
If there was not enough data and the number of intervals was less than three.
"""
# Compute intervals.
if number_of_intervals:
interval_centers, interval_width = np.linspace(
min(samples[dependency[index]]), max(samples[dependency[index]]),
num=number_of_intervals, endpoint=False, retstep=True)
interval_centers += 0.5 * interval_width
elif bin_width:
interval_width = bin_width
interval_centers = np.arange(
0.5 * interval_width,
max(samples[dependency[index]]) + 0.5 * interval_width,
interval_width)
else:
raise RuntimeError(
"Either the parameters number_of_intervals or bin_width has to be specified, "
"otherwise the intervals are not specified. Exiting.")
# Sort samples.
samples = np.stack((sample, samples[dependency[index]])).T
sort_indice = np.argsort(samples[:, 1])
sorted_samples = samples[sort_indice]
# Return values.
param_values = [[], [], []]
dist_values = []
# List of all basic fits.
multiple_basic_fit = []
# Deleted interval_centers by index.
deleted_centers = []
# Define the data interval that is used for the fit.
for i, step in enumerate(interval_centers):
mask = ((sorted_samples[:, 1] >= step - 0.5 * interval_width) &
(sorted_samples[:, 1] < step + 0.5 * interval_width))
samples_in_interval = sorted_samples[mask, 0]
if len(samples_in_interval) >= min_datapoints_for_fit:
try:
# Fit distribution to selected data.
basic_fit = Fit._append_params(name,
param_values,
dependency,
index,
samples_in_interval,
fixed_parameters=fixed_parameters)
multiple_basic_fit.append(basic_fit)
dist_values.append(samples_in_interval)
except ValueError:
deleted_centers.append(i) # Add index of unused center.
warnings.warn(
"A ValueError occured for the interval centered at '{}'"
" in dimension '{}'."
.format(step, dependency[index]),
RuntimeWarning, stacklevel=2)
else:
# For case that too few fitting data for the step were found
# the step is deleted.
deleted_centers.append(i) # Add index of unused center.
# Delete interval centers that were not used.
interval_centers = np.delete(interval_centers, deleted_centers)
if len(interval_centers) < 3:
nr_of_intervals = str(len(interval_centers))
raise RuntimeError("Your settings resulted in " + nr_of_intervals +
" intervals. However, at least 3 intervals are "
"required. Consider changing the required "
" minimum of datapoints within an interval using "
"the 'min_datapoints_for_fit' key.")
return interval_centers, dist_values, param_values, multiple_basic_fit
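    # Worked sketch of the interval construction above (numbers invented): with
    # number_of_intervals=4 and the dependent sample spanning 0..8,
    # np.linspace(0, 8, num=4, endpoint=False, retstep=True) yields
    # [0, 2, 4, 6] with a width of 2, and the +0.5*width shift turns these into
    # the bin centers [1, 3, 5, 7] of the intervals [0,2), [2,4), [4,6), [6,8).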
def _get_distribution(self, dimension, samples, **kwargs):
"""
Returns the fitted distribution, the dependency and information to
        visualize all fits for this dimension.
        """
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
from decimal import (Decimal,
ROUND_HALF_UP)
import uuid
import pytz
from .expression import (FALSE,
TRUE)
from .java_variables import (JAVA_MAX_FLOAT,
JAVA_MIN_FLOAT)
from ..types.type import TypeID
class Literals(object):
EPOCH = datetime.datetime.utcfromtimestamp(0)
EPOCH_DAY = EPOCH.date()
@staticmethod
def from_(value): # noqa: C901
if value is None:
raise RuntimeError("Cannot create an expression literal from None")
if isinstance(value, bool):
return BooleanLiteral(value)
elif isinstance(value, int):
if Literal.JAVA_MIN_INT < value < Literal.JAVA_MAX_INT:
return IntegerLiteral(value)
return LongLiteral(value)
elif isinstance(value, float):
if Literal.JAVA_MIN_FLOAT < value < Literal.JAVA_MAX_FLOAT:
return FloatLiteral(value)
return DoubleLiteral(value)
elif isinstance(value, str):
return StringLiteral(value)
elif isinstance(value, uuid.UUID):
return UUIDLiteral(value)
elif isinstance(value, bytearray):
return BinaryLiteral(value)
elif isinstance(value, bytes):
return FixedLiteral(value)
elif isinstance(value, Decimal):
return DecimalLiteral(value)
else:
raise RuntimeError("Unimplemented Type Literal")
@staticmethod
def above_max():
return ABOVE_MAX
@staticmethod
def below_min():
return BELOW_MIN
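# Hedged sketch of the dispatch above (values chosen for illustration only):
#
#   Literals.from_(True)               # -> BooleanLiteral(True)
#   Literals.from_(3)                  # -> IntegerLiteral(3)
#   Literals.from_(2 ** 40)            # -> LongLiteral(1099511627776); exceeds JAVA_MAX_INT
#   Literals.from_(Decimal('3.14'))    # -> DecimalLiteral(Decimal('3.14'))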
class Literal(object):
JAVA_MAX_INT = 2147483647
JAVA_MIN_INT = -2147483648
JAVA_MAX_FLOAT = 3.4028235E38
JAVA_MIN_FLOAT = -3.4028235E38
@staticmethod
def of(value): # noqa: C901
if isinstance(value, bool):
return BooleanLiteral(value)
elif isinstance(value, int):
if value < Literal.JAVA_MIN_INT or value > Literal.JAVA_MAX_INT:
return LongLiteral(value)
return IntegerLiteral(value)
elif isinstance(value, float):
if value < Literal.JAVA_MIN_FLOAT or value > Literal.JAVA_MAX_FLOAT:
return DoubleLiteral(value)
return FloatLiteral(value)
elif isinstance(value, str):
return StringLiteral(value)
elif isinstance(value, uuid.UUID):
return UUIDLiteral(value)
elif isinstance(value, bytes):
return FixedLiteral(value)
elif isinstance(value, bytearray):
return BinaryLiteral(value)
elif isinstance(value, Decimal):
return DecimalLiteral(value)
def to(self, type):
raise NotImplementedError()
class BaseLiteral(Literal):
def __init__(self, value):
self.value = value
def to(self, type):
raise NotImplementedError()
def __eq__(self, other):
if id(self) == id(other):
return True
elif other is None or not isinstance(other, BaseLiteral):
return False
return self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "BaseLiteral(%s)" % str(self.value)
def __str__(self):
return str(self.value)
class ComparableLiteral(BaseLiteral):
def __init__(self, value):
super(ComparableLiteral, self).__init__(value)
def to(self, type):
raise NotImplementedError()
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if self.value is None:
return True
if other is None or other.value is None:
return False
return self.value < other.value
def __gt__(self, other):
if self.value is None:
return False
if other is None or other.value is None:
return True
return self.value > other.value
def __le__(self, other):
if self.value is None:
return True
if other is None or other.value is None:
return False
return self.value <= other.value
def __ge__(self, other):
if self.value is None:
return False
if other is None or other.value is None:
return True
return self.value >= other.value
class AboveMax(Literal):
def __init__(self):
super(AboveMax, self).__init__()
def value(self):
raise RuntimeError("AboveMax has no value")
def to(self, type):
raise RuntimeError("Cannot change the type of AboveMax")
def __str__(self):
return "aboveMax"
class BelowMin(Literal):
def __init__(self):
super(BelowMin, self).__init__()
def value(self):
raise RuntimeError("BelowMin has no value")
def to(self, type):
raise RuntimeError("Cannot change the type of BelowMin")
def __str__(self):
return "belowMin"
class BooleanLiteral(ComparableLiteral):
def __init__(self, value):
super(BooleanLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.BOOLEAN:
return self
class IntegerLiteral(ComparableLiteral):
def __init__(self, value):
super(IntegerLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.INTEGER:
return self
elif type_var.type_id == TypeID.LONG:
return LongLiteral(self.value)
elif type_var.type_id == TypeID.FLOAT:
return FloatLiteral(float(self.value))
elif type_var.type_id == TypeID.DOUBLE:
return DoubleLiteral(float(self.value))
elif type_var.type_id == TypeID.DATE:
return DateLiteral(self.value)
elif type_var.type_id == TypeID.DECIMAL:
if type_var.scale == 0:
return DecimalLiteral(Decimal(self.value))
else:
return DecimalLiteral(Decimal(self.value)
.quantize(Decimal("." + "".join(["0" for i in range(1, type_var.scale)]) + "1"),
rounding=ROUND_HALF_UP))
class LongLiteral(ComparableLiteral):
def __init__(self, value):
super(LongLiteral, self).__init__(value)
def to(self, type_var): # noqa: C901
if type_var.type_id == TypeID.INTEGER:
if Literal.JAVA_MAX_INT < self.value:
return ABOVE_MAX
elif Literal.JAVA_MIN_INT > self.value:
return BELOW_MIN
return IntegerLiteral(self.value)
elif type_var.type_id == TypeID.LONG:
return self
elif type_var.type_id == TypeID.FLOAT:
return FloatLiteral(float(self.value))
elif type_var.type_id == TypeID.DOUBLE:
return DoubleLiteral(float(self.value))
elif type_var.type_id == TypeID.TIME:
return TimeLiteral(self.value)
elif type_var.type_id == TypeID.TIMESTAMP:
return TimestampLiteral(self.value)
elif type_var.type_id == TypeID.DECIMAL:
if type_var.scale == 0:
return DecimalLiteral(Decimal(self.value))
else:
return DecimalLiteral(Decimal(self.value)
.quantize(Decimal("." + "".join(["0" for i in range(1, type_var.scale)]) + "1"),
rounding=ROUND_HALF_UP))
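# Hedged sketch of the narrowing above; `IntegerType()` stands in for whatever
# type object carries type_id == TypeID.INTEGER in this code base (an
# assumption, the concrete type classes are not shown here):
#
#   LongLiteral(5).to(IntegerType())          # -> IntegerLiteral(5)
#   LongLiteral(2 ** 40).to(IntegerType())    # -> ABOVE_MAX
#   LongLiteral(-2 ** 40).to(IntegerType())   # -> BELOW_MIN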
class FloatLiteral(ComparableLiteral):
def __init__(self, value):
super(FloatLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.FLOAT:
return self
elif type_var.type_id == TypeID.DOUBLE:
return DoubleLiteral(self.value)
elif type_var.type_id == TypeID.DECIMAL:
if type_var.scale == 0:
return DecimalLiteral(Decimal(self.value)
.quantize(Decimal('1.'),
rounding=ROUND_HALF_UP))
else:
return DecimalLiteral(Decimal(self.value)
.quantize(Decimal("." + "".join(["0" for i in range(1, type_var.scale)]) + "1"),
rounding=ROUND_HALF_UP))
class DoubleLiteral(ComparableLiteral):
def __init__(self, value):
super(DoubleLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.FLOAT:
if JAVA_MAX_FLOAT < self.value:
return ABOVE_MAX
elif JAVA_MIN_FLOAT > self.value:
return BELOW_MIN
return FloatLiteral(self.value)
elif type_var.type_id == TypeID.DOUBLE:
return self
elif type_var.type_id == TypeID.DECIMAL:
if type_var.scale == 0:
return DecimalLiteral(Decimal(self.value)
.quantize(Decimal('1.'),
rounding=ROUND_HALF_UP))
else:
return DecimalLiteral(Decimal(self.value)
.quantize(Decimal("." + "".join(["0" for i in range(1, type_var.scale)]) + "1"),
rounding=ROUND_HALF_UP))
class DateLiteral(ComparableLiteral):
def __init__(self, value):
super(DateLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.DATE:
return self
class TimeLiteral(ComparableLiteral):
def __init__(self, value):
super(TimeLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.TIME:
return self
class TimestampLiteral(ComparableLiteral):
def __init__(self, value):
super(TimestampLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.TIMESTAMP:
return self
elif type_var.type_id == TypeID.DATE:
return DateLiteral((datetime.datetime.fromtimestamp(self.value / 1000000) - Literals.EPOCH).days)
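# Worked sketch: timestamp literals hold microseconds since the epoch, so one
# day's worth of microseconds converts to DateLiteral(1). `DateType()` is a
# stand-in for the type object with type_id == TypeID.DATE, and the result
# assumes the interpreter runs with UTC local time, since fromtimestamp() above
# is local-time based:
#
#   TimestampLiteral(86400000000).to(DateType())   # -> DateLiteral(1)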
class DecimalLiteral(ComparableLiteral):
def __init__(self, value):
super(DecimalLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.DECIMAL and type_var.scale == abs(self.value.as_tuple().exponent):
return self
class StringLiteral(BaseLiteral):
def __init__(self, value):
super(StringLiteral, self).__init__(value)
def to(self, type_var): # noqa: C901
import dateutil.parser
if type_var.type_id == TypeID.DATE:
return DateLiteral((dateutil.parser.parse(self.value) - Literals.EPOCH).days)
elif type_var.type_id == TypeID.TIME:
return TimeLiteral(
int((dateutil.parser.parse(Literals.EPOCH.strftime("%Y-%m-%d ") + self.value) - Literals.EPOCH)
.total_seconds() * 1000000))
elif type_var.type_id == TypeID.TIMESTAMP:
timestamp = dateutil.parser.parse(self.value)
EPOCH = Literals.EPOCH
if bool(timestamp.tzinfo) != bool(type_var.adjust_to_utc):
raise RuntimeError("Cannot convert to %s when string is: %s" % (type_var, self.value))
if timestamp.tzinfo is not None:
EPOCH = EPOCH.replace(tzinfo=pytz.UTC)
return TimestampLiteral(int((timestamp - EPOCH).total_seconds() * 1000000))
elif type_var.type_id == TypeID.STRING:
return self
elif type_var.type_id == TypeID.UUID:
return UUIDLiteral(uuid.UUID(self.value))
elif type_var.type_id == TypeID.DECIMAL:
dec_val = Decimal(str(self.value))
if abs(dec_val.as_tuple().exponent) == type_var.scale:
if type_var.scale == 0:
return DecimalLiteral(Decimal(str(self.value))
.quantize(Decimal('1.'),
rounding=ROUND_HALF_UP))
else:
return DecimalLiteral(Decimal(str(self.value))
.quantize(Decimal("." + "".join(["0" for i in range(1, type_var.scale)]) + "1"),
rounding=ROUND_HALF_UP))
def __eq__(self, other):
if id(self) == id(other):
return True
if other is None or not isinstance(other, StringLiteral):
return False
return self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if other is None:
return False
return self.value < other.value
def __gt__(self, other):
if other is None:
return True
return self.value > other.value
def __le__(self, other):
if other is None:
return False
return self.value <= other.value
def __ge__(self, other):
if other is None:
return True
return self.value >= other.value
def __str__(self):
return '"' + self.value + '"'
class UUIDLiteral(ComparableLiteral):
def __init__(self, value):
super(UUIDLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.UUID:
return self
class FixedLiteral(BaseLiteral):
def __init__(self, value):
super(FixedLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.FIXED:
if len(self.value) == type_var.length:
return self
elif type_var.type_id == TypeID.BINARY:
return BinaryLiteral(self.value)
def write_replace(self):
return FixedLiteralProxy(self.value)
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if other is None:
return False
return self.value < other.value
def __gt__(self, other):
if other is None:
return True
return self.value > other.value
def __le__(self, other):
if other is None:
return False
return self.value <= other.value
def __ge__(self, other):
if other is None:
return True
return self.value >= other.value
class BinaryLiteral(BaseLiteral):
def __init__(self, value):
super(BinaryLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.FIXED:
if type_var.length == len(self.value):
return FixedLiteral(self.value)
return None
elif type_var.type_id == TypeID.BINARY:
return self
def write_replace(self):
return BinaryLiteralProxy(self.value)
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if other is None:
return False
return self.value < other.value
def __gt__(self, other):
if other is None:
return True
return self.value > other.value
def __le__(self, other):
if other is None:
return False
return self.value <= other.value
def __ge__(self, other):
        if other is None:
            return True
        return self.value >= other.value
# Autogenerated file. ANY CHANGES WILL BE OVERWRITTEN
from to_python.core.types import FunctionType, \
FunctionArgument, \
FunctionArgumentValues, \
FunctionReturnTypes, \
FunctionSignature, \
FunctionDoc, \
FunctionOOP, \
FunctionOOPField, \
CompoundOOPData, \
FunctionData, \
CompoundFunctionData
DUMP_PARTIAL = [
CompoundOOPData(
server=[
FunctionOOP(
description=None,
base_function_name="addPedClothes",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='addClothes',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='clothesTexture',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='clothesModel',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='clothesType',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function is used to set the current clothes on a ped.' ,
arguments={
"thePed": """: The ped whose clothes you want to change. """,
"clothesTexture": """: A string determining the clothes texture that will be added. See the CJ Clothes|clothes catalog. """,
"clothesModel": """: A string determining the clothes model that will be added. See the CJ Clothes|clothes catalog. """,
"clothesType": """: A integer representing the clothes slot/type the clothes should be added to. See the CJ Clothes|clothes catalog. """
},
result='this function returns true if the clothes were successfully added to the ped, false otherwise.' ,
),
url='addPedClothes',
),
field=None,
is_static=False,
)
],
client=[
FunctionOOP(
description=None,
base_function_name="addPedClothes",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='addClothes',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='clothesTexture',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='clothesModel',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='clothesType',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function is used to set the current clothes on a ped.' ,
arguments={
"thePed": """: The ped whose clothes you want to change. """,
"clothesTexture": """: A string determining the clothes texture that will be added. See the CJ Clothes|clothes catalog. """,
"clothesModel": """: A string determining the clothes model that will be added. See the CJ Clothes|clothes catalog. """,
"clothesType": """: A integer representing the clothes slot/type the clothes should be added to. See the CJ Clothes|clothes catalog. """
},
result='this function returns true if the clothes were successfully added to the ped, false otherwise.' ,
),
url='addPedClothes',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="canPedBeKnockedOffBike",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='canBeKnockedOffBike',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function checks if the given ped can fall off bikes.' ,
arguments={
"thePed": """the ped you want to check. """
},
result='returns true if the ped can be knocked off bikes, false if he cannot or an invalid element was passed.' ,
),
url='canPedBeKnockedOffBike',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
FunctionOOP(
description=None,
base_function_name="getPedAmmoInClip",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='getAmmoInClip',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='weaponSlot',
argument_type=FunctionType(
names=['int'],
is_optional=True,
),
default_value='current',
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns an integer that contains the ammo in a specified peds weapon. See weapon|Weapon Info' ,
arguments={
"thePed": """The ped whose ammo you want to check. """,
"weaponSlot": """an integer representing the weapon slot (set to the peds currently selected slot if not specified). """
},
result='returns an int containing the amount of ammo in the specified peds currently selected or specified clip, or 0 if the ped specified is invalid.' ,
),
url='getPedAmmoInClip',
),
field=FunctionOOPField(
name='ammoInClip',
types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
),
is_static=False,
)
],
client=[
FunctionOOP(
description=None,
base_function_name="getPedAmmoInClip",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='getAmmoInClip',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='weaponSlot',
argument_type=FunctionType(
names=['int'],
is_optional=True,
),
default_value='current',
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns an integer that contains the ammo in a specified peds weapon. See weapon|Weapon Info' ,
arguments={
"thePed": """The ped whose ammo you want to check. """,
"weaponSlot": """an integer representing the weapon slot (set to the peds currently selected slot if not specified). """
},
result='returns an int containing the amount of ammo in the specified peds currently selected or specified clip, or 0 if the ped specified is invalid.' ,
),
url='getPedAmmoInClip',
),
field=FunctionOOPField(
name='ammoInClip',
types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="getPedAnimation",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='getAnimation',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
),
FunctionType(
names=['string'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='Gets the animation of a player or ped that was set using setPedAnimation.' ,
arguments={
"thePed": """the player or ped you want to get the animations|animation of. """
},
result='<syntaxhighlight lang=lua>string anim, string block, int time, bool loop, bool updateposition, bool interruptable, bool freezelastframe, int blendtime, bool restoretaskonanimend</syntaxhighlight>' ,
),
url='getPedAnimation',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
FunctionOOP(
description=None,
base_function_name="getPedArmor",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='getArmor',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the current armor of the specified ped.' ,
arguments={
"thePed": """The ped whose armor you want to check """
},
result='a float with the armor, false if an invalid ped was given.' ,
),
url='getPedArmor',
),
field=FunctionOOPField(
name='armor',
types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
),
is_static=False,
)
],
client=[
FunctionOOP(
description=None,
base_function_name="getPedArmor",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='getArmor',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the current armor of the specified ped.' ,
arguments={
"thePed": """The ped whose armor you want to check """
},
result='a float with the armor, false if an invalid ped was given.' ,
),
url='getPedArmor',
),
field=FunctionOOPField(
name='armor',
types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="getPedBonePosition",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='getBonePosition',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
),
FunctionType(
names=['float'],
is_optional=False,
),
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='bone',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='Returns the 3D world coordinates of a specific bone of a given ped.' ,
arguments={
"thePed": """the ped you want to inspect. """,
"bone": """the number of the bone to get the position of.
<div style="border: 3px red solid; margin-bottom:3px; padding-left:5px;"> """,
"1": """BONE_PELVIS1 """,
"2": """BONE_PELVIS """,
"3": """BONE_SPINE1 """,
"4": """BONE_UPPERTORSO """,
"5": """BONE_NECK """,
"6": """BONE_HEAD2 """,
"7": """BONE_HEAD1 """,
"8": """BONE_HEAD """,
"21": """BONE_RIGHTUPPERTORSO """,
"22": """BONE_RIGHTSHOULDER """,
"23": """BONE_RIGHTELBOW """,
"24": """BONE_RIGHTWRIST """,
"25": """BONE_RIGHTHAND """,
"26": """BONE_RIGHTTHUMB """,
"31": """BONE_LEFTUPPERTORSO """,
"32": """BONE_LEFTSHOULDER """,
"33": """BONE_LEFTELBOW """,
"34": """BONE_LEFTWRIST """,
"35": """BONE_LEFTHAND """,
"36": """BONE_LEFTTHUMB """,
"41": """BONE_LEFTHIP """,
"42": """BONE_LEFTKNEE """,
"43": """BONE_LEFTANKLE """,
"44": """BONE_LEFTFOOT """,
"51": """BONE_RIGHTHIP """,
"52": """BONE_RIGHTKNEE """,
"53": """BONE_RIGHTANKLE """,
"54": """BONE_RIGHTFOOT
</div> """
},
result='returns the x, y, z world position of the bone.' ,
),
url='getPedBonePosition',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="getPedCameraRotation",
class_name='ped',
method=FunctionData(
signature=FunctionSignature(
name='getCameraRotation',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePed',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
| |
"""
This module uses the h5py (HDF5) package to read and write numpy
arrays to disk.
See the h5py and HDF5 documentation for further info on the details of
this approach.
"""
# pylint: disable=no-member
import h5py as h5
import numpy as np
from . import base
from ..data import base_legacy as db
from ..data import time as dt
import copy
from six import string_types
import sys
from .. import _version as _ver
import warnings
try:
    # There is an organizational inconsistency in different versions of h5py.
h5_group = h5.Group
except AttributeError:
    # h5py.highlevel is going away, but fall back to it for versions that lack the attribute above.
h5_group = h5.highlevel.Group
if sys.version_info >= (3, 0):
import pickle as pkl
def pkl_loads(s):
try:
return pkl.loads(s)
except UnicodeDecodeError:
return pkl.loads(s, encoding='bytes')
else: # Python 2
input = raw_input # pylint: disable=undefined-variable
import cPickle as pkl # pylint: disable=undefined-variable, import-error
def pkl_loads(s):
return pkl.loads(s)
class Saver(base.DataFactory):
"""
A save data_factory object. This class saves data in DOLFYN
classes into DOLFYN format hdf5 files.
This function should not be used explicitly, instead use the
:meth:`main.Saveable.save` method of the data object.
Parameters
----------
filename : string
        Name of file to save to.
mode : string
File access mode. Should be 'w' (default) or 'a'.
where : string
Location in hdf5 file to save the data (default: '/')
    max_file_size_mb : int
        Maximum size of each file in MB (this option does not currently work).
See also:
- file
- h5py
"""
ver = _ver.version_info
fletcher32 = True
complib = 'gzip'
complevel = 2
shuffle = True
split_groups_into_files = False
# Right now, this isn't working, I think it is a bug in h5py.
# Perhaps later this will work, and it should be pretty
# transparent.
def __init__(self, filename, mode='w', where='/', max_file_size_mb=None):
self.file_mode = mode
# This does an 'expanduser' on the filename (i.e. '~/'
# replaced with '/home/<username>/').
self.filename = filename
kwargs = {}
if max_file_size_mb is not None:
kwargs['driver'] = 'family'
kwargs['memb_size'] = max_file_size_mb * (2 ** 20)
# Need to modify the filename to include a %d character.
self.fd = h5.File(self.filename, mode=self.file_mode, **kwargs)
self.close = self.fd.close
self.node = self.fd.get(where)
self.node.attrs.create(b'DataSaveVersion',
pkl.dumps(_ver.ver2tuple(self.ver)))
self._extrafiles = []
def get_group(self, where=None, nosplit=False):
"""
An internal function for returning the current, or a specified
node in the hdf5 file.
Return the h5py node at location `where`, `where` can be:
- a string indicating a location in the hdf5 file,
- a node in the hdf5 file (returns this node)
- None, in which case, the current value of self.node is returned.
"""
if where is None:
return self.node
elif where.__class__ is h5.Group:
return where
elif isinstance(where, string_types):
if self.split_groups_into_files and where != '/' and not nosplit:
if self.fd.get(where, None) is None:
fname = copy.copy(self.fd.filename)
grpname = where.split('/')[-1]
if fname.endswith('.h5'):
fname = fname[:-2] + grpname + '.h5'
else:
fname += '.' + grpname
thisgroup_file = h5.File(fname, mode=self.file_mode)
thisgroup_file.create_group('data')
thisgroup_file.close()
self.fd[where] = h5.ExternalLink(fname, '/data')
return self.fd.get(where)
else:
return self.fd.get(where)
else:
return self.fd.require_group(where)
else:
raise Exception('Not a valid group specification')
def write_type(self, obj, where=None):
"""
Write the type of the object being saved to the hdf5 file.
This allows for automatic loading of a file into a specific
DOLfYN class.
When a file is loaded, a dict of string:class (key:val) pairs
should be provided to the 'loader' data_factory. The string
(key) that matches the value of the file's '_object_type'
attribute is chosen. The corresponding class (value) is then
instanced and data is loaded into it according to the DOLfYN
specification.
This function writes the '_object_type' from the current
DOLfYN instance to the file so that it can be loaded later.
See also:
- loader.read_type
- get_typemap
"""
self.get_group(where).attrs.create('_object_type', str(obj.__class__))
if isinstance(obj, db.config) and obj.config_type not in [None, '*UNKNOWN*']:
self.get_group(where).attrs.create('_config_type', obj.config_type.encode('ascii'))
def write_dict(self, name, dct, where='/'):
"""
This is a method for writing simple dictionaries.
It writes the dictionary as attributes in a group. The keys
are written as attribute names (only string keys are allowed).
The values are pickled and written to the attribute value.
Parameters
----------
name : string
name of the dictionary to be written (hdf5 group to create).
dct : dict
The dictionary who's data should be saved.
where : string
The location to write `dct` (a Group named `name` will be
created at this location).
"""
tmp = self.get_group(where).require_group(name)
for ky, val in list(dct.items()):
tmp.attrs.create(ky, pkl.dumps(val))
def write(self, obj, where='/', nosplit_file=False, **kwargs):
"""
Write data in object `obj` to the file at location `where`.
`obj` should be a DOLfYN type object; a subclass of the
Dgroups class.
Parameters
----------
        obj - data_object
The object to write to the file.
where - string
The location in the file to write the data in the object
(default: '/')
nosplit_file - bool
Currently non-functional, for writing data to multiple files.
"""
nd = self.get_group(where)
self.write_type(obj, nd) # Write the data type.
if hasattr(obj, 'props'):
self.write_dict('##properties##', obj.props, nd)
# Write the 'props' attribute, if the data has one.
if hasattr(obj, '_units'):
self.write_dict('##units##', obj._units, nd)
# Write the 'units' property if the data has it
# (this has been deprecated in the DOLfYN standard,
# in favor of meta arrays).
# iterate over the group names:
for grp_nm, dat_nms in list(obj.groups.items()):
grp = self.get_group(where + '/' + grp_nm, nosplit=nosplit_file)
# Create or get the group specified.
for ky in dat_nms: # Iterate over the data names in the group.
if not hasattr(obj, ky):
continue
val = getattr(obj, ky)
if isinstance(val, np.ndarray) and val.dtype.name.startswith('unicode'):
val = val.astype('S')
if db.Dgroups in val.__class__.__mro__:
self.write(val,
where + '/' + grp_nm + '/_' + ky,
nosplit_file=True)
elif isinstance(val, np.ndarray) and len(val) > 0:
nd = grp.create_dataset(str(ky),
data=val,
compression=self.complib,
shuffle=self.shuffle,
fletcher32=self.fletcher32,)
for kw, d in list(kwargs.items()):
if ky in d:
nd.attrs.create(str(kw), d[ky])
if val.__class__ is dt.time_array:
nd.attrs.create('time_var', 'True')
if db.ma.valid and val.__class__ is db.ma.marray:
nd = grp.get(str(ky))
# print( 'writing meta data for %s' % ky )
for nm, val in list(val.meta.__dict__.items()):
if nm not in ['xformat', 'yformat']:
# print( nm,val )
nd.attrs.create(nm, pkl.dumps(val))
elif val.__class__ is dict:
grp.attrs.create(ky, pkl.dumps(val))
else:
grp.attrs.create(ky, pkl.dumps(val))
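# Hedged usage sketch for Saver; `some_dolfyn_obj` stands for any DOLfYN data
# object (a db.Dgroups subclass) and the file name is made up:
#
#   saver = Saver('adv_data.h5', mode='w')
#   saver.write(some_dolfyn_obj, where='/')
#   saver.close()
#
# As the class docstring notes, the object's own .save() method is the intended
# public entry point; direct Saver use is mainly for internal code.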
# class UpdateTool(base.DataFactory):
# """
# A class for updating data files when the format specification
# changes.
# """
# def __init__(self, filename, )
# self.file_mode = mode
# This does an 'expanduser' on the filename (i.e. '~/'
# replaced with '/home/<username>/').
# self.filename = filename
# kwargs = {}
# if max_file_size_mb is not None:
# kwargs['driver'] = 'family'
# kwargs['memb_size'] = max_file_size_mb * (2 ** 20)
# Need to modify the filename to include a %d character.
# self.fd = h5.File(self.filename, mode=self.file_mode, **kwargs)
# self.close = self.fd.close
# self.node = self.fd.get('/')
# self.node.attrs.create('DataSaveVersion', pkl.dumps(self.ver))
# self._extrafiles = []
# def change_type_name(self, oldname, newname):
# self.get_group(where).attrs.create('_object_type',
# str(obj.__class__))
class Loader(base.DataFactory):
"""
    A load data_factory object. This class loads data from DOLfYN
    format hdf5 files into DOLfYN classes.
    This class is normally used indirectly (via the package's load
    routines) rather than instantiated explicitly.
Parameters
----------
filename : string
        Name of file to load from.
type_map : (dict, type)
A mapping of class strings to types (or a specific type) that
the data should be loaded into.
"""
def __init__(self, filename, type_map,):
self.filename = filename
self.fd = h5.File(self.filename, mode='r+')
# Open the file r+ so that we can modify it on the
# fly if necessary (e.g. _fix_name)
self.close = self.fd.close
self.type_map = type_map
self.ver = _ver.ver2tuple(pkl.loads(
self.fd.attrs.get(b'DataSaveVersion', b'I0\n.')))
def get_group(self, where=None):
"""
If `where` is:
- None: return the current node.
- string: return the node at that address.
- otherwise return `where` itself.
"""
if where is None:
return self.node
elif isinstance(where, string_types):
return self.fd.get(where, None)
else:
return where
def get_name(self, node):
"""
Return the name of the `node`.
"""
return node.name.split('/')[-1]
def iter_data(self, groups=None, where='/'):
"""
Iterate over data nodes in `groups`.
See iter_groups for more info on how to specify groups.
"""
for grp in self.iter_groups(groups=groups, where=where):
for nd in list(grp.values()):
yield nd
def iter(self, groups=None, where='/'):
"""
Iterate over data nodes in `groups`, with the group name returned.
        See iter_groups for more info on how to specify groups.
        """
# galois/_fields/_linalg.py
"""
A module that contains linear algebra routines over Galois fields.
"""
import numpy as np
from ._dtypes import DTYPES
def _lapack_linalg(a, b, function, out=None, n_sum=None):
"""
In prime fields GF(p), it's much more efficient to use LAPACK/BLAS implementations of linear algebra
and then reduce modulo p rather than compute manually.
"""
assert type(a).is_prime_field
field = type(a)
characteristic = field.characteristic
# Determine the return data-type which is the minimum of the two inputs' data-types
if np.object_ in [a.dtype, b.dtype]:
return_dtype = np.object_
else:
return_dtype = a.dtype if np.iinfo(a.dtype).max < np.iinfo(b.dtype).max else b.dtype
a = a.view(np.ndarray)
b = b.view(np.ndarray)
# Determine the minimum dtype to hold the entire product and summation without overflowing
if n_sum is None:
n_sum = 1 if len(a.shape) == 0 else max(a.shape)
max_value = n_sum * (characteristic - 1)**2
dtypes = [dtype for dtype in DTYPES if np.iinfo(dtype).max >= max_value]
dtype = np.object_ if len(dtypes) == 0 else dtypes[0]
a = a.astype(dtype)
b = b.astype(dtype)
# Compute result using native numpy LAPACK/BLAS implementation
if function in [np.inner, np.vdot]:
        # These functions don't have an `out` keyword argument
c = function(a, b)
else:
c = function(a, b, out=out)
c = c % characteristic # Reduce the result mod p
if np.isscalar(c):
# TODO: Sometimes the scalar c is a float?
c = field(int(c), dtype=return_dtype)
else:
c = c.astype(return_dtype).view(field)
return c
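# Worked sketch of the reduce-mod-p trick above: in GF(7) the dot product of
# [3, 5] and [6, 2] is computed as 3*6 + 5*2 = 28 in ordinary integer arithmetic
# by the LAPACK/BLAS call, and 28 % 7 = 0 is the field result. The dtype
# selection above only has to guarantee that the intermediate value 28 cannot
# overflow.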
###############################################################################
# Matrix/vector products
###############################################################################
def dot(a, b, out=None): # pylint: disable=unused-argument
"""
https://numpy.org/doc/stable/reference/generated/numpy.dot.html
"""
if not type(a) is type(b):
raise TypeError(f"Operation 'dot' requires both arrays be in the same Galois field, not {type(a)} and {type(b)}.")
if type(a).is_prime_field:
return _lapack_linalg(a, b, np.dot, out=out)
if a.ndim == 0 or b.ndim == 0:
return a * b
elif a.ndim == 1 and b.ndim == 1:
return np.sum(a * b)
elif a.ndim == 2 and b.ndim == 2:
return np.matmul(a, b, out=out)
elif a.ndim >= 2 and b.ndim == 1:
return np.sum(a * b, axis=-1, out=out)
    # elif a.ndim >= 2 and b.ndim >= 2:
else:
raise NotImplementedError("Currently 'dot' is only supported up to 2-D matrices. Please open a GitHub issue at https://github.com/mhostetter/galois/issues.")
def vdot(a, b):
"""
https://numpy.org/doc/stable/reference/generated/numpy.vdot.html
"""
if not type(a) is type(b):
raise TypeError(f"Operation 'vdot' requires both arrays be in the same Galois field, not {type(a)} and {type(b)}.")
if type(a).is_prime_field:
return _lapack_linalg(a, b, np.vdot)
a = a.flatten()
b = b.flatten().reshape(a.shape) # This is done to mimic numpy's error scenarios
return np.sum(a * b)
def inner(a, b):
"""
https://numpy.org/doc/stable/reference/generated/numpy.inner.html#numpy.inner
"""
if not type(a) is type(b):
raise TypeError(f"Operation 'inner' requires both arrays be in the same Galois field, not {type(a)} and {type(b)}.")
if type(a).is_prime_field:
return _lapack_linalg(a, b, np.inner)
if a.ndim == 0 or b.ndim == 0:
return a * b
if not a.shape[-1] == b.shape[-1]:
raise ValueError(f"Operation 'inner' requires `a` and `b` to have the same last dimension, not {a.shape} and {b.shape}.")
return np.sum(a * b, axis=-1)
def outer(a, b, out=None): # pylint: disable=unused-argument
"""
https://numpy.org/doc/stable/reference/generated/numpy.outer.html#numpy.outer
"""
if not type(a) is type(b):
raise TypeError(f"Operation 'outer' requires both arrays be in the same Galois field, not {type(a)} and {type(b)}.")
if type(a).is_prime_field:
return _lapack_linalg(a, b, np.outer, out=out, n_sum=1)
else:
return np.multiply.outer(a.ravel(), b.ravel(), out=out)
###############################################################################
# Matrix decomposition routines
###############################################################################
def row_reduce(A, ncols=None):
if not A.ndim == 2:
raise ValueError(f"Only 2-D matrices can be converted to reduced row echelon form, not {A.ndim}-D.")
ncols = A.shape[1] if ncols is None else ncols
A_rre = A.copy()
p = 0 # The pivot
for j in range(ncols):
# Find a pivot in column `j` at or below row `p`
idxs = np.nonzero(A_rre[p:,j])[0]
if idxs.size == 0:
continue
i = p + idxs[0] # Row with a pivot
# Swap row `p` and `i`. The pivot is now located at row `p`.
A_rre[[p,i],:] = A_rre[[i,p],:]
# Force pivot value to be 1
A_rre[p,:] /= A_rre[p,j]
# Force zeros above and below the pivot
idxs = np.nonzero(A_rre[:,j])[0].tolist()
idxs.remove(p)
A_rre[idxs,:] -= np.multiply.outer(A_rre[idxs,j], A_rre[p,:])
p += 1
if p == A_rre.shape[0]:
break
return A_rre, p
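# Hedged usage sketch (assumes the public galois API for building field arrays;
# inside this module A would already arrive as such an array):
#
#   GF = galois.GF(7)
#   A = GF([[1, 2], [3, 4]])
#   A_rre, p = row_reduce(A)   # A_rre == [[1, 0], [0, 1]] over GF(7), p == 2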
def lu_decompose(A):
if not A.ndim == 2:
raise ValueError(f"Argument `A` must be a 2-D matrix, not have shape {A.shape}.")
field = type(A)
n = A.shape[0]
Ai = A.copy()
L = field.Identity(n)
for i in range(0, n-1):
if Ai[i,i] == 0:
idxs = np.nonzero(Ai[i:,i])[0] # The first non-zero entry in column `i` below row `i`
if idxs.size == 0: # pylint: disable=no-else-continue
L[i,i] = 1
continue
else:
raise ValueError("The LU decomposition of `A` does not exist. Use the LUP decomposition instead.")
l = Ai[i+1:,i] / Ai[i,i]
Ai[i+1:,:] -= np.multiply.outer(l, Ai[i,:])
L[i+1:,i] = l
U = Ai
return L, U
def plu_decompose(A):
if not A.ndim == 2:
raise ValueError(f"Argument `A` must be a 2-D matrix, not have shape {A.shape}.")
field = type(A)
n = A.shape[0]
Ai = A.copy()
L = field.Zeros((n,n))
P = field.Identity(n) # Row permutation matrix
N_permutations = 0 # Number of permutations
for i in range(0, n-1):
if Ai[i,i] == 0:
idxs = np.nonzero(Ai[i:,i])[0] # The first non-zero entry in column `i` below row `i`
if idxs.size == 0:
L[i,i] = 1
continue
j = i + idxs[0]
# Swap rows `i` and `j`
P[[i,j],:] = P[[j,i],:]
Ai[[i,j],:] = Ai[[j,i],:]
L[[i,j],:] = L[[j,i],:]
N_permutations += 1
l = Ai[i+1:,i] / Ai[i,i]
Ai[i+1:,:] -= np.multiply.outer(l, Ai[i,:]) # Zero out rows below row `i`
L[i,i] = 1 # Set 1 on the diagonal
L[i+1:,i] = l
L[-1,-1] = 1 # Set the final diagonal to 1
U = Ai
# NOTE: Return column permutation matrix
return P.T, L, U, N_permutations
###############################################################################
# Matrix inversions, solutions, rank, etc
###############################################################################
def matrix_rank(A):
A_rre, _ = row_reduce(A)
rank = np.sum(~np.all(A_rre == 0, axis=1))
return rank
def inv(A):
if not (A.ndim == 2 and A.shape[0] == A.shape[1]):
raise np.linalg.LinAlgError(f"Argument `A` must be square, not {A.shape}.")
field = type(A)
n = A.shape[0]
I = field.Identity(n, dtype=A.dtype)
# Concatenate A and I to get the matrix AI = [A | I]
AI = np.concatenate((A, I), axis=-1)
# Perform Gaussian elimination to get the reduced row echelon form AI_rre = [I | A^-1]
AI_rre, _ = row_reduce(AI, ncols=n)
# The rank is the number of non-zero rows of the row reduced echelon form
rank = np.sum(~np.all(AI_rre[:,0:n] == 0, axis=1))
if not rank == n:
raise np.linalg.LinAlgError(f"Argument `A` is singular and not invertible because it does not have full rank of {n}, but rank of {rank}.")
A_inv = AI_rre[:,-n:]
return A_inv
def triangular_det(A):
if not (A.ndim == 2 and A.shape[0] == A.shape[1]):
raise np.linalg.LinAlgError(f"Argument `A` must be square, not {A.shape}.")
idxs = np.arange(0, A.shape[0])
return np.multiply.reduce(A[idxs,idxs])
def det(A):
if not (A.ndim == 2 and A.shape[0] == A.shape[1]):
raise np.linalg.LinAlgError(f"Argument `A` must be square, not {A.shape}.")
field = type(A)
n = A.shape[0]
if n == 2:
return A[0,0]*A[1,1] - A[0,1]*A[1,0]
elif n == 3:
return A[0,0]*(A[1,1]*A[2,2] - A[1,2]*A[2,1]) - A[0,1]*(A[1,0]*A[2,2] - A[1,2]*A[2,0]) + A[0,2]*(A[1,0]*A[2,1] - A[1,1]*A[2,0])
else:
P, L, U, N_permutations = plu_decompose(A)
P = P.T # Convert row permutation matrix into column permutation matrix
det_P = (-field(1)) ** N_permutations
det_L = triangular_det(L)
det_U = triangular_det(U)
return det_P * det_L * det_U
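# Note on the n > 3 branch above: for the factorisation A = P L U the
# determinant splits as det(A) = det(P) * det(L) * det(U), where det(P) is
# (-1) raised to the number of row swaps; det_P computes exactly that in field
# arithmetic, and the triangular determinants are just diagonal products.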
def solve(A, b):
if not type(A) is type(b):
raise TypeError(f"Arguments `A` and `b` must be of the same Galois field array class, not {type(A)} and {type(b)}.")
if not (A.ndim == 2 and A.shape[0] == A.shape[1]):
raise np.linalg.LinAlgError(f"Argument `A` must be square, not {A.shape}.")
if not b.ndim in [1, 2]:
raise np.linalg.LinAlgError(f"Argument `b` must have dimension equal to A or one less, not {b.ndim}.")
if not A.shape[-1] == b.shape[0]:
raise np.linalg.LinAlgError(f"The last dimension of `A` must equal the first dimension of `b`, not {A.shape} and {b.shape}.")
A_inv = inv(A)
x = A_inv @ b
return x
def row_space(A):
"""
R(A) = C(A^T)
"""
if not A.ndim == 2:
raise ValueError(f"Only 2-D matrices have a row space, not {A.ndim}-D.")
A_rre, _ = row_reduce(A)
rank = np.sum(~np.all(A_rre == 0, axis=1))
R = A_rre[0:rank,:]
return R
def column_space(A):
"""
C(A) = R(A^T)
"""
if not A.ndim == 2:
raise ValueError(f"Only 2-D matrices have a column space, not {A.ndim}-D.")
return row_space(A.T)
def left_null_space(A):
"""
    x = LN(A) = N(A^T)
    """
# mars/dataframe/base/cut.py
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from numbers import Integral
import numpy as np
import pandas as pd
from ... import opcodes as OperandDef
from ...core import ENTITY_TYPE, ExecutableTuple, OutputType, \
recursive_tile
from ...core.context import get_context
from ...serialization.serializables import KeyField, AnyField, \
BoolField, Int32Field, StringField
from ...tensor import tensor as astensor
from ...tensor.core import TENSOR_TYPE, TensorOrder
from ...utils import has_unknown_shape
from ..core import SERIES_TYPE, INDEX_TYPE
from ..datasource.index import from_pandas as asindex
from ..initializer import Series as asseries
from ..operands import DataFrameOperand, DataFrameOperandMixin
from ..utils import parse_index
class DataFrameCut(DataFrameOperand, DataFrameOperandMixin):
_op_type_ = OperandDef.CUT
_input = KeyField('input')
_bins = AnyField('bins')
_right = BoolField('right')
_labels = AnyField('labels')
_retbins = BoolField('retbins')
_precision = Int32Field('precision')
_include_lowest = BoolField('include_lowest')
_duplicates = StringField('duplicates')
def __init__(self, bins=None, right=None, labels=None, retbins=None,
precision=None, include_lowest=None, duplicates=None, **kw):
super().__init__(_bins=bins, _right=right, _labels=labels,
_retbins=retbins, _precision=precision,
_include_lowest=include_lowest, _duplicates=duplicates, **kw)
@property
def input(self):
return self._input
@property
def bins(self):
return self._bins
@property
def right(self):
return self._right
@property
def labels(self):
return self._labels
@property
def retbins(self):
return self._retbins
@property
def precision(self):
return self._precision
@property
def include_lowest(self):
return self._include_lowest
@property
def duplicates(self):
return self._duplicates
@property
def output_limit(self):
return 1 if not self._retbins else 2
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
inputs_iter = iter(self._inputs)
self._input = next(inputs_iter)
if isinstance(self._bins, ENTITY_TYPE):
self._bins = next(inputs_iter)
if isinstance(self._labels, ENTITY_TYPE):
self._labels = next(inputs_iter)
def __call__(self, x):
if isinstance(x, pd.Series):
x = asseries(x)
elif not isinstance(x, ENTITY_TYPE):
x = astensor(x)
if x.ndim != 1:
raise ValueError('Input array must be 1 dimensional')
if x.size == 0:
raise ValueError('Cannot cut empty array')
inputs = [x]
if self._labels is not None and \
not isinstance(self._labels, (bool, ENTITY_TYPE)):
self._labels = np.asarray(self._labels)
# infer dtype
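        # Calling pd.cut on a one-element stand-in with the same dtype is a
        # cheap way to learn both the output container type (Series, ndarray or
        # Categorical) and its dtype without touching the real (lazy) data.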
x_empty = pd.Series([1], dtype=x.dtype) if isinstance(x, SERIES_TYPE) else \
np.asarray([1], dtype=x.dtype)
if isinstance(self._bins, INDEX_TYPE):
bins = self._bins.index_value.to_pandas()
inputs.append(self._bins)
bins_unknown = True
elif isinstance(self._bins, ENTITY_TYPE):
bins = np.asarray([2], dtype=self._bins.dtype)
inputs.append(self._bins)
bins_unknown = True
else:
bins = self._bins
bins_unknown = isinstance(self._bins, Integral)
if isinstance(self._labels, ENTITY_TYPE):
bins_unknown = True
labels = None
inputs.append(self._labels)
else:
if self._labels is False or not bins_unknown:
labels = self._labels
else:
labels = None
ret = pd.cut(x_empty, bins, right=self._right, labels=labels,
retbins=True, include_lowest=self._include_lowest,
duplicates=self._duplicates)
kws = []
output_types = []
if bins_unknown and isinstance(ret[0].dtype, pd.CategoricalDtype):
# inaccurate dtype, just create an empty one
out_dtype = pd.CategoricalDtype()
else:
out_dtype = ret[0].dtype
if isinstance(ret[0], pd.Series):
output_types.append(OutputType.series)
kws.append({
'dtype': out_dtype,
'shape': x.shape,
'index_value': x.index_value,
'name': x.name
})
elif isinstance(ret[0], np.ndarray):
output_types.append(OutputType.tensor)
kws.append({
'dtype': out_dtype,
'shape': x.shape,
'order': TensorOrder.C_ORDER
})
else:
assert isinstance(ret[0], pd.Categorical)
output_types.append(OutputType.categorical)
kws.append({
'dtype': out_dtype,
'shape': x.shape,
'categories_value': parse_index(out_dtype.categories,
store_data=True)
})
if self._retbins:
if isinstance(self._bins, (pd.IntervalIndex, INDEX_TYPE)):
output_types.append(OutputType.index)
kws.append({
'dtype': self._bins.dtype,
'shape': self._bins.shape,
'index_value': self._bins.index_value
if isinstance(self._bins, INDEX_TYPE) else
parse_index(self._bins, store_data=False),
'name': self._bins.name
})
else:
output_types.append(OutputType.tensor)
kws.append({
'dtype': ret[1].dtype,
'shape': ret[1].shape if ret[1].size > 0 else (np.nan,),
'order': TensorOrder.C_ORDER
})
self.output_types = output_types
return ExecutableTuple(self.new_tileables(inputs, kws=kws))
@classmethod
def tile(cls, op):
if isinstance(op.bins, ENTITY_TYPE):
# check op.bins chunk shapes
if has_unknown_shape(op.bins):
yield
bins = yield from recursive_tile(
op.bins.rechunk(op.bins.shape))
else:
bins = op.bins
if isinstance(op.labels, ENTITY_TYPE):
# check op.labels chunk shapes
if has_unknown_shape(op.labels):
yield
labels = yield from recursive_tile(
op.labels.rechunk(op.labels.shape))
else:
labels = op.labels
if isinstance(op.bins, Integral):
input_min, input_max = yield from recursive_tile(
op.input.min(), op.input.max())
input_min_chunk = input_min.chunks[0]
input_max_chunk = input_max.chunks[0]
# let input min and max execute first
yield [input_min_chunk, input_max_chunk]
ctx = get_context()
keys = [input_min_chunk.key, input_max_chunk.key]
# get min and max of x
min_val, max_val = ctx.get_chunks_result(keys)
# calculate bins
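            # Mirror pandas.cut with an integer `bins`: build equal-width edges
            # over [min, max] with np.linspace, then nudge one outer edge by
            # 0.1% of the range (the left edge when right=True, otherwise the
            # right edge) so the extreme value still falls inside a bin; a
            # degenerate min == max range is widened on both sides instead.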
if np.isinf(min_val) or np.isinf(max_val):
raise ValueError('cannot specify integer `bins` '
'when input data contains infinity')
elif min_val == max_val: # adjust end points before binning
min_val -= 0.001 * abs(min_val) if min_val != 0 else 0.001
max_val += 0.001 * abs(max_val) if max_val != 0 else 0.001
bins = np.linspace(min_val, max_val, bins + 1, endpoint=True)
else: # adjust end points before binning
bins = np.linspace(min_val, max_val, bins + 1, endpoint=True)
adj = (max_val - min_val) * 0.001 # 0.1% of the range
if op.right:
bins[0] -= adj
else:
bins[-1] += adj
outs = op.outputs
out_chunks = []
for c in op.input.chunks:
chunk_op = op.copy().reset_key()
chunk_inputs = [c]
chunk_op._bins = bins
# do not return bins always for chunk
chunk_op._retbins = False
if isinstance(bins, ENTITY_TYPE):
chunk_inputs.append(bins.chunks[0])
chunk_op._labels = labels
if isinstance(labels, ENTITY_TYPE):
chunk_inputs.append(labels.chunks[0])
chunk_kws = []
if isinstance(outs[0], SERIES_TYPE):
chunk_kws.append({
'dtype': outs[0].dtype,
'shape': c.shape,
'index_value': c.index_value,
'name': c.name,
'index': c.index,
})
elif isinstance(outs[0], TENSOR_TYPE):
chunk_kws.append({
'dtype': outs[0].dtype,
'shape': c.shape,
'order': TensorOrder.C_ORDER,
'index': c.index,
})
else:
chunk_kws.append({
'dtype': outs[0].dtype,
'shape': c.shape,
'categories_value': outs[0].categories_value,
'index': c.index,
})
out_chunks.append(chunk_op.new_chunk(chunk_inputs, kws=chunk_kws))
kws = []
out_kw = outs[0].params
out_kw['chunks'] = out_chunks
out_kw['nsplits'] = op.input.nsplits
kws.append(out_kw)
if len(outs) == 2:
bins_kw = outs[1].params
bins_kw['chunks'] = bins_chunks = []
if isinstance(bins, ENTITY_TYPE):
bins_chunks.append(bins.chunks[0])
else:
if op.duplicates == 'drop':
if isinstance(bins, (np.ndarray, list, tuple)):
bins = np.unique(bins)
else:
bins = bins.unique()
bins = bins.astype(outs[1].dtype, copy=False)
convert = \
astensor if not isinstance(bins, pd.IntervalIndex) else asindex
converted = yield from recursive_tile(
convert(bins, chunk_size=len(bins)))
bins_chunks.append(converted.chunks[0])
bins_kw['nsplits'] = ((len(bins),),)
kws.append(bins_kw)
new_op = op.copy()
return new_op.new_tileables(op.inputs, kws=kws)
@classmethod
def execute(cls, ctx, op):
x = ctx[op.input.key]
bins = ctx[op.bins.key] if isinstance(op.bins, ENTITY_TYPE) else op.bins
labels = ctx[op.labels.key] if isinstance(op.labels, ENTITY_TYPE) else op.labels
cut = partial(pd.cut, right=op.right, retbins=op.retbins, precision=op.precision,
include_lowest=op.include_lowest, duplicates=op.duplicates)
try:
ret = cut(x, bins, labels=labels)
except ValueError:
# fail due to buffer source array is read-only
ret = cut(x.copy(), bins, labels=labels)
if op.retbins: # pragma: no cover
ctx[op.outputs[0].key] = ret[0]
ctx[op.outputs[1].key] = ret[1]
else:
ctx[op.outputs[0].key] = ret
def cut(x, bins, right: bool = True, labels=None, retbins: bool = False,
precision: int = 3, include_lowest: bool = False, duplicates: str = 'raise'):
"""
Bin values into discrete intervals.
Use `cut` when you need to segment and sort data values into bins. This
function is also useful for going from a continuous variable to a
categorical variable. For example, `cut` could convert ages to groups of
age ranges. Supports binning into an equal number of bins, or a
pre-specified array of bins.
Parameters
----------
x : array-like
The input array to be binned. Must be 1-dimensional.
bins : int, sequence of scalars, or IntervalIndex
The criteria to bin by.
* int : Defines the number of equal-width bins in the range of `x`. The
range of `x` is extended by .1% on each side to include the minimum
and maximum values of `x`.
* sequence of scalars : Defines the bin edges allowing for non-uniform
width. No extension of the range of `x` is done.
* IntervalIndex : Defines the exact bins to be used. Note that
IntervalIndex for `bins` must be non-overlapping.
right : bool, default True
Indicates whether `bins` includes the rightmost edge or not. If
``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]``
indicate (1,2], (2,3], (3,4]. This argument is ignored when
`bins` is an IntervalIndex.
labels : array or False, default None
Specifies the labels for the returned bins. Must be the same length as
the resulting bins. If False, returns only integer indicators of the
bins. This affects the type of the output container (see below).
This argument is ignored when `bins` is an IntervalIndex. If True,
raises an error.
retbins : bool, default False
Whether to return the bins or not. Useful when bins is provided
as a scalar.
precision : int, default 3
The precision at which to store and display the bins labels.
include_lowest : bool, default False
Whether the first interval should be left-inclusive or not.
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
Returns
-------
    out : Categorical, Series, or Tensor
        An array-like object representing the respective bin for each value
        of `x`; the exact container mirrors the type of the input.
    bins : Tensor or IntervalIndex
        The computed or specified bins, only returned when ``retbins=True``.
    """
    op = DataFrameCut(bins=bins, right=right, labels=labels, retbins=retbins,
                      precision=precision, include_lowest=include_lowest,
                      duplicates=duplicates)
    ret = op(x)
    return ret if retbins else ret[0]
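# Hypothetical usage sketch (names follow the usual mars.dataframe entry point;
# the values and the deferred-execution call are illustrative only):
#
#   import mars.dataframe as md
#   s = md.Series([1, 7, 5, 4, 6, 3])
#   binned = md.cut(s, 3)        # three equal-width bins over [1, 7]
#   binned.execute()             # triggers tiling and execution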
    atlas = np.zeros(im_size, dtype=np.uint8)
for i in range(nb_patterns):
label = (i + 1)
if len(im_size) == 2:
atlas = draw_rand_ellipse(atlas, color=label)
elif len(im_size) == 3:
atlas = draw_rand_ellipsoid(atlas, clr=label)
# logging.debug(type(atlas))
atlas_def = image_deform_elastic(atlas)
# logging.debug(np.unique(atlas))
export_image(out_dir, atlas_def, 'atlas')
# in case run in DEBUG show atlas and wait till close
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
logging.debug('labels: %r', np.unique(atlas_def))
atlas_show = atlas_def if atlas_def.ndim == 2 else atlas_def[int(atlas_def.shape[0] / 2)]
plt.imshow(atlas_show)
plt.show()
atlas_new, imgs_patterns = atlas_filter_larges_components(atlas_def)
plt.imsave(os.path.join(path_out, 'atlas_rgb.png'), atlas_new, cmap=plt.cm.jet)
export_image(out_dir, atlas_new, 'atlas', stretch_range=False)
for i, img in enumerate(imgs_patterns):
export_image(out_dir, img, i, temp_img_name)
return imgs_patterns
def dictionary_generate_rnd_pattern(
path_out=None,
dir_name=DIR_NAME_DICTIONARY,
nb_patterns=NB_BIN_PATTERNS,
im_size=IMAGE_SIZE_2D,
temp_img_name=IMAGE_PATTERN,
rand_seed=None
):
""" generate pattern dictionary and allow overlapping
:param str path_out: path to the results directory
:param str dir_name: name of the folder
:param str temp_img_name: use template for pattern names
:param int nb_patterns: number of patterns / labels
:param tuple(int,int) im_size: image size
:param rand_seed: random initialization
    :return ndarray: [np.array<height, width>] list of independent patterns in the dict.
>>> p_dir = 'sample_rnd_pattern'
>>> os.mkdir(p_dir)
>>> _list_img_paths = dictionary_generate_rnd_pattern(
... nb_patterns=3, im_size=(10, 8), path_out=p_dir, rand_seed=0)
>>> len(_list_img_paths)
3
>>> _list_img_paths[1]
array([[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 255, 0],
[ 0, 0, 0, 0, 0, 0, 255, 0],
[ 0, 0, 0, 0, 0, 0, 255, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> import shutil
>>> shutil.rmtree(p_dir, ignore_errors=True)
"""
logging.info('generate Dict. composed from %i patterns and img. size %r', nb_patterns, im_size)
if path_out is not None:
path_out = os.path.join(path_out, dir_name)
create_clean_folder(path_out)
list_imgs = []
for i in range(nb_patterns):
im = draw_rand_ellipse(np.zeros(im_size, dtype=np.uint8), rand_seed=rand_seed)
im = image_deform_elastic(im, rand_seed=rand_seed)
list_imgs.append(im)
if path_out is not None:
export_image(path_out, im, i, temp_img_name)
return list_imgs
def generate_rand_patterns_occlusion(idx, im_ptns, out_dir=None, ptn_ration=RND_PATTERN_OCCLUSION, rand_seed=None):
""" generate the new sample from list of pattern with specific ration
:param int idx: index
:param list(ndarray) im_ptns: images with patterns
:param str out_dir: name of directory
:param float ptn_ration: number in range (0, 1)
:param rand_seed: random initialization
:return tuple(int,ndarray,str,list(int)):
>>> img1 = np.zeros((6, 15), dtype=int)
>>> img1[2:5, 5:10] = 1
>>> img2 = np.zeros((6, 15), dtype=int)
>>> img2[3:6, 2:13] = 1
>>> idx, im, im_name, ptn_weights = generate_rand_patterns_occlusion(0, [img1, img2],
... rand_seed=0)
>>> idx
0
>>> im
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]])
>>> im_name
'sample_00000'
>>> ptn_weights
[0, 1]
"""
    # re-seed so that samples generated at the same time (e.g. in parallel) are still random
np.random.seed(rand_seed)
bool_combine = np.random.random(len(im_ptns)) < ptn_ration
    # if none of the patterns is above the threshold, select one at random
if not any(bool_combine):
bool_combine[np.random.randint(0, len(bool_combine))] = True
logging.debug('combination vector is %r', bool_combine.tolist())
im = sum(np.asarray(im_ptns)[bool_combine])
    # convert the sum to a union: set every value above 0 to 1
im[im > 0.] = 1
im = im.astype(im_ptns[0].dtype)
im_name = SEGM_PATTERN.format(idx)
if out_dir is not None and os.path.exists(out_dir):
export_image(out_dir, im, idx)
ptn_weights = [int(x) for x in bool_combine]
return idx, im, im_name, ptn_weights
def dataset_binary_combine_patterns(
im_ptns,
out_dir=None,
nb_samples=NB_SAMPLES,
ptn_ration=RND_PATTERN_OCCLUSION,
nb_workers=NB_WORKERS,
rand_seed=None
):
""" generate a Binary dataset composed from N samples and given ration
of pattern occlusion
    :param list(ndarray) im_ptns: [np.array<height, width>] list of independent patterns in the dictionary
:param str out_dir: path to the results directory
:param int nb_samples: number of samples in dataset
    :param float ptn_ration: ratio of how many patterns are used to create
an input observation / image
:param int nb_workers: number of running jobs
:param rand_seed: random initialization
:return tuple(ndarray,DF): [np.array<height, width>], df<nb_imgs, nb_lbs>
>>> img1 = np.zeros((6, 15), dtype=int)
>>> img1[2:5, 5:10] = 1
>>> img2 = np.zeros((6, 15), dtype=int)
>>> img2[3:6, 2:13] = 1
>>> im_spls, df_weights = dataset_binary_combine_patterns([img1, img2],
... nb_samples=5, rand_seed=0)
>>> len(im_spls)
5
>>> im_spls[1]
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]])
>>> df_weights # doctest: +NORMALIZE_WHITESPACE
ptn_01 ptn_02
image
sample_00000 0 1
sample_00001 0 1
sample_00002 0 1
sample_00003 0 1
sample_00004 0 1
"""
logging.info(
        'generate a binary dataset composed of %i samples and pattern occlusion ratio %f', nb_samples, ptn_ration
)
if out_dir is not None:
create_clean_folder(out_dir)
im_spls = [None] * nb_samples
im_names = [None] * nb_samples
im_weights = [None] * nb_samples
logging.debug('running in %i threads...', nb_workers)
_wrapper_generate = partial(
generate_rand_patterns_occlusion, im_ptns=im_ptns, out_dir=out_dir, ptn_ration=ptn_ration, rand_seed=rand_seed
)
for idx, im, im_name, ptn_weights in WrapExecuteSequence(_wrapper_generate, range(nb_samples), nb_workers):
im_spls[idx] = im
im_names[idx] = im_name
im_weights[idx] = ptn_weights
df_weights = format_table_weights(im_names, im_weights)
logging.debug(df_weights.head())
return im_spls, df_weights
def format_table_weights(list_names, list_weights, index_name='image', col_name=COLUMN_NAME):
""" format the output table with patterns
    :param list_names: list of image names
    :param list_weights: list of binary pattern-weight vectors, one per image
    :return DF: table with one row per image and one column per pattern
>>> df = format_table_weights(['aaa', 'bbb', 'ccc'], [[0, 1], [1, 0]])
>>> df # doctest: +NORMALIZE_WHITESPACE
ptn_01 ptn_02
image
aaa 0 1
bbb 1 0
"""
nb = min(len(list_names), len(list_weights))
df = pd.DataFrame(data=list_weights[:nb], index=list_names[:nb])
df.columns = [col_name.format(i + 1) for i in range(len(df.columns))]
df.index.name = index_name
df.sort_index(inplace=True)
return df
def add_image_binary_noise(im, ration=0.1, rand_seed=None):
""" generate and add a binary noise to an image
:param ndarray im: np.array<height, width> input binary image
    :param float ration: number in range (0, 1), where 0 means no noise
:param rand_seed: random initialization
:return ndarray: np.array<height, width> binary image
>>> img = np.zeros((5, 15), dtype=int)
>>> img[1:4, 3:7] = 1
>>> add_image_binary_noise(img, ration=0.1, rand_seed=0)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0]], dtype=int16)
"""
logging.debug('... add random noise to a binary image')
np.random.seed(rand_seed)
rnd = np.random.random(im.shape)
rnd = np.array(rnd < ration, dtype=np.int16)
im_noise = np.abs(np.asanyarray(im, dtype=np.int16) - rnd)
# plt.subplot(1,3,1), plt.imshow(im)
# plt.subplot(1,3,2), plt.imshow(rnd)
# plt.subplot(1,3,3), plt.imshow(im - rnd)
# plt.show()
return np.array(im_noise, dtype=np.int16)
def export_image(path_out, img, im_name, name_template=SEGM_PATTERN, stretch_range=True, nifti=False):
""" export an image with given path and optional pattern for image name
:param str path_out: path to the results directory
:param ndarray img: image np.array<height, width>
    :param str/int im_name: image name, or an index to be placed into the name pattern
    :param str name_template: when `im_name` is not a string, the image name is
        generated from this pattern (a format string)
    :param bool stretch_range: whether to stretch the intensity values
    :return str: path to the exported image
Image - PNG
>>> np.random.seed(0)
>>> img = np.random.random([5, 10])
>>> path_img = export_image('.', img, 'testing-image')
>>> path_img
'./testing-image.png'
>>> os.path.exists(path_img)
True
>>> name, im = load_image(path_img)
>>> im.shape
(5, 10)
>>> np.round(im.astype(float), 1).tolist() # doctest: +NORMALIZE_WHITESPACE
[[0.6, 0.7, 0.6, 0.6, 0.4, 0.7, 0.4, 0.9, 1.0, 0.4],
     [0.8, 0.5, 0.6, 0.9, 0.1, 0.1, 0.0, 0.8,
 'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0293014,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 1.3036,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0502308,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.0810205,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0408964,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.172148,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0574506,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 3.93511,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00210691,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0152359,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0155819,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0152359,
'Execution Unit/Register Files/Runtime Dynamic': 0.0176888,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0320977,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.0904574,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 0.88836,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000515972,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000515972,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000454699,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000178913,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000223835,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00171048,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00475818,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0149792,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 0.952807,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.068342,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0508763,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.21756,
'Instruction Fetch Unit/Runtime Dynamic': 0.140666,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0255279,
'L2/Runtime Dynamic': 0.00850176,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 1.49977,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.139694,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.00849739,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.00849749,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 1.5399,
'Load Store Unit/Runtime Dynamic': 0.190098,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0209531,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.0419067,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.00743633,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.00781969,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.059242,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.011204,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.228125,
'Memory Management Unit/Runtime Dynamic': 0.0190236,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 12.5357,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00226628,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0258581,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
| |
# pylint: disable-msg=E1101,W0612
import operator
import nose # noqa
from numpy import nan
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, bdate_range, Panel
from pandas.tseries.index import DatetimeIndex
import pandas.core.datetools as datetools
import pandas.util.testing as tm
from pandas.compat import lrange
from pandas import compat
import pandas.sparse.frame as spf
from pandas._sparse import BlockIndex, IntIndex
from pandas.sparse.api import SparseSeries, SparseDataFrame
from pandas.tests.frame.test_misc_api import SharedWithSparse
class TestSparseDataFrame(tm.TestCase, SharedWithSparse):
klass = SparseDataFrame
_multiprocess_can_split_ = True
def setUp(self):
self.data = {'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
'C': np.arange(10),
'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}
self.dates = bdate_range('1/1/2011', periods=10)
self.orig = pd.DataFrame(self.data, index=self.dates)
self.iorig = pd.DataFrame(self.data, index=self.dates)
self.frame = SparseDataFrame(self.data, index=self.dates)
self.iframe = SparseDataFrame(self.data, index=self.dates,
default_kind='integer')
values = self.frame.values.copy()
values[np.isnan(values)] = 0
self.zorig = pd.DataFrame(values, columns=['A', 'B', 'C', 'D'],
index=self.dates)
self.zframe = SparseDataFrame(values, columns=['A', 'B', 'C', 'D'],
default_fill_value=0, index=self.dates)
values = self.frame.values.copy()
values[np.isnan(values)] = 2
self.fill_orig = pd.DataFrame(values, columns=['A', 'B', 'C', 'D'],
index=self.dates)
self.fill_frame = SparseDataFrame(values, columns=['A', 'B', 'C', 'D'],
default_fill_value=2,
index=self.dates)
self.empty = SparseDataFrame()
def test_fill_value_when_combine_const(self):
# GH12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype='float')
df = SparseDataFrame({'foo': dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_sp_frame_equal(res, exp)
def test_as_matrix(self):
empty = self.empty.as_matrix()
self.assertEqual(empty.shape, (0, 0))
no_cols = SparseDataFrame(index=np.arange(10))
mat = no_cols.as_matrix()
self.assertEqual(mat.shape, (10, 0))
no_index = SparseDataFrame(columns=np.arange(10))
mat = no_index.as_matrix()
self.assertEqual(mat.shape, (0, 10))
def test_copy(self):
cp = self.frame.copy()
tm.assertIsInstance(cp, SparseDataFrame)
tm.assert_sp_frame_equal(cp, self.frame)
# as of v0.15.0
# this is now identical (but not is_a )
self.assertTrue(cp.index.identical(self.frame.index))
def test_constructor(self):
for col, series in compat.iteritems(self.frame):
tm.assertIsInstance(series, SparseSeries)
tm.assertIsInstance(self.iframe['A'].sp_index, IntIndex)
# constructed zframe from matrix above
self.assertEqual(self.zframe['A'].fill_value, 0)
tm.assert_almost_equal([0, 0, 0, 0, 1, 2, 3, 4, 5, 6],
self.zframe['A'].values)
# construct no data
sdf = SparseDataFrame(columns=np.arange(10), index=np.arange(10))
for col, series in compat.iteritems(sdf):
tm.assertIsInstance(series, SparseSeries)
# construct from nested dict
data = {}
for c, s in compat.iteritems(self.frame):
data[c] = s.to_dict()
sdf = SparseDataFrame(data)
tm.assert_sp_frame_equal(sdf, self.frame)
# TODO: test data is copied from inputs
# init dict with different index
idx = self.frame.index[:5]
cons = SparseDataFrame(
self.frame, index=idx, columns=self.frame.columns,
default_fill_value=self.frame.default_fill_value,
default_kind=self.frame.default_kind, copy=True)
reindexed = self.frame.reindex(idx)
tm.assert_sp_frame_equal(cons, reindexed, exact_indices=False)
# assert level parameter breaks reindex
self.assertRaises(TypeError, self.frame.reindex, idx, level=0)
repr(self.frame)
def test_constructor_ndarray(self):
# no index or columns
sp = SparseDataFrame(self.frame.values)
# 1d
sp = SparseDataFrame(self.data['A'], index=self.dates, columns=['A'])
tm.assert_sp_frame_equal(sp, self.frame.reindex(columns=['A']))
# raise on level argument
self.assertRaises(TypeError, self.frame.reindex, columns=['A'],
level=1)
# wrong length index / columns
with tm.assertRaisesRegexp(ValueError, "^Index length"):
SparseDataFrame(self.frame.values, index=self.frame.index[:-1])
with tm.assertRaisesRegexp(ValueError, "^Column length"):
SparseDataFrame(self.frame.values, columns=self.frame.columns[:-1])
# GH 9272
def test_constructor_empty(self):
sp = SparseDataFrame()
self.assertEqual(len(sp.index), 0)
self.assertEqual(len(sp.columns), 0)
def test_constructor_dataframe(self):
dense = self.frame.to_dense()
sp = SparseDataFrame(dense)
tm.assert_sp_frame_equal(sp, self.frame)
def test_constructor_convert_index_once(self):
arr = np.array([1.5, 2.5, 3.5])
sdf = SparseDataFrame(columns=lrange(4), index=arr)
self.assertTrue(sdf[0].index is sdf[1].index)
def test_constructor_from_series(self):
# GH 2873
x = Series(np.random.randn(10000), name='a')
x = x.to_sparse(fill_value=0)
tm.assertIsInstance(x, SparseSeries)
df = SparseDataFrame(x)
tm.assertIsInstance(df, SparseDataFrame)
x = Series(np.random.randn(10000), name='a')
y = Series(np.random.randn(10000), name='b')
x2 = x.astype(float)
x2.ix[:9998] = np.NaN
# TODO: x_sparse is unused...fix
x_sparse = x2.to_sparse(fill_value=np.NaN) # noqa
# Currently fails too with weird ufunc error
# df1 = SparseDataFrame([x_sparse, y])
y.ix[:9998] = 0
        # TODO: y_sparse is unused...fix
y_sparse = y.to_sparse(fill_value=0) # noqa
# without sparse value raises error
# df2 = SparseDataFrame([x2_sparse, y])
def test_dtypes(self):
df = DataFrame(np.random.randn(10000, 4))
df.ix[:9998] = np.nan
sdf = df.to_sparse()
result = sdf.get_dtype_counts()
expected = Series({'float64': 4})
tm.assert_series_equal(result, expected)
def test_shape(self):
# GH 10452
self.assertEqual(self.frame.shape, (10, 4))
self.assertEqual(self.iframe.shape, (10, 4))
self.assertEqual(self.zframe.shape, (10, 4))
self.assertEqual(self.fill_frame.shape, (10, 4))
def test_str(self):
df = DataFrame(np.random.randn(10000, 4))
df.ix[:9998] = np.nan
sdf = df.to_sparse()
str(sdf)
def test_array_interface(self):
res = np.sqrt(self.frame)
dres = np.sqrt(self.frame.to_dense())
tm.assert_frame_equal(res.to_dense(), dres)
def test_pickle(self):
def _test_roundtrip(frame, orig):
result = self.round_trip_pickle(frame)
tm.assert_sp_frame_equal(frame, result)
tm.assert_frame_equal(result.to_dense(), orig, check_dtype=False)
_test_roundtrip(SparseDataFrame(), DataFrame())
self._check_all(_test_roundtrip)
def test_dense_to_sparse(self):
df = DataFrame({'A': [nan, nan, nan, 1, 2],
'B': [1, 2, nan, nan, nan]})
sdf = df.to_sparse()
tm.assertIsInstance(sdf, SparseDataFrame)
self.assertTrue(np.isnan(sdf.default_fill_value))
tm.assertIsInstance(sdf['A'].sp_index, BlockIndex)
tm.assert_frame_equal(sdf.to_dense(), df)
sdf = df.to_sparse(kind='integer')
tm.assertIsInstance(sdf['A'].sp_index, IntIndex)
df = DataFrame({'A': [0, 0, 0, 1, 2],
'B': [1, 2, 0, 0, 0]}, dtype=float)
sdf = df.to_sparse(fill_value=0)
self.assertEqual(sdf.default_fill_value, 0)
tm.assert_frame_equal(sdf.to_dense(), df)
def test_density(self):
df = SparseSeries([nan, nan, nan, 0, 1, 2, 3, 4, 5, 6])
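        # 7 of the 10 values are not NaN, so the density is 7 / 10 = 0.7.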
self.assertEqual(df.density, 0.7)
df = SparseDataFrame({'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
'C': np.arange(10),
'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]})
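        # Across the four columns 7 + 7 + 10 + 6 = 30 of the 40 values are not
        # NaN, giving a density of 30 / 40 = 0.75.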
self.assertEqual(df.density, 0.75)
def test_sparse_to_dense(self):
pass
def test_sparse_series_ops(self):
self._check_frame_ops(self.frame)
def test_sparse_series_ops_i(self):
self._check_frame_ops(self.iframe)
def test_sparse_series_ops_z(self):
self._check_frame_ops(self.zframe)
def test_sparse_series_ops_fill(self):
self._check_frame_ops(self.fill_frame)
def _check_frame_ops(self, frame):
def _compare_to_dense(a, b, da, db, op):
sparse_result = op(a, b)
dense_result = op(da, db)
fill = sparse_result.default_fill_value
dense_result = dense_result.to_sparse(fill_value=fill)
tm.assert_sp_frame_equal(sparse_result, dense_result,
exact_indices=False)
if isinstance(a, DataFrame) and isinstance(db, DataFrame):
mixed_result = op(a, db)
tm.assertIsInstance(mixed_result, SparseDataFrame)
tm.assert_sp_frame_equal(mixed_result, sparse_result,
exact_indices=False)
opnames = ['add', 'sub', 'mul', 'truediv', 'floordiv']
ops = [getattr(operator, name) for name in opnames]
fidx = frame.index
# time series operations
series = [frame['A'], frame['B'], frame['C'], frame['D'],
frame['A'].reindex(fidx[:7]), frame['A'].reindex(fidx[::2]),
SparseSeries(
[], index=[])]
for op in opnames:
_compare_to_dense(frame, frame[::2], frame.to_dense(),
frame[::2].to_dense(), getattr(operator, op))
# 2304, no auto-broadcasting
for i, s in enumerate(series):
f = lambda a, b: getattr(a, op)(b, axis='index')
_compare_to_dense(frame, s, frame.to_dense(), s.to_dense(), f)
# rops are not implemented
# _compare_to_dense(s, frame, s.to_dense(),
# frame.to_dense(), f)
# cross-sectional operations
series = [frame.xs(fidx[0]), frame.xs(fidx[3]), frame.xs(fidx[5]),
frame.xs(fidx[7]), frame.xs(fidx[5])[:2]]
for op in ops:
for s in series:
_compare_to_dense(frame, s, frame.to_dense(), s, op)
_compare_to_dense(s, frame, s, frame.to_dense(), op)
# it works!
result = self.frame + self.frame.ix[:, ['A', 'B']] # noqa
def test_op_corners(self):
empty = self.empty + self.empty
self.assertTrue(empty.empty)
foo = self.frame + self.empty
tm.assertIsInstance(foo.index, DatetimeIndex)
tm.assert_frame_equal(foo, self.frame * np.nan)
foo = self.empty + self.frame
tm.assert_frame_equal(foo, self.frame * np.nan)
def test_scalar_ops(self):
pass
def test_getitem(self):
# 1585 select multiple columns
sdf = SparseDataFrame(index=[0, 1, 2], columns=['a', 'b', 'c'])
result = sdf[['a', 'b']]
exp = sdf.reindex(columns=['a', 'b'])
tm.assert_sp_frame_equal(result, exp)
self.assertRaises(Exception, sdf.__getitem__, ['a', 'd'])
def test_icol(self):
# 10711 deprecated
# 2227
result = self.frame.iloc[:, 0]
self.assertTrue(isinstance(result, SparseSeries))
tm.assert_sp_series_equal(result, self.frame['A'])
# preserve sparse index type. #2251
data = {'A': [0, 1]}
iframe = SparseDataFrame(data, default_kind='integer')
self.assertEqual(type(iframe['A'].sp_index),
type(iframe.iloc[:, 0].sp_index))
def test_set_value(self):
        # ok as the index gets converted to object
frame = self.frame.copy()
res = frame.set_value('foobar', 'B', 1.5)
self.assertEqual(res.index.dtype, 'object')
res = self.frame
res.index = res.index.astype(object)
res = self.frame.set_value('foobar', 'B', 1.5)
self.assertIsNot(res, self.frame)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res.get_value('foobar', 'B'), 1.5)
res2 = res.set_value('foobar', 'qux', 1.5)
self.assertIsNot(res2, res)
self.assert_numpy_array_equal(res2.columns,
list(self.frame.columns) + ['qux'])
self.assertEqual(res2.get_value('foobar', 'qux'), 1.5)
def test_fancy_index_misc(self):
# axis = 0
sliced = self.frame.ix[-2:, :]
expected = self.frame.reindex(index=self.frame.index[-2:])
tm.assert_sp_frame_equal(sliced, expected)
# axis = 1
sliced = self.frame.ix[:, -2:]
expected = self.frame.reindex(columns=self.frame.columns[-2:])
tm.assert_sp_frame_equal(sliced, expected)
def test_getitem_overload(self):
# slicing
sl = self.frame[:20]
tm.assert_sp_frame_equal(sl, self.frame.reindex(self.frame.index[:20]))
# boolean indexing
d = self.frame.index[5]
indexer = self.frame.index > d
subindex = self.frame.index[indexer]
subframe = self.frame[indexer]
self.assert_numpy_array_equal(subindex, subframe.index)
self.assertRaises(Exception, self.frame.__getitem__, indexer[:-1])
def test_setitem(self):
def _check_frame(frame, orig):
N = len(frame)
# insert SparseSeries
frame['E'] = frame['A']
tm.assertIsInstance(frame['E'], SparseSeries)
tm.assert_sp_series_equal(frame['E'], frame['A'],
check_names=False)
# insert SparseSeries differently-indexed
to_insert = frame['A'][::2]
frame['E'] = to_insert
expected = to_insert.to_dense().reindex(frame.index)
result = frame['E'].to_dense()
tm.assert_series_equal(result, expected, check_names=False)
self.assertEqual(result.name, 'E')
# insert Series
frame['F'] = frame['A'].to_dense()
tm.assertIsInstance(frame['F'], SparseSeries)
tm.assert_sp_series_equal(frame['F'], frame['A'],
check_names=False)
# insert Series differently-indexed
to_insert = frame['A'].to_dense()[::2]
frame['G'] = to_insert
expected = to_insert.reindex(frame.index)
expected.name = 'G'
tm.assert_series_equal(frame['G'].to_dense(), expected)
# insert ndarray
frame['H'] = np.random.randn(N)
tm.assertIsInstance(frame['H'], SparseSeries)
to_sparsify = np.random.randn(N)
to_sparsify[N // 2:] = frame.default_fill_value
frame['I'] = to_sparsify
self.assertEqual(len(frame['I'].sp_values), N // 2)
# insert ndarray wrong size
self.assertRaises(Exception, frame.__setitem__, 'foo',
np.random.randn(N - 1))
# scalar value
frame['J'] = 5
self.assertEqual(len(frame['J'].sp_values), N)
self.assertTrue((frame['J'].sp_values == 5).all())
frame['K'] = frame.default_fill_value
self.assertEqual(len(frame['K'].sp_values), 0)
self._check_all(_check_frame)
def test_setitem_corner(self):
self.frame['a'] = self.frame['B']
tm.assert_sp_series_equal(self.frame['a'], self.frame['B'],
check_names=False)
def test_setitem_array(self):
arr = self.frame['B']
self.frame['E'] = arr
tm.assert_sp_series_equal(self.frame['E'], self.frame['B'],
check_names=False)
self.frame['F'] = arr[:-1]
index = self.frame.index[:-1]
tm.assert_sp_series_equal(self.frame['E'].reindex(index),
self.frame['F'].reindex(index),
check_names=False)
def test_delitem(self):
A = self.frame['A']
C = self.frame['C']
del self.frame['B']
self.assertNotIn('B', self.frame)
tm.assert_sp_series_equal(self.frame['A'], A)
tm.assert_sp_series_equal(self.frame['C'], C)
del self.frame['D']
self.assertNotIn('D', self.frame)
del self.frame['A']
self.assertNotIn('A', self.frame)
def test_set_columns(self):
self.frame.columns = self.frame.columns
self.assertRaises(Exception, setattr, self.frame, 'columns',
self.frame.columns[:-1])
def test_set_index(self):
self.frame.index = self.frame.index
self.assertRaises(Exception, setattr, self.frame, 'index',
self.frame.index[:-1])
def test_append(self):
a = self.frame[:5]
b = self.frame[5:]
        appended = a.append(b)
# Copyright (c) 2021, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pylint:disable=useless-super-delegation
import logging
from rest_framework import status
from rest_framework.generics import (get_object_or_404, GenericAPIView,
ListAPIView, ListCreateAPIView, RetrieveUpdateDestroyAPIView)
from rest_framework.response import Response
from ..decorators import _valid_manager
from ..docs import OpenAPIResponse, no_body, swagger_auto_schema
from ..filters import DateRangeFilter
from ..mixins import (ChurnedQuerysetMixin, PlanSubscribersQuerysetMixin,
ProviderMixin, SubscriptionMixin, SubscriptionSmartListMixin,
SubscribedQuerysetMixin)
from .. import signals
from ..models import Subscription
from ..utils import generate_random_slug, datetime_or_now
from .roles import OptinBase
from .serializers import (ForceSerializer,
ProvidedSubscriptionSerializer, ProvidedSubscriptionCreateSerializer,
SubscribedSubscriptionSerializer)
#pylint: disable=no-init
LOGGER = logging.getLogger(__name__)
class SubscribedSubscriptionListBaseAPIView(SubscriptionMixin, ListAPIView):
pass
class SubscribedSubscriptionListAPIView(SubscriptionSmartListMixin,
SubscribedSubscriptionListBaseAPIView):
"""
    Lists a subscriber's subscriptions
Returns a list of {{PAGE_SIZE}} subscriptions, past and present, for
subscriber {organization}.
The queryset can be further refined to match a search filter (``q``)
and sorted on specific fields (``o``).
The API is typically used within an HTML
`subscriptions page </docs/themes/#dashboard_profile_subscriptions>`_
as present in the default theme.
**Tags**: subscriptions, subscriber, subscriptionmodel
**Examples**
.. code-block:: http
GET /api/profile/xia/subscriptions/?o=created_at&ot=desc HTTP/1.1
responds
.. code-block:: json
{
"count": 1,
"next": null,
"previous": null,
"results": [
{
"created_at": "2016-01-14T23:16:55Z",
"ends_at": "2017-01-14T23:16:55Z",
"description": null,
"organization": {
"slug": "xia",
"printable_name": "<NAME>"
},
"plan": {
"slug": "open-space",
"title": "Open Space",
"description": "open space desk, High speed internet
- Ethernet or WiFi, Unlimited printing,
Unlimited scanning, Unlimited fax service
(send and receive)",
"is_active": true,
"setup_amount": 0,
"period_amount": 17999,
"period_type": "monthly",
"app_url": "http://localhost:8020/app"
},
"auto_renew": true
}
]
}
"""
serializer_class = SubscribedSubscriptionSerializer
# No POST. We are talking about a subscriber Organization here.
class SubscriptionDetailAPIView(SubscriptionMixin,
RetrieveUpdateDestroyAPIView):
"""
Retrieves a subscription
Returns the subscription of {organization} to {subscribed_plan}.
**Tags**: subscriptions, subscriber, subscriptionmodel
**Examples**
.. code-block:: http
GET /api/profile/xia/subscriptions/open-space/ HTTP/1.1
responds
.. code-block:: json
{
"created_at": "2019-01-01T00:00:00Z",
"ends_at": "2020-01-01T00:00:00Z",
"description": null,
"organization": {
"slug": "xia",
"created_at": "2019-01-01T00:00:00Z",
"full_name": "<NAME>",
"email": "<EMAIL>",
"phone": "555-555-5555",
"street_address": "350 Bay St.",
"locality": "San Francisco",
"region": "CA",
"postal_code": "94133",
"country": "US",
"default_timezone": "UTC",
"printable_name": "<NAME>",
"is_provider": false,
"is_bulk_buyer": false,
"type": "personal",
"credentials": true,
"extra": null
},
"plan": {
"slug": "open-space",
"title": "Open Space",
"description": "open space desk",
"is_active": true,
"setup_amount": 0,
"period_amount": 17999,
"period_length": 1,
"interval": "monthly",
"unit": "cad",
"organization": "cowork",
"renewal_type": "auto-renew",
"is_not_priced": false,
"created_at": "2019-01-01T00:00:00Z",
"skip_optin_on_grant": false,
"optin_on_request": false,
"extra": null
},
"auto_renew": true,
"editable": true,
"extra": null,
"grant_key": null,
"request_key": null
}
"""
serializer_class = SubscribedSubscriptionSerializer
def put(self, request, *args, **kwargs):
"""
Unsubscribes at a future date
Unsubscribes {organization} from {subscribed_plan} at a future date.
The API is typically used within an HTML
`subscribers page </docs/themes/#dashboard_profile_subscribers>`_
as present in the default theme.
**Tags**: subscriptions, subscriber, subscriptionmodel
**Examples**
.. code-block:: http
PUT /api/profile/xia/subscriptions/open-space/ HTTP/1.1
.. code-block:: json
{
"ends_at": "2020-01-01T00:00:00Z"
}
responds
.. code-block:: json
{
"created_at": "2019-01-01T00:00:00Z",
"ends_at": "2020-01-01T00:00:00Z",
"description": null,
"organization": {
"slug": "xia",
"created_at": "2019-01-01T00:00:00Z",
"full_name": "<NAME>",
"email": "<EMAIL>",
"phone": "555-555-5555",
"street_address": "350 Bay St.",
"locality": "San Francisco",
"region": "CA",
"postal_code": "94133",
"country": "US",
"default_timezone": "UTC",
"printable_name": "<NAME>",
"is_provider": false,
"is_bulk_buyer": false,
"type": "personal",
"credentials": true,
"extra": null
},
"plan": {
"slug": "open-space",
"title": "Open Space",
"description": "open space desk",
"is_active": true,
"setup_amount": 0,
"period_amount": 17999,
"period_length": 1,
"interval": "monthly",
"unit": "cad",
"organization": "cowork",
"renewal_type": "auto-renew",
"is_not_priced": false,
"created_at": "2019-01-01T00:00:00Z",
"skip_optin_on_grant": false,
"optin_on_request": false,
"extra": null
},
"auto_renew": true,
"editable": true,
"extra": null,
"grant_key": null,
"request_key": null
}
"""
return super(SubscriptionDetailAPIView, self).put(
request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
"""
Unsubscribes now
Unsubscribes {organization} from {subscribed_plan}.
The API is typically used within an HTML
`subscribers page </docs/themes/#dashboard_profile_subscribers>`_
as present in the default theme.
**Tags**: subscriptions, subscriber, subscriptionmodel
**Examples**
.. code-block:: http
DELETE /api/profile/xia/subscriptions/open-space/ HTTP/1.1
"""
return super(SubscriptionDetailAPIView, self).delete(
request, *args, **kwargs)
def perform_update(self, serializer):
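        # Only a manager of the provider (the plan's organization) may change
        # the subscription period; for anyone else the submitted `created_at`
        # and `ends_at` are overwritten with the stored values before saving.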
if not _valid_manager(
self.request, [serializer.instance.plan.organization]):
serializer.validated_data['created_at'] \
= serializer.instance.created_at
serializer.validated_data['ends_at'] = serializer.instance.ends_at
super(SubscriptionDetailAPIView, self).perform_update(serializer)
def destroy(self, request, *args, **kwargs):
#pylint:disable=unused-argument
at_time = datetime_or_now()
queryset = self.get_queryset().filter(ends_at__gt=at_time)
queryset.unsubscribe(at_time=at_time)
return Response(status=status.HTTP_204_NO_CONTENT)
class ProvidedSubscriptionsAPIView(SubscriptionSmartListMixin,
PlanSubscribersQuerysetMixin,
OptinBase, ListCreateAPIView):
"""
Lists subscriptions to a plan
Returns a list of {{PAGE_SIZE}} subscriptions to {plan} provided by
{organization}.
The queryset can be further refined to match a search filter (``q``)
and/or a range of dates ([``start_at``, ``ends_at``]),
and sorted on specific fields (``o``).
**Tags**: subscriptions, provider, subscriptionmodel
**Examples**
.. code-block:: http
GET /api/profile/cowork/plans/premium/subscriptions/ HTTP/1.1
responds
.. code-block:: json
{
"count": 1,
"next": null,
"previous": null,
"results": [{
"slug": "xia",
"full_name": "<NAME>",
"created_at": "2016-01-14T23:16:55Z",
"ends_at": "2017-01-14T23:16:55Z"
}]
}
"""
serializer_class = ProvidedSubscriptionSerializer
filter_backends = SubscriptionSmartListMixin.filter_backends + (
DateRangeFilter,)
def get_serializer_class(self):
if self.request.method.lower() == 'post':
return ProvidedSubscriptionCreateSerializer
return super(ProvidedSubscriptionsAPIView, self).get_serializer_class()
def add_relations(self, organizations, user, ends_at=None):
ends_at = datetime_or_now(ends_at)
subscriptions = []
created = False
self.decorate_personal(organizations)
for organization in organizations:
# Be careful that `self.plan` must exist otherwise the API will
# return a 404.
# We do not use `Subscription.objects.active_for` here because
# if the subscription was already created and the grant yet to be
# accepted, we want to avoid creating a duplicate.
subscription = Subscription.objects.filter(
organization=organization, plan=self.plan,
ends_at__gte=ends_at).order_by('ends_at').first()
if subscription is None:
subscription = Subscription.objects.new_instance(
organization, plan=self.plan)
if not self.plan.skip_optin_on_grant:
subscription.grant_key = generate_random_slug()
subscription.save()
created = True
else:
# We set subscription.organization to the object that was
# loaded and initialized with `is_personal` otherwise we
# will use a shadow copy loaded through `subscription`
# when we sent the serialized data back.
subscription.organization = organization
subscriptions += [subscription]
return subscriptions, created
@swagger_auto_schema(responses={
201: OpenAPIResponse("created", ProvidedSubscriptionSerializer)},
query_serializer=ForceSerializer)
def post(self, request, *args, **kwargs):
"""
Grants a subscription
Subscribes a customer to the {plan} provided by {organization}.
**Tags**: subscriptions, provider, subscriptionmodel
**Examples**
.. code-block:: http
POST /api/profile/cowork/plans/premium/subscriptions/ HTTP/1.1
.. code-block:: json
{
"organization": {
"slug": "xia"
}
}
responds
.. code-block:: json
{
"created_at": "2016-01-14T23:16:55Z",
"ends_at": "2017-01-14T23:16:55Z",
"description": null,
"organization": {
"slug": "xia",
"printable_name": "<NAME>"
},
"plan": {
"slug": "open-space",
"title": "Open Space",
"description": "open space desk, High speed internet
- Ethernet or WiFi, Unlimited printing,
Unlimited scanning, Unlimited fax service
(send and receive)",
"is_active": true,
"setup_amount": 0,
"period_amount": 17999,
"interval": "monthly",
"app_url": "http://localhost:8020/app"
},
"auto_renew": true
}
"""
return super(ProvidedSubscriptionsAPIView, self).post(
request, *args, **kwargs)
def send_signals(self, relations, user, reason=None, invite=False):
for subscription in relations:
signals.subscription_grant_created.send(sender=__name__,
subscription=subscription, reason=reason, invite=invite,
request=self.request)
def create(self, request, *args, **kwargs): #pylint:disable=unused-argument
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
return self.perform_optin(serializer, request)
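# A minimal client-side sketch (not part of this module) of calling the
# grant endpoint above with the `requests` library; `BASE_URL` and
# `API_KEY` are placeholders for a deployment-specific host and token:
#
#   import requests
#   resp = requests.post(
#       BASE_URL + "/api/profile/cowork/plans/premium/subscriptions/",
#       json={"organization": {"slug": "xia"}},
#       headers={"Authorization": "Bearer " + API_KEY})
#   resp.raise_for_status()
#   print(resp.json()["created_at"])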
class PlanSubscriptionDetailAPIView(ProviderMixin, SubscriptionDetailAPIView):
"""
Retrieves a subscription to a provider plan
Returns the subscription of {subscriber} to {plan} from provider
{organization}.
**Tags**: subscriptions, provider, subscriptionmodel
**Examples**
.. code-block:: http
GET /api/profile/cowork/plans/open-space/subscriptions/xia/ HTTP/1.1
responds
.. code-block:: json
{
"created_at": "2019-01-01T00:00:00Z",
"ends_at": "2020-01-01T00:00:00Z",
"description": null,
"organization": {
"slug": "xia",
"created_at": "2019-01-01T00:00:00Z",
"full_name": "<NAME>",
"email": "<EMAIL>",
"phone": "555-555-5555",
"street_address": "350 Bay St.",
"locality": "San Francisco",
"region": "CA",
"postal_code": "94133",
"country": "US",
"default_timezone": "UTC",
"printable_name": "<NAME>",
"is_provider": false,
"is_bulk_buyer": false,
"type": "personal",
"credentials": true,
"extra": null
},
"plan": {
"slug": "open-space",
"title": "Open Space",
"description": "open space desk",
"is_active": true,
"setup_amount": 0,
"period_amount": 17999,
"period_length": 1,
"interval": "monthly",
"unit": "cad",
"organization": "cowork",
"renewal_type": "auto-renew",
"is_not_priced": false,
"created_at": "2019-01-01T00:00:00Z",
"skip_optin_on_grant": false,
"optin_on_request": false,
"extra": null
},
"auto_renew": true,
"editable": true,
"extra": null,
"grant_key": null,
"request_key": null
}
"""
subscriber_url_kwarg = 'subscriber'
serializer_class
import os, re, sys, shutil, glob, time, csv, xlsxwriter
import argparse as ap
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot')
import msvcrt as m
version = "VNX Onions for MS Windows 0.1"
# col_filter = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,55,56,57,58]
col_filter = ['Poll Time','Object Name','Owner Array Name','Current Owner','Utilization (%)',
'Utilization-Optimal (%)','Utilization-Nonoptimal (%)','Queue Length','Queue Length-Optimal',
'Queue Length-Nonoptimal','Response Time (ms)','Response Time-Optimal (ms)','Response Time-Nonoptimal (ms)',
'Total Bandwidth (MB/s)','Total Bandwidth-Optimal (MB/s)','Total Bandwidth-Nonoptimal (MB/s)','Total Throughput (IO/s)',
'Read Bandwidth (MB/s)','Read Bandwidth-Optimal (MB/s)','Read Bandwidth-Nonoptimal (MB/s)','Read Size (KB)','Read Size-Optimal (KB)','Read Size-Nonoptimal (KB)',
'Read Throughput (IO/s)','Read Throughput-Optimal (IO/s)','Read Throughput-Nonoptimal (IO/s)','Write Bandwidth (MB/s)','Write Bandwidth-Optimal (MB/s)','Write Bandwidth-Nonoptimal (MB/s)',
'Write Size (KB)','Write Size-Optimal (KB)','Write Size-Nonoptimal (KB)','Write Throughput (IO/s)','Write Throughput-Optimal (IO/s)','Write Throughput-Nonoptimal (IO/s)',
'Service Time (ms)','Service Time-Optimal (ms)','Service Time-Nonoptimal (ms)']
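# Note: pandas.read_csv(usecols=col_filter) further below requires every name
# in this list to match the CSV header exactly; a mismatch raises ValueError.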
#################################### Arguments and settings
parser = ap.ArgumentParser(description=version + ' Download & analyse VNX .nar files')
parser.add_argument('-nar',dest='nar_folder', required = False, help='NAR files location')
parser.add_argument('-u',dest='user', required = False, help='Unisphere user')
parser.add_argument('-p',dest='password', required = False, help='Unisphere password')
parser.add_argument('-s',dest='scope', required = False, help='Unisphere scope')
parser.add_argument('-ip',dest='IP', required = False, help='SPA or SPB IP address')
parser.add_argument('-f',dest='from_date', required = False, help='Start date YYYY-MM-DD')
parser.add_argument('-t',dest='to_date', required = False, help='End date YYYY-MM-DD')
parser.add_argument('-cd',dest='description', required = False, help='Description (String)')
parser.add_argument('--no-graphs', dest='nographs',required = False, action="store_const", const = True, help='No graphs')
parser.add_argument('-of',dest='outfile_location', required = False, help='(outfile location)')
parser.add_argument('-dpi',dest='dpi', required = False, help='resolution in DPI')
args = parser.parse_args()
if args.nar_folder is None and args.outfile_location is None and not all([args.user,args.password,args.IP,args.from_date,args.to_date,args.scope]):
print "\nError: Please specify NAR folder OR VNX/CX array IP and credentials\n"
parser.print_help()
sys.exit()
user = args.user
password = <PASSWORD>
IP = args.IP
from_date = args.from_date
to_date = args.to_date
scope = args.scope
description = args.description
nar_folder = args.nar_folder
try:
args.dpi = float(args.dpi)
except TypeError:
pass
working_folder = None
if nar_folder:
working_folder = nar_folder
elif args.outfile_location:
working_folder = os.path.split(args.outfile_location)[0]
else:
pass
serial = None
start_time = time.strftime("%H:%M:%S")
#################################### NAR GETTER
def get_nar_file_list(IP,user,password,scope):
print "Querying array for performance archives ..."
os.system('naviseccli.exe'+' -h '+ IP +' -scope '+ scope +' -user '+ user +' -password ' + password + ' analyzer -archive -list > nar.list')
def parse_nars(from_date,to_date):
print "Parsing the archive list ..."
nar_list_raw = open('nar.list','r')
nar_list = []
nar_list_selected = []
for row in nar_list_raw:
parts = row.split()
nar_list.append(parts[4])
del nar_list[0]
for nar_filename in nar_list:
date = int(((nar_filename.split('_'))[2]).translate(None,'-'))
if date >= int(from_date.translate(None,'-')) and date <= int(to_date.translate(None,'-')):
nar_list_selected.append(nar_filename)
return nar_list_selected
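# The date filter above is inclusive on both ends and assumes the NAR naming
# convention <serial>_<sp>_<YYYY-MM-DD>_... , i.e. the third underscore-separated
# field is the capture date.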
def get_nars(nar_file_list):
print "Downloading the archive files ..."
serial = (nar_file_list[0].split('_'))[0] # all archives share the array serial prefix
os.system('mkdir ' + serial) # create a directory with an array SN
for nar_file in nar_file_list: # download the files
print ('downloading ' + nar_file)
os.system('naviseccli.exe'+' -h '+ IP +' -scope ' + scope +' -user '+ user +' -password ' + password + ' analyzer -archive -file ' + nar_file + ' -o')
for filename in glob.glob(serial + '*.nar'):
try:
shutil.move(filename, '.\\'+ serial + '\\')
except shutil.Error:
print 'File %s already exists, skipping ...' % (filename)
try:
os.remove('nar.list')
except WindowsError:
pass
return serial
def decrypt_nars():
print "Decrypting archive files ..."
for filename in glob.iglob('*.nar'):
print "Now decrypting %s" % (filename)
os.system('naviseccli.exe analyzer -archivedump -data ' + filename + ' -out ' + filename + '.csv')
if nar_folder is None and args.outfile_location is None:
script_mode = "array"
print "Direct array download"
get_nar_file_list(IP,user,password,scope)
serial = get_nars(parse_nars(from_date,to_date))
os.chdir('.\\'+ serial + '\\')
decrypt_nars()
elif nar_folder is not None and args.outfile_location is None:
script_mode = "nar"
print "NAR folder specified, processing pre-downloaded NAR files..."
os.chdir(nar_folder)
decrypt_nars()
pass
else:
script_mode = "outfile"
print "Outfile selected, processing pre-compiled outfile..."
pass
#################################### MAIN start
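# Note: os.remove does not expand wildcards, so the cleanup attempt below is
# effectively a no-op; stale compiled outfiles from earlier runs are left in place.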
try:
os.remove('*outfile')
except WindowsError:
pass
try:
serial = str(list(glob.iglob('*.nar'))[0].split('_')[0])
except:
serial = "serial_not_available"
outfile = None
if args.outfile_location is None:
outfile = open(serial + '_' + str(from_date) + '-' + str(to_date) + '.ofl','a')
print "Compiling main performance file: %s" % (outfile.name)
filecount = 0
for filename in glob.iglob('*.csv'):
filecount += 1
print "Now processing %s" % (filename)
if filecount == 1:
sn = filename.split('_')[0]
date_from = filename.split('_')[2]
infile = open(filename,'r')
for line in infile:
outfile.write(line)
else:
infile = open(filename,'r')
infile.next()
for line in infile:
outfile.write(line)
# os.rename('outfile.csv',sn + '_' + date_from + '.csv')
############################################## data analytics
sanitize_dict = {' ': '_','(%)': 'pct','(': '',')': '','%': 'pct','/':'_per_'}
def nar2csv():
print list(glob.iglob('*.nar'))
for filename in glob.iglob('*.nar'):
print filename
os.system('naviseccli.exe analyzer -archivedump -data ' + filename + ' -out ' + filename + '.csv')
def sanitize_cols(df): #remove whitespaces and while at it, remove stupid characters
print "Sanitizing the metrics names..."
for bug,solution in sanitize_dict.items():
df.columns = df.columns.str.replace(bug,solution)
df.columns = df.columns.str.lower()
print "Sanitizing done"
return df
def get_obj(df): #scans the object name column to retrieve the list of objects
duped_lst = df.object_name.tolist()
deduped_lst = [] # all values that show in 'object name'
for obj in duped_lst: # deduplication
if obj not in deduped_lst:
deduped_lst.append(obj)
return deduped_lst
def rag_format(workbook,worksheet,cell_range,amber_threshold,red_threshold):
# Add a format. Light red fill with dark red text.
red = workbook.add_format({'bg_color': '#FFC7CE',
'font_color': '#9C0006'})
# Add a format. Yellow fill with dark yellow text.
amber = workbook.add_format({'bg_color': '#f4e541',
'font_color': '#f4af41'})
# Add a format. Green fill with dark green text.
green = workbook.add_format({'bg_color': '#C6EFCE',
'font_color': '#006100'})
worksheet.conditional_format(cell_range, {'type': 'cell',
'criteria': '>',
'value': red_threshold,
'format': red})
worksheet.conditional_format(cell_range, {'type': 'cell',
'criteria': 'between',
'minimum': amber_threshold,
'maximum': red_threshold,
'format': amber})
worksheet.conditional_format(cell_range, {'type': 'cell',
'criteria': 'between',
'minimum': 0,
'maximum': amber_threshold,
'format': green})
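# Hypothetical usage sketch (range and thresholds are placeholders): colour a
# utilisation column with green/amber/red bands at 70%/90%:
#   rag_format(workbook, worksheet, 'C2:C100', 70, 90)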
def obj_type(obj):
obj_type = 'Unknown'
if len(obj.split('[')) > 1:
obj_type = 'LUN'
if obj.startswith('Port'):
obj_type = 'Port'
elif obj.startswith('SP'):
obj_type = 'SP'
elif len(obj.split('Enclosure')) > 1:
obj_type = 'Disk'
return obj_type
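# Classification examples (object names are illustrative):
#   'my_lun [23]'               -> 'LUN'   (brackets in the name)
#   'Port 0 [FC]'               -> 'Port'  (the Port check runs after the bracket check)
#   'SP A'                      -> 'SP'
#   'Bus 0 Enclosure 1 Disk 4'  -> 'Disk'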
if outfile is None:
outfile = args.outfile_location
if working_folder is None:
working_folder = ".\\" + serial + "\\"
# print "current folder is %s" % os.getcwd()
# print "working folder is %s" % working_folder
# os.chdir(working_folder) #change to the working folder for file generation
print "Loading the main DataFrame from %s, hang on - this takes time..." % (outfile)
timestamp = [1]
try:
df = pd.read_csv(outfile.name,parse_dates=timestamp,usecols=col_filter)
except AttributeError:
df = pd.read_csv(os.path.split(args.outfile_location)[1],parse_dates=timestamp,usecols=col_filter)
print 'Main DataFrame loaded'
# print list(df)
sanitize_cols(df)
obj_list = get_obj(df)
id_list = ['poll_time','object_name']
metrics_list_SP = ['utilization_pct',
'response_time_ms',
'service_time_ms',
'queue_length',
'read_bandwidth_mb_per_s',
'write_bandwidth_mb_per_s',
'read_size_kb',
'write_size_kb',
'read_throughput_io_per_s',
'write_throughput_io_per_s']
metrics_set_list_SP = [['utilization_pct'],
['response_time_ms',
'service_time_ms'],
['queue_length'],
['write_size_kb',
'read_size_kb'],
['read_bandwidth_mb_per_s',
'write_bandwidth_mb_per_s'],
['read_throughput_io_per_s',
'write_throughput_io_per_s']]
sp_stats_select = ["utilization_pct_max","utilization_pct_mean","utilization_pct_95th",
# "response_time_ms_max","response_time_ms_mean","response_time_ms_95th",
# "service_time_ms_max","service_time_ms_mean","service_time_ms_95th",
# "queue_length_max","queue_length_mean","queue_length_95th",
"read_bandwidth_mb_per_s_max","read_bandwidth_mb_per_s_mean","read_bandwidth_mb_per_s_95th",
"write_bandwidth_mb_per_s_max","write_bandwidth_mb_per_s_mean","write_bandwidth_mb_per_s_95th",
# "read_size_kb_max","read_size_kb_mean","read_size_kb_95th","write_size_kb_max","write_size_kb_mean","write_size_kb_95th",
"read_throughput_io_per_s_max","read_throughput_io_per_s_mean","read_throughput_io_per_s_95th",
"write_throughput_io_per_s_max","write_throughput_io_per_s_mean","write_throughput_io_per_s_95th"]
lun_stats_select = [
# "utilization_pct_max","utilization_pct_mean","utilization_pct_95th",
"response_time_ms_max","response_time_ms_mean","response_time_ms_95th",
# "service_time_ms_max","service_time_ms_mean","service_time_ms_95th",
# "queue_length_max","queue_length_mean","queue_length_95th",
"read_bandwidth_mb_per_s_max","read_bandwidth_mb_per_s_mean","read_bandwidth_mb_per_s_95th",
"write_bandwidth_mb_per_s_max","write_bandwidth_mb_per_s_mean","write_bandwidth_mb_per_s_95th",
# "read_size_kb_max","read_size_kb_mean","read_size_kb_95th","write_size_kb_max","write_size_kb_mean","write_size_kb_95th",
"read_throughput_io_per_s_max","read_throughput_io_per_s_mean","read_throughput_io_per_s_95th",
"write_throughput_io_per_s_max","write_throughput_io_per_s_mean","write_throughput_io_per_s_95th"]
disk_stats_select = [
"utilization_pct_max","utilization_pct_mean","utilization_pct_95th",
"response_time_ms_max","response_time_ms_mean","response_time_ms_95th",
# "service_time_ms_max","service_time_ms_mean","service_time_ms_95th",
# "queue_length_max","queue_length_mean","queue_length_95th",
# "read_bandwidth_mb_per_s_max","read_bandwidth_mb_per_s_mean","read_bandwidth_mb_per_s_95th",
# "write_bandwidth_mb_per_s_max","write_bandwidth_mb_per_s_mean","write_bandwidth_mb_per_s_95th",
# "read_size_kb_max","read_size_kb_mean","read_size_kb_95th","write_size_kb_max","write_size_kb_mean","write_size_kb_95th",
# "read_throughput_io_per_s_max","read_throughput_io_per_s_mean","read_throughput_io_per_s_95th",
# "write_throughput_io_per_s_max","write_throughput_io_per_s_mean","write_throughput_io_per_s_95th"
]
# list of metrics measured for SP
column_list = id_list + metrics_list_SP
# print df[df['object_name'] == 'SP A'][metrics_list_SP]
util_dict = {}
for obj in obj_list:
metric_df = df[df['object_name'] == obj]['utilization_pct']
try:
util_dict[obj] = metric_df.quantile(0.95)
except TypeError:
pass
# print 'Object %s utilization: %s' % (obj.split('[')[0],metric_df.quantile(0.95))
util_dict = dict(sorted(util_dict.iteritems(), key=lambda (k,v): (v,k)))
# for obj in util_dict:
# print '%s %s' % (obj.split('[')[0],util_dict[obj])
sorted_obj_list = util_dict.keys()
#guess array name (account and device) from LUN names
guessing_list = []
for item in sorted_obj_list:
try:
guessing_list.append(item.split('-')[0] + '-' + item.split('-')[1])
except IndexError:
pass
def most_common(lst):
return max(set(lst), key=lst.count)
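# With tied counts, max() over a set picks an arbitrary winner, which is good
# enough for guessing the array name from the most common LUN name prefix.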
array_name = most_common(guessing_list)
# for key in reversed(sorted_obj_list):
# print '%s is %s' % (key, obj_type(key))
extended_metrics_list = []
extended_metrics_list.append('Obj')
extended_metrics_list.append('Type')
for i in range(len(metrics_list_SP)):
extended_metrics_list.append(metrics_list_SP[i] + '_max')
extended_metrics_list.append(metrics_list_SP[i] + '_mean')
extended_metrics_list.append(metrics_list_SP[i] + '_95th')
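# The resulting CSV header is:
#   Obj, Type, utilization_pct_max, utilization_pct_mean, utilization_pct_95th,
#   response_time_ms_max, ... (a max/mean/95th triple for every metric in metrics_list_SP)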
#OUTPUT OF CUMULATIVE DATA TO CSV
with open(serial + 'lun_stats_tab.csv','wb') as lun_stats_tab:
csv_writer = csv.writer(lun_stats_tab)
csv_writer.writerow(extended_metrics_list)
for obj in reversed(sorted_obj_list):
metric_df = df[df['object_name'] == obj][metrics_list_SP]
stats_line = [obj.split('[')[0]] + [obj_type(obj)]
for metric in metric_df:
metric_max = metric_df[metric].max()
metric_mean = metric_df[metric].mean()
metric_95th = metric_df[metric].quantile(0.95)
stats_line_part = [metric_max,metric_mean,metric_95th]
stats_line = stats_line + stats_line_part
csv_writer.writerow(stats_line)
print ("Summary CSV ready ...")
print ("Loading results into DataFrame ...")
stats_df = pd.read_csv(serial + 'lun_stats_tab.csv')
sp_stats = stats_df[stats_df['Type'] == 'SP'].sort_values('Obj')
lun_stats = stats_df[stats_df['Type'] == 'LUN'].sort_values('utilization_pct_95th',ascending=False,na_position='last')
disk_stats = stats_df[stats_df['Type'] == 'Disk'].sort_values('Obj')
# lun_stats_growth = df[
# df['object_name',
# df['read_bandwidth_mb_per_s_mean'].resample('W','first')*604800-df['read_bandwidth_mb_per_s_mean'].resample('W','last')*604800,
# df['write_bandwidth_mb_per_s_mean'].resample('W','first')*604800-df['write_bandwidth_mb_per_s_mean'].resample('W','last')*604800,
# df['read_throughput_mb_per_s_mean'].resample('W','first')*604800-df['read_throughput_mb_per_s_mean'].resample('W','last')*604800,
# df['write_throughput_mb_per_s_mean'].resample('W','first')*604800-df['write_throughput_mb_per_s_mean'].resample('W','last')*604800,
# ],
# #columns = ['LUN name','Read BW change','Write BW change','Read IOPS change','Write IOPS change']
# ]
# lun_stats_growth.columns(['LUN name','Read BW change','Write BW change','Read IOPS change','Write IOPS change'])
# import pdb; pdb.set_trace()
def lun_load_growth():
LUN_list = lun_stats.Obj.tolist()
lun_stats_growth = pd.DataFrame()
# lun_stats_growth.columns(['LUN name','Read BW change','Write BW change','Read IOPS change','Write IOPS change'])
for lun in LUN_list:
lun_frame = df[df["object_name"] == lun]["object_name","read_bandwidth_mb_per_s","write_bandwidth_mb_per_s","read_throughput_mb_per_s","write_throughput_mb_per_s"]
lun_growth_line = pd.DataFrame({'LUN name':[lun],
'R BW growth':[(df[df["object_name"] == lun]["read_bandwidth_mb_per_s"].resample('1W','first') - df[df["object_name"] == lun]["read_bandwidth_mb_per_s"].resample('1W','last'))*604800],
'W BW growth':[(df[df["object_name"] == lun]["write_bandwidth_mb_per_s"].resample('1W','first') - df[df["object_name"] == lun]["write_bandwidth_mb_per_s"].resample('1W','last'))*604800],
'R IOPS growth':[(df[df["object_name"] == lun]["read_throughput_io_per_s"].resample('1W','first') - df[df["object_name"] == lun]["read_throughput_io_per_s"].resample('1W','last'))*604800],
'W IOPS growth':[(df[df["object_name"] == lun]["write_throughput_io_per_s"].resample('1W','first') - df[df["object_name"] == lun]["write_throughput_io_per_s"].resample('1W','last'))*604800],
})
lun_stats_growth = lun_stats_growth.append(lun_growth_line)
return lun_stats_growth
# lun_load_growth()
print ("Exporting to MS Excel template ...")
try:
template_writer = pd.ExcelWriter((str(df.poll_time.min())).split(' ')[0] + "-" + (str(df.poll_time.max())).split(' ')[0] +"_"+ array_name + '_stats.xlsx',engine='xlsxwriter')
except IOError:
template_writer = pd.ExcelWriter('_generic_file_stats.xlsx',engine='xlsxwriter')
workbook = template_writer.book
bold = workbook.add_format({'bold': True})
title = workbook.add_format({'bold': True, 'font_size': 48})
start_row = [4,10,24]
sp_stats[["Obj"]+sp_stats_select].to_excel(template_writer,'Dashboard',startrow=start_row[0] , startcol=0)
lun_stats[["Obj"]+lun_stats_select].head(10).to_excel(template_writer,'Dashboard',startrow=start_row[1] , startcol=0)
disk_stats[["Obj"]+disk_stats_select][disk_stats["utilization_pct_95th"]>70].to_excel(template_writer,'Dashboard',startrow=start_row[2] , | |
from datetime import datetime
from typing import List, Dict, Union
import pytest
from dataclasses import field
from krake.data.config import HooksConfiguration
from krake.data.core import Metadata, ListMetadata
from krake.data.kubernetes import ClusterList
from marshmallow import ValidationError
from krake.data.serializable import (
Serializable,
ApiObject,
PolymorphicContainer,
is_generic,
is_base_generic,
is_qualified_generic,
is_generic_subtype,
)
from krake import utils
from tests.factories.core import MetadataFactory, GlobalMetricFactory
from tests.factories.kubernetes import ApplicationFactory, ClusterFactory
from tests.factories.openstack import ProjectFactory
class Person(Serializable):
given_name: str
surname: str
@property
def fullname(self):
return f"{self.given_name} {self.surname}"
class Book(Serializable):
id: int = field(metadata={"immutable": True})
created: datetime = field(default_factory=utils.now, metadata={"readonly": True})
name: str
author: Person
characters: List[Person] = field(default_factory=list)
def test_serializable():
class Application(Serializable):
id: int
name: str
optional: str = "optional"
kind: str = "app"
__metadata__ = {"discriminator": "kind"}
assert Application.Schema is not None
app = Application(id=42, name="<NAME>")
assert app.id == 42
assert app.name == "<NAME>"
assert app.kind == "app"
assert app.optional == "optional"
data = app.serialize()
assert data["id"] == 42
assert data["name"] == "<NAME>"
assert data["kind"] == "app"
assert data["optional"] == "optional"
# Missing keyword arguments
with pytest.raises(TypeError):
Application(id=42)
# Additional keyword arguments
with pytest.raises(TypeError):
Application(id=42, name="My fancy model", value=72)
instance = Application.deserialize(data)
assert isinstance(instance, Application)
assert instance.id == app.id
assert instance.name == app.name
assert instance.kind == app.kind
assert instance.optional == app.optional
def test_nested_attrs():
book = Book(
id=42,
name="The Hitchhiker's Guide to the Galaxy",
author=Person(given_name="Douglas", surname="Adams"),
)
data = book.serialize()
assert data["id"] == 42
assert data["name"] == "The Hitchhiker's Guide to the Galaxy"
assert isinstance(data["author"], dict)
assert data["author"]["given_name"] == "Douglas"
assert data["author"]["surname"] == "Adams"
def test_list_attr():
book = Book(
id=42,
name="The Hitchhiker's Guide to the Galaxy",
author=None,
characters=[
Person(given_name="Arthur", surname="Dent"),
Person(given_name="Ford", surname="Perfect"),
],
)
data = book.serialize()
assert data["id"] == 42
assert data["name"] == "The Hitchhiker's Guide to the Galaxy"
assert data["author"] is None
assert isinstance(data["characters"], list)
assert len(data["characters"]) == 2
assert data["characters"][0]["given_name"] == "Arthur"
assert data["characters"][0]["surname"] == "Dent"
assert data["characters"][1]["given_name"] == "Ford"
assert data["characters"][1]["surname"] == "Perfect"
def test_update():
book = Book(
id=42,
created=datetime(1979, 10, 12).astimezone(),
name="The Hitchhiker's Guide to the Galaxy",
author=Person(given_name="Douglas", surname="Adams"),
)
update = Book(
id=9780465025275,
name="Six Easy Pieces",
created=datetime(2011, 3, 11).astimezone(),
author=Person(given_name="Richard", surname="Feynman"),
)
book.update(update)
assert book.id == 42
assert book.created == book.created
assert book.name == "Six Easy Pieces"
assert book.author is not update.author
assert book.author.given_name == "Richard"
assert book.author.surname == "Feynman"
def test_update_replacing_value_with_none():
book = Book(
id=42,
created=datetime(1979, 10, 12).astimezone(),
name="The Hitchhiker's Guide to the Galaxy",
author=Person(given_name="Douglas", surname="Adams"),
)
update = Book(
id=9780465025275,
name="Six Easy Pieces",
created=datetime(2011, 3, 11).astimezone(),
author=None,
)
book.update(update)
assert book.author is None
def test_update_replacing_none_with_value():
book = Book(
id=9780465025275,
name="Six Easy Pieces",
created=datetime(2011, 3, 11).astimezone(),
author=None,
)
update = Book(
id=42,
created=datetime(1979, 10, 12).astimezone(),
name="The Hitchhiker's Guide to the Galaxy",
author=Person(given_name="Douglas", surname="Adams"),
)
book.update(update)
assert book.author is update.author
assert book.author.given_name == "Douglas"
assert book.author.surname == "Adams"
def test_api_object():
class Book(ApiObject):
api: str = "shelf"
kind: str = "Book"
book = Book()
assert book.api == "shelf"
assert book.kind == "Book"
book = Book.deserialize({})
assert book.api == "shelf"
assert book.kind == "Book"
with pytest.raises(ValidationError):
Book.deserialize({"api": "wrong-api"})
with pytest.raises(ValidationError):
Book.deserialize({"kind": "Letter"})
def test_creation_ignored():
class Status(Serializable):
state: str
class Metadata(Serializable):
created: str = field(metadata={"readonly": True})
name: str = field(metadata={"readonly": True})
changing: str
class Annotations(Serializable):
metadata: Metadata
class Application(Serializable):
id: int
kind: str = "app"
status: Status = field(metadata={"subresource": True})
metadata: Metadata
annotations: List[Annotations]
annotation_1 = Annotations(
metadata=Metadata(created=None, name="annot_1", changing="foo")
)
annotation_2 = Annotations(
metadata=Metadata(created="yes", name="annot_2", changing="bar")
)
app = Application(
id=42,
status=None,
metadata=Metadata(created=None, name="name", changing="foobar"),
annotations=[annotation_1, annotation_2],
)
serialized = app.serialize()
assert serialized["metadata"]["changing"] == "foobar"
assert serialized["metadata"]["created"] is None
assert serialized["annotations"][0]["metadata"]["created"] is None
assert serialized["annotations"][1]["metadata"]["created"] == "yes"
# The readonly and subresources are ignored
deserialized = Application.deserialize(serialized, creation_ignored=True)
assert deserialized.status is None
assert deserialized.metadata.created is None
assert deserialized.annotations[0].metadata.created is None
assert deserialized.annotations[1].metadata.created is None
# Do not ignore the readonly and subresources
with pytest.raises(ValidationError) as err:
Application.deserialize(serialized)
error_messages = err.value.messages
assert "status" in error_messages
assert "metadata" in error_messages
assert "created" in error_messages["metadata"]
assert "name" not in error_messages["metadata"]
assert "created" in error_messages["annotations"][0]["metadata"]
assert 1 not in error_messages["annotations"]
def test_api_object_repr():
"""Verify the representation of the instances of :class:`ApiObject`."""
app = ApplicationFactory(metadata__name="my-app", metadata__namespace="my-ns")
app_repr = (
f"<kubernetes.Application namespace='my-ns'"
f" name='my-app' uid={app.metadata.uid!r}>"
)
assert repr(app) == app_repr
project = ProjectFactory(metadata__name="my-project", metadata__namespace="other")
project_repr = (
f"<openstack.Project namespace='other'"
f" name='my-project' uid={project.metadata.uid!r}>"
)
assert repr(project) == project_repr
# Non-namespaced
metric = GlobalMetricFactory(metadata__name="my-metric")
metric_repr = f"<core.GlobalMetric name='my-metric' uid={metric.metadata.uid!r}>"
assert repr(metric) == metric_repr
# List
items = [ClusterFactory()] * 10
cluster_list = ClusterList(metadata=ListMetadata(), items=items)
cluster_list_repr = "<kubernetes.ClusterList length=10>"
assert repr(cluster_list) == cluster_list_repr
class DataSpec(PolymorphicContainer):
pass
@DataSpec.register("float")
class FloatSpec(Serializable):
min: float
max: float
@DataSpec.register("bool")
class BoolSpec(Serializable):
pass
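# Registering "float" and "bool" above means a serialized DataSpec carries a
# "type" discriminator plus one key named after the registered type, e.g.
# {"type": "float", "float": {"min": 0, "max": 1.0}}, as the tests below show.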
def test_polymorphic_serialize():
assert DataSpec(type="float", float=FloatSpec(min=0, max=1.0)).serialize() == {
"type": "float",
"float": {"min": 0, "max": 1.0},
}
assert DataSpec(type="bool", bool=BoolSpec()).serialize() == {
"type": "bool",
"bool": {},
}
def test_polymorphic_deserialize():
spec = DataSpec.deserialize({"type": "float", "float": {"min": 0, "max": 1.0}})
assert isinstance(spec, DataSpec)
assert hasattr(spec, "float")
assert isinstance(spec.float, FloatSpec)
assert spec.float.min == 0
assert spec.float.max == 1.0
spec = DataSpec.deserialize({"type": "bool"})
assert isinstance(spec, DataSpec)
assert hasattr(spec, "bool")
assert isinstance(spec.bool, BoolSpec)
def test_polymorphic_multiple_subfields():
with pytest.raises(TypeError) as err:
DataSpec(type="float", float=None, bool=None)
assert "Got unexpected keyword argument 'bool'" == str(err.value)
def test_polymorphic_update():
spec = DataSpec(type="float", float=FloatSpec(min=0, max=1.0))
update = DataSpec(type="bool", bool=BoolSpec())
spec.update(update)
assert spec.type == "bool"
assert spec.bool == update.bool
assert spec.bool is update.bool
def test_polymorphic_equality():
"""Verify the equality check of the :class:`PolymorphicContainer`."""
# Inequality checks
spec1 = DataSpec(type="float", float=FloatSpec(min=0, max=1.0))
spec2 = DataSpec(type="float", float=FloatSpec(min=100, max=200))
assert spec1 != spec2
assert spec2 != spec1
spec3 = DataSpec(type="bool", bool=BoolSpec())
assert spec2 != spec3
assert spec3 != spec2
# Equality checks
spec4 = DataSpec(type="float", float=FloatSpec(min=0, max=1.0))
assert spec1 == spec4
spec5 = DataSpec(type="bool", bool=BoolSpec())
assert spec3 == spec5
def test_polymorphic_creation_error_handling():
"""Verify that creating a :class:`PolyMorphicContainer` without the `type` attribute
also raises an error.
"""
with pytest.raises(TypeError, match="Missing required keyword argument 'type'"):
DataSpec(float=FloatSpec(min=0, max=1.0))
def test_polymorphic_register_error_handling():
"""Verify that adding a :class:`PolyMorphicContainerSpec` with an already registered
type to a :class:`PolyMorphicContainer` leads to an exception."""
with pytest.raises(ValueError, match="'bool' already registered by "):
@DataSpec.register("bool")
class OtherSpec(Serializable):
pass
def test_polymorphic_validate_type_error_handling():
"""Verify that deserializing an instance of :class:`PolyMorphicContainerSpec` where
the "type" attribute is removed will lead to an exception.
"""
serialized = DataSpec(type="float", float=FloatSpec(min=0, max=1.0)).serialize()
serialized["type"] = "non-existing"
with pytest.raises(ValidationError, match="Unknown type 'non-existing'"):
DataSpec.deserialize(serialized)
def test_polymorphic_validate_subschema_error_handling():
"""Verify that deserializing an instance of :class:`PolyMorphicContainerSpec` where
the container attribute is removed will lead to an exception.
"""
serialized = DataSpec(type="float", float=FloatSpec(min=0, max=1.0)).serialize()
del serialized["float"]
with pytest.raises(ValidationError, match="Field is required"):
DataSpec.deserialize(serialized)
def test_is_generic():
assert is_generic(List)
assert is_generic(List[int])
assert is_generic(Union)
assert is_generic(Union[int, None])
assert is_generic(Dict)
assert is_generic(Dict[str, int])
assert not is_generic(str)
assert not is_generic(int)
assert not is_generic(object)
def test_is_base_generic():
assert is_base_generic(List)
assert is_base_generic(Dict)
assert is_base_generic(Union)
assert not is_base_generic(List[int])
assert not is_base_generic(Union[int, None])
assert not is_base_generic(Dict[int, str])
def test_is_qualified_generic():
assert is_qualified_generic(List[int])
assert is_qualified_generic(Union[int, None])
assert is_qualified_generic(Dict[int, str])
assert not is_qualified_generic(List)
assert not is_qualified_generic(Dict)
assert not is_qualified_generic(Union)
def test_is_generic_subtype():
assert is_generic_subtype(List[int], List)
assert is_generic_subtype(List[int], List[int])
assert is_generic_subtype(List, List)
assert not is_generic_subtype(List[int], Dict)
assert not is_generic_subtype(List[int], List[str])
assert not is_generic_subtype(List, List[int])
def test_schema_validation():
class Interval(Serializable):
max: int
min: int
def __post_init__(self):
if self.min > self.max:
raise ValidationError("'min' must not be greater than 'max'")
with pytest.raises(ValidationError) as excinfo:
Interval.deserialize({"min": 72, "max": 42})
assert "_schema" in excinfo.value.messages
@pytest.mark.parametrize(
"label_value",
[
{"key": "value"},
{"key1": "value"},
{"key": "value1"},
{"key-one": "value"},
{"key": "value-one"},
{"key-1": "value"},
{"key": "value-1"},
{"k": "value"},
{"key": "v"},
{"kk": "value"},
{"key": "vv"},
{"k.k": "value"},
{"key": "v-v"},
{"key_one.one": "value"},
{"key": "value.one_one"},
{"url.com/name": "value"},
{"url1.com/name": "value"},
{"url-suffix/name": "value"},
{"url.com/name-one": "value"},
{"url1.com/name-one": "value"},
{"url-suffix/name-one": "value"},
],
)
def test_label_validation(label_value):
# Test that valid label keys and values are accepted.
data = MetadataFactory(labels=label_value)
Metadata.deserialize(data.serialize())
@pytest.mark.parametrize(
"label_value",
[
{"key!": "value"},
{"key.": "value"},
{"-key": "value"},
{"-k": "value"},
{"-": "value"},
{"url/second/key": "value"},
{"url/": "value"},
{"/key": "value"},
{"k" * 70: "value"},
{"p" * 300 + "/" + "k" * 60: "value"},
],
)
def test_label_validation_reject_str_key(label_value):
# Test that invalid strings as label keys raise an exception.
data = MetadataFactory(labels=label_value)
with pytest.raises(ValidationError, match="Label key"):
Metadata.deserialize(data.serialize())
@pytest.mark.parametrize(
"label_value", [{True: "value"}, {None: "value"}, {10: "value"}, {0.1: "value"}]
)
def test_label_validation_reject_key(label_value):
"""Test that invalid types as label keys raise an exception."""
data = MetadataFactory(labels=label_value)
with pytest.raises(ValidationError, match="expected string or bytes-like object"):
Metadata.deserialize(data.serialize())
@pytest.mark.parametrize(
"label_value",
[
{"key": "value$"},
{"key": "value."},
{"key": "-value"},
{"key": "v-"},
{"key": "."},
{"key": "url.com/value"},
{"key": "v" * 70},
],
)
def test_label_validation_reject_str_value(label_value):
# Test that invalid strings as label values raise an exception.
data = MetadataFactory(labels=label_value)
with pytest.raises(ValidationError, match="Label value"):
Metadata.deserialize(data.serialize())
# encoding: utf-8
import os
import pytest
from ...config import Configuration
from ...testing import DummyHTTPClient
from ...model import create
from ...model.datasource import DataSource
from ...model.edition import Edition
from ...model.identifier import Identifier
from ...model.licensing import RightsStatus
from ...model.resource import (
Hyperlink,
Representation,
Resource,
)
from ...testing import MockRequestsResponse
class TestHyperlink:
def test_add_link(self, db_session, create_edition):
"""
GIVEN: A LicensePool
WHEN: Adding a link between this LicensePool and a resource
THEN: The LicensePool and resource are linked
"""
edition, pool = create_edition(db_session, with_license_pool=True)
identifier = edition.primary_identifier
data_source = pool.data_source
original, _ = create(db_session, Resource, url="http://example.com")
hyperlink, is_new = pool.add_link(
Hyperlink.DESCRIPTION, "http://example.com/", data_source,
"text/plain", "The content", None, RightsStatus.CC_BY,
"The rights explanation", original,
transformation_settings=dict(setting="a setting"))
assert is_new is True
rep = hyperlink.resource.representation
assert rep.media_type == "text/plain"
assert rep.content == b"The content"
assert hyperlink.rel == Hyperlink.DESCRIPTION
assert hyperlink.identifier == identifier
assert hyperlink.resource.rights_status.uri == RightsStatus.CC_BY
assert hyperlink.resource.rights_explanation == "The rights explanation"
transformation = hyperlink.resource.derived_through
assert transformation.derivative == hyperlink.resource
assert transformation.original == original
assert transformation.settings.get("setting") == "a setting"
assert [transformation] == original.transformations
@pytest.mark.parametrize(
'relation,default_filename',
[
pytest.param(Hyperlink.OPEN_ACCESS_DOWNLOAD, "content", id='content'),
pytest.param(Hyperlink.IMAGE, "cover", id='cover'),
pytest.param(Hyperlink.THUMBNAIL_IMAGE, "cover-thumbnail", id='cover-thumbnail'),
],
)
def test_default_filename(self, relation, default_filename):
"""
GIVEN: A Hyperlink relation and a default filename
WHEN: Getting the relation's default filename
THEN: The default filename is correctly set
"""
assert Hyperlink._default_filename(relation) == default_filename
def test_unmirrored(self, db_session, create_identifier, create_work, default_library):
"""
GIVEN: Hyperlink resources, a Work, and a Collection
WHEN: Getting Hyperlinks associated with an item in a Collection that could be mirrored but aren't.
THEN: Returns resources that could be mirrored but aren't.
"""
url = "www.example.com"
ds = DataSource.lookup(db_session, DataSource.GUTENBERG)
overdrive = DataSource.lookup(db_session, DataSource.OVERDRIVE)
[collection] = default_library.collections
collection.data_source = ds
# Here's an Identifier associated with a collection.
work = create_work(db_session, with_license_pool=True, collection=collection)
[pool] = work.license_pools
identifier1 = pool.identifier
# This is a random identifier not associated with the collection.
identifier2 = create_identifier(db_session)
def unmirrored():
return Hyperlink.unmirrored(collection).all()
# Identifier is not in the collection.
identifier2.add_link(Hyperlink.IMAGE, url+"/1", ds)
assert unmirrored() == []
# Hyperlink rel is not mirrorable.
identifier1.add_link(
"not mirrorable", url+"/2", ds, "text/plain"
)
assert unmirrored() == []
# Hyperlink has no associated representation -- it needs to be
# mirrored, which will create one!
hyperlink, _ = identifier1.add_link(
Hyperlink.IMAGE, url+"/3", ds, "image/png"
)
assert unmirrored() == [hyperlink]
# Representation is already mirrored, so does not show up
# in the unmirrored list.
representation = hyperlink.resource.representation
representation.set_as_mirrored(url+"/4")
assert unmirrored() == []
# Representation exists in database but is not mirrored -- it needs
# to be mirrored!
representation.mirror_url = None
assert unmirrored() == [hyperlink]
# Hyperlink is associated with a data source other than the
# data source of the collection. It ought to be mirrored, but
# this collection isn't responsible for mirroring it.
hyperlink.data_source = overdrive
assert unmirrored() == []
class TestResource:
def test_as_delivery_mechanism_for(self, db_session, create_work):
"""
GIVEN: A Work, a LicensePool, and a LicensePoolDeliveryMechanism
WHEN: Checking if the resource is used in a delivery mechanism for the given license pool
THEN: Returns the delivery mechanism if applicable
"""
# Calling as_delivery_mechanism_for on a Resource that is used
# to deliver a specific LicensePool returns the appropriate
# LicensePoolDeliveryMechanism.
work = create_work(db_session, with_open_access_download=True)
[pool] = work.license_pools
[lpdm] = pool.delivery_mechanisms
assert lpdm == lpdm.resource.as_delivery_mechanism_for(pool)
# If there's no relationship between the Resource and
# the LicensePoolDeliveryMechanism, as_delivery_mechanism_for
# returns None.
w2 = create_work(db_session, with_license_pool=True)
[unrelated] = w2.license_pools
assert lpdm.resource.as_delivery_mechanism_for(unrelated) is None
class TestRepresentation:
@pytest.mark.parametrize(
'base,expected',
[
("/foo/bar", "baz"),
("/foo/bar/", "baz"),
("/blah/blah/", "/foo/bar/baz")
]
)
def test_normalized_content_path(self, base, expected):
"""
GIVEN: A content path
WHEN: Normalizing the content path with a base
THEN: Returns a string path with respect to the base
"""
assert Representation.normalize_content_path("/foo/bar/baz", base) == expected
@pytest.mark.parametrize(
'url,headers,default,expected_headers_type',
[
# If there are no headers or no content-type header, the
# presumed media type takes precedence.
pytest.param("http://text/all.about.jpeg", None, "text/plain", "text/plain", id='no_headers'),
pytest.param(None, {}, "text/plain", "text/plain", id='empty_headers'),
# Most of the time, the content-type header takes precedence over the presumed media type.
pytest.param(None, {"content-type": "image/gif"}, "text/plain", "image/gif", id='image/gif'),
# Except when the content-type header is so generic as to be useless.
pytest.param(None, {"content-type": "application/octet-stream;profile=foo"},
"text/plain", "text/plain", id='generic_headers'),
# If no default media type is specified, but one can be derived from
# the URL, that one is used as the default.
pytest.param("http://example.com/cover.jpeg", {"content-type": "application/octet-stream;profile=foo"},
None, "image/jpeg", id='no_default_media_type'),
# But a default media type doesn't override a specific
# Content-Type from the server, even if it superficially makes
# more sense.
pytest.param("http://images-galore/cover.jpeg", {"content-type": "image/png"},
None, "image/png", id='specific_content-type'),
],
)
def test_best_media_type(self, url, headers, default, expected_headers_type):
"""
GIVEN: A URL, headers dict with a content-type, and a default content-type
WHEN: Determining whether the content-type header should override a presumed media type
THEN: Returns the most likely media type
"""
assert Representation._best_media_type(url, headers, default) == expected_headers_type
@pytest.mark.parametrize(
'media_type,expected',
[
# Ebook formats and image formats get mirrored.
(Representation.EPUB_MEDIA_TYPE, True),
(Representation.MOBI_MEDIA_TYPE, True),
(Representation.JPEG_MEDIA_TYPE, True),
# Other media types don't get mirrored
("text/plain", False)
]
)
def test_mirrorable_media_type(self, db_session, create_representation, media_type, expected):
"""
GIVEN: A media type
WHEN: Determining if the representation based on the media type is mirrorable
THEN: Returns True/False depending on the representation's media type
"""
representation = create_representation(
db_session, url="http://example.com", media_type=media_type, content="content")
assert representation.mirrorable_media_type is expected
def test_guess_media_type(self):
"""
GIVEN: A path
WHEN: Guessing the media type from the path
THEN: A media type is returned
"""
m_file = Representation.guess_media_type
m_url = Representation.guess_url_media_type_from_path
jpg_file = "file.jpg"
zip_file = "file.ZIP"
zip_file_rel_path = "relatively/pathed/file.zip"
zip_file_abs_path = "/absolutely/pathed/file.zIp"
assert Representation.JPEG_MEDIA_TYPE == m_file(jpg_file)
assert Representation.ZIP_MEDIA_TYPE == m_file(zip_file)
for extension, media_type in list(Representation.MEDIA_TYPE_FOR_EXTENSION.items()):
filename = "file" + extension
assert media_type == m_file(filename)
assert m_file(None) is None
assert m_file("file") is None
assert m_file("file.unknown-extension") is None
# URLs should be handled differently
# Simple file-based guess will get this right, ...
zip_url = "https://some_url/path/file.zip"
assert Representation.ZIP_MEDIA_TYPE == m_file(zip_url)
# ... but will get these wrong.
zip_url_with_query = "https://some_url/path/file.zip?Policy=xyz123&Key-Pair-Id=xxx"
zip_url_misleading = "https://some_url/path/file.zip?Policy=xyz123&associated_cover=image.jpg"
assert m_file(zip_url_with_query) is None # We get None, but want Zip
assert Representation.JPEG_MEDIA_TYPE == m_file(zip_url_misleading) # We get JPEG, but want Zip
# Taking URL structure into account should get them all right.
assert Representation.ZIP_MEDIA_TYPE == m_url(zip_url)
# ... and it also gets the query-string cases right.
assert Representation.ZIP_MEDIA_TYPE == m_url(zip_url_with_query)
assert Representation.ZIP_MEDIA_TYPE == m_url(zip_url_misleading)
# And we can handle local file cases
assert Representation.ZIP_MEDIA_TYPE == m_url(zip_file)
assert Representation.ZIP_MEDIA_TYPE == m_url(zip_file_rel_path)
assert Representation.ZIP_MEDIA_TYPE == m_url(zip_file_abs_path)
@pytest.mark.parametrize(
'url,media_type,extension',
[
pytest.param('', 'text/unknown', '', id='unknown_file_at_/foo'),
pytest.param('', 'text/plain', '.txt', id='text_file_at_/foo'),
pytest.param('.jpg', 'image/jpeg', '.jpg', id='JPEG_at_/foo.jpg'),
pytest.param('', 'image/jpeg', '.jpg', id='JPEG_at_/foo'),
pytest.param('', 'image/png', '.png', id='PNG_at_/foo'),
pytest.param('.epub.images', Representation.EPUB_MEDIA_TYPE, '.epub.images', id='EPUB_at_/foo.epub.images'),
pytest.param('.svg', 'image/svg+xml', '.svg', id='SVG_at_/foo.svg'),
],
)
def test_external_media_type_and_extension(self, db_session, create_representation, url, media_type, extension):
"""
GIVEN: A Representation
WHEN: Determining the external media type and extension
THEN: Returns a media type and returns an extension
"""
"""Test the various transformations that might happen to media type
and extension when we mirror a representation.
"""
url = "www.example.com/" + url
representation = create_representation(db_session, url, media_type)
assert representation.external_media_type == media_type
assert representation.extension() == extension
def test_set_fetched_content(self, db_session, create_representation):
"""
GIVEN: A Representation
WHEN: Reading an open filehandle to the representation's contents
THEN: The representation's content is returned
"""
representation = create_representation(db_session, "http://www.example.com/", "text/plain")
representation.set_fetched_content("some text")
assert representation.content_fh().read() == b"some text"
def test_set_fetched_content_file_on_disk(self, db_session, create_representation, tmpdir):
"""
GIVEN: A Representation that has content from a file on disk
WHEN: Reading an open filehandle to the representation's contents
THEN: The representation's content is returned
"""
Configuration.instance[Configuration.DATA_DIRECTORY] = str(tmpdir)
filename = "set_fetched_content_file_on_disk.txt"
path = tmpdir.join(filename)
path.write(b"some text")
representation = create_representation(db_session, "http://www.example.com/", "text/plain")
representation.set_fetched_content(None, str(path))
fh = representation.content_fh()
assert fh.read() == b"some text"
def test_unicode_content_utf8_default(self, db_session, create_representation):
"""
GIVEN: A Representation with unicode content
WHEN: Getting the representation's content and unicode_content
THEN: Representation's content is returned
"""
unicode_content = "It’s complicated."
utf8_content = unicode_content.encode("utf8")
# This bytestring can be decoded as Windows-1252, but that
# would be the wrong answer.
bad_windows_1252 = utf8_content.decode("windows-1252")
assert "It’s complicated." == bad_windows_1252
representation = create_representation(db_session, "http://example.com/", "text/plain")
representation.set_fetched_content(unicode_content, None)
assert utf8_content == representation.content
# By trying to interpret the content as UTF-8 before falling back to
| |
## usermicrodevices/pywingui
## Copyright (c) 2003 <NAME>
## Permission is hereby granted, free of charge, to any person obtaining
## a copy of this software and associated documentation files (the
## "Software"), to deal in the Software without restriction, including
## without limitation the rights to use, copy, modify, merge, publish,
## distribute, sublicense, and/or sell copies of the Software, and to
## permit persons to whom the Software is furnished to do so, subject to
## the following conditions:
## The above copyright notice and this permission notice shall be
## included in all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
## NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
## LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
## OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
## WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
from version_microsoft import WINVER, UNICODE
from sdkddkver import _WIN32_WINNT
try:
WINVER = _WIN32_WINNT
except:
_WIN32_WINNT = WINVER
from ctypes import *
from sys import hexversion
if hexversion < 0x02060000:
c_bool = c_byte
#TODO auto ie/comctl detection
WIN32_IE = 0x0550
#TODO: auto unicode selection,
#if unicode:
# CreateWindowEx = windll.user32.CreateWindowExW
#else:
# CreateWindowEx = windll.user32.CreateWindowExA
#etc, etc
DWORD = c_ulong
HANDLE = c_void_p
UINT = c_uint
BOOL = c_int
HWND = HANDLE
HINSTANCE = HANDLE
HICON = HANDLE
HDC = HANDLE
HCURSOR = HANDLE
HBRUSH = HANDLE
HMENU = HANDLE
HBITMAP = HANDLE
HIMAGELIST = HANDLE
HGDIOBJ = HANDLE
HMETAFILE = HANDLE
HRGN = HANDLE
ULONG = DWORD
ULONG_PTR = DWORD
UINT_PTR = DWORD
LONG_PTR = c_long
INT = c_int
LPCTSTR = c_char_p
LPTSTR = c_char_p
PSTR = c_char_p
LPCSTR = c_char_p
LPCWSTR = c_wchar_p
LPSTR = c_char_p
LPWSTR = c_wchar_p
PVOID = c_void_p
USHORT = c_ushort
WORD = c_ushort
ATOM = WORD
SHORT = c_short
LPARAM = c_ulong
WPARAM = c_uint
LPVOID = c_void_p
LONG = c_long
BYTE = c_byte
TCHAR = c_char #TODO depends on unicode/wide conventions
DWORD_PTR = c_ulong #TODO what is this exactly?
INT_PTR = c_ulong #TODO what is this exactly?
COLORREF = c_ulong
CLIPFORMAT = WORD
FLOAT = c_float
CHAR = c_char
WCHAR = c_wchar
FXPT16DOT16 = c_long
FXPT2DOT30 = c_long
LCSCSTYPE = c_long
LCSGAMUTMATCH = c_long
COLOR16 = USHORT
LRESULT = LONG_PTR
#### Windows version detection ##############################
class OSVERSIONINFO(Structure):
_fields_ = [("dwOSVersionInfoSize", DWORD),
("dwMajorVersion", DWORD),
("dwMinorVersion", DWORD),
("dwBuildNumber", DWORD),
("dwPlatformId", DWORD),
("szCSDVersion", TCHAR * 128)]
def isMajorMinor(self, major, minor):
return (self.dwMajorVersion, self.dwMinorVersion) == (major, minor)
GetVersion = windll.kernel32.GetVersionExA
versionInfo = OSVERSIONINFO()
versionInfo.dwOSVersionInfoSize = sizeof(versionInfo)
GetVersion(byref(versionInfo))
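# Example: versionInfo.isMajorMinor(5, 1) is True on Windows XP and
# versionInfo.isMajorMinor(6, 1) on Windows 7.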
def MAKELONG(w1, w2):
return w1 | (w2 << 16)
MAKELPARAM = MAKELONG
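# Example: MAKELONG(0x0102, 0x0304) == 0x03040102 (first argument is the low
# word, second the high word), e.g. for packing coordinates into an LPARAM.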
##### Windows Callback functions ################################
WNDPROC = WINFUNCTYPE(c_int, HWND, UINT, WPARAM, LPARAM)
DialogProc = WINFUNCTYPE(c_int, HWND, UINT, WPARAM, LPARAM)
CBTProc = WINFUNCTYPE(c_int, c_int, c_int, c_int)
MessageProc = CBTProc
EnumChildProc = WINFUNCTYPE(c_int, HWND, LPARAM)
MSGBOXCALLBACK = WINFUNCTYPE(c_int, HWND, LPARAM) #TODO look up real def
class WNDCLASSEX(Structure):
_fields_ = [("cbSize", UINT),
("style", UINT),
("lpfnWndProc", WNDPROC),
("cbClsExtra", INT),
("cbWndExtra", INT),
("hInstance", HINSTANCE),
("hIcon", c_void_p),#HICON
("hCursor", HCURSOR),
("hbrBackground", HBRUSH)]
if UNICODE:
_fields_ += [("lpszMenuName", c_wchar_p), ("lpszClassName", c_wchar_p)]
else:
_fields_ += [("lpszMenuName", c_char_p), ("lpszClassName", c_char_p)]
_fields_.append(("hIconSm", c_void_p))#HICON
class POINT(Structure):
_fields_ = [("x", LONG), ("y", LONG)]
def __str__(self):
return "POINT {x: %d, y: %d}" % (self.x, self.y)
POINTL = POINT
LPPOINT = POINTER(POINT)
class POINTS(Structure):
_fields_ = [("x", SHORT), ("y", SHORT)]
PtInRect = windll.user32.PtInRect
class RECT(Structure):
_fields_ = [("left", LONG),
("top", LONG),
("right", LONG),
("bottom", LONG)]
def __str__(self):
return "RECT {left: %d, top: %d, right: %d, bottom: %d}" % (self.left, self.top, self.right, self.bottom)
def __add__(self, value):
left, top, right, bottom = 0, 0, 0, 0
if self.left > value.left:
left = value.left
else:
left = self.left
if self.top > value.top:
top = value.top
else:
top = self.top
if self.right < value.right:
right = value.right
else:
right = self.right
if self.bottom < value.bottom:
bottom = value.bottom
else:
bottom = self.bottom
return RECT(left, top, right, bottom)
def __iadd__(self, value):
if self.left > value.left:
self.left = value.left
if self.top > value.top:
self.top = value.top
if self.right < value.right:
self.right = value.right
if self.bottom < value.bottom:
self.bottom = value.bottom
def getHeight(self):
return self.bottom - self.top
height = property(getHeight, None, None, "")
def getWidth(self):
return self.right - self.left
width = property(getWidth, None, None, "")
def getSize(self):
return self.width, self.height
size = property(getSize, None, None, "")
def ContainsPoint(self, pt):
"""determines if this RECT contains the given POINT pt
returns True if pt is in this rect
"""
return bool(PtInRect(byref(self), pt))
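# Example: RECT(0, 0, 100, 50).ContainsPoint(POINT(10, 10)) -> True
# (right/bottom edges are exclusive, as with PtInRect itself).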
RECTL = RECT
class SIZE(Structure):
_fields_ = [('cx', LONG), ('cy', LONG)]
SIZEL = SIZE
##class MSG(Structure):
## _fields_ = [("hWnd", HWND),
## ("message", UINT),
## ("wParam", WPARAM),
## ("lParam", LPARAM),
## ("time", DWORD),
## ("pt", POINT)]
## def __str__(self):
## return "MSG {%d %d %d %d %d %s}" % (self.hWnd, self.message, self.wParam, self.lParam,
## self.time, str(self.pt))
#Hack: we need to use the same MSG type as comtypes.ole uses!
from ctypes.wintypes import MSG
class ACCEL(Structure):
_fields_ = [("fVirt", BYTE),
("key", WORD),
("cmd", WORD)]
class CREATESTRUCT(Structure):
_fields_ = [("lpCreateParams", LPVOID),
("hInstance", HINSTANCE),
("hMenu", HMENU),
("hwndParent", HWND),
("cx", INT),
("cy", INT),
("x", INT),
("y", INT),
("style", LONG)]
if UNICODE:
_fields_ += [("lpszName", c_wchar_p), ("lpszClass", c_wchar_p)]
else:
_fields_ += [("lpszName", c_char_p), ("lpszClass", c_char_p)]
_fields_.append(("dwExStyle", DWORD))
class NMHDR(Structure):
_fields_ = [("hwndFrom", HWND),
("idFrom", UINT),
("code", UINT)]
class PAINTSTRUCT(Structure):
_fields_ = [("hdc", HDC),
("fErase", BOOL),
("rcPaint", RECT),
("fRestore", BOOL),
("fIncUpdate", BOOL),
("rgbReserved", c_byte * 32)]
class MENUITEMINFO(Structure):
_fields_ = [("cbSize", UINT),
("fMask", UINT),
("fType", UINT),
("fState", UINT),
("wID", UINT),
("hSubMenu", HMENU),
("hbmpChecked", HBITMAP),
("hbmpUnchecked", HBITMAP),
("dwItemData", ULONG_PTR)]
if UNICODE:
_fields_.append(("dwTypeData", c_wchar_p))
else:
_fields_.append(("dwTypeData", c_char_p))
_fields_.append(("cch", UINT))
if WINVER >= 0x0500:
_fields_.append(("hbmpItem", HBITMAP))
class DLGTEMPLATE(Structure):
_pack_ = 2
_fields_ = [
("style", DWORD),
("exStyle", DWORD),
("cDlgItems", WORD),
("x", c_short),
("y", c_short),
("cx", c_short),
("cy", c_short)
]
class DLGITEMTEMPLATE(Structure):
_pack_ = 2
_fields_ = [
("style", DWORD),
("exStyle", DWORD),
("x", c_short),
("y", c_short),
("cx", c_short),
("cy", c_short),
("id", WORD)
]
class COPYDATASTRUCT(Structure):
_fields_ = [
("dwData", ULONG_PTR),
("cbData", DWORD),
("lpData", PVOID)]
def LOWORD(dword):
return dword & 0x0000ffff
def HIWORD(dword):
return dword >> 16
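# Example: for dword == 0x03040102, LOWORD -> 0x0102 and HIWORD -> 0x0304,
# e.g. extracting the control id (LOWORD) and notification code (HIWORD)
# from WM_COMMAND's wParam.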
TRUE = 1
FALSE = 0
NULL = 0
IDI_APPLICATION = 32512
SW_HIDE = 0
SW_SHOWNORMAL = 1
SW_NORMAL = 1
SW_SHOWMINIMIZED = 2
SW_SHOWMAXIMIZED = 3
SW_MAXIMIZE = 3
SW_SHOWNOACTIVATE = 4
SW_SHOW = 5
SW_MINIMIZE = 6
SW_SHOWMINNOACTIVE = 7
SW_SHOWNA = 8
SW_RESTORE = 9
SW_SHOWDEFAULT = 10
SW_FORCEMINIMIZE = 11
SW_MAX = 11
EN_CHANGE = 768
MSGS = [('WM_NULL', 0),
('WM_CREATE', 1),
('WM_CANCELMODE', 31),
('WM_CAPTURECHANGED', 533),
('WM_CLOSE', 16),
('WM_COMMAND', 273),
('WM_DESTROY', 2),
('WM_ERASEBKGND', 20),
('WM_GETFONT', 49),
('WM_INITDIALOG', 272),
('WM_INITMENUPOPUP', 279),
('WM_KEYDOWN', 256),
('WM_KEYFIRST', 256),
('WM_KEYLAST', 264),
('WM_KEYUP', 257),
('WM_LBUTTONDBLCLK', 515),
('WM_LBUTTONDOWN', 513),
('WM_LBUTTONUP', 514),
('WM_MBUTTONDBLCLK', 521),
('WM_MBUTTONDOWN', 519),
('WM_MBUTTONUP', 520),
('WM_MENUSELECT', 287),
('WM_MOUSEFIRST', 512),
('WM_MOUSEHOVER', 673),
('WM_MOUSELEAVE', 675),
('WM_MOUSEMOVE', 512),
('WM_MOVE', 3),
('WM_NCCREATE', 129),
('WM_NCDESTROY', 130),
('WM_NOTIFY', 78),
('WM_PAINT', 15),
('WM_RBUTTONDBLCLK', 518),
('WM_RBUTTONDOWN', 516),
('WM_RBUTTONUP', 517),
('WM_SETCURSOR', 32),
('WM_SETFONT', 48),
('WM_SETREDRAW', 11),
('WM_SIZE', 5),
('WM_SYSKEYDOWN', 260),
('WM_SYSKEYUP', 261),
('WM_USER', 1024),
('WM_WINDOWPOSCHANGED', 71),
('WM_WINDOWPOSCHANGING', 70),
('WM_SETTEXT', 12),
('WM_GETTEXT', 13),
('WM_GETTEXTLENGTH', 14),
('WM_ACTIVATE', 6),
('WM_HSCROLL', 276),
('WM_VSCROLL', 277),
('WM_CTLCOLORBTN', 309),
('WM_CTLCOLORDLG', 310),
('WM_CTLCOLOREDIT', 307),
('WM_CTLCOLORLISTBOX', 308),
('WM_CTLCOLORMSGBOX', 306),
('WM_CTLCOLORSCROLLBAR', 311),
('WM_CTLCOLORSTATIC', 312),
('WM_TIMER', 0x0113),
('WM_CONTEXTMENU', 0x007B),
('WM_COPYDATA', 0x004A),
('WM_ACTIVATEAPP', 0x001C),
('WM_NCACTIVATE', 0x0086)
]
# insert WM_* message codes as module-level constants:
for key, val in MSGS:
    globals()[key] = val  # plain module-global assignment, no exec needed
BN_CLICKED = 0
CS_HREDRAW = 2
CS_VREDRAW = 1
#~ WHITE_BRUSH = 0
MIIM_STATE = 1
MIIM_ID = 2
MIIM_SUBMENU = 4
MIIM_CHECKMARKS = 8
MIIM_TYPE = 16
MIIM_DATA = 32
MIIM_STRING = 64
MIIM_BITMAP = 128
MIIM_FTYPE = 256
MFT_BITMAP = 4
MFT_MENUBARBREAK = 32
MFT_MENUBREAK = 64
MFT_OWNERDRAW = 256
MFT_RADIOCHECK = 512
MFT_RIGHTJUSTIFY = 0x4000
MFT_SEPARATOR = 0x800
MFT_RIGHTORDER = 0x2000L
MFT_STRING = 0
# Menu flags for Add/Check/EnableMenuItem()
MF_INSERT = 0x00000000L
MF_CHANGE = 0x00000080L
MF_APPEND = 0x00000100L
MF_DELETE = 0x00000200L
MF_REMOVE = 0x00001000L
MF_BYCOMMAND = 0x00000000L
MF_BYPOSITION = 0x00000400L
MF_SEPARATOR = 0x00000800L
MF_ENABLED = 0x00000000L
MF_GRAYED = 0x00000001L
MF_DISABLED = 0x00000002L
MF_UNCHECKED = 0x00000000L
MF_CHECKED = 0x00000008L
MF_USECHECKBITMAPS = 0x00000200L
MF_STRING = 0x00000000L
MF_BITMAP = 0x00000004L
MF_OWNERDRAW = 0x00000100L
MF_POPUP = 0x00000010L
MF_MENUBARBREAK = 0x00000020L
MF_MENUBREAK = 0x00000040L
MF_UNHILITE = 0x00000000L
MF_HILITE = 0x00000080L
if WINVER >= 0x0400:
MF_DEFAULT = 0x00001000L
MF_SYSMENU = 0x00002000L
MF_HELP = 0x00004000L
if WINVER >= 0x0400:
MF_RIGHTJUSTIFY = 0x00004000L
MF_MOUSESELECT = 0x00008000L
if WINVER >= 0x0400:
MF_END = 0x00000080L # Obsolete -- only used by old RES files
if WINVER >= 0x0400:
MFT_STRING = MF_STRING
MFT_BITMAP = MF_BITMAP
MFT_MENUBARBREAK = MF_MENUBARBREAK
MFT_MENUBREAK = MF_MENUBREAK
MFT_OWNERDRAW = MF_OWNERDRAW
MFT_RADIOCHECK = 0x00000200L
MFT_SEPARATOR = MF_SEPARATOR
MFT_RIGHTORDER = 0x00002000L
MFT_RIGHTJUSTIFY = MF_RIGHTJUSTIFY
# Menu item state flags (MFS_*) for MENUITEMINFO.fState / Get/SetMenuItemInfo()
MFS_GRAYED = 0x00000003L
MFS_DISABLED = MFS_GRAYED
MFS_CHECKED = MF_CHECKED
MFS_HILITE = MF_HILITE
MFS_ENABLED = MF_ENABLED
MFS_UNCHECKED = MF_UNCHECKED
MFS_UNHILITE = MF_UNHILITE
MFS_DEFAULT = MF_DEFAULT
LOCALE_SYSTEM_DEFAULT = 0x800
WS_BORDER = 0x800000
WS_CAPTION = 0xc00000
WS_CHILD = 0x40000000
WS_CHILDWINDOW = 0x40000000
WS_CLIPCHILDREN = 0x2000000
WS_CLIPSIBLINGS = 0x4000000
WS_DISABLED = 0x8000000
WS_DLGFRAME = 0x400000
WS_GROUP = 0x20000
WS_HSCROLL = 0x100000
WS_ICONIC = 0x20000000
WS_MAXIMIZE = 0x1000000
WS_MAXIMIZEBOX = 0x10000
WS_MINIMIZE = 0x20000000
WS_MINIMIZEBOX = 0x20000
WS_OVERLAPPED = 0
WS_OVERLAPPEDWINDOW = 0xcf0000
WS_POPUP = 0x80000000L
WS_POPUPWINDOW = 0x80880000
WS_SIZEBOX = 0x40000
WS_SYSMENU = 0x80000
WS_TABSTOP = 0x10000
WS_THICKFRAME = 0x40000
WS_TILED = 0
WS_TILEDWINDOW = 0xcf0000
WS_VISIBLE = 0x10000000
WS_VSCROLL = 0x200000
# Extended Window Styles
WS_EX_DLGMODALFRAME = 0x00000001L
WS_EX_NOPARENTNOTIFY = 0x00000004L
WS_EX_TOPMOST = 0x00000008L
WS_EX_ACCEPTFILES = 0x00000010L
WS_EX_TRANSPARENT = 0x00000020L
if WINVER >= 0x0400:
WS_EX_MDICHILD = 0x00000040L
WS_EX_TOOLWINDOW = 0x00000080L
WS_EX_WINDOWEDGE = 0x00000100L
WS_EX_CLIENTEDGE = 0x00000200L
WS_EX_CONTEXTHELP = 0x00000400L
WS_EX_RIGHT = 0x00001000L
WS_EX_LEFT = 0x00000000L
WS_EX_RTLREADING = 0x00002000L
WS_EX_LTRREADING = 0x00000000L
WS_EX_LEFTSCROLLBAR = 0x00004000L
WS_EX_RIGHTSCROLLBAR = 0x00000000L
WS_EX_CONTROLPARENT = 0x00010000L
WS_EX_STATICEDGE = 0x00020000L
WS_EX_APPWINDOW = 0x00040000L
WS_EX_OVERLAPPEDWINDOW = (WS_EX_WINDOWEDGE | WS_EX_CLIENTEDGE)
WS_EX_PALETTEWINDOW = (WS_EX_WINDOWEDGE | WS_EX_TOOLWINDOW | WS_EX_TOPMOST)
if WINVER >= 0x0500:
WS_EX_LAYERED = 0x00080000
WS_EX_NOINHERITLAYOUT = 0x00100000L # Disable inheritance of mirroring by children
WS_EX_LAYOUTRTL = 0x00400000L # Right to left mirroring
if WINVER >= 0x0501:
WS_EX_COMPOSITED = 0x02000000L
if WINVER >= 0x0500:
WS_EX_NOACTIVATE = 0x08000000L
WA_INACTIVE = 0
WA_ACTIVE = 1
WA_CLICKACTIVE = 2
RB_SETBARINFO = WM_USER + 4
RB_GETBANDCOUNT = WM_USER + 12
RB_INSERTBANDA = WM_USER + 1
RB_INSERTBANDW = WM_USER + 10
RB_INSERTBAND = RB_INSERTBANDA
RBBIM_STYLE = 1
RBBIM_COLORS = 2
RBBIM_TEXT = 4
RBBIM_IMAGE = 8
RBBIM_CHILD = 16
RBBIM_CHILDSIZE = 32
RBBIM_SIZE = 64
RBBIM_BACKGROUND = 128
RBBIM_ID = 256
RBBIM_IDEALSIZE = 0x00000200
TPM_CENTERALIGN = 4
TPM_LEFTALIGN = 0
TPM_RIGHTALIGN = 8
TPM_LEFTBUTTON = 0
TPM_RIGHTBUTTON = 2
TPM_HORIZONTAL = 0
TPM_VERTICAL = 64
TPM_TOPALIGN = 0
TPM_VCENTERALIGN = 16
TPM_BOTTOMALIGN = 32
TPM_NONOTIFY = 128
TPM_RETURNCMD = 256
TBIF_TEXT = 0x00000002
DT_NOPREFIX = 0x00000800
DT_HIDEPREFIX = 1048576
WH_CBT = 5
WH_MSGFILTER = (-1)
I_IMAGENONE = -2
TBSTATE_ENABLED = 4
BTNS_SHOWTEXT = 0x00000040
CW_USEDEFAULT = 0x80000000
COLOR_3DFACE = 15
BF_LEFT = 1
BF_TOP = 2
BF_RIGHT = 4
BF_BOTTOM = 8
BDR_RAISEDOUTER = 1
BDR_SUNKENOUTER = 2
== int(abs(y)) and abs(y) <= 512:
pow2 = [xsym]
pow2_scal = [theano.scalar.get_scalar_type(xsym.dtype)()]
y_to_do = abs(y)
for i in xrange(int(numpy.log2(y_to_do))):
pow2.append(T.sqr(pow2[i]))
pow2_scal.append(theano.scalar.sqr(pow2_scal[i]))
rval1 = None
rval1_scal = None
while y_to_do > 0:
log_to_do = int(numpy.log2(y_to_do))
if rval1:
rval1 *= pow2[log_to_do]
rval1_scal *= pow2_scal[log_to_do]
else:
rval1 = pow2[log_to_do]
rval1_scal = pow2_scal[log_to_do]
y_to_do -= 2 ** log_to_do
if abs(y) > 2:
# We fuse all the pow together here to make
# compilation faster
rval1 = Elemwise(
theano.scalar.Composite(
[pow2_scal[0]], [rval1_scal])).make_node(xsym)
if y < 0:
rval = [T.inv(rval1)]
else:
rval = [rval1]
if rval:
rval[0] = T.cast(rval[0], odtype)
assert rval[0].type == node.outputs[0].type, (
rval, node.outputs)
return rval
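# Illustrative trace of the squaring chain above (not from the original source):
# for y = 6, pow2 holds [x, x**2, x**4] and the loop combines x**4 * x**2,
# computing x**6 with three multiplications instead of five.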
@gof.local_optimizer([T.mul])
def local_mul_specialize(node):
"""
Remove special-case constants from mul arguments and useless neg in inputs.
mul(-1, x) -> neg(x)
mul(1, x, y) -> mul(x, y)
mul(0, ...) -> alloc(0, shapes...)
This is not done if we would add more nodes in the graph, like with:
mul(-1, x, y) -/-> neg(mul(x, y))
"""
# here, we are past the point of canonicalization, so we don't
# want to put in un-necessary fills.
#
# at this point [post canonicalize], mul() may have many inputs.
if node.op == T.mul:
# strip constants 1, -1 and 0 from the inputs and unwrap any neg() wrappers
neg = False
new_inputs = []
nb_neg_node = 0
nb_cst = 0
for input in node.inputs:
# remove any neg arguments
while input.owner and input.owner.op == T.neg:
neg ^= True
input = input.owner.inputs[0]
nb_neg_node += 1
# remove special case arguments of 1, -1 or 0
y = local_mul_canonizer.get_constant(input)
if y == 1.0:
nb_cst += 1
elif y == -1.0:
nb_cst += 1
neg ^= True # toggles
elif y == 0.0:
# if we find any zero, we just return right away
return [broadcast_like(0, node.outputs[0], node.fgraph)]
else:
new_inputs.append(input)
if new_inputs != node.inputs:
if new_inputs:
if len(new_inputs) == 1:
if neg:
rval = -new_inputs[0]
else:
rval = new_inputs[0]
else:
# Skip the rewrite when it would only rebuild an equivalent graph: the only
# change was removing a single -1 constant, with no neg() nodes stripped.
if (neg and
nb_neg_node == 0 and
nb_cst == 1):
return
elif neg:
# Don't add an extra neg node as we can't
# fully replace this mul by a neg.
m1 = numpy.asarray(-1, dtype=node.outputs[0].dtype)
new_inputs = [m1] + new_inputs
rval = T.mul(*new_inputs)
return [broadcast_like(rval, node.outputs[0], node.fgraph)]
else:
# there are no variable inputs to mul
# N.B. this could have been constant-folded...
if neg:
return [broadcast_like(-1, node.outputs[0], node.fgraph)]
else:
return [broadcast_like(1, node.outputs[0], node.fgraph)]
register_specialize(local_mul_specialize)
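# What this specialization does, sketched on symbolic inputs (illustrative only):
#   mul(-1, x)   -> neg(x)
#   mul(1, x, y) -> mul(x, y)
#   mul(0, x, y) -> a zero tensor broadcast to the output shape
#   mul(-1, x, y) is left alone, since neg(mul(x, y)) would not be cheaper.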
@gof.local_optimizer([T.add])
def local_add_specialize(node):
def fill_chain(v):
out = _fill_chain(v, node.inputs)
return out
# here, we are past the point of canonicalization, so we don't want
# to put in un-necessary fills.
if node.op == T.add:
new_inputs = []
for input in node.inputs:
try:
y = get_scalar_constant_value(input)
except NotScalarConstantError:
y = input
if numpy.all(y == 0.0):
continue
new_inputs.append(input)
if len(new_inputs) < len(node.inputs):
dtype = node.outputs[0].type.dtype
if len(new_inputs) == 0:
# we got rid of the entire expression!
ndim = node.outputs[0].type.ndim
# Build the zeros through T.constant so identical constants hit the constant cache
cst = T.constant(numpy.zeros((1,) * ndim, dtype=dtype))
assert cst.type.broadcastable == (True,) * ndim
return fill_chain(cst)
if len(new_inputs) == 1:
ret = fill_chain(new_inputs[0])
else:
ret = fill_chain(T.add(*new_inputs))
# The dtype should not be changed. It can happen if the input
# that was forcing upcasting was equal to 0.
if ret[0].dtype != dtype:
ret = [T.cast(ret[0], dtype)]
return ret
else:
return False
register_specialize(local_add_specialize)
mul_canonizer = in2out(gof.LocalOptGroup(local_mul_canonizer, local_fill_cut,
local_fill_sink),
name='mul_canonizer_groups')
def check_for_x_over_absX(numerators, denominators):
"""Convert x/abs(x) into sign(x). """
# TODO: this function should dig/search through dimshuffles
# This won't catch a dimshuffled absolute value
for den in list(denominators):
if (den.owner and den.owner.op == T.abs_ and
den.owner.inputs[0] in numerators):
if den.owner.inputs[0].type.dtype.startswith('complex'):
# TODO: Make an Op that projects a complex number to
# have unit length but projects 0 to 0. That
# would be a weird Op, but consistent with the
# special case below. I heard there's some
# convention in Matlab that is similar to
# this... but not sure.
pass
else:
denominators.remove(den)
numerators.remove(den.owner.inputs[0])
numerators.append(T.sgn(den.owner.inputs[0]))
return numerators, denominators
local_mul_canonizer.add_simplifier(check_for_x_over_absX, 'X_over_absX')
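# Example of the simplifier just registered (illustrative): when a numerator x
# appears under abs() in the denominator, both are removed and sgn(x) is added
# to the numerators, so x / abs(x) simplifies to sgn(x).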
@register_canonicalize
@gof.local_optimizer([T.abs_])
def local_abs_lift(node):
"""
Move the abs toward the input.
    This is needed for check_for_x_over_absX to apply in more cases.
"""
if node.op == T.abs_ and node.inputs[0].owner:
assert node.nin == 1
if node.inputs[0].owner.op == T.mul:
return [T.mul(*[T.abs_(i) for i in node.inputs[0].owner.inputs])]
if node.inputs[0].owner.op == T.true_div:
i = node.inputs[0].owner.inputs
return [T.true_div(T.abs_(i[0]), T.abs_(i[1]))]
@register_specialize
@gof.local_optimizer([T.mul, T.true_div])
def local_abs_merge(node):
"""
    Merge the abs nodes generated by local_abs_lift once the canonizer no
    longer needs them.
"""
if node.op == T.mul and sum([i.owner.op == T.abs_ for i in node.inputs
if i.owner]) > 1:
inputs = []
for i in node.inputs:
if i.owner and i.owner.op == T.abs_:
inputs.append(i.owner.inputs[0])
elif isinstance(i, Constant):
try:
const = get_scalar_constant_value(i)
except NotScalarConstantError:
return False
if not (const >= 0).all():
return False
inputs.append(i)
else:
return False
return [T.abs_(T.mul(*inputs))]
if node.op == T.true_div and sum([i.owner.op == T.abs_ for i in
node.inputs if i.owner]) == 2:
return [T.abs_(T.true_div(node.inputs[0].owner.inputs[0],
node.inputs[1].owner.inputs[0]))]
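# Illustrative effect (not from the original source):
#   abs(x) * abs(y)  -> abs(x * y)
#   abs(x) / abs(y)  -> abs(x / y)
# and a non-negative constant c in the mul is folded inside the abs unchanged.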
@register_stabilize
@register_specialize
@gof.local_optimizer([T.log])
def local_log1p(node):
# log(1+x) -> log1p(x)
if node.op == T.log:
log_arg, = node.inputs
if log_arg.owner and log_arg.owner.op == T.add:
scalars, scalar_inputs, nonconsts = scalarconsts_rest(
log_arg.owner.inputs)
# scalar_inputs are potentially dimshuffled and fill'd scalars
if scalars and numpy.allclose(numpy.sum(scalars), 1):
if not nonconsts:
pass # leave for constant-merge
if len(nonconsts) == 1:
return _fill_chain(T.log1p(nonconsts[0]), scalar_inputs)
else:
return _fill_chain(T.log1p(T.add(*nonconsts)),
scalar_inputs)
# TODO: in canonicalize, change log10 and log2 -> log
@register_stabilize
@register_specialize
@gof.local_optimizer([T.log])
def local_log_add(node):
# log(exp(x)+exp(y))
#
# Suppose x >= y
# log(exp(x) + exp(y))
# log(exp(x) * (1 + exp(y)/exp(x)))
# x + log(1 + exp(y)/exp(x))
# x + log1p(exp(y)/exp(x))
# x + log1p(exp(y-x))
if node.op == T.log:
z = node.inputs[0]
if z.owner and z.owner.op == T.add:
zi = z.owner.inputs
if len(zi) != 2:
# -- upgrading Maximum to handle multiple inputs wasn't trivial
# TODO
# raise NotImplementedError()
return
pre_exp = [x.owner.inputs[0] for x in zi
if x.owner and x.owner.op == T.exp]
if len(pre_exp) == len(zi):
# all arguments to add are exp(<something>)
max_pre = T.maximum(*pre_exp)
ret = max_pre + T.log1p(T.exp(T.add(*[p - max_pre
for p in pre_exp])))
ret.tag.values_eq_approx = values_eq_approx_remove_inf
return [ret]
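# Numerical motivation, sketched with concrete values (illustrative): for
# x = 1000 and y = 999, exp(x) and exp(y) both overflow, but the rewritten form
# max(x, y) + log1p(exp(min - max)) = 1000 + log1p(exp(-1)) ~= 1000.3133 stays finite.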
def add_calculate(num, denum, aslist=False, out_type=None):
# TODO: make sure that this function and mul_calculate are similar
if out_type is None:
zero = 0.0
else:
zero = theano._asarray(0, dtype=out_type.dtype)
v = reduce(numpy.add, num, zero) - reduce(numpy.add, denum, zero)
if aslist:
if numpy.all(v == 0):
return []
else:
return [v]
return v
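# Quick sanity check of add_calculate on plain Python numbers (illustrative):
#   add_calculate([5, 3], [2])           == 6   # (5 + 3) - 2
#   add_calculate([2], [2], aslist=True) == []  # an exact zero collapses to []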
local_add_canonizer = Canonizer(T.add, T.sub, T.neg, add_calculate)
add_canonizer = in2out(gof.LocalOptGroup(local_add_canonizer, local_fill_cut,
local_fill_sink),
name='add_canonizer_group')
register_canonicalize(local_add_canonizer, name='local_add_canonizer')
##################
# Distributivity #
##################
def distribute_greedy(pos_pairs, neg_pairs, num, denum,
out_type, minscore=0):
# each pair in pos_pairs and neg_pairs is a num/denum pair. this
# function attempts to add num and denum to the corresponding parts
# of each pair, and counts how many multiplications/divisions can
# be saved in that way.
# each division is counted like div_cost multiplications
# (typically, division costs more so we are willing to multiply more
# in order to divide less)
# 1.5 was obtained through an informal test and may very well be
# platform dependent
div_cost = 1.5
# score is number of operations saved, higher is better
score = len(num) + div_cost * len(denum)
new_pos_pairs = list(itertools.starmap(local_mul_canonizer.simplify,
[(n + num, d + denum, out_type) for (n, d)
in pos_pairs]))
new_neg_pairs = list(itertools.starmap(local_mul_canonizer.simplify,
[(n + num, d + denum, out_type) for (n, d)
in neg_pairs]))
for (n, d), (nn, dd) in zip(pos_pairs + neg_pairs, new_pos_pairs +
new_neg_pairs):
# We calculate how many operations we are saving with the new
# num and denum
score += len(n) + div_cost * len(d) - len(nn) - div_cost * len(dd)
if score <= minscore:
# the change is not applied because it adds too many operations
return False, pos_pairs, neg_pairs
return True, new_pos_pairs, new_neg_pairs
def attempt_distribution(factor, num, denum, out_type):
# we try to insert each num and each denum in the factor
# returns: changes?, new_factor, new_num, new_denum
# if there are changes, new_num and new_denum contain all the numerators
# and denumerators that could not be distributed in the factor
    pos,
IV'] = sum([c['Minimum Levenshtein Distance'] for c in eval_distance_result if c['Minimum Levenshtein Distance'] == 1 and c['original spelling status'] == 'misspell' and c['OOV/IV'] == 'IV']) / ALL_IV_NEED_CORRECTION
original_dict['Minimum Levenshtein Distance']['Recall for Minimum Levenshtein Distance on Entire Dataset'] = len(
[c for c in eval_distance_result if
c['Minimum Levenshtein Distance'] == 1 and c['original spelling status'] == 'correct']) / ALL_CORRECT_SPELLING
original_dict['Minimum Levenshtein Distance']['Recall for Minimum Levenshtein Distance on Entire Dataset, IV'] = len(
[c for c in eval_distance_result if
c['Minimum Levenshtein Distance'] == 1 and c['original spelling status'] == 'correct' and c[
'OOV/IV'] == 'IV']) / ALL_CORRECT_SPELLING_IV
original_dict['Edit Distance No More Than 2']['Precision for Edit Distance No More Than 2 on Entire Dataset'] = sum(
[c['Edit Distance No More Than 2'] for c in eval_distance_result]) / sum(
[c['Edit Distance No More Than 2 Total'] for c in eval_distance_result])
original_dict['Edit Distance No More Than 2'][
'Precision for Edit Distance No More Than 2 on the Misspellings which are IV'] = sum(
[c['Edit Distance No More Than 2'] for c in eval_distance_result if
c['original spelling status'] == 'misspell' and c['OOV/IV'] == 'IV']) / sum(
[c['Edit Distance No More Than 2 Total'] for c in eval_distance_result if
c['original spelling status'] == 'misspell' and c['OOV/IV'] == 'IV'])
original_dict['Edit Distance No More Than 2']['Accuracy for Edit Distance No More Than 2 on the Misspellings which are IV'] = sum(
[c['Edit Distance No More Than 2'] for c in eval_distance_result if
c['original spelling status'] == 'misspell' and c['OOV/IV'] == 'IV']) / ALL_IV_NEED_CORRECTION
original_dict['Edit Distance No More Than 2']['Recall for Edit Distance No More Than 2 on Entire Dataset'] = len(
[c for c in eval_distance_result if
c['Edit Distance No More Than 2'] == 1 and c['original spelling status'] == 'correct']) / ALL_CORRECT_SPELLING
original_dict['Edit Distance No More Than 2']['Recall for Edit Distance No More Than 2 on Entire Dataset, IV'] = len(
[c for c in eval_distance_result if
c['Edit Distance No More Than 2'] == 1 and c['original spelling status'] == 'correct' and c[
'OOV/IV'] == 'IV']) / ALL_CORRECT_SPELLING_IV
original_dict['Edit Distance No More Than 1']['Precision for Edit Distance No More Than 1 on Entire Dataset'] = sum(
[c['Edit Distance No More Than 1'] for c in eval_distance_result]) / sum(
[c['Edit Distance No More Than 1 Total'] for c in eval_distance_result])
original_dict['Edit Distance No More Than 1'][
'Precision for Edit Distance No More Than 1 on the Misspellings which are IV'] = sum(
[c['Edit Distance No More Than 1'] for c in eval_distance_result if
c['original spelling status'] == 'misspell' and c['OOV/IV'] == 'IV']) / sum(
[c['Edit Distance No More Than 1 Total'] for c in eval_distance_result if
c['original spelling status'] == 'misspell' and c['OOV/IV'] == 'IV'])
original_dict['Edit Distance No More Than 1'][
'Accuracy for Edit Distance No More Than 1 on the Misspellings which are IV'] = sum(
[c['Edit Distance No More Than 1'] for c in eval_distance_result if
c['original spelling status'] == 'misspell' and c['OOV/IV'] == 'IV']) /ALL_IV_NEED_CORRECTION
original_dict['Edit Distance No More Than 1']['Recall for Edit Distance No More Than 1 on Entire Dataset'] = len(
[c for c in eval_distance_result if
c['Edit Distance No More Than 1'] == 1 and c['original spelling status'] == 'correct']) / ALL_CORRECT_SPELLING
original_dict['Edit Distance No More Than 1']['Recall for Edit Distance No More Than 1 on Entire Dataset, IV'] = len(
[c for c in eval_distance_result if
c['Edit Distance No More Than 1'] == 1 and c['original spelling status'] == 'correct' and c[
'OOV/IV'] == 'IV']) / ALL_CORRECT_SPELLING_IV
original_dict['3-Gram with Jaccard Best Match']['Precision for 3-Gram with Jaccard Best Match on Entire Dataset'] = sum(
[c['3-Gram with Jaccard Best Match'] for c in eval_distance_result]) / len(
[c['3-Gram with Jaccard Best Match'] for c in eval_distance_result])
original_dict['3-Gram with Jaccard Best Match'][
'Precision for 3-Gram with Jaccard Best Match on the Misspellings which are IV'] = sum(
[c['3-Gram with Jaccard Best Match'] for c in eval_distance_result if
c['3-Gram with Jaccard Best Match'] == 1 and c['original spelling status'] == 'misspell' and c[
'OOV/IV'] == 'IV']) / ALL_IV_NEED_CORRECTION
original_dict['3-Gram with Jaccard Best Match'][
'Accuracy for 3-Gram with Jaccard Best Match on the Misspellings which are IV'] = sum(
[c['3-Gram with Jaccard Best Match'] for c in eval_distance_result if
c['3-Gram with Jaccard Best Match'] == 1 and c['original spelling status'] == 'misspell' and c[
'OOV/IV'] == 'IV']) / ALL_IV_NEED_CORRECTION
original_dict['3-Gram with Jaccard Best Match']['Recall for 3-Gram with Jaccard Best Match on Entire Dataset'] = len(
[c for c in eval_distance_result if
c['3-Gram with Jaccard Best Match'] == 1 and c['original spelling status'] == 'correct']) / ALL_CORRECT_SPELLING
original_dict['3-Gram with Jaccard Best Match'][
    'Recall for 3-Gram with Jaccard Best Match on Entire Dataset, IV'] = len(
    [c for c in eval_distance_result if
     c['3-Gram with Jaccard Best Match'] == 1 and c['original spelling status'] == 'correct' and
     c['OOV/IV'] == 'IV']) / ALL_CORRECT_SPELLING_IV
original_dict['2-Gram with Jaccard Best Match']['Precision for 2-Gram with Jaccard Best Match on Entire Dataset'] = sum(
[c['2-Gram with Jaccard Best Match'] for c in eval_distance_result]) / len(
[c['2-Gram with Jaccard Best Match'] for c in eval_distance_result])
original_dict['2-Gram with Jaccard Best Match'][
'Precision for 2-Gram with Jaccard Best Match on the Misspellings which are IV'] = sum(
[c['2-Gram with Jaccard Best Match'] for c in eval_distance_result if
c['2-Gram with Jaccard Best Match'] == 1 and c['original spelling status'] == 'misspell' and c[
'OOV/IV'] == 'IV']) / ALL_IV_NEED_CORRECTION
original_dict['2-Gram with Jaccard Best Match'][
'Accuracy for 2-Gram with Jaccard Best Match on the Misspellings which are IV'] = sum(
[c['2-Gram with Jaccard Best Match'] for c in eval_distance_result if
c['2-Gram with Jaccard Best Match'] == 1 and c['original spelling status'] == 'misspell' and c[
'OOV/IV'] == 'IV']) / ALL_IV_NEED_CORRECTION
original_dict['2-Gram with Jaccard Best Match']['Recall for 2-Gram with Jaccard Best Match on Entire Dataset'] = len(
[c for c in eval_distance_result if
c['2-Gram with Jaccard Best Match'] == 1 and c['original spelling status'] == 'correct']) / ALL_CORRECT_SPELLING
original_dict['2-Gram with Jaccard Best Match'][
    'Recall for 2-Gram with Jaccard Best Match on Entire Dataset, IV'] = len(
    [c for c in eval_distance_result if
     c['2-Gram with Jaccard Best Match'] == 1 and c['original spelling status'] == 'correct' and
     c['OOV/IV'] == 'IV']) / ALL_CORRECT_SPELLING_IV
original_dict['3-Gram Jaccard Distance No More Than 0.2'][
'Precision for 3-Gram Jaccard Distance No More Than 0.2 on Entire Dataset'] = sum(
[c['3-Gram Jaccard Distance No More Than 0.2'] for c in eval_distance_result]) / sum(
[c['3-Gram Jaccard Distance No More Than 0.2 Total'] for c in eval_distance_result])
original_dict['3-Gram Jaccard Distance No More Than 0.2'][
'Precision for 3-Gram Jaccard Distance No More Than 0.2 on the Misspellings which are IV'] = sum(
[c['3-Gram Jaccard Distance No More Than 0.2'] for c in eval_distance_result if
c['original spelling status'] == 'misspell' and c['OOV/IV'] == 'IV']) / sum(
[c['3-Gram Jaccard Distance No More Than 0.2 Total'] for c in eval_distance_result if
c['original spelling status'] == 'misspell' and c['OOV/IV'] == 'IV'])
original_dict['3-Gram Jaccard Distance No More Than 0.2'][
'Accuracy for 3-Gram Jaccard Distance No More Than 0.2 on the Misspellings which are IV'] = sum(
[c['3-Gram Jaccard Distance No More Than 0.2'] for c in eval_distance_result if
c['original spelling status'] == 'misspell' and c['OOV/IV'] == 'IV']) / ALL_IV_NEED_CORRECTION
original_dict['3-Gram Jaccard Distance No More Than 0.2'][
    'Recall for 3-Gram Jaccard Distance No More Than 0.2 on Entire Dataset'] = len(
    [c for c in eval_distance_result if
     c['3-Gram Jaccard Distance No More Than 0.2'] == 1 and
     c['original spelling status'] == 'correct']) / ALL_CORRECT_SPELLING
original_dict['3-Gram Jaccard Distance No More Than 0.2'][
'Recall for 3-Gram Jaccard Distance No More Than 0.2 on Entire Dataset, IV'] = len(
[c for c in eval_distance_result if
c['3-Gram Jaccard Distance No More Than 0.2'] == 1 and c['original spelling status'] == 'correct' and c[
'OOV/IV'] == 'IV']) / ALL_CORRECT_SPELLING_IV
original_dict['2-Gram Jaccard Distance No More Than 0.2'][
'Precision for 2-Gram Jaccard Distance No More Than 0.2 on Entire Dataset'] = sum(
[c['2-Gram Jaccard Distance No More Than 0.2'] for c in eval_distance_result]) / sum(
[c['2-Gram Jaccard Distance No More Than 0.2 Total'] for c in eval_distance_result])
original_dict['2-Gram Jaccard Distance
uk_98
+ 31680 * uk_99,
uk_0
+ 50719 * uk_1
+ 2789545 * uk_10
+ 170280 * uk_100
+ 297000 * uk_101
+ 60720 * uk_102
+ 915255 * uk_103
+ 1596375 * uk_104
+ 326370 * uk_105
+ 2784375 * uk_106
+ 569250 * uk_107
+ 116380 * uk_108
+ 10648 * uk_109
+ 1115818 * uk_11
+ 22264 * uk_110
+ 11616 * uk_111
+ 62436 * uk_112
+ 108900 * uk_113
+ 22264 * uk_114
+ 46552 * uk_115
+ 24288 * uk_116
+ 130548 * uk_117
+ 227700 * uk_118
+ 46552 * uk_119
+ 2333074 * uk_12
+ 12672 * uk_120
+ 68112 * uk_121
+ 118800 * uk_122
+ 24288 * uk_123
+ 366102 * uk_124
+ 638550 * uk_125
+ 130548 * uk_126
+ 1113750 * uk_127
+ 227700 * uk_128
+ 46552 * uk_129
+ 1217256 * uk_13
+ 97336 * uk_130
+ 50784 * uk_131
+ 272964 * uk_132
+ 476100 * uk_133
+ 97336 * uk_134
+ 26496 * uk_135
+ 142416 * uk_136
+ 248400 * uk_137
+ 50784 * uk_138
+ 765486 * uk_139
+ 6542751 * uk_14
+ 1335150 * uk_140
+ 272964 * uk_141
+ 2328750 * uk_142
+ 476100 * uk_143
+ 97336 * uk_144
+ 13824 * uk_145
+ 74304 * uk_146
+ 129600 * uk_147
+ 26496 * uk_148
+ 399384 * uk_149
+ 11411775 * uk_15
+ 696600 * uk_150
+ 142416 * uk_151
+ 1215000 * uk_152
+ 248400 * uk_153
+ 50784 * uk_154
+ 2146689 * uk_155
+ 3744225 * uk_156
+ 765486 * uk_157
+ 6530625 * uk_158
+ 1335150 * uk_159
+ 2333074 * uk_16
+ 272964 * uk_160
+ 11390625 * uk_161
+ 2328750 * uk_162
+ 476100 * uk_163
+ 97336 * uk_164
+ 3025 * uk_17
+ 1210 * uk_18
+ 2530 * uk_19
+ 55 * uk_2
+ 1320 * uk_20
+ 7095 * uk_21
+ 12375 * uk_22
+ 2530 * uk_23
+ 484 * uk_24
+ 1012 * uk_25
+ 528 * uk_26
+ 2838 * uk_27
+ 4950 * uk_28
+ 1012 * uk_29
+ 22 * uk_3
+ 2116 * uk_30
+ 1104 * uk_31
+ 5934 * uk_32
+ 10350 * uk_33
+ 2116 * uk_34
+ 576 * uk_35
+ 3096 * uk_36
+ 5400 * uk_37
+ 1104 * uk_38
+ 16641 * uk_39
+ 46 * uk_4
+ 29025 * uk_40
+ 5934 * uk_41
+ 50625 * uk_42
+ 10350 * uk_43
+ 2116 * uk_44
+ 130470415844959 * uk_45
+ 141482932855 * uk_46
+ 56593173142 * uk_47
+ 118331180206 * uk_48
+ 61738007064 * uk_49
+ 24 * uk_5
+ 331841787969 * uk_50
+ 578793816225 * uk_51
+ 118331180206 * uk_52
+ 153424975 * uk_53
+ 61369990 * uk_54
+ 128319070 * uk_55
+ 66949080 * uk_56
+ 359851305 * uk_57
+ 627647625 * uk_58
+ 128319070 * uk_59
+ 129 * uk_6
+ 24547996 * uk_60
+ 51327628 * uk_61
+ 26779632 * uk_62
+ 143940522 * uk_63
+ 251059050 * uk_64
+ 51327628 * uk_65
+ 107321404 * uk_66
+ 55993776 * uk_67
+ 300966546 * uk_68
+ 524941650 * uk_69
+ 225 * uk_7
+ 107321404 * uk_70
+ 29214144 * uk_71
+ 157026024 * uk_72
+ 273882600 * uk_73
+ 55993776 * uk_74
+ 844014879 * uk_75
+ 1472118975 * uk_76
+ 300966546 * uk_77
+ 2567649375 * uk_78
+ 524941650 * uk_79
+ 46 * uk_8
+ 107321404 * uk_80
+ 166375 * uk_81
+ 66550 * uk_82
+ 139150 * uk_83
+ 72600 * uk_84
+ 390225 * uk_85
+ 680625 * uk_86
+ 139150 * uk_87
+ 26620 * uk_88
+ 55660 * uk_89
+ 2572416961 * uk_9
+ 29040 * uk_90
+ 156090 * uk_91
+ 272250 * uk_92
+ 55660 * uk_93
+ 116380 * uk_94
+ 60720 * uk_95
+ 326370 * uk_96
+ 569250 * uk_97
+ 116380 * uk_98
+ 31680 * uk_99,
uk_0
+ 50719 * uk_1
+ 2789545 * uk_10
+ 172920 * uk_100
+ 297000 * uk_101
+ 29040 * uk_102
+ 943855 * uk_103
+ 1621125 * uk_104
+ 158510 * uk_105
+ 2784375 * uk_106
+ 272250 * uk_107
+ 26620 * uk_108
+ 10648 * uk_109
+ 1115818 * uk_11
+ 10648 * uk_110
+ 11616 * uk_111
+ 63404 * uk_112
+ 108900 * uk_113
+ 10648 * uk_114
+ 10648 * uk_115
+ 11616 * uk_116
+ 63404 * uk_117
+ 108900 * uk_118
+ 10648 * uk_119
+ 1115818 * uk_12
+ 12672 * uk_120
+ 69168 * uk_121
+ 118800 * uk_122
+ 11616 * uk_123
+ 377542 * uk_124
+ 648450 * uk_125
+ 63404 * uk_126
+ 1113750 * uk_127
+ 108900 * uk_128
+ 10648 * uk_129
+ 1217256 * uk_13
+ 10648 * uk_130
+ 11616 * uk_131
+ 63404 * uk_132
+ 108900 * uk_133
+ 10648 * uk_134
+ 12672 * uk_135
+ 69168 * uk_136
+ 118800 * uk_137
+ 11616 * uk_138
+ 377542 * uk_139
+ 6644189 * uk_14
+ 648450 * uk_140
+ 63404 * uk_141
+ 1113750 * uk_142
+ 108900 * uk_143
+ 10648 * uk_144
+ 13824 * uk_145
+ 75456 * uk_146
+ 129600 * uk_147
+ 12672 * uk_148
+ 411864 * uk_149
+ 11411775 * uk_15
+ 707400 * uk_150
+ 69168 * uk_151
+ 1215000 * uk_152
+ 118800 * uk_153
+ 11616 * uk_154
+ 2248091 * uk_155
+ 3861225 * uk_156
+ 377542 * uk_157
+ 6631875 * uk_158
+ 648450 * uk_159
+ 1115818 * uk_16
+ 63404 * uk_160
+ 11390625 * uk_161
+ 1113750 * uk_162
+ 108900 * uk_163
+ 10648 * uk_164
+ 3025 * uk_17
+ 1210 * uk_18
+ 1210 * uk_19
+ 55 * uk_2
+ 1320 * uk_20
+ 7205 * uk_21
+ 12375 * uk_22
+ 1210 * uk_23
+ 484 * uk_24
+ 484 * uk_25
+ 528 * uk_26
+ 2882 * uk_27
+ 4950 * uk_28
+ 484 * uk_29
+ 22 * uk_3
+ 484 * uk_30
+ 528 * uk_31
+ 2882 * uk_32
+ 4950 * uk_33
+ 484 * uk_34
+ 576 * uk_35
+ 3144 * uk_36
+ 5400 * uk_37
+ 528 * uk_38
+ 17161 * uk_39
+ 22 * uk_4
+ 29475 * uk_40
+ 2882 * uk_41
+ 50625 * uk_42
+ 4950 * uk_43
+ 484 * uk_44
+ 130470415844959 * uk_45
+ 141482932855 * uk_46
+ 56593173142 * uk_47
+ 56593173142 * uk_48
+ 61738007064 * uk_49
+ 24 * uk_5
+ 336986621891 * uk_50
+ 578793816225 * uk_51
+ 56593173142 * uk_52
+ 153424975 * uk_53
+ 61369990 * uk_54
+ 61369990 * uk_55
+ 66949080 * uk_56
+ 365430395 * uk_57
+ 627647625 * uk_58
+ 61369990 * uk_59
+ 131 * uk_6
+ 24547996 * uk_60
+ 24547996 * uk_61
+ 26779632 * uk_62
+ 146172158 * uk_63
+ 251059050 * uk_64
+ 24547996 * uk_65
+ 24547996 * uk_66
+ 26779632 * uk_67
+ 146172158 * uk_68
+ 251059050 * uk_69
+ 225 * uk_7
+ 24547996 * uk_70
+ 29214144 * uk_71
+ 159460536 * uk_72
+ 273882600 * uk_73
+ 26779632 * uk_74
+ 870388759 * uk_75
+ 1494942525 * uk_76
+ 146172158 * uk_77
+ 2567649375 * uk_78
+ 251059050 * uk_79
+ 22 * uk_8
+ 24547996 * uk_80
+ 166375 * uk_81
+ 66550 * uk_82
+ 66550 * uk_83
+ 72600 * uk_84
+ 396275 * uk_85
+ 680625 * uk_86
+ 66550 * uk_87
+ 26620 * uk_88
+ 26620 * uk_89
+ 2572416961 * uk_9
+
import pandas as pd
def get_toy_data_seqclassification():
train_data = {
"sentence1": [
'Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .',
"Yucaipa owned Dominick 's before selling the chain to Safeway in 1998 for $ 2.5 billion .",
"They had published an advertisement on the Internet on June 10 , offering the cargo for sale , he added .",
"Around 0335 GMT , Tab shares were up 19 cents , or 4.4 % , at A $ 4.56 , having earlier set a record high of A $ 4.57 .",
],
"sentence2": [
'Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence .',
"Yucaipa bought Dominick 's in 1995 for $ 693 million and sold it to Safeway for $ 1.8 billion in 1998 .",
"On June 10 , the ship 's owners had published an advertisement on the Internet , offering the explosives for sale .",
"Tab shares jumped 20 cents , or 4.6 % , to set a record closing high at A $ 4.57 .",
],
"label": [1, 0, 1, 0],
"idx": [0, 1, 2, 3],
}
train_dataset = pd.DataFrame(train_data)
dev_data = {
"sentence1": [
"The stock rose $ 2.11 , or about 11 percent , to close Friday at $ 21.51 on the New York Stock Exchange .",
"Revenue in the first quarter of the year dropped 15 percent from the same period a year earlier .",
"The Nasdaq had a weekly gain of 17.27 , or 1.2 percent , closing at 1,520.15 on Friday .",
"The DVD-CCA then appealed to the state Supreme Court .",
],
"sentence2": [
"PG & E Corp. shares jumped $ 1.63 or 8 percent to $ 21.03 on the New York Stock Exchange on Friday .",
"With the scandal hanging over Stewart 's company , revenue the first quarter of the year dropped 15 percent from the same period a year earlier .",
"The tech-laced Nasdaq Composite .IXIC rallied 30.46 points , or 2.04 percent , to 1,520.15 .",
"The DVD CCA appealed that decision to the U.S. Supreme Court .",
],
"label": [1, 1, 0, 1],
"idx": [4, 5, 6, 7],
}
dev_dataset = pd.DataFrame(dev_data)
test_data = {
"sentence1": [
"That compared with $ 35.18 million , or 24 cents per share , in the year-ago period .",
"Shares of Genentech , a much larger company with several products on the market , rose more than 2 percent .",
"Legislation making it harder for consumers to erase their debts in bankruptcy court won overwhelming House approval in March .",
"The Nasdaq composite index increased 10.73 , or 0.7 percent , to 1,514.77 .",
],
"sentence2": [
"Earnings were affected by a non-recurring $ 8 million tax benefit in the year-ago period .",
"Shares of Xoma fell 16 percent in early trade , while shares of Genentech , a much larger company with several products on the market , were up 2 percent .",
"Legislation making it harder for consumers to erase their debts in bankruptcy court won speedy , House approval in March and was endorsed by the White House .",
"The Nasdaq Composite index , full of technology stocks , was lately up around 18 points .",
],
"label": [0, 0, 0, 0],
"idx": [8, 10, 11, 12],
}
test_dataset = pd.DataFrame(test_data)
custom_sent_keys = ["sentence1", "sentence2"]
label_key = "label"
X_train = train_dataset[custom_sent_keys]
y_train = train_dataset[label_key]
X_val = dev_dataset[custom_sent_keys]
y_val = dev_dataset[label_key]
X_test = test_dataset[custom_sent_keys]
return X_train, y_train, X_val, y_val, X_test
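# A minimal sketch of how these splits could be fed to FLAML's AutoML for
# sequence classification (illustrative; the time budget, metric and the
# transformers backend setup are assumptions, not part of this module):
#   from flaml import AutoML
#   automl = AutoML()
#   automl.fit(X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val,
#              task="seq-classification", time_budget=60, metric="accuracy")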
def get_toy_data_multiclassclassification():
train_data = {
"text": [
"i didnt feel humiliated",
"i can go from feeling so hopeless to so damned hopeful just from being around someone who cares and is awake",
"im grabbing a minute to post i feel greedy wrong",
"i am ever feeling nostalgic about the fireplace i will know that it is still on the property",
"i am feeling grouchy",
"ive been feeling a little burdened lately wasnt sure why that was",
"ive been taking or milligrams or times recommended amount and ive fallen asleep a lot faster but i also feel like so funny",
"i feel as confused about life as a teenager or as jaded as a year old man",
"i have been with petronas for years i feel that petronas has performed well and made a huge profit",
"i feel romantic too",
"i feel like i have to make the suffering i m seeing mean something",
"i do feel that running is a divine experience and that i can expect to have some type of spiritual encounter",
],
"label": [0, 0, 3, 2, 3, 0, 5, 4, 1, 2, 0, 1],
}
train_dataset = pd.DataFrame(train_data)
dev_data = {
"text": [
"i think it s the easiest time of year to feel dissatisfied",
"i feel low energy i m just thirsty",
"i have immense sympathy with the general point but as a possible proto writer trying to find time to write in the corners of life and with no sign of an agent let alone a publishing contract this feels a little precious",
"i do not feel reassured anxiety is on each side",
],
"label": [3, 0, 1, 1],
}
dev_dataset = pd.DataFrame(dev_data)
custom_sent_keys = ["text"]
label_key = "label"
X_train = train_dataset[custom_sent_keys]
y_train = train_dataset[label_key]
X_val = dev_dataset[custom_sent_keys]
y_val = dev_dataset[label_key]
return X_train, y_train, X_val, y_val
def get_toy_data_multiplechoiceclassification():
train_data = {
"video-id": [
"anetv_fruimvo90vA",
"anetv_fruimvo90vA",
"anetv_fruimvo90vA",
"anetv_MldEr60j33M",
"lsmdc0049_Hannah_and_her_sisters-69438",
],
"fold-ind": ["10030", "10030", "10030", "5488", "17405"],
"startphrase": [
"A woman is seen running down a long track and jumping into a pit. The camera",
"A woman is seen running down a long track and jumping into a pit. The camera",
"A woman is seen running down a long track and jumping into a pit. The camera",
"A man in a white shirt bends over and picks up a large weight. He",
"Someone furiously shakes someone away. He",
],
"sent1": [
"A woman is seen running down a long track and jumping into a pit.",
"A woman is seen running down a long track and jumping into a pit.",
"A woman is seen running down a long track and jumping into a pit.",
"A man in a white shirt bends over and picks up a large weight.",
"Someone furiously shakes someone away.",
],
"sent2": ["The camera", "The camera", "The camera", "He", "He"],
"gold-source": ["gen", "gen", "gold", "gen", "gold"],
"ending0": [
"captures her as well as lifting weights down in place.",
"follows her spinning her body around and ends by walking down a lane.",
"watches her as she walks away and sticks her tongue out to another person.",
"lifts the weights over his head.",
"runs to a woman standing waiting.",
],
"ending1": [
"pans up to show another woman running down the track.",
"pans around the two.",
"captures her as well as lifting weights down in place.",
"also lifts it onto his chest before hanging it back out again.",
"tackles him into the passenger seat.",
],
"ending2": [
"follows her movements as the group members follow her instructions.",
"captures her as well as lifting weights down in place.",
"follows her spinning her body around and ends by walking down a lane.",
"spins around and lifts a barbell onto the floor.",
"pounds his fist against a cupboard.",
],
"ending3": [
"follows her spinning her body around and ends by walking down a lane.",
"follows her movements as the group members follow her instructions.",
"pans around the two.",
"bends down and lifts the weight over his head.",
"offers someone the cup on his elbow and strides out.",
],
"label": [1, 3, 0, 0, 2],
}
dev_data = {
"video-id": [
"lsmdc3001_21_JUMP_STREET-422",
"lsmdc0001_American_Beauty-45991",
"lsmdc0001_American_Beauty-45991",
"lsmdc0001_American_Beauty-45991",
],
"fold-ind": ["11783", "10977", "10970", "10968"],
"startphrase": | |
fields = ("id","state","alter")
# validate all fields
def validate(self, attrs):
try:
del attrs['alter'] # 删除alter字段
except Exception:
pass
return attrs
# validate the state field
def validate_state(self, value):
validate_states2(self.instance.state, value)
if not self.instance.state == "使用中":
if ((self.instance.create_user == self.context['request'].user.username) and
(self.instance.auditor != self.context['request'].user.username)) : # 如果当前用户为创建账号但不是审核账号
if not (self.instance.state == "新建" and (value == "审核中" or value == "作废")):
raise serializers.ValidationError("创建者只能将[新建]信息更改成[审核中]或[作废]")
if value == "使用中": # 如果新状态为使用中状态
data=SalesOrderCreateModel.objects.filter(id=self.instance.id).first().child.all().values('id')
for item in data: # 遍历所有订单子项,并将子项转换成WAIT
try:
child = SalesOrderItemCreateModel.objects.get(id=item['id'])
except Exception as e:
raise serializers.ValidationError("当前销售订单项下的子项不存在")
child.state = "等待"
child.save()
if value == "终止": # 如果新状态为终止状态
if not (self.context['request'].user.has_perm('plan.deal_salesordercreatemodel')):
raise serializers.ValidationError("当前用户不具备执行订单权限")
data = SalesOrderCreateModel.objects.filter(id=self.instance.id).first().child.all().values('id')
for item in data: # 遍历所有订单子项,并将[使用中]的子项转换成END
try:
child = SalesOrderItemCreateModel.objects.get(id=item['id'])
except Exception as e:
raise serializers.ValidationError("当前销售订单项下的子项不存在")
if child.state == "等待":
child.state = "终止"
child.save()
return value
# validate the audit record (alter) field
def validate_alter(self, value):
obj = SalesOrderCreateModel.objects.get(id=self.instance.id).alter
for data in value:
obj.add(data.id)
return value
# endregion
# region Product production task type definition serializers
class ProductTaskTypeSerialize_Create(serializers.ModelSerializer):
"""
Product production task type definition--create
"""
state = serializers.HiddenField(default="新建")
create_user = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = ProductTaskTypeModel
fields = ("id", "name", "code", "state", "classes", "parent", "attach_attribute",
"file", "desc", "auditor", "create_user")
# validate all fields
def validate(self, attrs):
if not attrs["create_user"].has_perm('plan.add_producttasktypemodel'): # 如果当前用户没有创建权限
raise serializers.ValidationError("当前用户不具备创建权限'")
if settings.SAME_USER!=True:
if attrs["create_user"].username == attrs["auditor"]: # 审核帐号不能与创建帐号相同
raise serializers.ValidationError("审核帐号不能与创建帐号相同'")
return attrs
# validate the auditor field
def validate_auditor(self, value):
try:
auditor = User.objects.get(username=value)
except Exception as e:
raise serializers.ValidationError("指定的审核账号不存在")
if not auditor.has_perm('plan.admin_producttasktypemodel'):
raise serializers.ValidationError("指定的审核账号不具备审核权限")
return value
# validate the parent category field
def validate_parent(self, value):
if self.initial_data['classes'] == "一级类别": # 判断 类别是否为一级类别
if value != None: # 一级类别不能指定父类别
raise serializers.ValidationError("处于[一级类别]的信息不能指定父类别")
else:
if value is None: # 非一级类别必须指定父类别
raise serializers.ValidationError("处于" + self.initial_data["classes"] + "类别的信息必须指定父类别")
else: # 判断指定的父类别是否符合条件
list = ProductTaskTypeModel.objects.get(id=value.id)
if list is None: # 判断 父类别是否存在
raise serializers.ValidationError("指定的父类别不存在")
elif (list.state != "使用中"): # 判断 父类别状态是否合适
raise serializers.ValidationError("指定的父类别不在--'使用中'状态")
else: # 判断 子父类别的层级是否合适
if self.initial_data['classes'] == "二级类别" and list.classes != "一级类别":
raise serializers.ValidationError("[二级类别]的父类别必须是[一级类别]'")
if self.initial_data['classes'] == "三级类别" and list.classes != "二级类别":
raise serializers.ValidationError("[三级类别]的父类别必须是[二级类别]")
if self.initial_data['classes'] == "四级类别" and list.classes != "三级类别":
raise serializers.ValidationError("[四级类别]的父类别必须是[三级类别]")
return value
class ProductTaskTypeSerialize_List(serializers.ModelSerializer):
"""
Product production task type definition--list
"""
class Meta:
model = ProductTaskTypeModel
fields = ("id", "name", "code", "state", "classes", "auditor", "create_user","create_time","update_time")
class ProductTaskCreateSerialize_Type(serializers.ModelSerializer):
"""
Product production task definition--product production task type definition
"""
class Meta:
model = ProductTaskCreateModel
fields = ("id", "name", "code", "state", "auditor", "create_user")
class ProductTaskTypeSerialize_Retrieve(serializers.ModelSerializer):
"""
Product production task type definition--retrieve
"""
file = PlanFileSerialize_List(many=True) # 类型文件信息
alter = PlanAlterRecordSerialize_List(many=True) # 审核记录信息
parent = ProductTaskTypeSerialize_List(required=False) # 父类别信息
# productTaskType_child = ProductTaskTypeSerialize_List(many=True)# 子类别信息
# productTaskType_item = ProductTaskCreateSerialize_Type(many=True)# 附属项信息
class Meta:
model = ProductTaskTypeModel
fields = "__all__"
class ProductTaskTypeSerialize_Update(serializers.ModelSerializer):
"""
Product production task type definition--update
"""
class Meta:
model = ProductTaskTypeModel
fields = ("id", "name", "code", "classes", "parent", "attach_attribute",
"file", "desc", "auditor",)
# validate all fields
def validate(self, attrs):
if self.instance.state != '新建': # 如果不是新建状态 不能更改信息
raise serializers.ValidationError("当前信息已提交,禁止更改")
return attrs
# validate the auditor field
def validate_auditor(self, value):
if self.instance.state != '新建': # 如果不是新建状态 该字段不能更改
raise serializers.ValidationError("当前信息已提交,禁止更改")
if settings.SAME_USER != True:
if self.instance.create_user == value: # 审核帐号不能与创建帐号相同
raise serializers.ValidationError("审核帐号不能与创建帐号相同'")
try:
auditor = User.objects.get(username=value)
except Exception as e:
raise serializers.ValidationError("指定的审核账号不存在")
if not auditor.has_perm('plan.admin_producttasktypemodel'):
raise serializers.ValidationError("指定的审核账号不具备审核权限")
return value
# validate the parent category field
def validate_parent(self, value):
if self.instance.state != '新建': # 如果不是新建状态 该字段不能更改
raise serializers.ValidationError("当前信息已提交,禁止更改")
if self.initial_data['classes'] == "一级类别": # 判断 类别是否为一级类别
if value != None: # 一级类别不能指定父类别
raise serializers.ValidationError("处于[一级类别]的信息不能指定父类别")
else:
if value is None: # 非一级类别必须指定父类别
raise serializers.ValidationError("处于" + self.initial_data["classes"] + "类别的信息必须指定父类别")
else: # 判断指定的父类别是否符合条件
list = ProductTaskTypeModel.objects.get(id=value.id)
if list is None: # 判断 父类别是否存在
raise serializers.ValidationError("指定的父类别不存在")
elif (list.state != "使用中"): # 判断 父类别状态是否合适
raise serializers.ValidationError("指定的父类别不在--'使用状态'")
else: # 判断 子父类别的层级是否合适
if self.initial_data['classes'] == "二级类别" and list.classes != "一级类别":
raise serializers.ValidationError("[二级类别]的父类别必须是[一级类别]'")
if self.initial_data['classes'] == "三级类别" and list.classes != "二级类别":
raise serializers.ValidationError("[三级类别]的父类别必须是[二级类别]")
if self.initial_data['classes'] == "四级类别" and list.classes != "三级类别":
raise serializers.ValidationError("[四级类别]的父类别必须是[三级类别]")
return value
class ProductTaskTypeSerialize_Partial(serializers.ModelSerializer):
"""
Product production task type definition--partial
"""
class Meta:
model = ProductTaskTypeModel
fields = ("id", "state", "alter")
# validate all fields
def validate(self, attrs):
try:
del attrs['alter'] # 删除alter字段
except Exception:
pass
return attrs
# validate the state field
def validate_state(self, value):
validate_states(self.instance.state, value)
if ((self.instance.create_user == self.context['request'].user.username) and\
(self.instance.auditor != self.context['request'].user.username)): # 如果当前用户为创建账号但不是审核账号
if not (self.instance.state == "新建" and (value == "审核中" or value == "作废")):
raise serializers.ValidationError("创建者只能将[新建]信息更改成[审核中]或[作废]")
return value
# validate the audit record (alter) field
def validate_alter(self, value):
obj = ProductTaskTypeModel.objects.get(id=self.instance.id).alter
for data in value:
obj.add(data.id)
return value
# endregion
# region Product production task type hierarchy serializers
class ProductTaskTypeSerialize_Fourth(serializers.ModelSerializer):
"""
Product production task type hierarchy--fourth level
"""
class Meta:
model = ProductTaskTypeModel
fields = ("id", "name", "code", "state")
class ProductTaskTypeSerialize_Third(serializers.ModelSerializer):
"""
Product production task type definition--third level
"""
productTaskType_child = ProductTaskTypeSerialize_Fourth(many=True) # 子类别信息
class Meta:
model = ProductTaskTypeModel
fields = ("id", "name", "code", "state", "productTaskType_child")
class ProductTaskTypeSerialize_Second(serializers.ModelSerializer):
"""
Product production task type definition--second level
"""
productTaskType_child = ProductTaskTypeSerialize_Third(many=True) # 子类别信息
class Meta:
model = ProductTaskTypeModel
fields = ("id", "name", "code", "state", "productTaskType_child")
class ProductTaskTypeSerialize_First(serializers.ModelSerializer):
"""
Product production task type definition--first level
"""
productTaskType_child = ProductTaskTypeSerialize_Second(many=True) # 子类别信息
class Meta:
model = ProductTaskTypeModel
fields = ("id", "name", "code", "state","productTaskType_child")
# endregion
# region Product production sub-task creation serializers
class ProductTaskItemCreateSerialize_Create(serializers.ModelSerializer):
"""
Product production task sub-item creation--create
"""
state = serializers.HiddenField(default="新建")
create_user = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = ProductTaskItemCreateModel
fields = ("id","state", "salesOrderItem","route_id","sum", "file", "attribute1", "attribute2",
"attribute3", "attribute4", "attribute5", "desc", "create_user")
# validate all fields
def validate(self, attrs):
if 'route_id' in attrs.keys():
if attrs['route_id'] is not '':
try:
route = ProductRouteDefinitionModel.objects.get(id=attrs["route_id"]) # 判断指定的生产线路是否存在
except Exception as e:
raise serializers.ValidationError("指定的生产线路不存在")
if (route.state != "使用中"): # 判断 状态是否合适
raise serializers.ValidationError("指定的生产路线不在--'使用状态'")
attrs["routeType_code"] = route.type.code # 获取生产线路类型编码
attrs["routeType_name"] = route.type.name # 获取生产线路类型名称
attrs["route_code"] = route.code # 获取生产线路编码
attrs["route_name"] = route.name # 获取生产线路名称
return attrs
# validate the sales order sub-item field
def validate_salesOrderItem(self, value):
list = SalesOrderItemCreateModel.objects.get(id=value.id)
if list is None: # 判断 父类别是否存在
raise serializers.ValidationError("指定的订单子项不存在")
elif (list.state != "等待"): # 判断 父类别状态是否合适
raise serializers.ValidationError("指定的订单子项不在--'等待状态'")
return value
class SalesOrderCreateSerialize_ProductTaskItem(serializers.ModelSerializer):
"""
Sales order creation--sales order sub-item creation--product production task sub-item creation
"""
class Meta:
model = SalesOrderCreateModel
fields = ("id", "name", "code","state")
class SalesOrderItemCreateSerialize_ProductTaskItem(serializers.ModelSerializer):
"""
Sales order sub-item creation--product production task sub-item creation
"""
salesOrderItem_parent=SalesOrderCreateSerialize_ProductTaskItem(many=True)
class Meta:
model = SalesOrderItemCreateModel
fields = ("id","state","productType_code","productType_name","product_id","product_name","product_code","batch","salesOrderItem_parent")
class ProductTaskItemCreateSerialize_List(serializers.ModelSerializer):
"""
Product production task sub-item creation--list
"""
salesOrderItem =SalesOrderItemCreateSerialize_ProductTaskItem()
class Meta:
model = ProductTaskItemCreateModel
fields = "__all__"
class ProductTaskItemCreateSerialize_Retrieve(serializers.ModelSerializer):
"""
Product production task sub-item creation--Retrieve
"""
salesOrderItem =SalesOrderItemCreateSerialize_ProductTaskItem()
productTaskItem_parent=ProductTaskCreateSerialize_ProductTaskItem(many=True)
file = PlanFileSerialize_List(many=True) # 类型文件信息
class Meta:
model = ProductTaskItemCreateModel
fields = "__all__"
class ProductTaskItemCreateSerialize_Partial(serializers.ModelSerializer):
"""
Product production task sub-item creation--partial
"""
class Meta:
model = ProductTaskItemCreateModel
fields = ("id", "state","completed","equipment_id","attribute1","attribute2","attribute3","attribute4","attribute5","attribute6","attribute7","attribute8","attribute9","attribute10",
"attribute11", "attribute12", "attribute13", "attribute14", "attribute15", "attribute16", "attribute17",
"attribute18", "attribute19", "attribute20", )
def validate(self, attrs):
if 'state' in attrs.keys() and 'completed' in attrs.keys():
raise serializers.ValidationError("不能同时更新状态与完成数量")
if 'equipment_id' in attrs.keys():
if attrs['equipment_id'] is not '':
try:
equipment = EquipmentAccountModel.objects.get(id=attrs["equipment_id"]) # 判断指定的设备是否存在
except Exception as e:
raise serializers.ValidationError("指定的设备不存在")
if (equipment.state != "使用中"): # 判断 状态是否合适
raise serializers.ValidationError("指定的设备不在--'使用状态'")
attrs["equipmentType_code"] = equipment.type.code # 获取设备类型编码
attrs["equipmentType_name"] = equipment.type.name # 获取设备类型名称
attrs["equipment_code"] = equipment.code # 获取设备编码
attrs["equipment_name"] = equipment.name # 获取设备名称
if 'team_id' in attrs.keys():
if attrs['team_id'] is not '':
try:
team = TeamInforDefinitionModel.objects.get(id=attrs["team_id"]) # 判断指定的班组是否存在
except Exception as e:
raise serializers.ValidationError("指定的班组不存在")
if (team.state != "使用中"): # 判断 状态是否合适
raise serializers.ValidationError("指定的班组不在--'使用状态'")
attrs["team_code"] = team.code # 获取班组编码
attrs["team_name"] = team.name # 获取班组名称
return attrs
# validate the state field
def validate_state(self, value):
parentState = ProductTaskItemCreateModel.objects.filter(
id=self.instance.id).first().productTaskItem_parent.all().values('state')
if (parentState[0]['state'] != "使用中"):
raise serializers.ValidationError("当前任务不处于[使用中状态],禁止更改任务子项订单状态")
if not ((self.instance.state == "等待" and (value == "挂起" or value == "加工中" or value == "终止"))or
(self.instance.state == "挂起" and (value == "等待" or value == "终止")) or
(self.instance.state == "加工中" and (value == "挂起" or value == "等待" or value == "终止"))):
raise serializers.ValidationError("子任务不能从"+self.instance.state+"更改成"+value)
if not (self.context['request'].user.has_perm('plan.deal_producttaskcreatemodel')):
raise serializers.ValidationError("当前用户不具备执行任务权限")
if value == "终止": # 如果新状态为终止状态
# walk every task that owns this sub-item; if all of a task's sub-items are terminated (END), mark that task terminated as well
data1 = ProductTaskItemCreateModel.objects.filter(id=self.instance.id).first().productTaskItem_parent.all().values('id')
for item1 in data1: # 遍历所有关联此子项的父项
count = 1
parentModel = ProductTaskCreateModel.objects.filter(id=item1['id']).first()
data2 = parentModel.child.all().values('id')
for item2 in data2: # 遍历父项的所有子项
child = ProductTaskItemCreateModel.objects.filter(id=item2['id']).first()
if child.state == "终止":
count += 1
if count == len(data2):
parentModel.state = "终止"
parentModel.save()
# get the quantity still unfinished, then decrease the order's allocated quantity and increase its completed quantity
return value
# validate the completed total field
def validate_completed(self, value):
if not (self.instance.state == "加工中"):
raise serializers.ValidationError("只有在[加工中状态]下,才能更新加工完成数")
if value>=self.instance.sum: #当完成总数大于分配总数时,自动更新订单完成数量
list = SalesOrderItemCreateModel.objects.get(id=self.instance.salesOrderItem_id)
list.completed+=self.instance.sum
list.save()
self.instance.state = "完成"
data1 =list.salesOrderItem_parent.all().values('id')
for item1 in data1: # 遍历与此订单子项相关的订单,判断订单下所有的子订单是否完成
count = 0
parentModel = SalesOrderCreateModel.objects.filter(id=item1['id']).first()
data2 = parentModel.child.all().values('id')
for item2 in data2: # 遍历父项的所有子项
child = SalesOrderItemCreateModel.objects.filter(id=item2['id']).first()
if child.state == "终止":
count += 1
if child.completed >= child.sum:
if child.state == "等待":
child.state="完成"
child.save()
count += 1
if count == len(data2):
parentModel.state = "完成"
parentModel.save()
# walk every task that owns this sub-item; if all of a task's sub-items are DONE or terminated, mark that task DONE
value1 = ProductTaskItemCreateModel.objects.filter(
id=self.instance.id).first().productTaskItem_parent.all().values('id')
for item1 in value1: # 遍历所有关联此子项的父项
count = 1
parentModel = ProductTaskCreateModel.objects.filter(id=item1['id']).first()
value2 = parentModel.child.all().values('id')
for item2 in value2: # 遍历父项的所有子项
child = ProductTaskItemCreateModel.objects.filter(id=item2['id']).first()
if child.state == "终止":
count += 1
if child.state == "完成":
count += 1
if count == len(value2):
parentModel.state = "完成"
parentModel.save()
return value
# endregion
# region Product production task creation serializers
== subject_tokens:
if forbidden_index is None:
labels[index][label_subject] = 1
for i in range(subject_tokens_len - 1):
labels[index + i + 1][1] = 1
break
elif index < forbidden_index or index >= forbidden_index + len(
object_tokens):
labels[index][label_subject] = 1
for i in range(subject_tokens_len - 1):
labels[index + i + 1][1] = 1
break
# if token wasn't assigned as any "B"/"I" tag, give it an "O" tag for outside
for i in range(seq_len):
if labels[i] == [0] * num_labels:
labels[i][0] = 1
return labels
def convert_example_to_feature(
example,
tokenizer: BertTokenizer,
chineseandpunctuationextractor: ChineseAndPunctuationExtractor,
label_map,
max_length: Optional[int]=512,
pad_to_max_length: Optional[bool]=None):
spo_list = example['spo_list'] if "spo_list" in example.keys() else None
text_raw = example['text']
sub_text = []
buff = ""
for char in text_raw:
if chineseandpunctuationextractor.is_chinese_or_punct(char):
if buff != "":
sub_text.append(buff)
buff = ""
sub_text.append(char)
else:
buff += char
if buff != "":
sub_text.append(buff)
tok_to_orig_start_index = []
tok_to_orig_end_index = []
orig_to_tok_index = []
tokens = []
text_tmp = ''
for (i, token) in enumerate(sub_text):
orig_to_tok_index.append(len(tokens))
sub_tokens = tokenizer._tokenize(token)
text_tmp += token
for sub_token in sub_tokens:
tok_to_orig_start_index.append(len(text_tmp) - len(token))
tok_to_orig_end_index.append(len(text_tmp) - 1)
tokens.append(sub_token)
if len(tokens) >= max_length - 2:
break
else:
continue
break
seq_len = len(tokens)
# 2 tags for each predicate + I tag + O tag
num_labels = 2 * (len(label_map.keys()) - 2) + 2
# initialize tag
labels = [[0] * num_labels for i in range(seq_len)]
if spo_list is not None:
labels = parse_label(spo_list, label_map, tokens, tokenizer)
# add [CLS] and [SEP] token, they are tagged into "O" for outside
if seq_len > max_length - 2:
tokens = tokens[0:(max_length - 2)]
labels = labels[0:(max_length - 2)]
tok_to_orig_start_index = tok_to_orig_start_index[0:(max_length - 2)]
tok_to_orig_end_index = tok_to_orig_end_index[0:(max_length - 2)]
tokens = ["[CLS]"] + tokens + ["[SEP]"]
# "O" tag for [PAD], [CLS], [SEP] token
outside_label = [[1] + [0] * (num_labels - 1)]
labels = outside_label + labels + outside_label
tok_to_orig_start_index = [-1] + tok_to_orig_start_index + [-1]
tok_to_orig_end_index = [-1] + tok_to_orig_end_index + [-1]
if seq_len < max_length:
tokens = tokens + ["[PAD]"] * (max_length - seq_len - 2)
labels = labels + outside_label * (max_length - len(labels))
tok_to_orig_start_index = tok_to_orig_start_index + [-1] * (
max_length - len(tok_to_orig_start_index))
tok_to_orig_end_index = tok_to_orig_end_index + [-1] * (
max_length - len(tok_to_orig_end_index))
token_ids = tokenizer.convert_tokens_to_ids(tokens)
return InputFeature(
input_ids=np.array(token_ids),
seq_len=np.array(seq_len),
tok_to_orig_start_index=np.array(tok_to_orig_start_index),
tok_to_orig_end_index=np.array(tok_to_orig_end_index),
labels=np.array(labels), )
# dataset
class DuIEDataset(paddle.io.Dataset):
"""
Dataset of DuIE.
"""
def __init__(
self,
input_ids: List[Union[List[int], np.ndarray]],
seq_lens: List[Union[List[int], np.ndarray]],
tok_to_orig_start_index: List[Union[List[int], np.ndarray]],
tok_to_orig_end_index: List[Union[List[int], np.ndarray]],
labels: List[Union[List[int], np.ndarray, List[str], List[Dict]]]):
super(DuIEDataset, self).__init__()
self.input_ids = input_ids
self.seq_lens = seq_lens
self.tok_to_orig_start_index = tok_to_orig_start_index
self.tok_to_orig_end_index = tok_to_orig_end_index
self.labels = labels
def __len__(self):
if isinstance(self.input_ids, np.ndarray):
return self.input_ids.shape[0]
else:
return len(self.input_ids)
def __getitem__(self, item):
return {
"input_ids": np.array(self.input_ids[item]),
"seq_lens": np.array(self.seq_lens[item]),
"tok_to_orig_start_index":
np.array(self.tok_to_orig_start_index[item]),
"tok_to_orig_end_index": np.array(self.tok_to_orig_end_index[item]),
# If model inputs is generated in `collate_fn`, delete the data type casting.
"labels": np.array(
self.labels[item], dtype=np.float32),
}
@classmethod
def from_file(cls,
file_path: Union[str, os.PathLike],
tokenizer: BertTokenizer,
max_length: Optional[int]=512,
pad_to_max_length: Optional[bool]=None):
assert os.path.exists(file_path) and os.path.isfile(
            file_path), f"{file_path} does not exist or is not a file."
label_map_path = os.path.join(
os.path.dirname(file_path), "predicate2id.json")
assert os.path.exists(label_map_path) and os.path.isfile(
label_map_path
        ), f"{label_map_path} does not exist or is not a file."
with open(label_map_path, 'r', encoding='utf8') as fp:
label_map = json.load(fp)
chineseandpunctuationextractor = ChineseAndPunctuationExtractor()
input_ids, seq_lens, tok_to_orig_start_index, tok_to_orig_end_index, labels = (
[] for _ in range(5))
dataset_scale = sum(1 for line in open(
file_path, 'r', encoding="UTF-8"))
logger.info("Preprocessing data, loaded from %s" % file_path)
with open(file_path, "r", encoding="utf-8") as fp:
lines = fp.readlines()
for line in tqdm(lines):
example = json.loads(line)
input_feature = convert_example_to_feature(
example, tokenizer, chineseandpunctuationextractor,
label_map, max_length, pad_to_max_length)
input_ids.append(input_feature.input_ids)
seq_lens.append(input_feature.seq_len)
tok_to_orig_start_index.append(
input_feature.tok_to_orig_start_index)
tok_to_orig_end_index.append(
input_feature.tok_to_orig_end_index)
labels.append(input_feature.labels)
return cls(input_ids, seq_lens, tok_to_orig_start_index,
tok_to_orig_end_index, labels)
@dataclass
class DataCollator:
"""
Collator for DuIE.
"""
def __call__(self, examples: List[Dict[str, Union[list, np.ndarray]]]):
batched_input_ids = np.stack([x['input_ids'] for x in examples])
seq_lens = np.stack([x['seq_lens'] for x in examples])
tok_to_orig_start_index = np.stack(
[x['tok_to_orig_start_index'] for x in examples])
tok_to_orig_end_index = np.stack(
[x['tok_to_orig_end_index'] for x in examples])
labels = np.stack([x['labels'] for x in examples])
return (batched_input_ids, seq_lens, tok_to_orig_start_index,
tok_to_orig_end_index, labels)
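# --- Added usage sketch (assumed, not part of the original pipeline) ---
# Shows how DuIEDataset and DataCollator above can be wired together on synthetic arrays; the real
# training code below builds the dataset with DuIEDataset.from_file on the DuIE json files instead.
def _demo_batching(n=4, max_len=8, num_labels=6):
    import numpy as np
    import paddle
    dataset = DuIEDataset(
        input_ids=[np.zeros(max_len, dtype=np.int64) for _ in range(n)],
        seq_lens=[np.array(max_len) for _ in range(n)],
        tok_to_orig_start_index=[np.full(max_len, -1) for _ in range(n)],
        tok_to_orig_end_index=[np.full(max_len, -1) for _ in range(n)],
        labels=[np.zeros((max_len, num_labels)) for _ in range(n)])
    loader = paddle.io.DataLoader(
        dataset, batch_size=2, collate_fn=DataCollator(), return_list=True)
    for input_ids, seq_lens, start_idx, end_idx, labels in loader:
        # e.g. input_ids: [2, 8], labels: [2, 8, 6]
        return input_ids.shape, labels.shape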
# relation_extraction task
class BCELossForDuIE(nn.Layer):
def __init__(self, ):
super(BCELossForDuIE, self).__init__()
self.criterion = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, logits, labels, mask):
loss = self.criterion(logits, labels)
mask = paddle.cast(mask, 'float32')
loss = loss * mask.unsqueeze(-1)
loss = paddle.sum(loss.mean(axis=2), axis=1) / paddle.sum(mask, axis=1)
loss = loss.mean()
return loss
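# --- Added illustration (assumption: shapes follow the training loop further below) ---
# A self-contained check of the masked loss above: positions whose token id is 0/1/2 (padding and
# special tokens) are masked out and contribute nothing to the averaged BCE.
def _demo_masked_bce():
    import paddle
    batch, seq_len, num_labels = 2, 4, 6
    logits = paddle.zeros([batch, seq_len, num_labels])
    labels = paddle.zeros([batch, seq_len, num_labels])
    input_ids = paddle.to_tensor([[101, 5, 0, 0], [101, 7, 8, 0]])  # 0 acts as [PAD]
    mask = (input_ids != 0).logical_and(input_ids != 1).logical_and(input_ids != 2)
    loss = BCELossForDuIE()(logits, labels, mask)
    return float(loss)  # mean BCE over the unmasked positions only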
def set_random_seed(seed):
"""sets random seed"""
random.seed(seed)
np.random.seed(seed)
paddle.seed(seed)
@Relation_ExtractionModel.register("Relation_Extraction", "Paddle")
class Relation_ExtractionPaddle(Relation_ExtractionModel):
def __init__(self, args, name: str = 'Relation_ExtractionModel', ):
super().__init__()
self.name = name
self.args = args
@paddle.no_grad()
def evaluate(self,model, criterion, data_loader, file_path, mode):
"""
mode eval:
eval on development set and compute P/R/F1, called between training.
mode predict:
eval on development / test set, then write predictions to \
predict_test.json and predict_test.json.zip \
under args.data_path dir for later submission or evaluation.
"""
example_all = []
with open(file_path, "r", encoding="utf-8") as fp:
for line in fp:
example_all.append(json.loads(line))
id2spo_path = os.path.join(os.path.dirname(file_path), "id2spo.json")
with open(id2spo_path, 'r', encoding='utf8') as fp:
id2spo = json.load(fp)
model.eval()
loss_all = 0
eval_steps = 0
formatted_outputs = []
current_idx = 0
for batch in tqdm(data_loader, total=len(data_loader)):
eval_steps += 1
input_ids, seq_len, tok_to_orig_start_index, tok_to_orig_end_index, labels = batch
logits = model(input_ids=input_ids)
mask = (input_ids != 0).logical_and((input_ids != 1)).logical_and((input_ids != 2))
loss = criterion(logits, labels, mask)
loss_all += loss.numpy().item()
probs = F.sigmoid(logits)
logits_batch = probs.numpy()
seq_len_batch = seq_len.numpy()
tok_to_orig_start_index_batch = tok_to_orig_start_index.numpy()
tok_to_orig_end_index_batch = tok_to_orig_end_index.numpy()
formatted_outputs.extend(decoding(example_all[current_idx: current_idx+len(logits)],
id2spo,
logits_batch,
seq_len_batch,
tok_to_orig_start_index_batch,
tok_to_orig_end_index_batch))
current_idx = current_idx+len(logits)
loss_avg = loss_all / eval_steps
print("eval loss: %f" % (loss_avg))
if mode == "predict":
predict_file_path = os.path.join(args.data_path, 'predictions.json')
else:
predict_file_path = os.path.join(args.data_path, 'predict_eval.json')
predict_zipfile_path = write_prediction_results(formatted_outputs,
predict_file_path)
if mode == "eval":
precision, recall, f1 = get_precision_recall_f1(file_path,
predict_zipfile_path)
os.system('rm {} {}'.format(predict_file_path, predict_zipfile_path))
return precision, recall, f1
elif mode != "predict":
raise Exception("wrong mode for eval func")
def run(self):
args = self.args
paddle.set_device(args.device)
rank = paddle.distributed.get_rank()
if paddle.distributed.get_world_size() > 1:
paddle.distributed.init_parallel_env()
# Reads label_map (predicate2id.json): the ids of the B/I/O tags.
label_map_path = os.path.join(args.data_path, "predicate2id.json")
if not (os.path.exists(label_map_path) and os.path.isfile(label_map_path)):
sys.exit("{} dose not exists or is not a file.".format(label_map_path))
with open(label_map_path, 'r', encoding='utf8') as fp:
label_map = json.load(fp) # dict
num_classes = (len(label_map.keys()) - 2) * 2 + 2 # each predicate gets separate B tags for subject and object (hence *2), plus the I and O tags
# Loads pretrained model BERT
model = BertForTokenClassification.from_pretrained(
args.model_name_or_path, num_classes=num_classes)
model = paddle.DataParallel(model)
tokenizer = BertTokenizer.from_pretrained(args.model_name_or_path)
criterion = BCELossForDuIE()
# Loads dataset.
train_dataset = DuIEDataset.from_file(
os.path.join(args.data_path, 'train_data.json'), tokenizer,
args.max_seq_length, True)
train_batch_sampler = paddle.io.DistributedBatchSampler(
train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True)
collator = DataCollator()
train_data_loader = DataLoader(
dataset=train_dataset,
batch_sampler=train_batch_sampler,
collate_fn=collator,
return_list=True)
eval_file_path = os.path.join(args.data_path, 'dev_data.json')
test_dataset = DuIEDataset.from_file(eval_file_path, tokenizer,
args.max_seq_length, True)
test_batch_sampler = paddle.io.BatchSampler(
test_dataset, batch_size=args.batch_size, shuffle=False, drop_last=True)
test_data_loader = DataLoader(
dataset=test_dataset,
batch_sampler=test_batch_sampler,
collate_fn=collator,
return_list=True)
# Defines learning rate strategy.
steps_by_epoch = len(train_data_loader)
num_training_steps = steps_by_epoch * args.num_train_epochs
lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps,
args.warmup_ratio)
# Generate parameter names needed to perform weight decay.
# All bias and LayerNorm parameters are excluded.
decay_params = [
p.name for n, p in model.named_parameters()
if not any(nd in n for nd in ["bias", "norm"])
]
optimizer = paddle.optimizer.AdamW(
learning_rate=lr_scheduler,
parameters=model.parameters(),
weight_decay=args.weight_decay,
apply_decay_param_fun=lambda x: x in decay_params)
# Starts training.
global_step = 0
logging_steps = 50
save_steps = 10000
tic_train = time.time()
for epoch in range(args.num_train_epochs):
print("\n=====start training of %d epochs=====" % epoch)
tic_epoch = time.time()
model.train()
for step, batch in enumerate(train_data_loader):
input_ids, seq_lens, tok_to_orig_start_index, tok_to_orig_end_index, labels = batch
logits = model(input_ids=input_ids)
mask = (input_ids != 0).logical_and((input_ids != 1)).logical_and(
(input_ids != 2))
loss = criterion(logits, labels, mask)
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.clear_grad()
loss_item = loss.numpy().item()
global_step += 1
if global_step % logging_steps == 0 and rank == 0:
print(
"epoch: %d / %d, steps: %d / %d, loss: %f, speed: %.2f step/s"
% (epoch, args.num_train_epochs, step, steps_by_epoch,
loss_item, logging_steps / (time.time() - tic_train)))
tic_train = time.time()
if global_step % save_steps == 0 and rank == 0:
print("\n=====start evaluating ckpt of %d steps=====" %
global_step)
precision, recall, f1 = self.evaluate(
model, criterion, test_data_loader, eval_file_path, "eval")
print("precision: %.2f\t recall: %.2f\t f1: %.2f\t" %
(100 * precision, 100 * recall, 100 * f1))
print("saving checkpoing model_%d.pdparams to %s " %
(global_step, args.output_dir))
paddle.save(model.state_dict(),
os.path.join(args.output_dir,
"model_%d.pdparams" % global_step))
model.train() # back to train mode
tic_epoch = time.time() - tic_epoch
print("epoch time | |
data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'STC05', Properties(desc=u'Monetary Amount', req_sit=u'R', data_type=(u'R',u'1',u'18'), position=5,
codes=[] ) ),
Element( u'STC06', Properties(desc=u'Date', req_sit=u'S', data_type=(u'DT',u'8',u'8'), position=6,
codes=[] ) ),
Element( u'STC07', Properties(desc=u'Payment Method Code', req_sit=u'S', data_type=(u'ID',u'3',u'3'), position=7,
codes=[u'ACH', u'BOP', u'CHK', u'FWT', u'NON'] ) ),
Element( u'STC08', Properties(desc=u'Date', req_sit=u'S', data_type=(u'DT',u'8',u'8'), position=8,
codes=[] ) ),
Element( u'STC09', Properties(desc=u'Check Number', req_sit=u'S', data_type=(u'AN',u'1',u'16'), position=9,
codes=[] ) ),
Composite( u'C043', Properties(req_sit=u'S',refdes='',seq=u'10',desc=u'Health Care Claim Status'),
Element( u'STC10-01', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=0,
codes=[] ) ),
Element( u'STC10-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'STC10-03', Properties(desc=u'Entity Identifier Code', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[] ) ),
),
Composite( u'C043', Properties(req_sit=u'S',refdes='',seq=u'11',desc=u'Health Care Claim Status'),
Element( u'STC11-01', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=0,
codes=[] ) ),
Element( u'STC11-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'STC11-03', Properties(desc=u'Entity Identifier Code', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[] ) ),
),
Element( u'STC12', Properties(desc=u'Free-form Message Text', req_sit=u'N', data_type=(u'AN',u'1',u'264'), position=12,
codes=[] ) ),
),
Segment( u'REF', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'110',desc=u'Payer Claim Identification Number'),
Element( u'REF01', Properties(desc=u'Reference Identification Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'1K'] ) ),
Element( u'REF02', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=2,
codes=[] ) ),
Element( u'REF03', Properties(desc=u'Description', req_sit=u'N', data_type=(u'AN',u'1',u'80'), position=3,
codes=[] ) ),
Composite( u'C040', Properties(req_sit=u'N',refdes='',seq=u'04',desc=u'Reference Identifier'),
),
),
Segment( u'REF', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'110',desc=u'Institutional Bill Type Identification'),
Element( u'REF01', Properties(desc=u'Reference Identification Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'BLT'] ) ),
Element( u'REF02', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=2,
codes=[] ) ),
Element( u'REF03', Properties(desc=u'Description', req_sit=u'N', data_type=(u'AN',u'1',u'80'), position=3,
codes=[] ) ),
Composite( u'C040', Properties(req_sit=u'N',refdes='',seq=u'04',desc=u'Reference Identifier'),
),
),
Segment( u'REF', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'110',desc=u'Medical Record Identification'),
Element( u'REF01', Properties(desc=u'Reference Identification Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'EA'] ) ),
Element( u'REF02', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=2,
codes=[] ) ),
Element( u'REF03', Properties(desc=u'Description', req_sit=u'N', data_type=(u'AN',u'1',u'80'), position=3,
codes=[] ) ),
Composite( u'C040', Properties(req_sit=u'N',refdes='',seq=u'04',desc=u'Reference Identifier'),
),
),
Segment( u'DTP', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'120',desc=u'Claim Service Date'),
Element( u'DTP01', Properties(desc=u'Date/Time Qualifier', req_sit=u'R', data_type=(u'ID',u'3',u'3'), position=1,
codes=[u'232'] ) ),
Element( u'DTP02', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'RD8'] ) ),
Element( u'DTP03', Properties(desc=u'Date Time Period', req_sit=u'R', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
),
parsed_277_2220D,
)
parsed_277_2100E = Loop( u'2100E', Properties(looptype='',repeat=u'1',pos=u'050',req_sit=u'R',desc=u'Dependent Name'),
Segment( u'NM1', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'050',desc=u'Dependent Name'),
Element( u'NM101', Properties(desc=u'Entity Identifier Code', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'QC'] ) ),
Element( u'NM102', Properties(desc=u'Entity Type Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=2,
codes=[u'1'] ) ),
Element( u'NM103', Properties(desc=u'Name Last or Organization Name', req_sit=u'R', data_type=(u'AN',u'1',u'60'), position=3,
codes=[] ) ),
Element( u'NM104', Properties(desc=u'Name First', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=4,
codes=[] ) ),
Element( u'NM105', Properties(desc=u'Name Middle', req_sit=u'S', data_type=(u'AN',u'1',u'25'), position=5,
codes=[] ) ),
Element( u'NM106', Properties(desc=u'Name Prefix', req_sit=u'S', data_type=(u'AN',u'1',u'10'), position=6,
codes=[] ) ),
Element( u'NM107', Properties(desc=u'Name Suffix', req_sit=u'S', data_type=(u'AN',u'1',u'10'), position=7,
codes=[] ) ),
Element( u'NM108', Properties(desc=u'Identification Code Qualifier', req_sit=u'S', data_type=(u'ID',u'1',u'2'), position=8,
codes=[u'MI', u'ZZ'] ) ),
Element( u'NM109', Properties(desc=u'Identification Code', req_sit=u'S', data_type=(u'AN',u'2',u'80'), position=9,
codes=[] ) ),
Element( u'NM110', Properties(desc=u'Entity Relationship Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=10,
codes=[] ) ),
Element( u'NM111', Properties(desc=u'Entity Identifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=11,
codes=[] ) ),
),
)
parsed_277_2220E = Loop( u'2220E', Properties(looptype='',repeat=u'>1',pos=u'180',req_sit=u'S',desc=u'Service Line Information'),
Segment( u'SVC', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'180',desc=u'Service Line Information'),
Composite( u'C003', Properties(req_sit=u'R',refdes='',seq=u'01',desc=u'Composite Medical Procedure Identifier'),
Element( u'SVC01-01', Properties(desc=u'Product/Service ID Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=0,
codes=[u'AD', u'CI', u'HC', u'ID', u'IV', u'N1', u'N2', u'N3', u'N4', u'ND', u'NH', u'NU', u'RB'] ) ),
Element( u'SVC01-02', Properties(desc=u'Product/Service ID', req_sit=u'R', data_type=(u'AN',u'1',u'48'), position=1,
codes=[] ) ),
Element( u'SVC01-03', Properties(desc=u'Procedure Modifier', req_sit=u'S', data_type=(u'AN',u'2',u'2'), position=2,
codes=[] ) ),
Element( u'SVC01-04', Properties(desc=u'Procedure Modifier', req_sit=u'S', data_type=(u'AN',u'2',u'2'), position=3,
codes=[] ) ),
Element( u'SVC01-05', Properties(desc=u'Procedure Modifier', req_sit=u'S', data_type=(u'AN',u'2',u'2'), position=4,
codes=[] ) ),
Element( u'SVC01-06', Properties(desc=u'Procedure Modifier', req_sit=u'S', data_type=(u'AN',u'2',u'2'), position=5,
codes=[] ) ),
Element( u'SVC01-07', Properties(desc=u'Description', req_sit=u'N', data_type=(u'AN',u'1',u'80'), position=6,
codes=[] ) ),
),
Element( u'SVC02', Properties(desc=u'Monetary Amount', req_sit=u'R', data_type=(u'R',u'1',u'18'), position=2,
codes=[] ) ),
Element( u'SVC03', Properties(desc=u'Monetary Amount', req_sit=u'R', data_type=(u'R',u'1',u'18'), position=3,
codes=[] ) ),
Element( u'SVC04', Properties(desc=u'Product/Service ID', req_sit=u'S', data_type=(u'AN',u'1',u'48'), position=4,
codes=[] ) ),
Element( u'SVC05', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Composite( u'C003', Properties(req_sit=u'N',refdes='',seq=u'06',desc=u'Composite Medical Procedure Identifier'),
),
Element( u'SVC07', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=7,
codes=[] ) ),
),
Segment( u'STC', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'190',desc=u'Service Line Status Information'),
Composite( u'C043', Properties(req_sit=u'R',refdes='',seq=u'01',desc=u'Health Care Claim Status'),
Element( u'STC01-01', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=0,
codes=[] ) ),
Element( u'STC01-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'STC01-03', Properties(desc=u'Entity Identifier Code', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[] ) ),
),
Element( u'STC02', Properties(desc=u'Date', req_sit=u'R', data_type=(u'DT',u'8',u'8'), position=2,
codes=[] ) ),
Element( u'STC03', Properties(desc=u'Action Code', req_sit=u'N', data_type=(u'ID',u'1',u'2'), position=3,
codes=[] ) ),
Element( u'STC04', Properties(desc=u'Monetary Amount', req_sit=u'S', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'STC05', Properties(desc=u'Monetary Amount', req_sit=u'S', data_type=(u'R',u'1',u'18'), position=5,
codes=[] ) ),
Element( u'STC06', Properties(desc=u'Date', req_sit=u'N', data_type=(u'DT',u'8',u'8'), position=6,
codes=[] ) ),
Element( u'STC07', Properties(desc=u'Payment Method Code', req_sit=u'N', data_type=(u'ID',u'3',u'3'), position=7,
codes=[] ) ),
Element( u'STC08', Properties(desc=u'Date', req_sit=u'N', data_type=(u'DT',u'8',u'8'), position=8,
codes=[] ) ),
Element( u'STC09', Properties(desc=u'Check Number', req_sit=u'N', data_type=(u'AN',u'1',u'16'), position=9,
codes=[] ) ),
Composite( u'C043', Properties(req_sit=u'S',refdes='',seq=u'10',desc=u'Health Care Claim Status'),
Element( u'STC10-01', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=0,
codes=[] ) ),
Element( u'STC10-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'STC10-03', Properties(desc=u'Entity Identifier Code', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[] ) ),
),
Composite( u'C043', Properties(req_sit=u'S',refdes='',seq=u'11',desc=u'Health Care Claim Status'),
Element( u'STC11-01', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=0,
codes=[] ) ),
Element( u'STC11-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'STC11-03', Properties(desc=u'Entity Identifier Code', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[] ) ),
),
Element( u'STC12', Properties(desc=u'Free-form Message Text', req_sit=u'N', data_type=(u'AN',u'1',u'264'), position=12,
codes=[] ) ),
),
Segment( u'REF', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'200',desc=u'Service Line Item Identification'),
Element( u'REF01', Properties(desc=u'Reference Identification Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'FJ'] ) ),
Element( u'REF02', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=2,
codes=[] ) ),
Element( u'REF03', Properties(desc=u'Description', req_sit=u'N', data_type=(u'AN',u'1',u'80'), position=3,
codes=[] ) ),
Composite( u'C040', Properties(req_sit=u'N',refdes='',seq=u'04',desc=u'Reference Identifier'),
),
),
Segment( u'DTP', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'210',desc=u'Service Line Date'),
Element( u'DTP01', Properties(desc=u'Date/Time Qualifier', req_sit=u'R', data_type=(u'ID',u'3',u'3'), position=1,
codes=[u'472'] ) ),
Element( u'DTP02', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'RD8'] ) ),
Element( u'DTP03', Properties(desc=u'Date Time Period', req_sit=u'R', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
),
)
parsed_277_2200E = Loop( u'2200E', Properties(looptype='',repeat=u'>1',pos=u'090',req_sit=u'R',desc=u'Claim Submitter Trace Number'),
Segment( u'TRN', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'090',desc=u'Claim Submitter Trace Number'),
Element( u'TRN01', Properties(desc=u'Trace Type Code', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=1,
codes=[u'2'] ) ),
Element( u'TRN02', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=2,
codes=[] ) ),
Element( u'TRN03', Properties(desc=u'Originating Company Identifier', req_sit=u'N', data_type=(u'AN',u'10',u'10'), position=3,
codes=[] ) ),
Element( u'TRN04', Properties(desc=u'Reference Identification', req_sit=u'N', data_type=(u'AN',u'1',u'50'), position=4,
codes=[] ) ),
),
Segment( u'STC', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'100',desc=u'Claim Level Status Information'),
Composite( u'C043', Properties(req_sit=u'R',refdes='',seq=u'01',desc=u'Health Care Claim Status'),
Element( u'STC01-01', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=0,
codes=[] ) ),
Element( u'STC01-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'STC01-03', Properties(desc=u'Entity Identifier Code', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[] ) ),
),
Element( u'STC02', Properties(desc=u'Date', req_sit=u'R', data_type=(u'DT',u'8',u'8'), position=2,
codes=[] ) ),
Element( u'STC03', Properties(desc=u'Action Code', req_sit=u'N', data_type=(u'ID',u'1',u'2'), position=3,
codes=[] ) ),
Element( u'STC04', Properties(desc=u'Monetary Amount', req_sit=u'R', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'STC05', Properties(desc=u'Monetary Amount', req_sit=u'R', data_type=(u'R',u'1',u'18'), position=5,
codes=[] ) ),
Element( u'STC06', Properties(desc=u'Date', req_sit=u'S', data_type=(u'DT',u'8',u'8'), position=6,
codes=[] ) ),
Element( u'STC07', Properties(desc=u'Payment Method Code', req_sit=u'S', data_type=(u'ID',u'3',u'3'), position=7,
codes=[u'ACH', u'BOP', u'CHK', u'FWT', u'NON'] ) ),
Element( u'STC08', Properties(desc=u'Date', req_sit=u'S', data_type=(u'DT',u'8',u'8'), position=8,
codes=[] ) ),
Element( u'STC09', Properties(desc=u'Check Number', req_sit=u'S', data_type=(u'AN',u'1',u'16'), position=9,
codes=[] ) ),
Composite( u'C043', Properties(req_sit=u'S',refdes='',seq=u'10',desc=u'Health Care Claim Status'),
Element( u'STC10-01', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=0,
codes=[] ) ),
Element( u'STC10-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'STC10-03', Properties(desc=u'Entity Identifier Code', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[] ) ),
),
Composite( u'C043', Properties(req_sit=u'S',refdes='',seq=u'11',desc=u'Health Care Claim Status'),
Element( u'STC11-01', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=0,
codes=[] ) ),
Element( u'STC11-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'STC11-03', Properties(desc=u'Entity Identifier Code', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[] ) ),
),
Element( u'STC12', Properties(desc=u'Free-form Message Text', req_sit=u'N', data_type=(u'AN',u'1',u'264'), position=12,
codes=[] ) ),
),
Segment( u'REF', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'110',desc=u'Payer Claim Identification Number'),
Element( u'REF01', Properties(desc=u'Reference Identification Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'1K'] ) ),
Element( u'REF02', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=2,
codes=[] ) ),
Element( u'REF03', Properties(desc=u'Description', req_sit=u'N', data_type=(u'AN',u'1',u'80'), position=3,
codes=[] ) ),
Composite( u'C040', Properties(req_sit=u'N',refdes='',seq=u'04',desc=u'Reference Identifier'),
),
),
Segment( u'REF', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'110',desc=u'Institutional Bill Type Identification'),
Element( u'REF01', Properties(desc=u'Reference Identification Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'BLT'] ) ),
Element( u'REF02', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=2,
codes=[] ) ),
Element( u'REF03', Properties(desc=u'Description', req_sit=u'N', data_type=(u'AN',u'1',u'80'), position=3,
codes=[] ) ),
Composite( u'C040', Properties(req_sit=u'N',refdes='',seq=u'04',desc=u'Reference Identifier'),
),
),
Segment( u'REF', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'110',desc=u'Medical Record Identification'),
Element( u'REF01', Properties(desc=u'Reference Identification Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'EA'] ) ),
Element( u'REF02', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=2,
codes=[] ) ),
Element( u'REF03', Properties(desc=u'Description', req_sit=u'N', data_type=(u'AN',u'1',u'80'), position=3,
codes=[] ) ),
Composite( u'C040', Properties(req_sit=u'N',refdes='',seq=u'04',desc=u'Reference Identifier'),
),
),
Segment( u'DTP', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'120',desc=u'Claim Service Date'),
Element( u'DTP01', Properties(desc=u'Date/Time Qualifier', req_sit=u'R', data_type=(u'ID',u'3',u'3'), position=1,
codes=[u'232'] ) ),
Element( u'DTP02', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'),
1)*ido;
ch[ah] = ref(cc,ac - ido - 1) + tr2 + tr3;
ch[ah + 1] = ref(cc,ac - ido) + ti2 + ti3;
cr2 = ref(cc,ac - ido - 1) + tr11*tr2 + tr12*tr3;
ci2 = ref(cc,ac - ido) + tr11*ti2 + tr12*ti3;
cr3 = ref(cc,ac - ido - 1) + tr12*tr2 + tr11*tr3;
ci3 = ref(cc,ac - ido) + tr12*ti2 + tr11*ti3;
cr5 = isign*(ti11*tr5 + ti12*tr4);
ci5 = isign*(ti11*ti5 + ti12*ti4);
cr4 = isign*(ti12*tr5 - ti11*tr4);
ci4 = isign*(ti12*ti5 - ti11*ti4);
ch[ah + l1*ido] = cr2 - ci5;
ch[ah + 4*l1*ido] = cr2 + ci5;
ch[ah + l1*ido + 1] = ci2 + cr5;
ch[ah + 2*l1*ido + 1] = ci3 + cr4;
ch[ah + 2*l1*ido] = cr3 - ci4;
ch[ah + 3*l1*ido] = cr3 + ci4;
ch[ah + 3*l1*ido + 1] = ci3 - cr4;
ch[ah + 4*l1*ido + 1] = ci2 - cr5;
}
} else {
for (k=1; k<=l1; k++) {
for (i=0; i<ido-1; i+=2) {
ac = i + 1 + (k*5 - 4)*ido;
ti5 = ref(cc,ac) - ref(cc,ac + 3*ido);
ti2 = ref(cc,ac) + ref(cc,ac + 3*ido);
ti4 = ref(cc,ac + ido) - ref(cc,ac + 2*ido);
ti3 = ref(cc,ac + ido) + ref(cc,ac + 2*ido);
tr5 = ref(cc,ac - 1) - ref(cc,ac + 3*ido - 1);
tr2 = ref(cc,ac - 1) + ref(cc,ac + 3*ido - 1);
tr4 = ref(cc,ac + ido - 1) - ref(cc,ac + 2*ido - 1);
tr3 = ref(cc,ac + ido - 1) + ref(cc,ac + 2*ido - 1);
ah = i + (k - 1)*ido;
ch[ah] = ref(cc,ac - ido - 1) + tr2 + tr3;
ch[ah + 1] = ref(cc,ac - ido) + ti2 + ti3;
cr2 = ref(cc,ac - ido - 1) + tr11*tr2 + tr12*tr3;
ci2 = ref(cc,ac - ido) + tr11*ti2 + tr12*ti3;
cr3 = ref(cc,ac - ido - 1) + tr12*tr2 + tr11*tr3;
ci3 = ref(cc,ac - ido) + tr12*ti2 + tr11*ti3;
cr5 = isign*(ti11*tr5 + ti12*tr4);
ci5 = isign*(ti11*ti5 + ti12*ti4);
cr4 = isign*(ti12*tr5 - ti11*tr4);
ci4 = isign*(ti12*ti5 - ti11*ti4);
dr3 = cr3 - ci4;
dr4 = cr3 + ci4;
di3 = ci3 + cr4;
di4 = ci3 - cr4;
dr5 = cr2 + ci5;
dr2 = cr2 - ci5;
di5 = ci2 - cr5;
di2 = ci2 + cr5;
ch[ah + l1*ido] = wa1[i]*dr2 - isign*wa1[i+1]*di2;
ch[ah + l1*ido + 1] = wa1[i]*di2 + isign*wa1[i+1]*dr2;
ch[ah + 2*l1*ido] = wa2[i]*dr3 - isign*wa2[i+1]*di3;
ch[ah + 2*l1*ido + 1] = wa2[i]*di3 + isign*wa2[i+1]*dr3;
ch[ah + 3*l1*ido] = wa3[i]*dr4 - isign*wa3[i+1]*di4;
ch[ah + 3*l1*ido + 1] = wa3[i]*di4 + isign*wa3[i+1]*dr4;
ch[ah + 4*l1*ido] = wa4[i]*dr5 - isign*wa4[i+1]*di5;
ch[ah + 4*l1*ido + 1] = wa4[i]*di5 + isign*wa4[i+1]*dr5;
}
}
}
} /* passf5 */
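/* Added note: passf below is the generic complex pass used when a factor ip is not one of the
   specialised 2/3/4/5 cases.  cc and ch are interleaved real/imaginary work buffers and wa holds
   the twiddle factors.  On return, *nac == 1 means the result was left in ch (only when ido == 2);
   otherwise the twiddled output is written back into cc and *nac is 0, telling the caller which
   buffer to continue from. */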
static void passf(int *nac, int ido, int ip, int l1, int idl1,
Treal cc[], Treal ch[],
const Treal wa[], int isign)
/* isign is -1 for forward transform and +1 for backward transform */
{
int idij, idlj, idot, ipph, i, j, k, l, jc, lc, ik, idj, idl, inc,idp;
Treal wai, war;
idot = ido / 2;
/* nt = ip*idl1;*/
ipph = (ip + 1) / 2;
idp = ip*ido;
if (ido >= l1) {
for (j=1; j<ipph; j++) {
jc = ip - j;
for (k=0; k<l1; k++) {
for (i=0; i<ido; i++) {
ch[i + (k + j*l1)*ido] =
ref(cc,i + (j + k*ip)*ido) + ref(cc,i + (jc + k*ip)*ido);
ch[i + (k + jc*l1)*ido] =
ref(cc,i + (j + k*ip)*ido) - ref(cc,i + (jc + k*ip)*ido);
}
}
}
for (k=0; k<l1; k++)
for (i=0; i<ido; i++)
ch[i + k*ido] = ref(cc,i + k*ip*ido);
} else {
for (j=1; j<ipph; j++) {
jc = ip - j;
for (i=0; i<ido; i++) {
for (k=0; k<l1; k++) {
ch[i + (k + j*l1)*ido] = ref(cc,i + (j + k*ip)*ido) + ref(cc,i + (jc + k*ip)*ido);
ch[i + (k + jc*l1)*ido] = ref(cc,i + (j + k*ip)*ido) - ref(cc,i + (jc + k*ip)*ido);
}
}
}
for (i=0; i<ido; i++)
for (k=0; k<l1; k++)
ch[i + k*ido] = ref(cc,i + k*ip*ido);
}
idl = 2 - ido;
inc = 0;
for (l=1; l<ipph; l++) {
lc = ip - l;
idl += ido;
for (ik=0; ik<idl1; ik++) {
cc[ik + l*idl1] = ch[ik] + wa[idl - 2]*ch[ik + idl1];
cc[ik + lc*idl1] = isign*wa[idl-1]*ch[ik + (ip-1)*idl1];
}
idlj = idl;
inc += ido;
for (j=2; j<ipph; j++) {
jc = ip - j;
idlj += inc;
if (idlj > idp) idlj -= idp;
war = wa[idlj - 2];
wai = wa[idlj-1];
for (ik=0; ik<idl1; ik++) {
cc[ik + l*idl1] += war*ch[ik + j*idl1];
cc[ik + lc*idl1] += isign*wai*ch[ik + jc*idl1];
}
}
}
for (j=1; j<ipph; j++)
for (ik=0; ik<idl1; ik++)
ch[ik] += ch[ik + j*idl1];
for (j=1; j<ipph; j++) {
jc = ip - j;
for (ik=1; ik<idl1; ik+=2) {
ch[ik - 1 + j*idl1] = cc[ik - 1 + j*idl1] - cc[ik + jc*idl1];
ch[ik - 1 + jc*idl1] = cc[ik - 1 + j*idl1] + cc[ik + jc*idl1];
ch[ik + j*idl1] = cc[ik + j*idl1] + cc[ik - 1 + jc*idl1];
ch[ik + jc*idl1] = cc[ik + j*idl1] - cc[ik - 1 + jc*idl1];
}
}
*nac = 1;
if (ido == 2) return;
*nac = 0;
for (ik=0; ik<idl1; ik++)
cc[ik] = ch[ik];
for (j=1; j<ip; j++) {
for (k=0; k<l1; k++) {
cc[(k + j*l1)*ido + 0] = ch[(k + j*l1)*ido + 0];
cc[(k + j*l1)*ido + 1] = ch[(k + j*l1)*ido + 1];
}
}
if (idot <= l1) {
idij = 0;
for (j=1; j<ip; j++) {
idij += 2;
for (i=3; i<ido; i+=2) {
idij += 2;
for (k=0; k<l1; k++) {
cc[i - 1 + (k + j*l1)*ido] =
wa[idij - 2]*ch[i - 1 + (k + j*l1)*ido] -
isign*wa[idij-1]*ch[i + (k + j*l1)*ido];
cc[i + (k + j*l1)*ido] =
wa[idij - 2]*ch[i + (k + j*l1)*ido] +
isign*wa[idij-1]*ch[i - 1 + (k + j*l1)*ido];
}
}
}
} else {
idj = 2 - ido;
for (j=1; j<ip; j++) {
idj += ido;
for (k = 0; k < l1; k++) {
idij = idj;
for (i=3; i<ido; i+=2) {
idij += 2;
cc[i - 1 + (k + j*l1)*ido] =
wa[idij - 2]*ch[i - 1 + (k + j*l1)*ido] -
isign*wa[idij-1]*ch[i + (k + j*l1)*ido];
cc[i + (k + j*l1)*ido] =
wa[idij - 2]*ch[i + (k + j*l1)*ido] +
isign*wa[idij-1]*ch[i - 1 + (k + j*l1)*ido];
}
}
}
}
} /* passf */
/* ----------------------------------------------------------------------
radf2,radb2, radf3,radb3, radf4,radb4, radf5,radb5, radfg,radbg.
Treal FFT passes fwd and bwd.
---------------------------------------------------------------------- */
static void radf2(int ido, int l1, const Treal cc[], Treal ch[], const Treal wa1[])
{
int i, k, ic;
Treal ti2, tr2;
for (k=0; k<l1; k++) {
ch[2*k*ido] =
ref(cc,k*ido) + ref(cc,(k + l1)*ido);
ch[(2*k+1)*ido + ido-1] =
ref(cc,k*ido) - ref(cc,(k + l1)*ido);
}
if (ido < 2) return;
if (ido != 2) {
for (k=0; k<l1; k++) {
for (i=2; i<ido; i+=2) {
ic = ido - i;
tr2 = wa1[i - 2]*ref(cc, i-1 + (k + l1)*ido) + wa1[i - 1]*ref(cc, i + (k + l1)*ido);
ti2 = wa1[i - 2]*ref(cc, i + (k + l1)*ido) - wa1[i - 1]*ref(cc, i-1 + (k + l1)*ido);
ch[i + 2*k*ido] = ref(cc,i + k*ido) + ti2;
ch[ic + (2*k+1)*ido] = ti2 - ref(cc,i + k*ido);
ch[i - 1 + 2*k*ido] = ref(cc,i - 1 + k*ido) + tr2;
ch[ic - 1 + (2*k+1)*ido] = ref(cc,i - 1 + k*ido) - tr2;
}
}
if (ido % 2 == 1) return;
}
for (k=0; k<l1; k++) {
ch[(2*k+1)*ido] = -ref(cc,ido-1 + (k + l1)*ido);
ch[ido-1 + 2*k*ido] = ref(cc,ido-1 + k*ido);
}
} /* radf2 */
| |
{}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
response_types_map = {
200: "V1Token",
204: "object",
403: "object",
404: "object",
}
return self.api_client.call_api(
'/api/v1/orgs/{owner}/sa/{entity}/tokens', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def delete_service_account(self, owner, uuid, **kwargs): # noqa: E501
"""Delete service account # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_service_account(owner, uuid, async_req=True)
>>> result = thread.get()
:param owner: Owner of the namespace (required)
:type owner: str
:param uuid: Uuid identifier of the entity (required)
:type uuid: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.delete_service_account_with_http_info(owner, uuid, **kwargs) # noqa: E501
def delete_service_account_with_http_info(self, owner, uuid, **kwargs): # noqa: E501
"""Delete service account # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_service_account_with_http_info(owner, uuid, async_req=True)
>>> result = thread.get()
:param owner: Owner of the namespace (required)
:type owner: str
:param uuid: Uuid identifier of the entity (required)
:type uuid: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data only, without the HTTP status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'owner',
'uuid'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_service_account" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `delete_service_account`") # noqa: E501
# verify the required parameter 'uuid' is set
if self.api_client.client_side_validation and ('uuid' not in local_var_params or # noqa: E501
local_var_params['uuid'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `uuid` when calling `delete_service_account`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'uuid' in local_var_params:
path_params['uuid'] = local_var_params['uuid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
response_types_map = {}
return self.api_client.call_api(
'/api/v1/orgs/{owner}/sa/{uuid}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def delete_service_account_token(self, owner, entity, uuid, **kwargs): # noqa: E501
"""Delete service account token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_service_account_token(owner, entity, uuid, async_req=True)
>>> result = thread.get()
:param owner: Owner of the namespace (required)
:type owner: str
:param entity: Entity: project name, hub name, registry name, ... (required)
:type entity: str
:param uuid: Uuid identifier of the sub-entity (required)
:type uuid: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.delete_service_account_token_with_http_info(owner, entity, uuid, **kwargs) # noqa: E501
def delete_service_account_token_with_http_info(self, owner, entity, uuid, **kwargs): # noqa: E501
"""Delete service account token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_service_account_token_with_http_info(owner, entity, uuid, async_req=True)
>>> result = thread.get()
:param owner: Owner of the namespace (required)
:type owner: str
:param entity: Entity: project name, hub name, registry name, ... (required)
:type entity: str
:param uuid: Uuid identifier of the sub-entity (required)
:type uuid: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data only, without the HTTP status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'owner',
'entity',
'uuid'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_service_account_token" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `delete_service_account_token`") # noqa: E501
# verify the required parameter 'entity' is set
if self.api_client.client_side_validation and ('entity' not in local_var_params or # noqa: E501
local_var_params['entity'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity` when calling `delete_service_account_token`") # noqa: E501
# verify the required parameter 'uuid' is set
if self.api_client.client_side_validation and ('uuid' not in local_var_params or # noqa: E501
local_var_params['uuid'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `uuid` when calling `delete_service_account_token`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'entity' in local_var_params:
path_params['entity'] = local_var_params['entity'] # noqa: E501
if 'uuid' in local_var_params:
path_params['uuid'] = local_var_params['uuid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
response_types_map = {}
return self.api_client.call_api(
'/api/v1/orgs/{owner}/sa/{entity}/tokens/{uuid}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
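# --- Added usage note (hypothetical; not part of the generated client) ---
# Assuming this class is the service-account API of an OpenAPI-generated SDK and `api_client`
# is an already configured ApiClient carrying an ApiKey credential, the methods above would be
# called roughly like this (class and variable names here are assumptions):
#
#     api = ServiceAccountsV1Api(api_client)
#     api.delete_service_account_token(owner="acme", entity="sa-entity", uuid="token-uuid")
#     thread = api.delete_service_account(owner="acme", uuid="sa-uuid", async_req=True)
#     thread.get()   # block until the asynchronous call completes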
def get_service_account(self, owner, uuid, **kwargs): # noqa: E501
"""Get service account # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please | |
# Repeat the initial env state markov_length times to form the initial Markov state; stack along the last dimension
init_markov_state = torch.cat(self.markov_length * [initial_env_state], dim=-1)
return init_markov_state
def env2markov(self, old_markov_state: torch.tensor, new_env_state: np.ndarray):
# Function to update Markov states by dropping the oldest environmental state in Markov state and instead adding
# the latest environmental state observation to the Markov state representation
# Preprocessing of new env state
new_env_state = self.preprocess_env_state(new_env_state)
# Obtain new Markov state by dropping oldest state, shifting all other states to the back, and adding latest
# env state observation to the front
new_markov_state = torch.cat([new_env_state, old_markov_state[..., : -self.depth_processed_env_state]], dim=-1)
return new_markov_state
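# --- Added illustration (not from the original code) ---
# The update above is a standard "frame stacking" shift along the last dimension: the newest
# preprocessed env state is prepended and the oldest one is dropped. On toy tensors:
#
#     depth = 1                                  # depth of one processed env state
#     s0 = torch.zeros(1, 4, 4, 3 * depth)       # Markov state stacking 3 frames
#     new = torch.ones(1, 4, 4, depth)           # freshly preprocessed env state
#     s1 = torch.cat([new, s0[..., : -depth]], dim=-1)   # still 3 frames deep
#     # s1[..., :depth] is the newest frame; the oldest frame of s0 is gone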
def learn(self):
# Function to train the PPO agent
# Evaluate the initial performance of policy network before training
self.eval_and_log(eval_type=INITIAL)
# Start training
for iteration in range(self.iterations):
# Each iteration consists of two steps:
# 1. Collecting new training data
# 2. Updating networks for multiple epochs based on newly generated training data
print('Iteration:', iteration+1, "of", self.iterations, "\nEpsilon: {:.2f}".format(self.epsilon.value))
# Init data collection and storage for current iteration
num_observed_train_steps = 0
observations = []
obs_temp = []
# Init (parallel) envs
state = self.init_markov_state(self.env.reset())
# Collect training data
with torch.no_grad():
while num_observed_train_steps < self.T:
# Predict action (actually being multiple parallel ones)
action = self.policy(state.to(self.device)).cpu()
# Perform action in env (self.dilation times)
accumulated_reward = torch.zeros((self.parallel_agents,))
for _ in range(self.dilation):
next_state, reward, terminal_state, _ = self.env.step(action.numpy())
# Sum rewards over time steps
accumulated_reward += reward
# If either env is done, stop repeating actions
if terminal_state.any():
break
# Transform latest observations to tensor data
reward = accumulated_reward
next_state = self.env2markov(state, next_state) # Note: state == Markov state, next_state == state as returned by env
terminal_state = torch.tensor(terminal_state) # Boolean indicating whether one of parallel envs has terminated or not
# Get log-prob of chosen action (= log \pi_{\theta_{old}}(a_t|s_t) in accompanying report)
log_prob = self.policy.log_prob(action.to(self.device)).cpu()
# Store observable data
observation = (state.unsqueeze(1), action, reward, next_state.unsqueeze(1), terminal_state, log_prob)
obs_temp.append(observation)
# Add number of newly (in parallel) experienced state transitions to counter
num_observed_train_steps += self.parallel_agents
# Prepare for next iteration
if terminal_state.any() or num_observed_train_steps >= self.T:
# Reset env for case where num_observed_train_steps < max_trajectory_length T (in which case a new iteration would follow)
state = self.init_markov_state(self.env.reset())
# Add temporarily stored observations (made during current iteration) to list of all freshly
# observed training data collected so far for next weight update step; to be stored in 'observations':
# Compute state value of final observed state (= V(s_T))
last_state = obs_temp[-1][3]
target_state_val = self.val_net(last_state.to(self.device).squeeze(1)).squeeze().cpu() # V(s_T)
termination_mask = (1 - obs_temp[-1][4].int()).float() # Only associate last observed state with valuation of 0 if it is terminal
target_state_val = target_state_val * termination_mask
# Compute the target state value and advantage estimate for each state in agent's trajectory
# (batch-wise for all parallel agents in parallel)
for t in range(len(obs_temp)-1, -1, -1):
# Compute target state value:
# V^{target}_t = r_t + \gamma * r_{t+1} + ... + \gamma^{n-1} * r_{t+n-1} + \gamma^n * V(s_{t+n}), where t+n=T
target_state_val = obs_temp[t][2] + self.gamma * target_state_val # <- reward r_t obtained in state s_t + discounted future reward
# Compute advantage estimate
state_val = self.val_net(obs_temp[t][0].to(self.device).squeeze(1)).squeeze().cpu() # V(s_t)
advantage = target_state_val - state_val
# Augment a previously observed observation tuple
extra = (target_state_val, advantage)
augmented_obs = obs_temp[t] + extra
# Add all parallel agents' individual observations to overall (iteration's) observations list
for i in range(self.parallel_agents):
# Create i^th agent's private observation tuple for time step t in its current trajectory
# element \in {state, action, reward, next_state, terminal_state, log_prob, target_val, advantage}
single_agent_tuple = tuple([element[i] for element in augmented_obs])
observations.append(single_agent_tuple)
# Empty temporary list of observations after they have been added to more persistent list of freshly collected train data
obs_temp = []
else:
# Trajectory continues from time step t to t+1 (for all parallel agents)
state = next_state
# Perform weight updates for multiple epochs on freshly collected training data stored in 'observations'
iteration_loss = 0.
for epoch in range(self.epochs):
acc_epoch_loss = 0. # Loss accumulated over multiple minibatches during epoch
# Shuffle data
random.shuffle(observations) # Shuffle in place!
# Perform weight update on each minibatch contained in shuffled observations
for i in range(0, len(observations), self.batch_size):
# Reset all gradients
self.optimizer_p.zero_grad()
self.optimizer_v.zero_grad()
# Sample minibatch
minibatch = observations[i: i+self.batch_size]
# Get all states, actions, log_probs, target_values, and advantage_estimates from minibatch
state, action, _, _, _, log_prob_old, target_state_val, advantage = zip(*minibatch)
# Transform batch of tuples to batch tensors
state_ = torch.vstack(state).to(self.device) # Minibatch of states
target_state_val_ = torch.vstack(target_state_val).squeeze().to(self.device)
advantage_ = torch.vstack(advantage).squeeze().to(self.device)
log_prob_old_ = torch.vstack(log_prob_old).squeeze().to(self.device)
if self.dist_type == DISCRETE:
action_ = torch.vstack(action).squeeze().to(self.device) # Minibatch of actions
else:
action_ = torch.vstack(action).to(self.device) # Minibatch of actions
# Compute log_prob for minibatch of actions
_ = self.policy(state_)
log_prob = self.policy.log_prob(action_).squeeze()
# Compute current state value estimates
state_val = self.val_net(state_).squeeze()
# Evaluate loss function first component-wise, then combined:
# L^{CLIP}
L_CLIP = self.L_CLIP(log_prob, log_prob_old_, advantage_)
# L^{V}
L_V = self.L_VF(state_val, target_state_val_)
if self.dist_type == DISCRETE or self.std_trainable:
# An entropy bonus is added only if the agent faces a discrete action space or if we manually
# declare that the standard deviation is trainable in continuous action spaces
# H (= Entropy)
L_ENTROPY = self.L_ENTROPY()
# L^{CLIP + H + V} = L^{CLIP} + h*H + v*L^{V}
loss = - L_CLIP - self.h * L_ENTROPY + self.v * L_V
else:
# L^{CLIP + V} = L^{CLIP} + v*L^{V} (no entropy bonus when the standard deviation is not trainable)
loss = - L_CLIP + self.v * L_V
# Error handling
if nan_error(loss):
if self.dist_type == DISCRETE or self.std_trainable:
print_nan_error_loss(loss, L_CLIP, L_V, action_, log_prob, log_prob_old_, state_, state_val, L_ENTROPY)
else:
print_nan_error_loss(loss, L_CLIP, L_V, action_, log_prob, log_prob_old_, state_, state_val)
raise OverflowError('Loss is nan. See print statement above.')
# Backprop loss
loss.backward()
# Perform weight update for both the policy and value net
self.optimizer_p.step()
self.optimizer_v.step()
# Document training progress after one weight update
acc_epoch_loss += loss.cpu().detach().numpy()
# Document training progress after one full epoch/iteration of training
iteration_loss += acc_epoch_loss
self.log('devel_epoch_loss', acc_epoch_loss)
# Potentially decrease learning rates and standard deviation every training iteration
if self.lr_scheduler_pol:
self.lr_scheduler_pol.step()
if self.lr_scheduler_val:
self.lr_scheduler_val.step()
if self.decay_standard_dev:
self.policy.std.step()
# Potentially decrease epsilon
self.epsilon.step()
# Document training progress at the end of a full iteration
self.log_train_stats(iteration_loss=iteration_loss)
# Perform a short intermediate evaluation
self.eval_and_log(eval_type=INTERMEDIATE)
print()
# Clean up after training
self.env.close()
# Final evaluation
self.eval_and_log(eval_type=FINAL)
return self.training_stats
def L_CLIP(self, log_prob, log_prob_old, advantage):
# Computes PPO's main objective L^{CLIP}
prob_ratio = torch.exp(log_prob - log_prob_old) # = pi_theta(a_t|s_t) / pi_theta_old(a_t|s_t)
unclipped = prob_ratio * advantage
clipped = torch.clip(prob_ratio, min=1.-self.epsilon.value, max=1.+self.epsilon.value) * advantage
return torch.mean(torch.min(unclipped, clipped))
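# --- Added worked example (not from the original code) ---
# With epsilon = 0.2, a probability ratio of 1.5 and a positive advantage A:
#     unclipped = 1.5 * A
#     clipped   = clip(1.5, 0.8, 1.2) * A = 1.2 * A
#     L^{CLIP}  = min(1.5 * A, 1.2 * A) = 1.2 * A
# so the objective stops rewarding moves that push the new policy more than 20% above the old one;
# for a negative advantage, the 0.8 bound likewise caps how much can be gained by pushing the
# action's probability down.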
def L_ENTROPY(self):
# Computes entropy bonus for policy net
return torch.mean(self.policy.entropy())
def L_VF(self, state_val: torch.tensor, target_state_val: torch.tensor):
# Loss function for state-value network. Quadratic loss between predicted and target state value
return torch.mean((state_val - target_state_val) ** 2)
def save_policy_net(self, path_policy: str = './policy_model.pt'):
# Save policy network
del self.policy.std # Scheduler object can't be saved as part of a model; would break the saving process
torch.save(self.policy, path_policy)
print('Saved policy net.')
def save_value_net(self, path_val_net: str = './val_net_model.pt'):
# Save state value network
torch.save(self.val_net, path_val_net)
print('Saved value net.')
def load(self, path_policy: str = './policy_model.pt', path_val_net: str = None, train_stats_path: str = None):
# Load a policy network and possibly state value network from file
self.policy = torch.load(path_policy)
self.policy.eval()
print('Loaded policy net.')
if path_val_net:
self.val_net = torch.load(path_val_net)
self.val_net.eval()
print('Loaded value net.')
if train_stats_path:
with open(train_stats_path) as json_file:
self.training_stats = json.load(json_file)
print('Loaded training stats.')
def save_train_stats(self, path: str = './train_stats.json'):
with open(path, 'w') as outfile:
json.dump(self.training_stats, outfile)
print('Saved training stats.')
def log(self, key, value):
self.training_stats[key].append(value)
def eval_and_log(self, eval_type: int):
# Perform an evaluation and store the results
if eval_type == INITIAL:
# Assesses quality of untrained policy
print('Initial evaluation:')
total_rewards, _, total_restarts = self.eval(time_steps=self.time_steps_extensive_eval, render=False)
self.log('init_acc_reward', total_rewards)
self.log('init_total_restarts', total_restarts)
elif eval_type == INTERMEDIATE:
# Assesses quality of policy during training procedure
print("Current iteration's demo:")
total_rewards, _, total_restarts = self.eval()
| |
value.
value: The value to locate in the System.Collections.SortedList object. The value can be null.
Returns: true if the System.Collections.SortedList object contains an element with the specified value; otherwise,false.
"""
pass
def CopyTo(self,array,arrayIndex):
"""
CopyTo(self: SortedList,array: Array,arrayIndex: int)
Copies System.Collections.SortedList elements to a one-dimensional System.Array object,starting at the specified index in the array.
array: The one-dimensional System.Array object that is the destination of the System.Collections.DictionaryEntry objects copied from System.Collections.SortedList. The
System.Array must have zero-based indexing.
arrayIndex: The zero-based index in array at which copying begins.
"""
pass
def GetByIndex(self,index):
"""
GetByIndex(self: SortedList,index: int) -> object
Gets the value at the specified index of a System.Collections.SortedList object.
index: The zero-based index of the value to get.
Returns: The value at the specified index of the System.Collections.SortedList object.
"""
pass
def GetEnumerator(self):
"""
GetEnumerator(self: SortedList) -> IDictionaryEnumerator
Returns an System.Collections.IDictionaryEnumerator object that iterates through a System.Collections.SortedList object.
Returns: An System.Collections.IDictionaryEnumerator object for the System.Collections.SortedList object.
"""
pass
def GetKey(self,index):
"""
GetKey(self: SortedList,index: int) -> object
Gets the key at the specified index of a System.Collections.SortedList object.
index: The zero-based index of the key to get.
Returns: The key at the specified index of the System.Collections.SortedList object.
"""
pass
def GetKeyList(self):
"""
GetKeyList(self: SortedList) -> IList
Gets the keys in a System.Collections.SortedList object.
Returns: An System.Collections.IList object containing the keys in the System.Collections.SortedList object.
"""
pass
def GetValueList(self):
"""
GetValueList(self: SortedList) -> IList
Gets the values in a System.Collections.SortedList object.
Returns: An System.Collections.IList object containing the values in the System.Collections.SortedList object.
"""
pass
def IndexOfKey(self,key):
"""
IndexOfKey(self: SortedList,key: object) -> int
Returns the zero-based index of the specified key in a System.Collections.SortedList object.
key: The key to locate in the System.Collections.SortedList object.
Returns: The zero-based index of the key parameter,if key is found in the System.Collections.SortedList object; otherwise,-1.
"""
pass
def IndexOfValue(self,value):
"""
IndexOfValue(self: SortedList,value: object) -> int
Returns the zero-based index of the first occurrence of the specified value in a System.Collections.SortedList object.
value: The value to locate in the System.Collections.SortedList object. The value can be null.
Returns: The zero-based index of the first occurrence of the value parameter,if value is found in the System.Collections.SortedList object; otherwise,-1.
"""
pass
def Remove(self,key):
"""
Remove(self: SortedList,key: object)
Removes the element with the specified key from a System.Collections.SortedList object.
key: The key of the element to remove.
"""
pass
def RemoveAt(self,index):
"""
RemoveAt(self: SortedList,index: int)
Removes the element at the specified index of a System.Collections.SortedList object.
index: The zero-based index of the element to remove.
"""
pass
def SetByIndex(self,index,value):
"""
SetByIndex(self: SortedList,index: int,value: object)
Replaces the value at a specific index in a System.Collections.SortedList object.
index: The zero-based index at which to save value.
value: The System.Object to save into the System.Collections.SortedList object. The value can be null.
"""
pass
@staticmethod
def Synchronized(list):
"""
Synchronized(list: SortedList) -> SortedList
Returns a synchronized (thread-safe) wrapper for a System.Collections.SortedList object.
list: The System.Collections.SortedList object to synchronize.
Returns: A synchronized (thread-safe) wrapper for the System.Collections.SortedList object.
"""
pass
def TrimToSize(self):
"""
TrimToSize(self: SortedList)
Sets the capacity to the actual number of elements in a System.Collections.SortedList object.
"""
pass
def __add__(self,*args):
""" x.__add__(y) <==> x+y """
pass
def __contains__(self,*args):
"""
__contains__(self: IDictionary,key: object) -> bool
Determines whether the System.Collections.IDictionary object contains an element with the specified key.
key: The key to locate in the System.Collections.IDictionary object.
Returns: true if the System.Collections.IDictionary contains an element with the key; otherwise,false.
"""
pass
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __len__(self,*args):
""" x.__len__() <==> len(x) """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type)
__new__(cls: type,initialCapacity: int)
__new__(cls: type,comparer: IComparer)
__new__(cls: type,comparer: IComparer,capacity: int)
__new__(cls: type,d: IDictionary)
__new__(cls: type,d: IDictionary,comparer: IComparer)
"""
pass
def __reduce_ex__(self,*args):
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
def __setitem__(self,*args):
""" x.__setitem__(i,y) <==> x[i]= """
pass
Capacity=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the capacity of a System.Collections.SortedList object.
Get: Capacity(self: SortedList) -> int
Set: Capacity(self: SortedList)=value
"""
Count=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the number of elements contained in a System.Collections.SortedList object.
Get: Count(self: SortedList) -> int
"""
IsFixedSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether a System.Collections.SortedList object has a fixed size.
Get: IsFixedSize(self: SortedList) -> bool
"""
IsReadOnly=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether a System.Collections.SortedList object is read-only.
Get: IsReadOnly(self: SortedList) -> bool
"""
IsSynchronized=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether access to a System.Collections.SortedList object is synchronized (thread safe).
Get: IsSynchronized(self: SortedList) -> bool
"""
Keys=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the keys in a System.Collections.SortedList object.
Get: Keys(self: SortedList) -> ICollection
"""
SyncRoot=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets an object that can be used to synchronize access to a System.Collections.SortedList object.
Get: SyncRoot(self: SortedList) -> object
"""
Values=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the values in a System.Collections.SortedList object.
Get: Values(self: SortedList) -> ICollection
"""
class Stack(object):
"""
Represents a simple last-in-first-out (LIFO) non-generic collection of objects.
Stack()
Stack(initialCapacity: int)
Stack(col: ICollection)
"""
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return Stack()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def Clear(self):
"""
Clear(self: Stack)
Removes all objects from the System.Collections.Stack.
"""
pass
def Clone(self):
"""
Clone(self: Stack) -> object
Creates a shallow copy of the System.Collections.Stack.
Returns: A shallow copy of the System.Collections.Stack.
"""
pass
def Contains(self,obj):
"""
Contains(self: Stack,obj: object) -> bool
Determines whether an element is in the System.Collections.Stack.
obj: The System.Object to locate in the System.Collections.Stack. The value can be null.
Returns: true,if obj is found in the System.Collections.Stack; otherwise,false.
"""
pass
def CopyTo(self,array,index):
"""
CopyTo(self: Stack,array: Array,index: int)
Copies the System.Collections.Stack to an existing one-dimensional System.Array,starting at the specified array index.
array: The one-dimensional System.Array that is the destination of the elements copied from System.Collections.Stack. The System.Array must have zero-based indexing.
index: The zero-based index in array at which copying begins.
"""
pass
def GetEnumerator(self):
"""
GetEnumerator(self: Stack) -> IEnumerator
Returns an System.Collections.IEnumerator for the System.Collections.Stack.
Returns: An System.Collections.IEnumerator for the System.Collections.Stack.
"""
pass
def Peek(self):
"""
Peek(self: Stack) -> object
Returns the object at the top of the System.Collections.Stack without removing it.
Returns: The System.Object at the top of the System.Collections.Stack.
"""
pass
def Pop(self):
"""
Pop(self: Stack) -> object
Removes and returns the object at the top of the System.Collections.Stack.
Returns: The System.Object removed from the top of the System.Collections.Stack.
"""
pass
def Push(self,obj):
"""
Push(self: Stack,obj: object)
Inserts an object at the top of the System.Collections.Stack.
obj: The System.Object to push onto the System.Collections.Stack. The value can be null.
"""
pass
@staticmethod
def Synchronized(stack):
"""
Synchronized(stack: Stack) -> Stack
Returns a synchronized (thread safe) wrapper for the System.Collections.Stack.
stack: The System.Collections.Stack to synchronize.
Returns: A synchronized wrapper around the System.Collections.Stack.
"""
pass
def ToArray(self):
"""
ToArray(self: Stack) -> Array[object]
Copies the System.Collections.Stack to a new array.
Returns: A new array containing copies of the elements of the System.Collections.Stack.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __len__(self,*args):
""" x.__len__() <==> len(x) """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type)
__new__(cls: type,initialCapacity: int)
__new__(cls: type,col: ICollection)
"""
pass
def __reduce_ex__(self,*args):
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
Count=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the number of elements contained in the System.Collections.Stack.
Get: Count(self: Stack) -> int
"""
IsSynchronized=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether access to the System.Collections.Stack is synchronized (thread safe).
Get: IsSynchronized(self: Stack) -> bool
"""
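# --- Hedged usage sketch (not part of the generated stubs) ---------------------
# Minimal LIFO round-trip with the Stack stub above, assuming a .NET runtime
# (IronPython or pythonnet). The pushed values are arbitrary.
def _example_stack_usage():
    import clr  # needed under pythonnet so System namespaces resolve; built in to IronPython
    from System.Collections import Stack
    st = Stack()
    st.Push('first')
    st.Push('second')
    top = st.Peek()      # 'second' (left on the stack)
    popped = st.Pop()    # 'second' (removed)
    return top, popped, st.Count  # Count is now 1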
x+2, y, s->dots);
# } else {
# PUT0("\n");
# }
# }
#
# #*************************************************************************
# * draw_tabrest - draws rest at position x
# *************************************************************************
# void draw_tabrest (float x, struct SYMBOL *s)
# {
# int flags;
# float y;
#
# y = get_staffheight(ivc,NETTO) + tab_flagspace(&voice[ivc].key);
# flags = tab_flagnumber(s->len);
#
# switch (tabfmt.rhstyle) {
# case RHNONE:
# #no rhythm flags
# return;
# break;
# case RHSIMPLE:
# case RHGRID:
# PUT3("%.1f %.1f %d tabsrest ", x, y, flags);
# break;
# case RHDIAMOND:
# if (flags <= -4)
# PUT2("%.1f %.1f tablonga ", x, y);
# else if (flags == -3)
# PUT2("%.1f %.1f tabbrevis ", x, y);
# else
# PUT3("%.1f %.1f %d tabdrest ", x, y, flags);
# break;
# case RHMODERN:
# case RHMODERNBEAMS:
# PUT3("%.1f %.1f %d tabmrest ", x, y, flags);
# break;
# }
#
# if (s->dots > 0) {
# PUT3("%.1f %.1f %d tabdt\n", x+2, y, s->dots);
# } else {
# PUT0("\n");
# }
#
# }
#
# #*************************************************************************
# * draw_tabbrest - draws multibar rest at position x
# *************************************************************************
# void draw_tabbrest (float x, struct SYMBOL *s)
# {
# float y;
#
# y = get_staffheight(ivc,NETTO) + tab_flagspace(&voice[ivc].key);
#
# #do not care about rhstyle
# PUT1("(%d) ", s->fullmes);
# PUT4("%.2f %.2f %.2f %.2f", x, y+9, x, y);
# PUT0(" tabbrest");
# PUT0("\n");
#
# }
#
# #*************************************************************************
# * draw_tabbar - draws bar at position x
# *************************************************************************
# void draw_tabbar (float x, struct SYMBOL *s)
# {
#
# if (s->u==B_SNGL) # draw the bar
# PUT2("%.1f %.1f tabbar\n", x, get_staffheight(ivc,NETTO));
# else if (s->u==B_DBL)
# PUT2("%.1f %.1f tabdbar\n", x, get_staffheight(ivc,NETTO));
# else if (s->u==B_LREP)
# PUT4("%.1f %.1f tabfbar1 %.1f %d tabrdots\n",
# x, get_staffheight(ivc,NETTO), x+10,
# tab_numlines(&voice[ivc].key)-1);
# else if (s->u==B_RREP) {
# PUT4("%.1f %.1f tabfbar2 %.1f %d tabrdots\n",
# x, get_staffheight(ivc,NETTO), x-10,
# tab_numlines(&voice[ivc].key)-1);
# }
# else if (s->u==B_DREP) {
# PUT4("%.1f %.1f tabfbar1 %.1f %d tabrdots\n",
# x-1, get_staffheight(ivc,NETTO), x+9,
# tab_numlines(&voice[ivc].key)-1);
# PUT4("%.1f %.1f tabfbar2 %.1f %d tabrdots\n",
# x+1, get_staffheight(ivc,NETTO), x-9,
# tab_numlines(&voice[ivc].key)-1);
# }
# else if (s->u==B_FAT1)
# PUT2("%.1f %.1f tabfbar1\n", x, get_staffheight(ivc,NETTO));
# else if (s->u==B_FAT2)
# PUT2("%.1f %.1f tabfbar2\n", x, get_staffheight(ivc,NETTO));
# else if (s->u==B_INVIS)
# ;
# else
#            printf (">>> don't know how to draw bar type %d\n", s->u);
#
# PUT0("\n");
#
# }
#
# #*************************************************************************
# * draw_tabbarnums - draws numbers on bar lines
# *************************************************************************
# void draw_tabbarnums (FILE *fp)
# {
# int i,last,ok,got_note;
#
# last=0;
# got_note=0;
# for (i=0;i<nsym;i++) {
# if ((sym[i].type==NOTE)||(sym[i].type==REST)||(sym[i].type==BREST))
# got_note=1;
#
# if ((sym[i].type==BAR) && (sym[i].gchords && !sym[i].gchords->empty())) {
# if (last != 2) set_font (fp, cfmt.barlabelfont, 0);
# PUT3 (" %.1f %.1f M (%s) cshow ",
# sym[i].x, get_staffheight(ivc,NETTO)+tabfont.size,
# sym[i].gchords->begin()->text.c_str());
# last=2;
# }
#
# if ((sym[i].type==BAR) && sym[i].t) {
# ok=0;
# if ((cfmt.barnums>0) && (sym[i].t%cfmt.barnums==0)) ok=1;
# if ((cfmt.barnums==0) && (!got_note) && (sym[i].t > 1)) ok=1;
# if ((cfmt.barnums!=0) && (!sym[i].gchords->empty())) ok=0;
#
# if (ok) {
# if (last != 1) set_font (fp, cfmt.barnumfont, 0);
# #| if ((mvoice>1) && (cfmt.barnums==0)) |
# if (cfmt.barnums==0)
# PUT2 (" 0 %.1f M (%d) rshow ",
# get_staffheight(ivc,NETTO)+tabfont.size, sym[i].t);
# else
# PUT3 (" %.1f %.1f M (%d) cshow ",
# sym[i].x, get_staffheight(ivc,NETTO)+tabfont.size, sym[i].t);
# last=1;
# }
# }
# }
# PUT0("\n");
# }
#
# #*************************************************************************
# * draw_tabendings - draws first/second endings
# *************************************************************************
# void draw_tabendings (void)
# {
# int i;
# float height;
#
# height = get_staffheight(ivc,BRUTTO) + 5.0;
# for (i=0;i<num_ending;i++) {
# if (ending[i].b<0)
# PUT4("%.1f %.1f %.1f (%d) end2\n",
# ending[i].a, ending[i].a+50, height, ending[i].num);
# else {
# if (ending[i].type==E_CLOSED) {
# PUT4("%.1f %.1f %.1f (%d) end1\n",
# ending[i].a, ending[i].b, height, ending[i].num);
# }
# else {
# PUT4("%.1f %.1f %.1f (%d) end2\n",
# ending[i].a, ending[i].b, height, ending[i].num);
# }
# }
# }
# num_ending=0;
# }
#
# #*************************************************************************
# * calculate_tabbeam - set start/end and other parameters for beam
# * starting at sym[i0]
# *************************************************************************
# int calculate_tabbeam (int i0, struct BEAM *bm)
# {
# int j,j1,j2;
#
# j1=i0; # find first and last note in beam
# j2=-1;
# for (j=i0;j<nsym;j++)
# if (sym[j].word_end) {
# j2=j;
# break;
# }
# if (j2==-1) {
# return 0;
# }
#
# for (j=j1;j<=j2;j++) sym[j].xs=sym[j].x;
#
# bm->i1=j1; # save beam parameters in struct
# bm->i2=j2;
# bm->a=0; #beam fct: y=ax+b
# bm->b=20; #tabflag height
# bm->stem=0;
# bm->t=1.5;
# return 1;
# }
#
#
# #*************************************************************************
# * draw_tabbeams - draw the beams for one word in tablature
# * only used when tabrhstyle=grid
# *************************************************************************
# void draw_tabbeams (struct BEAM *bm, int rhstyle)
# {
# int j,j1,j2,j3,inbeam,k1,k2,num,p,r;
# int nflags,nthbeam;
# float x1,x2,xn,dx;
# float y,dy;
#
# j1=bm->i1;
# j2=bm->i2;
#
# # modernflags stems are shorter and shifted to the right
# # => adjust stemoffset dx and vertical beam distance dy
# if (rhstyle == RHMODERNBEAMS) {
# dx = 2.6; dy = 2.5;
# } else {
# dx = 0.0; dy = 4.0;
# }
#
# # draw stems
# y = get_staffheight(ivc,NETTO) + tab_flagspace(&voice[ivc].key);
# for (j=j1;j<=j2;j++) {
# if (rhstyle == RHMODERNBEAMS)
# PUT2("%.1f %.1f 0 tabmflag ", sym[j].x, y);
# else
# PUT2("%.1f %.1f 0 tabsflag ", sym[j].x, y);
# if (sym[j].dots > 0)
# PUT3("%.1f %.1f %d tabdt\n", sym[j].x+2, y, sym[j].dots);
# else
# PUT0("\n");
# }
# y += 20.0; #tabflag height
#
# # make first beam over whole word
# x1=sym[j1].x + dx;
# x2=sym[j2].x + dx;
# num=sym[j1].u;
# for (j=j1;j<=j2;j++) { # numbers for nplets on same beam
# if (sym[j].p_plet>0) {
# p=sym[j].p_plet;
# r=sym[j].r_plet;
# j3=j+r-1;
# if (j3<=j2) {
# xn=0.5*(sym[j].x+sym[j3].x);
# PUT3("%.1f %.1f (%d) bnum\n", xn, y+5, p);
# sym[j].p_plet=0;
# }
# }
# }
# PUT5("%.1f %.1f %.1f %.1f %.1f bm\n", x1,y,x2,y,bm->t);
# y = y - bm->t - dy;
#
# # loop over beams where 'nthbeam' or more flags
# for (nthbeam=2;nthbeam<5;nthbeam++) {
# k1=k2=0;
# inbeam=0;
# for (j=j1;j<=j2;j++) {
# if (sym[j].type!=NOTE) continue;
# nflags=tab_flagnumber(sym[j].len);
# if ((!inbeam) && (nflags>=nthbeam)) {
# k1=j;
# inbeam=1;
# }
# if (inbeam && ((nflags<nthbeam) || (j==j2))) {
# if ((nflags>=nthbeam) && (j==j2)) k2=j;
# x1=sym[k1].x + dx;
# x2=sym[k2].x + dx;
# inbeam=0;
# if (k1==k2) {
# if (k1==j1)
# PUT5("%.1f %.1f %.1f %.1f %.1f bm\n", x1,y,x1+BEAM_STUB,y,bm->t);
# else
# PUT5("%.1f %.1f %.1f %.1f %.1f bm\n", x1-BEAM_STUB,y,x1,y,bm->t);
# }
# else
#              PUT5("%.1f %.1f %.1f %.1f %.1f bm\n", x1,y,x2,y,bm->t);
# inbeam=0;
# }
# k2=j;
# }
# y = y - bm->t - dy;
# }
# }
#
# #*************************************************************************
# * draw_tabtimesig - draws time signature at position x
# *************************************************************************
# void draw_tabtimesig (float x, struct SYMBOL *s)
# {
# if (s->invis) return;
# if (s->w==1)
# PUT2("%.1f %d tabcsig\n", x, tab_numlines(&voice[ivc].key));
# else if (s->w==2)
# PUT2("%.1f %d tabctsig\n", x, tab_numlines(&voice[ivc].key));
# else if (s->w==3)
# PUT3("%.1f %d (%s) tabt1sig\n", x, tab_numlines(&voice[ivc].key), s->text);
# else
# # PUT4("%.1f %.1f (%d) (%d) tabtsig\n",
# x, get_staffheight(ivc,NETTO), s->u, s->v);
# PUT4("%.1f %d (%s) (%d) tabtsig\n",
# x, tab_numlines(&voice[ivc].key), s->text, s->v);
# }
#
# #*************************************************************************
# * draw_tabdeco - draws chord decoration at position x
# * gchy (vertical position of gchords) is adjusted if necessary
# *************************************************************************
# void draw_tabdeco (float x, struct SYMBOL *s, float* gchy)
# {
# int i,italian, german;
# float fingeringshift, tablineshift, line;
#
# # tablature system inverted?
# italian = ((voice[ivc].key.ktype==ITALIANTAB) ||
# (voice[ivc].key.ktype==ITALIAN7TAB) ||
# (voice[ivc].key.ktype==ITALIAN8TAB) ||
# (voice[ivc].key.ktype==ITALIAN5TAB) ||
# (voice[ivc].key.ktype==ITALIAN4TAB));
#
# # german tablature?
# german = (voice[ivc].key.ktype==GERMANTAB);
#
# # frenchtab is drawn between lines, italiantab on the lines
# # this also affects some decorations
# if ((voice[ivc].key.ktype==FRENCHTAB) ||
# (voice[ivc].key.ktype==FRENCH5TAB) ||
# (voice[ivc].key.ktype==FRENCH4TAB))
# fingeringshift=0.0;
# else if (voice[ivc].key.ktype==GERMANTAB)
# fingeringshift=1.6;
# else #spanishtab or italiantab
# fingeringshift=0.25;
#
# # german tablature draws letters at lower positions
# if (voice[ivc].key.ktype==GERMANTAB)
# tablineshift = 1.5;
# else
# tablineshift = 0.0;
#
#
#
# # decorations, applying to an entire chord
# for (i=0;i<s->dc.n;i++) {
# switch (s->dc.t[i]) {
# case D_HOLD:
# PUT2("%.1f %.1f hld\n", x,
# get_staffheight(ivc,NETTO) + tab_flagspace(&voice[ivc].key));
# break;
# case D_INDEX:
# if (!italian)
# PUT2("%.1f %.2f tabi\n", x,
# fingeringshift+tabline(lowest_course(s)));
# else
# PUT2("%.1f %.2f tabi\n", x,
# fingeringshift+tabline(highest_course(s)));
# break;
# case D_MEDIUS:
# if (!italian)
# PUT2("%.1f %.2f tabm\n", x,
# fingeringshift+tabline(lowest_course(s)));
# else
# PUT2("%.1f %.2f tabm\n", x,
# fingeringshift+tabline(highest_course(s)));
# break;
# case D_ANNULARIUS:
# if (!italian)
# PUT2("%.1f %.2f taba\n", x,
# fingeringshift+tabline(lowest_course(s)));
# else
# PUT2("%.1f %.2f taba\n", x,
# fingeringshift+tabline(highest_course(s)));
# break;
# case D_POLLIX:
# if (voice[ivc].key.ktype==SPANISHTAB)
# PUT2("%.1f %.2f tabpguitar\n", x,
# fingeringshift+tabline(lowest_course(s)));
# else
# if (!italian)
# PUT2("%.1f %.2f tabp\n", x,
# fingeringshift+tabline(lowest_course(s)));
# else
# PUT2("%.1f %.2f tabp\n", x,
# fingeringshift+tabline(highest_course(s)));
# break;
# case D_TABACC:
# PUT2("%.1f %.1f tabacc\n", x+1+(float)tabfont.size/2.0,
# tablineshift+tabline(highest_course(s)));
# break;
# case D_TABX:
# PUT2("%.1f %.1f tabx\n", x+1+(float)tabfont.size/2.0,
# tablineshift+tabline(highest_course(s)));
# break;
# case D_TABU:
# PUT2("%.1f %.1f tabu\n", x+1+(float)tabfont.size/2.0,
# tablineshift+tabline(highest_course(s)));
# break;
# case D_TABV:
# PUT2("%.1f %.2f tabv\n", x,
# 0.125+fingeringshift+tabline(highest_course(s)));
# break;
# case D_TABSTAR:
# PUT2("%.1f %.1f tabstar\n", x+1+(float)tabfont.size/2.0,
# tablineshift+tabline(highest_course(s)));
# break;
# case D_TABCROSS:
# PUT2("%.1f %.1f tabcross\n", x+1+(float)tabfont.size/2.0,
# tablineshift+tabline(highest_course(s)));
# break;
# case D_TABOLINE:
# if (italian)
# std::cerr << "Warning: decoration 'L' (oblique line) ignored in italiantab" << std::endl;
# else if (german)
# std::cerr << "Warning: decoration 'L' (oblique line) ignored in germantab" << std::endl;
# else
# PUT2("%.1f %.2f taboline\n", x,
# tablineshift+fingeringshift+tabline(lowest_course(s)));
# break;
# case D_TABTRILL:
# if (voice[ivc].key.ktype==FRENCHTAB) {
# #replace with accent in frenchtab
# PUT2("%.1f %d tabacc\n", x+1+(float)tabfont.size/2.0,
# tabline(highest_course(s)));
# } else {
# PUT2("%.1f %.1f tabtrl\n", x, *gchy-12);
# *gchy-=18;
# }
# break;
# case D_STRUMUP:
# if (italian) {
# PUT2("%.1f %.1f tabstrdn\n", x, *gchy);
# } else {
# PUT2("%.1f %.1f tabstrup\n", x, *gchy);
# }
# *gchy-=14;
# break;
# case D_STRUMDOWN:
# if (italian) {
# PUT2("%.1f %.1f tabstrup\n", x, *gchy);
# } else {
# PUT2("%.1f %.1f tabstrdn\n", x, *gchy);
# }
# *gchy-=14;
# break;
# case D_SEGNO:
# PUT2("%.1f %.1f sgno\n", x, *gchy-20);
# *gchy-=22;
# break;
# case D_CODA:
# PUT2("%.1f %.1f coda\n", x, *gchy-20);
# *gchy-=22;
# break;
#             case
# python/vtool/maya_lib/corrective.py (from louisVottero/vtool)
# Copyright (C) 2022 <NAME> <EMAIL> All rights reserved.
from __future__ import absolute_import
import string
from .. import util
if util.is_in_maya():
import maya.cmds as cmds
from . import anim
from . import core
from . import blendshape
from . import attr
from . import space
from . import geo
from . import deform
from . import shade
from . import rigs_util
def get_pose_instance(pose_name, pose_group = 'pose_gr'):
"""
Get a pose instance from the pose name.
Args:
pose_name (str): The name of a pose.
Returns:
object: The instance of the pose at the pose type.
"""
if not cmds.objExists(pose_name):
return
if cmds.objExists('%s.type' % pose_name):
pose_type = cmds.getAttr('%s.type' % pose_name)
if not cmds.objExists('%s.type' % pose_name):
pose_type = 'cone'
pose = corrective_type[pose_type]()
pose.set_pose_group(pose_group)
pose.set_pose(pose_name)
return pose
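# --- Hedged usage sketch (not part of the original module) ---------------------
# Shows how get_pose_instance is typically used to look up an existing pose node
# and query it. Assumes Maya is running and that a pose named 'pose_arm_up'
# already exists under 'pose_gr'; both names are made up for this example. The
# returned pose instances expose get_type() and goto_pose(), as used by
# PoseManager below.
def _example_get_pose_instance():
    pose = get_pose_instance('pose_arm_up', 'pose_gr')
    if not pose:
        return None
    pose_type = pose.get_type()
    pose.goto_pose()
    return pose_type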
class PoseManager(object):
"""
Convenience for working with poses.
"""
def __init__(self):
self.poses = []
self._namespace = None
self.pose_group = 'pose_gr'
self.detached_attributes = {}
self.sub_detached_dict = {}
def _check_pose_group(self):
if not self.pose_group:
return
if not cmds.objExists(self.pose_group):
selection = cmds.ls(sl = True)
self.pose_group = cmds.group(em = True, n = self.pose_group)
data = rigs_util.StoreControlData(self.pose_group)
data.set_data()
if selection:
cmds.select(selection)
def is_pose(self, name):
"""
Check if name matches the name of a pose.
Args:
name (str): Check if the node at name is a pose.
Returns:
bool
"""
if PoseBase().is_a_pose(name):
return True
return False
def is_pose_mesh_in_sculpt(self, index, pose_name):
pose = self.get_pose_instance(pose_name)
if hasattr(pose, 'is_mesh_in_sculpt'):
return pose.is_mesh_in_sculpt(index)
def get_pose_instance(self, pose_name):
"""
Get the instance of a pose.
Args:
pose_name (str): The name of a pose.
Returns:
object: The instance of the pose at the pose type.
"""
namespace = core.get_namespace(self.pose_group)
if namespace:
if not pose_name.startswith(namespace):
pose_name = '%s:%s' % (namespace, pose_name)
pose = get_pose_instance(pose_name, self.pose_group)
return pose
def get_poses(self, all_descendents = False):
"""
Get the poses under the pose_gr
Returns:
list: The names of poses.
"""
self._check_pose_group()
if not self.pose_group:
return
namespace = core.get_namespace(self.pose_group)
relatives = cmds.listRelatives(self.pose_group, ad = all_descendents)
poses = []
if not relatives:
return
end_poses = []
for relative in relatives:
if self.is_pose(relative):
#this is because in some cases cmds.listRelatives was not returning namespace. Possibly a refresh issue.
if namespace:
if not relative.startswith(namespace):
relative = '%s:%s' % (namespace, relative)
pose_type = cmds.getAttr('%s.type' % relative)
if pose_type == 'combo':
end_poses.append(relative)
else:
poses.append(relative)
if end_poses:
poses = poses + end_poses
return poses
def get_pose_control(self, name):
"""
Get the control of a pose.
Args:
name (str): The name of a pose.
Returns:
str: The name of the pose.
"""
pose = self.get_pose_instance(name)
control = pose.pose_control
return control
def get_pose_type(self, name):
pose = self.get_pose_instance(name)
pose_type = pose.get_type()
return pose_type
def set_namespace(self, namespace):
self._namespace = namespace
pose_group = '%s:%s' % (namespace, 'pose_gr')
if not cmds.objExists(pose_group):
self.pose_group = cmds.rename( self.pose_group, pose_group )
else:
self.pose_group = pose_group
#self.pose_group = cmds.rename( self.pose_group, '%s:%s' % (namespace, self.pose_group))
rels = cmds.listRelatives(self.pose_group, ad = True)
for rel in rels:
nicename = core.get_basename(rel, remove_namespace = True)
pose_name = '%s:%s' % (self._namespace, nicename)
if not cmds.objExists(pose_name):
cmds.rename(rel, '%s:%s' % (self._namespace, nicename))
#cmds.refresh()
def set_pose_group(self, pose_gr_name):
"""
Set the pose group to work with.
Args:
pose_gr_name (str): The name of a pose group.
"""
self.pose_group = pose_gr_name
def set_weights_to_zero(self):
"""
Set all poses in the pose_gr to zero.
"""
poses = self.get_poses()
if not poses:
return
for pose_name in poses:
input_value = attr.get_attribute_input('%s.weight' % pose_name)
if not input_value:
if cmds.objExists('%s.weight' % pose_name):
cmds.setAttr('%s.weight' % pose_name, 0)
def set_default_pose(self):
"""
Set the default control pose. This is the control pose the rig should revert to by default.
"""
self._check_pose_group()
store = rigs_util.StoreControlData(self.pose_group)
store.set_data()
def set_pose_to_default(self):
"""
Set the control pose to the default pose.
This is handy for resetting control positions after going to a pose.
"""
self._check_pose_group()
store = rigs_util.StoreControlData(self.pose_group)
if self._namespace:
store.set_namesapce(self._namespace)
store.eval_data()
self.set_weights_to_zero()
def set_pose(self, pose):
"""
Set the control pose to the current pose.
This is handy for returning a character to the pose it was sculpted in.
Args:
pose (str): The name of a pose.
"""
pose_instance = self.get_pose_instance(pose)
if pose_instance:
pose_instance.goto_pose()
else:
util.warning('%s not found' % pose)
def set_pose_data(self, pose):
"""
Set the pose data from the control values.
This is handy for making sure a character can get back into pose before sculpting it.
Args:
pose (str): The name of a pose.
"""
store = rigs_util.StoreControlData(pose)
store.set_data()
def set_poses(self, pose_list):
"""
Not in use. This was the beginning of a combo system.
It proved difficult to extrapolate a combo pose from multiple poses.
Args:
pose_list (list): A list of pose names.
"""
data_list = []
for pose_name in pose_list:
store = rigs_util.StoreControlData(pose_name)
data_list.append( store.eval_data(True) )
store = rigs_util.StoreControlData().eval_multi_transform_data(data_list)
def create_pose(self, pose_type, name = None):
"""
Create a pose.
Args:
pose_type (str): The name of a pose type.
name (str): The name for the pose.
Returns:
str: The name of the new pose.
"""
pose = None
self._check_pose_group()
if pose_type == 'cone':
pose = self.create_cone_pose(name)
if pose_type == 'no reader':
pose = self.create_no_reader_pose(name)
if pose_type == 'combo':
pose = self.create_combo_pose(name)
if pose_type == 'timeline':
pose = self.create_timeline_pose(name)
if pose_type == 'group':
pose = self.create_group_pose(name)
return pose
@core.undo_chunk
def create_cone_pose(self, name = None):
"""
Create a cone pose.
Args:
name (str): The name for the pose.
Returns:
str: The name of the pose.
"""
selection = cmds.ls(sl = True, l = True)
if not selection:
return
if not cmds.nodeType(selection[0]) == 'joint' or not len(selection):
return
if not name:
joint = selection[0].split('|')
joint = joint[-1]
name = 'pose_%s' % joint
pose = PoseCone(selection[0], name)
pose.set_pose_group(self.pose_group)
pose_control = pose.create()
self.pose_control = pose_control
return pose_control
@core.undo_chunk
def create_no_reader_pose(self, name = None):
"""
Create a no reader pose.
Args:
name (str): The name for the pose.
Returns:
str: The name of the pose.
"""
if not name:
name = core.inc_name('pose_no_reader_1')
pose = PoseNoReader(name)
pose.set_pose_group(self.pose_group)
pose_control = pose.create()
self.pose_control = pose_control
return pose_control
def create_combo_pose(self, name = None):
"""
Create a combo pose.
Args:
name (str): The name for the pose.
Returns:
str: The name of the pose.
"""
if not name:
name = core.inc_name('pose_combo_1')
pose = PoseCombo(name)
pose.set_pose_group(self.pose_group)
pose_control = pose.create()
self.pose_control = pose_control
return pose_control
@core.undo_chunk
def create_timeline_pose(self, name = None):
"""
Create a no timeline pose.
Args:
name (str): The name for the pose.
Returns:
str: The name of the pose.
"""
current_time = str(cmds.currentTime(q = True))
time_number_strings = current_time.split('.')
seconds_name = time_number_strings[0]
sub_seconds_name = time_number_strings[1]
time_name = seconds_name.rjust(4, '0') + '_' + sub_seconds_name.rjust(2, '0')
if not name:
name = core.inc_name('pose_timeline_%s_1' % time_name)
pose = PoseTimeline(name)
pose.set_pose_group(self.pose_group)
pose_control = pose.create()
self.pose_control = pose_control
return pose_control
def create_group_pose(self, name = None):
"""
Create a group pose.
Args:
name (str): The name for the pose.
Returns:
str: The name of the pose.
"""
if not name:
name = core.inc_name('pose_group_1')
pose = PoseGroup(name)
pose.set_pose_group(self.pose_group)
pose_control = pose.create()
self.pose_control = pose_control
return pose_control
@core.undo_chunk
def reset_pose(self, pose_name):
pose = self.get_pose_instance(pose_name)
pose.reset_target_meshes()
@core.undo_chunk
def update_pose_meshes(self, pose_name, only_not_in_sculpt = False):
pose = self.get_pose_instance(pose_name)
pose.update_target_meshes(only_not_in_sculpt)
@core.undo_chunk
def update_pose(self, pose_name):
control = self.get_pose_control(pose_name)
self.set_pose_data(control)
instance = self.get_pose_instance(pose_name)
if hasattr(instance, 'rematch_cone_to_joint'):
instance.goto_pose()
instance.rematch_cone_to_joint()
@core.undo_chunk
def revert_pose_vertex(self, pose_name):
instance = self.get_pose_instance(pose_name)
instance.revert_selected_verts()
@core.undo_chunk
def rename_pose(self, pose_name, new_name):
pose = self.get_pose_instance(pose_name)
from __future__ import annotations
__all__ = (
"ResourceEvent",
"ResourceConflict",
"ResourceNotFound",
"NoCurrentContext",
"TeardownError",
"Context",
"executor",
"context_teardown",
"current_context",
"get_resource",
"get_resources",
"require_resource",
"_Dependency",
"inject",
"resource",
)
import logging
import re
import sys
import types
import warnings
from asyncio import (
AbstractEventLoop,
Future,
current_task,
get_event_loop,
get_running_loop,
iscoroutinefunction,
)
from collections.abc import Coroutine
from collections.abc import Sequence as ABCSequence
from concurrent.futures import Executor
from contextvars import ContextVar, Token, copy_context
from dataclasses import dataclass, field
from enum import Enum, auto
from functools import partial, wraps
from inspect import (
Parameter,
getattr_static,
isasyncgenfunction,
isawaitable,
isclass,
signature,
)
from traceback import format_exception
from types import TracebackType
from typing import (
Any,
AsyncGenerator,
Awaitable,
Callable,
Dict,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
get_type_hints,
overload,
)
import asyncio_extras
from async_generator import async_generator
from typeguard import check_argument_types
from asphalt.core.event import Event, Signal, wait_event
from asphalt.core.utils import callable_name, qualified_name
if sys.version_info >= (3, 10):
from typing import ParamSpec
else:
from typing_extensions import ParamSpec
if sys.version_info >= (3, 8):
from typing import get_args, get_origin
else:
from typing_extensions import get_args, get_origin
logger = logging.getLogger(__name__)
factory_callback_type = Callable[["Context"], Any]
resource_name_re = re.compile(r"\w+")
T_Resource = TypeVar("T_Resource")
T_Retval = TypeVar("T_Retval")
T_Context = TypeVar("T_Context", bound="Context")
T_Self = TypeVar("T_Self")
P = ParamSpec("P")
_current_context: ContextVar[Context | None] = ContextVar(
"_current_context", default=None
)
class ResourceContainer:
"""
Contains the resource value or its factory callable, plus some metadata.
:ivar value_or_factory: the resource value or the factory callback
:ivar types: type names the resource was registered with
:vartype types: Tuple[type, ...]
:ivar str name: name of the resource
:ivar str context_attr: the context attribute of the resource
:ivar bool is_factory: ``True`` if ``value_or_factory`` is a resource factory callable
"""
__slots__ = "value_or_factory", "types", "name", "context_attr", "is_factory"
def __init__(
self,
value_or_factory: Any,
types: Tuple[type, ...],
name: str,
context_attr: Optional[str],
is_factory: bool,
) -> None:
self.value_or_factory = value_or_factory
self.types = types
self.name = name
self.context_attr = context_attr
self.is_factory = is_factory
def generate_value(self, ctx: Context) -> Any:
assert self.is_factory, "generate_value() only works for resource factories"
value = self.value_or_factory(ctx)
container = ResourceContainer(
value, self.types, self.name, self.context_attr, False
)
for type_ in self.types:
ctx._resources[(type_, self.name)] = container
if self.context_attr:
setattr(ctx, self.context_attr, value)
return value
def __repr__(self) -> str:
typenames = ", ".join(qualified_name(cls) for cls in self.types)
value_repr = (
"factory=%s" % callable_name(self.value_or_factory)
if self.is_factory
else "value=%r" % self.value_or_factory
)
return (
"{self.__class__.__name__}({value_repr}, types=[{typenames}], name={self.name!r}, "
"context_attr={self.context_attr!r})".format(
self=self, value_repr=value_repr, typenames=typenames
)
)
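# --- Hedged sketch (not part of the original module) ---------------------------
# A non-factory ResourceContainer simply carries a value plus the metadata it was
# registered with; __repr__ summarises it. The value and names here are arbitrary.
def _example_resource_container():
    container = ResourceContainer(
        value_or_factory=42,
        types=(int,),
        name="answer",
        context_attr=None,
        is_factory=False,
    )
    # repr looks roughly like:
    # ResourceContainer(value=42, types=[int], name='answer', context_attr=None)
    return repr(container)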
class ResourceEvent(Event):
"""
Dispatched when a resource or resource factory has been added to a context.
:ivar resource_types: types the resource was registered under
:vartype resource_types: Tuple[type, ...]
:ivar str name: name of the resource
:ivar bool is_factory: ``True`` if a resource factory was added, ``False`` if a regular
resource was added
"""
__slots__ = "resource_types", "resource_name", "is_factory"
def __init__(
self,
source: Context,
topic: str,
types: Tuple[type, ...],
name: str,
is_factory: bool,
) -> None:
super().__init__(source, topic)
self.resource_types = types
self.resource_name = name
self.is_factory = is_factory
class ResourceConflict(Exception):
"""
Raised when a new resource that is being published conflicts with an existing resource or
context variable.
"""
class ResourceNotFound(LookupError):
"""Raised when a resource request cannot be fulfilled within the allotted time."""
def __init__(self, type: type, name: str) -> None:
super().__init__(type, name)
self.type = type
self.name = name
def __str__(self):
return "no matching resource was found for type={typename} name={self.name!r}".format(
self=self, typename=qualified_name(self.type)
)
class TeardownError(Exception):
"""
Raised after context teardown when one or more teardown callbacks raised an exception.
:ivar exceptions: exceptions raised during context teardown, in the order in which they were
raised
:vartype exceptions: List[Exception]
"""
def __init__(self, exceptions: List[Exception]) -> None:
super().__init__(exceptions)
self.exceptions = exceptions
def __str__(self):
separator = "----------------------------\n"
tracebacks = separator.join(
"\n".join(format_exception(type(exc), exc, exc.__traceback__))
for exc in self.exceptions
)
return "{} exceptions(s) were raised by teardown callbacks:\n{}{}".format(
len(self.exceptions), separator, tracebacks
)
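# --- Hedged sketch (not part of the original module) ---------------------------
# TeardownError aggregates every exception raised by teardown callbacks; str()
# renders each traceback separated by a dashed line. The exceptions are arbitrary.
def _example_teardown_error():
    errors = [ValueError("first failure"), RuntimeError("second failure")]
    err = TeardownError(errors)
    assert err.exceptions == errors
    return str(err)  # starts with "2 exception(s) were raised by teardown callbacks:"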
class NoCurrentContext(Exception):
"""Raised by :func: `current_context` when there is no active context."""
def __init__(self) -> None:
super().__init__("There is no active context")
class ContextState(Enum):
open = auto()
closing = auto()
closed = auto()
class Context:
"""
Contexts give request handlers and callbacks access to resources.
Contexts are stacked in a way that accessing an attribute that is not present in the current
context causes the attribute to be looked up in the parent instance and so on, until the
attribute is found (or :class:`AttributeError` is raised).
:param parent: the parent context, if any
:ivar Context parent: the parent context, if any
:var Signal resource_added: a signal (:class:`ResourceEvent`) dispatched when a resource
has been published in this context
"""
resource_added = Signal(ResourceEvent)
_loop: AbstractEventLoop | None = None
_reset_token: Token
def __init__(self, parent: Optional[Context] = None) -> None:
assert check_argument_types()
if parent is None:
self._parent = _current_context.get(None)
else:
warnings.warn(
"Explicitly passing the parent context has been deprecated. "
"The context stack is now tracked by the means of PEP 555 context "
"variables.",
DeprecationWarning,
stacklevel=2,
)
self._parent = parent
self._state = ContextState.open
self._resources: Dict[Tuple[type, str], ResourceContainer] = {}
self._resource_factories: Dict[Tuple[type, str], ResourceContainer] = {}
self._resource_factories_by_context_attr: Dict[str, ResourceContainer] = {}
self._teardown_callbacks: List[Tuple[Callable, bool]] = []
def __getattr__(self, name):
# First look for a resource factory in the whole context chain
for ctx in self.context_chain:
factory = ctx._resource_factories_by_context_attr.get(name)
if factory:
return factory.generate_value(self)
# When that fails, look directly for an attribute in the parents
for ctx in self.context_chain[1:]:
value = getattr_static(ctx, name, None)
if value is not None:
return getattr(ctx, name)
raise AttributeError(f"no such context variable: {name}")
@property
def context_chain(self) -> List[Context]:
"""Return a list of contexts starting from this one, its parent and so on."""
contexts = []
ctx: Optional[Context] = self
while ctx is not None:
contexts.append(ctx)
ctx = ctx.parent
return contexts
@property
def loop(self) -> AbstractEventLoop:
"""Return the event loop associated with this context."""
if self._loop is None:
self._loop = get_running_loop()
return self._loop
@property
def parent(self) -> Optional[Context]:
"""Return the parent context, or ``None`` if there is no parent."""
return self._parent
@property
def closed(self) -> bool:
"""
Return ``True`` if the teardown process has at least been initiated, ``False``
otherwise.
"""
return self._state is not ContextState.open
def _check_closed(self) -> None:
if self._state is ContextState.closed:
raise RuntimeError("this context has already been closed")
def add_teardown_callback(
self, callback: Callable, pass_exception: bool = False
) -> None:
"""
Add a callback to be called when this context closes.
This is intended for cleanup of resources, and the list of callbacks is processed in the
reverse order in which they were added, so the last added callback will be called first.
The callback may return an awaitable. If it does, the awaitable is awaited on before
calling any further callbacks.
:param callback: a callable that is called with either no arguments or with the exception
that ended this context, based on the value of ``pass_exception``
:param pass_exception: ``True`` to pass the callback the exception that ended this context
(or ``None`` if the context ended cleanly)
"""
assert check_argument_types()
self._check_closed()
self._teardown_callbacks.append((callback, pass_exception))
async def close(self, exception: BaseException | None = None) -> None:
"""
Close this context and call any necessary resource teardown callbacks.
If a teardown callback returns an awaitable, the return value is awaited on before calling
any further teardown callbacks.
All callbacks will be processed, even if some of them raise exceptions. If at least one
callback raised an error, this method will raise a :exc:`~.TeardownError` at the end.
After this method has been called, resources can no longer be requested or published on
this context.
:param exception: the exception, if any, that caused this context to be closed
:raises .TeardownError: if one or more teardown callbacks raise an exception
"""
self._check_closed()
if self._state is ContextState.closing:
raise RuntimeError("this context is already closing")
self._state = ContextState.closing
try:
exceptions = []
while self._teardown_callbacks:
callbacks, self._teardown_callbacks = self._teardown_callbacks, []
for callback, pass_exception in reversed(callbacks):
try:
retval = callback(exception) if pass_exception else callback()
if isawaitable(retval):
await retval
except Exception as e:
exceptions.append(e)
if exceptions:
raise TeardownError(exceptions)
finally:
self._state = ContextState.closed
def __enter__(self):
warnings.warn(
"Using Context as a synchronous context manager has been deprecated",
DeprecationWarning,
)
self._check_closed()
if self._loop is None:
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self._loop = get_event_loop()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.loop.run_until_complete(self.close(exc_val))
async def __aenter__(self) -> Context:
self._check_closed()
if self._loop is None:
self._loop = get_running_loop()
def path(self, *paths):
"""Convert build path to msvc path and prepend root"""
return self.rootpath + msvc_path_join(*list(map(msvc_path, paths)))
def apath(self, path, *paths):
"""Convert build path to msvc path and prepend root if not absolute"""
### On Unix, os.path.isabs won't do the right thing if "item"
### contains backslashes or drive letters
if os.path.isabs(path):
return msvc_path_join(msvc_path(path), *list(map(msvc_path, paths)))
else:
return self.rootpath + msvc_path_join(msvc_path(path),
*list(map(msvc_path, paths)))
def get_install_targets(self):
"Generate the list of targets"
# Get list of targets to generate project files for
install_targets = self.graph.get_all_sources(gen_base.DT_INSTALL) \
+ self.projects
# Don't create projects for scripts
install_targets = [x for x in install_targets if not isinstance(x, gen_base.TargetScript)]
# Drop the libsvn_fs_base target and tests if we don't have BDB
if not self.bdb_lib:
install_targets = [x for x in install_targets if x.name != 'libsvn_fs_base']
install_targets = [x for x in install_targets if not (isinstance(x, gen_base.TargetExe)
and x.install == 'bdb-test')]
# Don't build serf when we don't have it or for 1.3+
if not self.serf_lib or (self.serf_ver_maj, self.serf_ver_min) >= (1, 3):
install_targets = [x for x in install_targets if x.name != 'serf']
# Drop the serf target if we don't have both serf and openssl
if not self.serf_lib:
install_targets = [x for x in install_targets if x.name != 'libsvn_ra_serf']
# Don't build zlib if we have an already compiled serf
if self.serf_lib and (self.serf_ver_maj, self.serf_ver_min) >= (1, 3):
install_targets = [x for x in install_targets if x.name != 'zlib']
# Drop the swig targets if we don't have swig
if not self.swig_path and not self.swig_libdir:
install_targets = [x for x in install_targets
if not (isinstance(x, gen_base.TargetSWIG)
or isinstance(x, gen_base.TargetSWIGLib)
or isinstance(x, gen_base.TargetSWIGProject))]
# Drop the Java targets if we don't have a JDK
if not self.jdk_path:
install_targets = [x for x in install_targets
if not (isinstance(x, gen_base.TargetJava)
or isinstance(x, gen_base.TargetJavaHeaders)
or x.name == '__JAVAHL__'
or x.name == '__JAVAHL_TESTS__'
or x.name == 'libsvnjavahl')]
dll_targets = []
for target in install_targets:
if isinstance(target, gen_base.TargetLib):
if target.msvc_fake:
install_targets.append(self.create_fake_target(target))
if target.msvc_export:
if self.disable_shared:
target.msvc_static = True
else:
dll_targets.append(self.create_dll_target(target))
install_targets.extend(dll_targets)
for target in install_targets:
target.project_guid = self.makeguid(target.name)
# sort these for output stability, to watch out for regressions.
install_targets.sort(key = lambda t: t.name)
return install_targets
def create_fake_target(self, dep):
"Return a new target which depends on another target but builds nothing"
section = gen_base.TargetProject.Section(gen_base.TargetProject,
dep.name + "_fake",
{'path': 'build/win32'}, self)
section.create_targets()
section.target.msvc_name = dep.msvc_name and dep.msvc_name + "_fake"
self.graph.add(gen_base.DT_LINK, section.target.name, dep)
dep.msvc_fake = section.target
return section.target
def create_dll_target(self, dep):
"Return a dynamic library that depends on a static library"
target = gen_base.TargetLib(dep.name,
{ 'path' : dep.path,
'msvc-name' : dep.name + "_dll" },
self)
target.msvc_export = dep.msvc_export
# move the description from the static library target to the dll.
target.desc = dep.desc
dep.desc = None
# The dependency should now be static.
dep.msvc_export = None
dep.msvc_static = True
# Remove the 'lib' prefix, so that the static library will be called
# svn_foo.lib
dep.name = dep.name[3:]
# However, its name should still be 'libsvn_foo' in Visual Studio
dep.msvc_name = target.name
# We renamed dep, so right now it has no dependencies. Because target has
# dep's old dependencies, transfer them over to dep.
deps = self.graph.deps[gen_base.DT_LINK]
deps[dep.name] = deps[target.name]
for key in deps.keys():
# Link everything except tests against the dll. Tests need to be linked
# against the static libraries because they sometimes access internal
# library functions.
# ### The magic behavior for 'test' in a name and 'entries-dump' should
# ### move to another option in build.conf
if dep in deps[key] and key.find("test") == -1 and key != 'entries-dump':
deps[key].remove(dep)
deps[key].append(target)
# The dll has exactly one dependency, the static library.
deps[target.name] = [ dep ]
return target
def get_configs(self, target):
"Get the list of configurations for the project"
configs = [ ]
for cfg in self.configs:
configs.append(
ProjectItem(name=cfg,
lower=cfg.lower(),
defines=self.get_win_defines(target, cfg),
libdirs=self.get_win_lib_dirs(target, cfg),
libs=self.get_win_libs(target, cfg),
))
return configs
def get_proj_sources(self, quote_path, target):
"Get the list of source files for each project"
sources = [ ]
javac_exe = "javac"
javah_exe = "javah"
jar_exe = "jar"
if self.jdk_path:
javac_exe = os.path.join(self.jdk_path, "bin", javac_exe)
javah_exe = os.path.join(self.jdk_path, "bin", javah_exe)
jar_exe = os.path.join(self.jdk_path, "bin", jar_exe)
if not isinstance(target, gen_base.TargetProject):
for source, object, reldir in self.get_win_sources(target):
cbuild = None
ctarget = None
cdesc = None
if isinstance(target, gen_base.TargetJavaHeaders):
classes = self.path(target.classes)
if self.junit_path is not None:
classes = "%s;%s" % (classes, self.junit_path)
headers = self.path(target.headers)
classname = target.package + "." + source.class_name
cbuild = "%s -verbose -force -classpath %s -d %s %s" \
% (self.quote(javah_exe), self.quote(classes),
self.quote(headers), classname)
ctarget = self.path(object.filename_win)
cdesc = "Generating %s" % (object.filename_win)
elif isinstance(target, gen_base.TargetJavaClasses):
classes = targetdir = self.path(target.classes)
if self.junit_path is not None:
classes = "%s;%s" % (classes, self.junit_path)
sourcepath = self.path(source.sourcepath)
cbuild = "%s -g -target 1.5 -source 1.5 -classpath %s -d %s " \
"-sourcepath %s $(InputPath)" \
% tuple(map(self.quote, (javac_exe, classes,
targetdir, sourcepath)))
ctarget = self.path(object.filename)
cdesc = "Compiling %s" % (source)
rsrc = self.path(str(source))
if quote_path and '-' in rsrc:
rsrc = '"%s"' % rsrc
sources.append(ProjectItem(path=rsrc, reldir=reldir, user_deps=[],
custom_build=cbuild, custom_target=ctarget,
custom_desc=cdesc,
extension=os.path.splitext(rsrc)[1]))
if isinstance(target, gen_base.TargetJavaClasses) and target.jar:
classdir = self.path(target.classes)
jarfile = msvc_path_join(classdir, target.jar)
cbuild = "%s cf %s -C %s %s" \
% (self.quote(jar_exe), jarfile, classdir,
" ".join(target.packages))
deps = [x.custom_target for x in sources]
sources.append(ProjectItem(path='makejar', reldir='', user_deps=deps,
custom_build=cbuild, custom_target=jarfile,
extension=''))
if isinstance(target, gen_base.TargetSWIG):
swig_options = self.swig.opts[target.lang].split()
swig_options.append('-DWIN32')
swig_deps = []
for include_dir in self.get_win_includes(target):
swig_options.append("-I%s" % self.quote(include_dir))
for obj in self.graph.get_sources(gen_base.DT_LINK, target.name):
if isinstance(obj, gen_base.SWIGObject):
for cobj in self.graph.get_sources(gen_base.DT_OBJECT, obj):
if isinstance(cobj, gen_base.SWIGObject):
csrc = self.path(cobj.filename)
cout = csrc
# included header files that the generated c file depends on
user_deps = swig_deps[:]
for iobj in self.graph.get_sources(gen_base.DT_SWIG_C, cobj):
isrc = self.path(str(iobj))
if not isinstance(iobj, gen_base.SWIGSource):
user_deps.append(isrc)
continue
cbuild = '%s %s -o %s $(InputPath)' \
% (self.swig_exe, " ".join(swig_options), cout)
cdesc = 'Generating %s' % cout
sources.append(ProjectItem(path=isrc, reldir=None,
custom_build=cbuild,
custom_target=csrc,
custom_desc=cdesc,
user_deps=user_deps,
extension=''))
def_file = self.get_def_file(target)
if def_file is not None:
gsrc = self.path("build/generator/extractor.py")
deps = [self.path('build.conf')]
for header in target.msvc_export:
deps.append(self.path('subversion/include', header))
cbuild = "%s $(InputPath) %s > %s" \
% (self.quote(sys.executable), " ".join(deps), def_file)
cdesc = 'Generating %s ' % def_file
sources.append(ProjectItem(path=gsrc, reldir=None,
custom_build=cbuild,
custom_target=def_file,
custom_desc=cdesc,
user_deps=deps,
extension=''))
sources.append(ProjectItem(path=def_file, reldir=None,
custom_build=None, user_deps=[],
extension=''))
sources.sort(key = lambda x: x.path)
return sources
def get_output_name(self, target):
if isinstance(target, gen_base.TargetExe):
return target.name + '.exe'
elif isinstance(target, gen_base.TargetJava):
### This target file is not actually built, but we need it to keep
### the VC Express build happy.
return target.name
elif isinstance(target, gen_base.TargetApacheMod):
return target.name + '.so'
elif isinstance(target, gen_base.TargetLib):
if target.msvc_static:
return '%s-%d.lib' % (target.name, self.version)
else:
return os.path.basename(target.filename)
elif isinstance(target, gen_base.TargetProject):
### Since this target type doesn't produce any output, we shouldn't
### need to specify an output filename. But to keep the VC.NET template
### happy for now we have to return something
return target.name + '.exe'
elif isinstance(target, gen_base.TargetI18N):
return target.name
def get_output_pdb(self, target):
name = self.get_output_name(target)
name = os.path.splitext(name)
return name[0] + '.pdb'
def get_output_dir(self, target):
if isinstance(target, gen_base.TargetJavaHeaders):
return msvc_path("../" + target.headers)
elif isinstance(target, gen_base.TargetJavaClasses):
return msvc_path("../" + target.classes)
else:
return msvc_path(target.path)
def get_intermediate_dir(self, target):
if isinstance(target, gen_base.TargetSWIG):
return msvc_path_join(msvc_path(target.path), target.name)
else:
return self.get_output_dir(target)
def get_def_file(self, target):
if isinstance(target, gen_base.TargetLib) and target.msvc_export \
and not self.disable_shared:
return target.name + ".def"
return None
def gen_proj_names(self, install_targets):
"Generate project file names for the targets"
# Generate project file names for the targets: replace dashes with
# underscores and replace *-test with test_* (so that the test
# programs are visually separate from the rest of the projects)
for target in install_targets:
if target.msvc_name:
target.proj_name = target.msvc_name
continue
name = target.name
pos = name.find('-test')
if pos >= 0:
proj_name = 'test_' + name[:pos].replace('-', '_')
elif isinstance(target, gen_base.TargetSWIG):
proj_name = 'swig_' + name.replace('-', '_')
else:
proj_name = name.replace('-', '_')
target.proj_name = proj_name
def get_external_project(self, target, proj_ext):
if not ((isinstance(target, gen_base.TargetLinked)
or isinstance(target, gen_base.TargetI18N))
and target.external_project):
return None
if target.external_project[:5] == 'serf/' | |
# source/data_loader.py (from paulguerrero/points2surf)
import os
import os.path
import sys
import torch
import torch.utils.data as data
import numpy as np
import scipy.spatial as spatial
import trimesh
from source.base import mesh_io
from source.base import utils
from source.base import file_utils
from source import sdf
def load_shape(point_filename, imp_surf_query_filename, imp_surf_dist_filename,
query_grid_resolution=None, epsilon=None):
"""
do NOT modify the returned points! kdtree uses a reference, not a copy of these points,
so modifying the points would make the kdtree give incorrect results
:param point_filename:
:param imp_surf_query_filename:
:param imp_surf_dist_filename:
:param query_grid_resolution: if not None: create query points at grid of this resolution
:param epsilon:
:return:
"""
mmap_mode = None
# mmap_mode = 'r'
pts_np = np.load(point_filename + '.npy', mmap_mode=mmap_mode)
if pts_np.shape[1] > 3:
pts_np = pts_np[:, 0:3]
if pts_np.dtype != np.float32:
print('Warning: pts_np must be converted to float32: {}'.format(point_filename))
pts_np = pts_np.astype(np.float32)
# otherwise KDTree construction may run out of recursions
leaf_size = 1000
sys.setrecursionlimit(int(max(1000, round(pts_np.shape[0]/leaf_size))))
kdtree = spatial.cKDTree(pts_np, leaf_size)
if imp_surf_dist_filename is not None:
imp_surf_dist_ms = np.load(imp_surf_dist_filename, mmap_mode=mmap_mode)
if imp_surf_dist_ms.dtype != np.float32:
print('Warning: imp_surf_dist_ms must be converted to float32')
imp_surf_dist_ms = imp_surf_dist_ms.astype(np.float32)
else:
imp_surf_dist_ms = None
if imp_surf_query_filename is not None:
imp_surf_query_point_ms = np.load(imp_surf_query_filename, mmap_mode=mmap_mode)
if imp_surf_query_point_ms.dtype != np.float32:
print('Warning: imp_surf_query_point_ms must be converted to float32')
imp_surf_query_point_ms = imp_surf_query_point_ms.astype(np.float32)
elif query_grid_resolution is not None:
# get query points near at grid nodes near the point cloud
# get patches for those query points
imp_surf_query_point_ms = sdf.get_voxel_centers_grid_smaller_pc(
pts=pts_np, grid_resolution=query_grid_resolution,
distance_threshold_vs=epsilon)
else:
imp_surf_query_point_ms = None
return Shape(
pts=pts_np, kdtree=kdtree,
imp_surf_query_point_ms=imp_surf_query_point_ms, imp_surf_dist_ms=imp_surf_dist_ms)
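# --- Hedged sketch (not part of the original file) ------------------------------
# Typical call for the reconstruction path: no precomputed query points or
# distances on disk, so query points are generated on a grid near the point
# cloud. The dataset path and parameter values are hypothetical.
def _example_load_shape():
    shape = load_shape(
        point_filename='datasets/abc/04_pts/some_shape',  # expects some_shape.npy
        imp_surf_query_filename=None,
        imp_surf_dist_filename=None,
        query_grid_resolution=256,
        epsilon=5,
    )
    # shape.pts holds the (N, 3) point cloud, shape.kdtree indexes it, and
    # shape.imp_surf_query_point_ms holds the generated grid query points
    return shape.pts.shape, shape.imp_surf_query_point_ms.shape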
class SequentialPointcloudPatchSampler(data.sampler.Sampler):
def __init__(self, data_source):
self.data_source = data_source
self.total_patch_count = None
self.total_patch_count = 0
for shape_ind, _ in enumerate(self.data_source.shape_names):
self.total_patch_count = self.total_patch_count + self.data_source.shape_patch_count[shape_ind]
def __iter__(self):
return iter(range(self.total_patch_count))
def __len__(self):
return self.total_patch_count
class SequentialShapeRandomPointcloudPatchSampler(data.sampler.Sampler):
def __init__(self, data_source, patches_per_shape, seed=None, sequential_shapes=False, identical_epochs=False):
self.data_source = data_source
self.patches_per_shape = patches_per_shape
self.sequential_shapes = sequential_shapes
self.seed = seed
self.identical_epochs = identical_epochs
self.total_patch_count = None
self.shape_patch_inds = None
if self.seed is None:
self.seed = np.random.random_integers(0, 2**32-1, 1)[0]
self.rng = np.random.RandomState(self.seed)
self.total_patch_count = 0
for shape_ind, _ in enumerate(self.data_source.shape_names):
self.total_patch_count = self.total_patch_count + \
min(self.patches_per_shape, self.data_source.shape_patch_count[shape_ind])
def __iter__(self):
# optionally always pick the same permutation (mainly for debugging)
if self.identical_epochs:
self.rng.seed(self.seed)
# global point index offset for each shape
shape_patch_offset = list(np.cumsum(self.data_source.shape_patch_count))
shape_patch_offset.insert(0, 0)
shape_patch_offset.pop()
shape_inds = range(len(self.data_source.shape_names))
if not self.sequential_shapes:
shape_inds = self.rng.permutation(shape_inds)
# return a permutation of the points in the dataset
# where all points in the same shape are adjacent (for performance reasons):
# first permute shapes, then concatenate a list of permuted points in each shape
self.shape_patch_inds = [[]]*len(self.data_source.shape_names)
point_permutation = []
for shape_ind in shape_inds:
start = shape_patch_offset[shape_ind]
end = shape_patch_offset[shape_ind]+self.data_source.shape_patch_count[shape_ind]
global_patch_inds = self.rng.choice(range(start, end),
size=min(self.patches_per_shape, end-start), replace=False)
point_permutation.extend(global_patch_inds)
# save indices of shape point subset
self.shape_patch_inds[shape_ind] = global_patch_inds - start
return iter(point_permutation)
def __len__(self):
return self.total_patch_count
class RandomPointcloudPatchSampler(data.sampler.Sampler):
def __init__(self, data_source, patches_per_shape, seed=None, identical_epochs=False):
self.data_source = data_source
self.patches_per_shape = patches_per_shape
self.seed = seed
self.identical_epochs = identical_epochs
self.total_patch_count = None
if self.seed is None:
            self.seed = np.random.randint(0, 2**32 - 1, 1)[0]  # random_integers has been removed from NumPy
self.rng = np.random.RandomState(self.seed)
self.total_patch_count = 0
for shape_ind, _ in enumerate(self.data_source.shape_names):
self.total_patch_count = self.total_patch_count + \
min(self.patches_per_shape, self.data_source.shape_patch_count[shape_ind])
def __iter__(self):
# optionally always pick the same permutation (mainly for debugging)
if self.identical_epochs:
self.rng.seed(self.seed)
return iter(self.rng.choice(
sum(self.data_source.shape_patch_count), size=self.total_patch_count, replace=False))
def __len__(self):
return self.total_patch_count
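# Usage sketch (added for illustration; the helper name and defaults are assumptions, not
# part of the original module): the samplers above are meant to be passed to a torch
# DataLoader together with the PointcloudPatchDataset defined below.
def make_patch_loader(dataset, patches_per_shape=1000, batch_size=64, seed=42):
    # draw at most `patches_per_shape` random patches per shape as one global permutation
    sampler = RandomPointcloudPatchSampler(
        dataset, patches_per_shape=patches_per_shape, seed=seed, identical_epochs=False)
    return data.DataLoader(dataset, sampler=sampler, batch_size=batch_size, num_workers=0)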
class Shape:
def __init__(self, pts, kdtree,
imp_surf_query_point_ms, imp_surf_dist_ms):
self.pts = pts
self.kdtree = kdtree
self.imp_surf_query_point_ms = imp_surf_query_point_ms
self.imp_surf_dist_ms = imp_surf_dist_ms
class Cache:
def __init__(self, capacity, loader, loadfunc):
self.elements = {}
self.used_at = {}
self.capacity = capacity
self.loader = loader
self.loadfunc = loadfunc
self.counter = 0
def get(self, element_id):
if element_id not in self.elements:
# cache miss
# if at capacity, throw out least recently used item
if len(self.elements) >= self.capacity:
remove_id = min(self.used_at, key=self.used_at.get)
del self.elements[remove_id]
del self.used_at[remove_id]
# load element
self.elements[element_id] = self.loadfunc(self.loader, element_id)
self.used_at[element_id] = self.counter
self.counter += 1
return self.elements[element_id]
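# Usage sketch for the LRU-style Cache above (illustrative only; the helper name is an
# assumption). loadfunc is called as loadfunc(loader, element_id) on a miss, and the least
# recently used entry is evicted once capacity is reached.
def _cache_usage_example(dataset):
    cache = Cache(capacity=2, loader=dataset,
                  loadfunc=PointcloudPatchDataset.load_shape_by_index)
    first = cache.get(0)    # miss: loads shape 0 via load_shape_by_index
    again = cache.get(0)    # hit: same Shape object, no reload
    cache.get(1)            # miss: loads shape 1
    cache.get(2)            # miss: capacity reached, evicts the least recently used id
    return first is again   # True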
class PointcloudPatchDataset(data.Dataset):
def __init__(self, root, shape_list_filename, points_per_patch, patch_radius, patch_features, epsilon,
seed=None, identical_epochs=False, center='point',
cache_capacity=1, point_count_std=0.0,
pre_processed_patches=False, query_grid_resolution=None,
sub_sample_size=500, reconstruction=False, uniform_subsample=False, fixed_subsample=False,
num_workers=1):
# initialize parameters
self.root = root
self.shape_list_filename = shape_list_filename
self.patch_features = patch_features
self.points_per_patch = points_per_patch
self.patch_radius = patch_radius
self.identical_epochs = identical_epochs
self.pre_processed_patches = pre_processed_patches
self.center = center
self.point_count_std = point_count_std
self.seed = seed
self.query_grid_resolution = query_grid_resolution
self.sub_sample_size = sub_sample_size
self.reconstruction = reconstruction
self.num_workers = num_workers
self.epsilon = epsilon
self.uniform_subsample = uniform_subsample
self.fixed_subsample = fixed_subsample
self.include_connectivity = False
self.include_imp_surf = False
self.include_p_index = False
self.include_patch_pts_ids = False
for pfeat in self.patch_features:
if pfeat == 'imp_surf':
self.include_imp_surf = True
elif pfeat == 'imp_surf_magnitude':
self.include_imp_surf = True
elif pfeat == 'imp_surf_sign':
self.include_imp_surf = True
elif pfeat == 'p_index':
self.include_p_index = True
elif pfeat == 'patch_pts_ids':
self.include_patch_pts_ids = True
else:
raise ValueError('Unknown patch feature: %s' % pfeat)
self.shape_cache = Cache(cache_capacity, self, PointcloudPatchDataset.load_shape_by_index)
# get all shape names in the dataset
self.shape_names = []
with open(os.path.join(root, self.shape_list_filename)) as f:
self.shape_names = f.readlines()
self.shape_names = [x.strip() for x in self.shape_names]
self.shape_names = list(filter(None, self.shape_names))
# initialize rng for picking points in the local subsample of a patch
if self.seed is None:
            self.seed = np.random.randint(0, 2**32 - 1, 1)[0]  # random_integers has been removed from NumPy
self.rng = np.random.RandomState(self.seed)
# initialize rng for picking points in the global subsample of a patch
if self.seed is None:
            self.seed = np.random.randint(0, 2**32 - 1, 1)[0]  # random_integers has been removed from NumPy
self.rng_global_sample = np.random.RandomState(self.seed)
# get basic information for each shape in the dataset
self.shape_patch_count = []
print('getting information for {} shapes'.format(len(self.shape_names)))
for shape_ind, shape_name in enumerate(self.shape_names):
# print('getting information for shape %s' % shape_name)
def load_pts():
# load from text file and save in more efficient numpy format
point_filename = os.path.join(self.root, '04_pts', shape_name + '.xyz')
if os.path.isfile(point_filename) or os.path.isfile(point_filename + '.npy'):
pts = file_utils.load_npy_if_valid(point_filename, 'float32', mmap_mode='r')
if pts.shape[1] > 3:
pts = pts[:, 0:3]
else: # if no .xyz file, try .off and discard connectivity
                    mesh_filename = os.path.join(self.root, '04_pts', shape_name + '.off')
pts, _ = mesh_io.load_mesh(mesh_filename)
np.savetxt(fname=os.path.join(self.root, '04_pts', shape_name+'.xyz'), X=pts)
np.save(os.path.join(self.root, '04_pts', shape_name+'.xyz.npy'), pts)
return pts
if self.include_imp_surf:
if self.reconstruction:
# get number of grid points near the point cloud
pts = load_pts()
grid_pts_near_surf_ms = \
sdf.get_voxel_centers_grid_smaller_pc(
pts=pts, grid_resolution=query_grid_resolution, distance_threshold_vs=self.epsilon)
self.shape_patch_count.append(grid_pts_near_surf_ms.shape[0])
# un-comment to get a debug output for the necessary query points
# mesh_io.write_off('debug/{}'.format(shape_name + '.off'), grid_pts_near_surf_ms, [])
# self.shape_patch_count.append(query_grid_resolution ** 3) # full grid
else:
query_dist_filename = os.path.join(self.root, '05_query_pts', shape_name + '.ply.npy')
query_dist = np.load(query_dist_filename)
self.shape_patch_count.append(query_dist.shape[0])
else:
pts = load_pts()
self.shape_patch_count.append(pts.shape[0])
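    # Hedged sketch (assumption): __getitem__ below calls self.shape_index, which is not
    # part of this excerpt. It maps a global patch index to (shape_ind, patch_ind) using
    # the per-shape patch counts collected above, roughly like:
    #     def shape_index(self, index):
    #         offsets = np.cumsum([0] + list(self.shape_patch_count))
    #         shape_ind = int(np.searchsorted(offsets, index, side='right')) - 1
    #         return shape_ind, int(index - offsets[shape_ind])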
    # returns a patch centered at the point with the given global index
    # and the ground-truth normal at the patch center
def __getitem__(self, index):
# find shape that contains the point with given global index
shape_ind, patch_ind = self.shape_index(index)
def get_patch_points(shape, query_point):
from source.base import point_cloud
# optionally always pick the same points for a given patch index (mainly for debugging)
if self.identical_epochs:
self.rng.seed((self.seed + index) % (2**32))
patch_pts_ids = point_cloud.get_patch_kdtree(
kdtree=shape.kdtree, rng=self.rng, query_point=query_point,
patch_radius=self.patch_radius,
points_per_patch=self.points_per_patch, n_jobs=1)
# find -1 ids for padding
patch_pts_pad_ids = patch_pts_ids == -1
patch_pts_ids[patch_pts_pad_ids] = 0
pts_patch_ms = shape.pts[patch_pts_ids, :]
            # replace padding points with the query point so that they sit at the patch origin
pts_patch_ms[patch_pts_pad_ids, :] = query_point
patch_radius_ms = utils.get_patch_radii(pts_patch_ms, query_point)\
if self.patch_radius <= 0.0 else self.patch_radius
pts_patch_ps = utils.model_space_to_patch_space(
pts_to_convert_ms=pts_patch_ms, pts_patch_center_ms=query_point,
patch_radius_ms=patch_radius_ms)
return patch_pts_ids, pts_patch_ps, pts_patch_ms, patch_radius_ms
shape = self.shape_cache.get(shape_ind)
imp_surf_query_point_ms = shape.imp_surf_query_point_ms[patch_ind]
# get neighboring points
patch_pts_ids, patch_pts_ps, pts_patch_ms, patch_radius_ms = \
get_patch_points(shape=shape, query_point=imp_surf_query_point_ms)
imp_surf_query_point_ps = utils.model_space_to_patch_space_single_point(
imp_surf_query_point_ms, imp_surf_query_point_ms, patch_radius_ms)
        # imp_surf_dist can be missing during reconstruction because there is no ground truth,
        # but PyTorch needs a numeric placeholder when assembling the batch
if self.reconstruction:
imp_surf_dist_ms = np.array([np.inf])
imp_surf_dist_sign_ms = np.array([np.inf])
else:
imp_surf_dist_ms = shape.imp_surf_dist_ms[patch_ind]
imp_surf_dist_sign_ms = np.sign(imp_surf_dist_ms)
imp_surf_dist_sign_ms = 0.0 if imp_surf_dist_sign_ms < 0.0 else 1.0
if self.sub_sample_size > 0:
pts_sub_sample_ms = utils.get_point_cloud_sub_sample(
sub_sample_size=self.sub_sample_size, pts_ms=shape.pts,
query_point_ms=imp_surf_query_point_ms, rng=self.rng_global_sample,
uniform=self.uniform_subsample, fixed=self.fixed_subsample)
else:
pts_sub_sample_ms = np.array([], dtype=np.float32)
if not self.reconstruction:
            import trimesh.transformations as trafo
            # random rotation of shape and patch as data augmentation
            rand_rot = trafo.random_rotation_matrix(self.rng.rand(3))
            # rand_rot = trafo.identity_matrix()
pts_sub_sample_ms = \
trafo.transform_points(pts_sub_sample_ms, rand_rot).astype(np.float32)
patch_pts_ps = \
trafo.transform_points(patch_pts_ps, rand_rot).astype(np.float32)
imp_surf_query_point_ms = \
trafo.transform_points(np.expand_dims(imp_surf_query_point_ms, 0), rand_rot)[0].astype(np.float32)
imp_surf_query_point_ps = \
trafo.transform_points(np.expand_dims(imp_surf_query_point_ps, 0), rand_rot)[0].astype(np.float32)
patch_data = dict()
# create new arrays to close the memory mapped files
patch_data['patch_pts_ps'] = patch_pts_ps
patch_data['patch_radius_ms'] = np.array(patch_radius_ms, dtype=np.float32)
patch_data['pts_sub_sample_ms'] = pts_sub_sample_ms
from tkinter import *
from tkinter import Tk, ttk, messagebox, simpledialog
from classes import readFile
from random import randint
objList = []
tournamentList = []
gender = ""
read = readFile.ReadFile()
json = read.readPrize()
ranking = read.readRankingPoints()
tournamentList = read.readTournement()
class App:
def __init__(self, master):
self.master = master
self.homeLayout(master)
#Home tab layout
def homeLayout(self, root):
t = StringVar()
self.label0 = self.createLabel('label0', "DADSAA Tournement System.", 0, 0, tab0)
self.label1 = self.createLabel('label1', "Enter Tournament Data.", 2, 0, tab0)
self.label1.grid(pady=(20,20))
self.enter_button1 = self.createButton('enter_button1', 'Enter', 2, 1, tab0)
self.enter_button1['command'] = lambda: self.choice()
self.label2 = self.createLabel('label2', "Merge and sort match data", 3, 0, tab0)
self.label2.grid(pady=(20,20))
self.enter_button2 = self.createButton('enter_button2', 'Enter', 3, 1, tab0)
self.enter_button2['command'] = lambda: self.merge()
self.statsLabel = self.createLabel('statsLabel', "Player Statistics Page", 4, 0, tab0)
self.statsLabel.grid(pady=(20,20))
self.enter_button3 = self.createButton('close_button0', 'Enter', 4, 1, tab0)
self.enter_button3['command'] = lambda: self.getPlayerStats()
self.label3 = self.createLabel('label3', "Exit", 5, 0, tab0)
self.label3.grid(pady=(20,20))
self.close_button0 = self.createButton('close_button0', 'Close', 5, 1, tab0)
self.close_button0['command'] = lambda: root.quit()
self.label4 = self.createLabel('label4', "Instructions:", 6, 0, tab0)
self.label4.grid(pady=(20,20))
self.label5 = Label(tab0, textvariable=t)
self.label5.grid(row=7, column=0)
text = """0. All output files will be .csv and save files winners files have the format:
gender, Tournamenet or statistic (leaderboard or save File) .csv\n
1. Player name file can be called anything but must contain the word 'PLAYERS' and be in .csv format (comma seperated values).\n
2. Matches can also be called anything but must have the format of
Player A set Player B set. Also be in the format of .csv.\n
3. Manual input must be in the same format of Player A set Player B set.\n
4. In order to find specific player statistics, the user must have completed 1 tournament minimum.\n
5. In order to find season specific player statistics the user must have completed the season of 4 tournaments
and procceed to merge both the save files and statistics into 1 file retrospectivly.\n
6. In order to start season 2 As stated in 5. season 1 needs to have been completed and merged in order to move on."""
t.set(text)
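    # Hedged sketch (assumption; the real helpers are defined elsewhere in this class and
    # are not shown in this excerpt): createLabel/createButton/createRadio are assumed to
    # build a widget, grid it at (row, column) on the given tab, and return it, roughly like:
    #     def createLabel(self, name, text, row, column, tab):
    #         label = Label(tab, text=text)
    #         label.grid(row=row, column=column)
    #         return label
    #     def createButton(self, name, text, row, column, tab):
    #         button = Button(tab, text=text)
    #         button.grid(row=row, column=column)
    #         return button
    #     def createRadio(self, name, text, variable, value, row, column, tab):
    #         radio = Radiobutton(tab, text=text, variable=variable, value=value)
    #         radio.grid(row=row, column=column)
    #         return radio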
#defines the choices for the choice input tab
def choice(self):
v = StringVar()
y = StringVar()
z = StringVar()
a = StringVar()
z.set("season1")
a.set("TAC1")
v.set("male")
y.set("new")
self.enableTab(0,1)
self.label6 = self.createLabel('label6', "Data Entry:", 0, 0, tab1)
self.label6.grid(padx=(350,0))
self.label7 = self.createLabel('label7', "Select Gender:", 2, 0, tab1)
self.label7.grid(pady=(20,20))
self.r0 = self.createRadio('r0',"Male", v, "male", 2, 1, tab1)
self.r1 = self.createRadio('r1',"Female", v, "female", 2, 2, tab1)
self.label8 = self.createLabel('label8', "Continue from save file?", 4, 0, tab1)
self.r2 = self.createRadio('r2',"Yes", y, "save", 4, 1, tab1)
self.r3 = self.createRadio('r3',"No", y, "new", 4, 2, tab1)
self.r3.grid(pady=(20,20))
self.label9 = self.createLabel('label9', "Please select a Season:", 5,0,tab1)
self.r4 = self.createRadio('r4',"Season 1", z, "season1", 5, 1, tab1)
self.r5 = self.createRadio('r5',"Season 2", z, "season2", 5, 2, tab1)
self.r5.grid(pady=(20,20))
        self.label10 = self.createLabel('label10', "Please select a Tournament:", 6,0,tab1)
self.r6 = self.createRadio('r6',"TAC1", a, "TAC1", 6, 1, tab1)
self.r7 = self.createRadio('r7',"TAE21", a, "TAE21", 6, 2, tab1)
self.r8 = self.createRadio('r8',"TAW11", a, "TAW11", 6, 3, tab1)
self.r9 = self.createRadio('r9',"TBS2", a, "TBS2", 6, 4, tab1)
self.r9.grid(pady=(20,20))
self.button1 = self.createButton('button1', "Submit", 9, 0, tab1)
self.button1.grid(pady=(20,20))
self.button1['command'] = lambda: self.dataEntry(v, y, a, z)
self.back_button0 = self.createButton('back_button0', "Back", 9,1,tab1)
        self.back_button0['command'] = lambda: self.enableTab(1,0)
#returns objlist
def dataEntry(self, v, y, a, z):
gender = v.get()
saveFile = y.get()
tournament = a.get()
season = z.get()
messagebox.showinfo("Player Selection", "select a file containing the 32 player Names e.g. MALE PLAYERS and is located within the tournament files folder")
try:
objList = read.readPlayerNames(gender)
            if len(objList) == 32 and ("MP" in objList[0].name or "FP" in objList[0].name):
if season == 'season1':
self.modeInput(season,tournament, saveFile, objList, gender, "")
else:
messagebox.showinfo("Statistics File Selection", "Please select the Master stat file to read in for a season. e.g male season1 Master stat.csv")
fileName = read.file()
statFile = read.statFile(fileName)
read.getStats(statFile, objList)
for n in objList:
n.season['TAC1'] = n.TAC1
n.season['TAW11'] = n.TAW11
n.season['TAE21'] = n.TAE21
n.season['TBS2'] = n.TBS2
n.TBS2 = []
n.TAW11 = []
n.TAC1 = []
n.TAE21 = []
self.modeInput(season,tournament, saveFile, objList, gender, "seedings")
else:
messagebox.showinfo("Error", "Incorrect player name file.\n Redirecting... Please try again")
self.enableTab(1,0)
#error message and backbutton
except TypeError as e:
messagebox.showerror("Error", "Incorrect player name file.\n Redirecting... Please try again")
self.enableTab(1,0)
except PermissionError as e:
messagebox.showerror("Error", "File is open please shut the file and try again.")
self.enableTab(1,0)
except FileNotFoundError as p:
messagebox.showerror("Error", "File is Not found Please try again.")
self.enableTab(1,0)
except IndexError as g:
messagebox.showerror("Error", "Wrong File loaded or the file does not contain the correct statistics, Please try again.")
self.enableTab(1,0)
    #defines the tournament mode choices
def modeInput(self, season,tournament, save, objList, gender, mode):
self.rounds = 1
self.usedSave = 0
self.enableTab(1,2)
#tab title label
self.label11 = self.createLabel('label11', 'Mode Selection Menu', 0,1, tab2)
self.label11.grid(padx=(200, 0), pady=(20,20))
self.submitButton = self.createButton('submitButton', "", 1,0, tab2)
self.submitButton.grid(padx=(20,20))
self.enter_button3 = self.createButton('enter_button3', "",1,1, tab2)
self.enter_button3.grid(padx=(20,20))
self.stat_button1 = self.createButton('stat_button1', "",1,2, tab2)
self.stat_button1.grid(padx=(20,20))
#returns the difficulty
diff = read.getTournamentDiff(tournament, tournamentList)
#gets the prizes based on the season
prizeList = read.getPrize(json, tournament)
if save == "save":
messagebox.showinfo("Save File", "Please select a save file. e.g male season1 TAC1 Round1 save File.csv located within the Save Files folder")
try:
saveFile = read.file()
statFile = read.statFile(saveFile)
self.rounds = read.getRoundNumber(tournament, saveFile)
read.getScores(saveFile, objList)
read.getStats(statFile, objList)
self.usedSave = 1
file = gender + ".csv has been loaded"
tour = season + " " + tournament + " Has been selected and the loaded save file round is currently at: " + str(self.rounds + self.usedSave)
messagebox.showinfo("Notice", file + "\r" + tour)
except FileNotFoundError as f:
messagebox.showerror("Error", "StatFile Does not exist\rPlease load in the file or manual input first before loading saves.")
self.enableTab(2,1)
except IndexError as d:
messagebox.showerror("Error", "Incorrect file selected, Redirecting back.")
self.enableTab(2,1)
except PermissionError as e:
messagebox.showerror("Error", "File open, Redirecting back.")
self.enableTab(2,1)
else:
file = gender + ".csv has been loaded"
tour = season + " " + tournament + " Has been selected"
messagebox.showinfo("Notice", file + "\r" + tour)
if mode == "":
if self.rounds == 5:
self.postMatch(objList, ranking, prizeList, gender, self.rounds, season, tournament, diff, "")
self.submitButton['text'] = "Read Files"
self.submitButton['command'] = lambda: self.matchSetupReadFile(objList, ranking, prizeList, gender, season, tournament, diff)
self.enter_button3['text'] = "Manual Input"
self.enter_button3['command'] = lambda: self.matchSetupManual(objList, ranking, prizeList, gender, season, tournament, diff)
self.stat_button1['text'] = "Player Statistics"
self.stat_button1['command'] = lambda: self.statistics(objList, 0, tournament)
else:
if self.rounds == 5:
self.postMatch(objList, ranking, prizeList, gender, self.rounds, season, tournament, diff, "")
self.submitButton.destroy()
self.enter_button3['text'] = "Continue"
self.enter_button3['command'] = lambda: self.getSeedings(objList, ranking, prizeList, gender, self.rounds, season, tournament, diff)
self.stat_button1['text'] = "Player Statistics"
self.stat_button1['command'] = lambda: self.statistics(objList, 0, tournament)
#read file option match setup and output/file make
def matchSetupReadFile(self, objList, ranking, prizeList, gender, season, tournament, diff):
tempList = []
g = IntVar()
g.set(1)
self.enableTab(2,5)
messagebox.showinfo("Match Information", "Please select the round " + str(self.rounds + self.usedSave) + " " + tournament + " file.")
mMatchFile = read.file()
if mMatchFile != "":
try:
#read the match file.
mMatches = []
#open the file and read the first round matches
with open(mMatchFile, "r") as m:
next(m)
for line in m:
mMatches = line.split(",")
                        #take a copy of the matches e.g. MP01 2 MP02 3 and add them to a list to count their length
tempList.append(mMatches[0] + " " + mMatches[1] + " " + mMatches[2] + " " + mMatches[3])
m.close()
self.enableTab(5,3)
self.preMatch(objList, ranking, prizeList, gender, season, tournament, diff, tempList, "")
#if the file is open already and then is trying to be accessed from here
except PermissionError as e:
messagebox.showinfo("File error", "File is open, please close the file containing the scores.")
#if the file is not the correct file.
except IndexError as d:
messagebox.showinfo("File error", "Incorrect file selected.\rPlease select the correct file and try again.")
else:
messagebox.showinfo("File error", "No File selected please select another and try again.")
self.enableTab(5,2)
#defines the matches manual input mode.
def matchSetupManual(self, objList, ranking, prizeList, gender, season, tournament, diff):
tempList = []
messagebox.showinfo("Match data input", "Please enter the matches for the round " + str(self.rounds + self.usedSave) + " " + tournament)
self.enableTab(2,4)
self.titleLabel = self.createLabel('titleLabel', "Enter the matches as e.g: MP01 | |
kwargs}}
def model_HistGradientBoostingRegressor(self, **kwargs):
### https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.HistGradientBoostingRegressor.html
        # TODO: HPO not converging
self.path = "sklearn.ensemble.HistGradientBoostingRegressor"
self.param_space = [
Real(low=0.0001, high=0.9, prior='log', name='learning_rate', num_samples=self.num_samples), # Used for reducing the gradient step.
Integer(low=50, high=500, name='max_iter', num_samples=self.num_samples), # maximum number of trees.
            Integer(low=2, high=100, name='max_depth', num_samples=self.num_samples),  # maximum depth of each tree.
Integer(low=10, high=100, name='max_leaf_nodes', num_samples=self.num_samples), # maximum number of leaves for each tree
Integer(low=10, high=100, name='min_samples_leaf', num_samples=self.num_samples), # minimum number of samples per leaf
            Real(low=0.0, high=0.5, name='l2_regularization', num_samples=self.num_samples),  # L2 regularization penalty.
]
self.x0 = [0.1, 100, 10, 31, 20, 0.0]
return {'model': {'HISTGRADIENTBOOSTINGREGRESSOR':kwargs}}
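    # Illustrative sketch (an assumption, not the library's actual optimization loop): each
    # model_* method pairs `param_space` (one dimension per hyperparameter) with `x0`
    # (one starting value per dimension, in the same order). For the method above:
    #     names = [d.name for d in self.param_space]
    #     start = dict(zip(names, self.x0))
    #     # -> {'learning_rate': 0.1, 'max_iter': 100, 'max_depth': 10,
    #     #     'max_leaf_nodes': 31, 'min_samples_leaf': 20, 'l2_regularization': 0.0}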
def model_HuberRegressor(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.HuberRegressor.html
self.path = "sklearn.linear_model.HuberRegressor"
self.param_space = [
Real(low=1.0, high=5.0, name='epsilon', num_samples=self.num_samples),
Integer(low=50, high=500, name='max_iter', num_samples=self.num_samples),
Real(low=1e-5, high=1e-2, name='alpha', num_samples=self.num_samples),
Categorical(categories=[True, False], name='fit_intercept')
]
self.x0 = [2.0, 50, 1e-5, False]
return {'model': {'HUBERREGRESSOR': kwargs}}
def model_KernelRidge(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.kernel_ridge.KernelRidge.html
self.path = "sklearn.kernel_ridge.KernelRidge"
self.param_space = [
Real(low=1.0, high=5.0, name='alpha', num_samples=self.num_samples)
            # Categorical(categories=['poly', 'linear'], name='kernel')
]
self.x0 = [1.0] #, 'linear']
return {'model': {'KernelRidge': kwargs}}
def model_KNeighborsRegressor(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html
self.path = "sklearn.neighbors.KNeighborsRegressor"
# todo, what if data is customized in build_and_run?
if hasattr(self.ai4water_model, 'config'):
train_frac = self.ai4water_model.config['train_fraction']
else:
train_frac = self.model_kws.get('train_fraction', 0.2)
        train_data_length = int(train_frac * len(self.data))
self.param_space = [
Integer(low=3, high=train_data_length, name='n_neighbors', num_samples=self.num_samples),
Categorical(categories=['uniform', 'distance'], name='weights'),
Categorical(categories=['auto', 'ball_tree', 'kd_tree', 'brute'], name='algorithm'),
Integer(low=10, high=100, name='leaf_size', num_samples=self.num_samples),
Integer(low=1, high=5, name='p', num_samples=self.num_samples)
]
self.x0 = [5, 'uniform', 'auto', 30, 2]
return {'model': {'KNEIGHBORSREGRESSOR': kwargs}}
def model_LassoLars(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LassoLars.html
self.path = "sklearn.linear_model.LassoLars"
self.param_space = [
Real(low=1.0, high=5.0, name='alpha', num_samples=self.num_samples),
Categorical(categories=[True, False], name='fit_intercept')
]
self.x0 = [1.0, False]
return {'model': {'LassoLars': kwargs}}
def model_Lars(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Lars.html
self.path = "sklearn.linear_model.Lars"
self.param_space = [
Categorical(categories=[True, False], name='fit_intercept'),
Integer(low=100, high=1000, name='n_nonzero_coefs', num_samples=self.num_samples)
]
self.x0 = [True, 100]
return {'model': {'Lars': kwargs}}
def model_LarsCV(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LarsCV.html
self.path = "sklearn.linear_model.LarsCV"
self.param_space = [
Categorical(categories=[True, False], name='fit_intercept'),
Integer(low=100, high=1000, name='max_iter', num_samples=self.num_samples),
Integer(low=100, high=5000, name='max_n_alphas', num_samples=self.num_samples)
]
self.x0 = [True, 500, 1000]
return {'model': {'LarsCV': kwargs}}
def model_LinearSVR(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVR.html
self.path = "sklearn.svm.LinearSVR"
self.param_space = [
Real(low=1.0, high=5.0, name='C', num_samples=self.num_samples),
Real(low=0.01, high=0.9, name='epsilon', num_samples=self.num_samples),
Real(low=1e-5, high=1e-1, name='tol', num_samples=self.num_samples),
Categorical(categories=[True, False], name='fit_intercept')
]
self.x0 = [1.0, 0.01, 1e-5, True]
return {'model': {'LinearSVR': kwargs}}
def model_Lasso(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Lasso.html
self.path = "sklearn.linear_model.Lasso"
self.param_space = [
Real(low=1.0, high=5.0, name='alpha', num_samples=self.num_samples),
Categorical(categories=[True, False], name='fit_intercept'),
Real(low=1e-5, high=1e-1, name='tol', num_samples=self.num_samples)
]
self.x0 = [1.0, True, 1e-5]
return {'model': {'Lasso': kwargs}}
def model_LassoCV(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LassoCV.html
self.path = "sklearn.linear_model.LassoCV"
self.param_space = [
Real(low=1e-5, high=1e-2, name='eps', num_samples=self.num_samples),
Integer(low=10, high=1000, name='n_alphas', num_samples=self.num_samples),
Categorical(categories=[True, False], name='fit_intercept'),
Integer(low=500, high=5000, name='max_iter', num_samples=self.num_samples)
]
self.x0 = [1e-3, 100, True, 1000]
return {'model': {'LassoCV': kwargs}}
def model_LassoLarsCV(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LassoLarsCV.html
self.path = "sklearn.linear_model.LassoLarsCV"
self.param_space = [
Categorical(categories=[True, False], name='fit_intercept'),
Integer(low=500, high=5000, name='max_n_alphas', num_samples=self.num_samples)
]
self.x0 = [True, 1000]
return {'model': {'LassoLarsCV': kwargs}}
def model_LassoLarsIC(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LassoLarsIC.html
self.path = "sklearn.linear_model.LassoLarsIC"
self.param_space = [
Categorical(categories=[True, False], name='fit_intercept'),
Categorical(categories=['bic', 'aic'], name='criterion')
]
self.x0 = [True, 'bic']
return {'model': {'LassoLarsIC': kwargs}}
def model_LGBMRegressor(self, **kwargs):
## https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html
self.path = "lightgbm.LGBMRegressor"
self.param_space = [
Categorical(categories=['gbdt', 'dart', 'goss'], name='boosting_type'), # todo, during optimization not working with 'rf'
Integer(low=10, high=200, name='num_leaves', num_samples=self.num_samples),
Real(low=0.0001, high=0.1, name='learning_rate', prior='log', num_samples=self.num_samples),
Integer(low=20, high=500, name='n_estimators', num_samples=self.num_samples)
]
self.x0 = ['gbdt', 31, 0.1, 100]
return {'model': {'LGBMREGRESSOR': kwargs}}
def model_LinearRegression(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html
self.path = "sklearn.linear_model.LinearRegression"
self.param_space = [
Categorical(categories=[True, False], name='fit_intercept')
]
self.x0 = [True]
return {'model': {'LINEARREGRESSION': kwargs}}
def model_MLPRegressor(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPRegressor.html
self.path = "sklearn.neural_network.MLPRegressor"
self.param_space = [
Integer(low=10, high=500, name='hidden_layer_sizes', num_samples=self.num_samples),
Categorical(categories=['identity', 'logistic', 'tanh', 'relu'], name='activation'),
Categorical(categories=['lbfgs', 'sgd', 'adam'], name='solver'),
Real(low=1e-6, high=1e-3, name='alpha', num_samples=self.num_samples),
# Real(low=1e-6, high=1e-3, name='learning_rate')
Categorical(categories=['constant', 'invscaling', 'adaptive'], name='learning_rate'),
]
self.x0 = [10, 'relu', 'adam', 1e-6, 'constant']
return {'model': {'MLPREGRESSOR': kwargs}}
def model_NuSVR(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.svm.NuSVR.html
self.path = "sklearn.svm.NuSVR"
self.param_space = [
Real(low=0.5,high=0.9, name='nu', num_samples=self.num_samples),
Real(low=1.0, high=5.0, name='C', num_samples=self.num_samples),
Categorical(categories=['linear', 'poly', 'rbf', 'sigmoid'], name='kernel')
]
self.x0 = [0.5, 1.0, 'sigmoid']
return {'model': {'NuSVR': kwargs}}
def model_OrthogonalMatchingPursuit(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.OrthogonalMatchingPursuit.html
self.path = "sklearn.linear_model.OrthogonalMatchingPursuit"
self.param_space = [
Categorical(categories=[True, False], name='fit_intercept'),
Real(low=0.1, high=10, name='tol', num_samples=self.num_samples)
]
self.x0 = [True, 0.1]
return {'model': {'OrthogonalMatchingPursuit': kwargs}}
def model_OrthogonalMatchingPursuitCV(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.OrthogonalMatchingPursuitCV.html
self.path = "sklearn.linear_model.OrthogonalMatchingPursuitCV"
self.param_space = [
# Integer(low=10, high=100, name='max_iter'),
Categorical(categories=[True, False], name='fit_intercept')
]
self.x0 = [ # 50,
True]
return {'model': {'OrthogonalMatchingPursuitCV': kwargs}}
def model_OneClassSVM(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.svm.OneClassSVM.html
self.path = "sklearn.svm.OneClassSVM"
self.param_space = [
Categorical(categories=['linear', 'poly', 'rbf', 'sigmoid', 'precomputed'], name='kernel'),
Real(low=0.1, high=0.9, name='nu', num_samples=self.num_samples),
Categorical(categories=[True, False], name='shrinking'),
]
self.x0 = ['rbf', 0.1, True]
return {'model': {'ONECLASSSVM': kwargs}}
def model_PoissonRegressor(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.PoissonRegressor.html
self.path = "sklearn.linear_model.PoissonRegressor"
self.param_space = [
Real(low=0.0, high=1.0, name='alpha', num_samples=self.num_samples),
#Categorical(categories=[True, False], name='fit_intercept'),
Integer(low=50, high=500, name='max_iter', num_samples=self.num_samples),
]
self.x0 = [0.5, 100]
return {'model': {'POISSONREGRESSOR': kwargs}}
def model_Ridge(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html
self.path = "sklearn.linear_model.Ridge"
self.param_space = [
Real(low=0.0, high=3.0, name='alpha', num_samples=self.num_samples),
Categorical(categories=[True, False], name='fit_intercept'),
Categorical(categories=['auto', 'svd', 'cholesky', 'saga'], name='solver'),
]
self.x0 = [1.0, True, 'auto']
return {'model': {'Ridge': kwargs}}
def model_RidgeCV(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeCV.html
self.path = "sklearn.linear_model.RidgeCV"
self.param_space = [
Categorical(categories=[True, False], name='fit_intercept'),
Categorical(categories=['auto', 'svd', 'eigen'], name='gcv_mode'),
]
self.x0 = [True, 'auto']
return {'model': {'RidgeCV': kwargs}}
def model_RadiusNeighborsRegressor(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.RadiusNeighborsRegressor.html
self.path = "sklearn.neighbors.RadiusNeighborsRegressor"
self.param_space = [
Categorical(categories=['uniform', 'distance'], name='weights'),
Categorical(categories=['auto', 'ball_tree', 'kd_tree', 'brute'], name='algorithm'),
Integer(low=10, high=300, name='leaf_size', num_samples=self.num_samples),
Integer(low=1,high=5, name='p', num_samples=self.num_samples)
]
self.x0 = ['uniform', 'auto', 10, 1]
return {'model': {'RADIUSNEIGHBORSREGRESSOR': kwargs}}
def model_RANSACRegressor(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RANSACRegressor.html
self.path = "sklearn.linear_model.RANSACRegressor"
self.param_space = [
Integer(low=10, high=1000, name='max_trials'),
Real(low=0.01, high=0.99, name='min_samples', num_samples=self.num_samples)
]
self.x0 = [10, 0.01]
return {'model': {'RANSACREGRESSOR': kwargs}}
def model_RandomForestRegressor(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html
self.path = "sklearn.ensemble.RandomForestRegressor"
self.param_space = [
Integer(low=5, high=50, name='n_estimators', num_samples=self.num_samples),
Integer(low=3, high=30, name='max_depth', num_samples=self.num_samples),
Real(low=0.1, high=0.5, name='min_samples_split', num_samples=self.num_samples),
# Real(low=0.1, high=1.0, name='min_samples_leaf'),
Real(low=0.0, high=0.5, name='min_weight_fraction_leaf', num_samples=self.num_samples),
Categorical(categories=['auto', 'sqrt', 'log2'], name='max_features')
]
self.x0 = [10, 5, 0.4, # 0.2,
0.1, 'auto']
return {'model': {'RANDOMFORESTREGRESSOR': kwargs}}
def model_SVR(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVR.html
self.path = "sklearn.svm.SVR"
self.param_space = [
# https://stackoverflow.com/questions/60015497/valueerror-precomputed-matrix-must-be-a-square-matrix-input-is-a-500x29243-mat
Categorical(categories=['linear', 'poly', 'rbf', 'sigmoid'], name='kernel'), # todo, optimization not working with 'precomputed'
Real(low=1.0, high=5.0, name='C', num_samples=self.num_samples),
Real(low=0.01, high=0.9, name='epsilon', num_samples=self.num_samples)
]
self.x0 = ['rbf',1.0, 0.01]
return {'model': {'SVR': kwargs}}
def model_SGDRegressor(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDRegressor.html
self.path = "sklearn.linear_model.SGDRegressor"
self.param_space = [
Categorical(categories=['l1', 'l2', 'elasticnet'], name='penalty'),
Real(low=0.01, high=1.0, name='alpha', num_samples=self.num_samples),
Categorical(categories=[True, False], name='fit_intercept'),
Integer(low=500, high=5000, name='max_iter', num_samples=self.num_samples),
Categorical(categories=['constant', 'optimal', 'invscaling', 'adaptive'], name='learning_rate')
]
self.x0 = ['l2', 0.1, True, 1000, 'invscaling']
return {'model': {'SGDREGRESSOR': kwargs}}
# def model_TransformedTargetRegressor(self, **kwargs):
# ## https://scikit-learn.org/stable/modules/generated/sklearn.compose.TransformedTargetRegressor.html
# self.param_space = [
# Categorical(categories=[None], name='regressor'),
# Categorical(categories=[None], name='transformer'),
# Categorical(categories=[None], name='func')
# ]
# self.x0 = [None, None, None]
# return {'model': {'TransformedTargetRegressor': kwargs}}
def model_TweedieRegressor(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.TweedieRegressor.html
self.path = "sklearn.linear_model.TweedieRegressor"
self.param_space = [
Real(low=0.0, high=5.0, name='alpha', num_samples=self.num_samples),
Categorical(categories=['auto', 'identity', 'log'], name='link'),
Integer(low=50, high=500, name='max_iter', num_samples=self.num_samples)
]
self.x0 = [1.0, 'auto',100]
return {'model': {'TWEEDIEREGRESSOR': kwargs}}
def model_TheilsenRegressor(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.TheilSenRegressor.html
self.path = "sklearn.linear_model.TheilSenRegressor"
self.param_space = [
Categorical(categories=[True, False], name='fit_intercept'),
Integer(low=30, high=1000, name='max_iter', num_samples=self.num_samples),
Real(low=1e-5, high=1e-1, name='tol', num_samples=self.num_samples),
## Integer(low=self.data.shape[1]+1, high=len(self.data), name='n_subsamples')
]
self.x0 = [True, 50, 0.001]
return {'model': {'THEILSENREGRESSOR': kwargs}}
# TODO
# def model_GAMMAREGRESSOR(self, **kwargs):
# # ValueError: Some value(s) of y are out of the valid range for family GammaDistribution
# return {'GAMMAREGRESSOR': {}}
def model_XGBoostRFRegressor(self, **kwargs):
## https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.XGBRFRegressor
self.path = "xgboost.XGBRFRegressor"
self.param_space = [
Integer(low=5, high=100, name='n_estimators', num_samples=self.num_samples), # Number of gradient boosted trees
Integer(low=3, high=50, name='max_depth', num_samples=self.num_samples), # Maximum tree depth for base learners
Real(low=0.0001, high=0.5, prior='log', name='learning_rate', num_samples=self.num_samples), #
#Categorical(categories=['gbtree', 'gblinear', 'dart'], name='booster'), # todo solve error
Real(low=0.1, high=0.9, name='gamma', num_samples=self.num_samples), # Minimum loss reduction required to make a further partition on a leaf node of the tree.
Real(low=0.1, high=0.9, name='min_child_weight', num_samples=self.num_samples), # Minimum sum of instance weight(hessian) needed in a child.
Real(low=0.1, high=0.9, name='max_delta_step', num_samples=self.num_samples), # Maximum delta step we allow each tree’s weight estimation to be.
Real(low=0.1, high=0.9, name='subsample', num_samples=self.num_samples), # Subsample ratio of the training instance.
Real(low=0.1, high=0.9, name='colsample_bytree', num_samples=self.num_samples),
Real(low=0.1, high=0.9, name='colsample_bylevel', num_samples=self.num_samples),
Real(low=0.1, high=0.9, name='colsample_bynode', num_samples=self.num_samples),
Real(low=0.1, high=0.9, name='reg_alpha', num_samples=self.num_samples),
Real(low=0.1, high=0.9, name='reg_lambda', num_samples=self.num_samples)
]
self.x0 = [50, 3, 0.001, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1
]
return {'model': {'XGBOOSTRFREGRESSOR': kwargs}}
def model_XGBoostRegressor(self, **kwargs):
| |
'QL User\'s Timer count-from date time must remain unchanged')
def test_set_description_valid_q(self):
new_desc_q1 = 'New Timer Description Q1'
desc_changed = self.write_timer_helper_q.set_description(
self.timer_q1_p_id, new_desc_q1)
ref_timestamp = timezone.now().timestamp()
timer_rel = PADSTimer.objects.get(pk=self.timer_q1_p_id)
# Assertions
self.assertEquals(timer_rel.description, new_desc_q1,
'QL User\'s Timer description must be changed')
self.assertTrue(desc_changed,
'Timer helper must indicate success in changing description')
self.assertAlmostEquals(timer_rel.count_from_date_time.timestamp(),
ref_timestamp,
delta=1.0,
msg='Description change must reset Timer')
self.assertEquals(self.timer_a1_p.description, self.timer_a1_p_desc,
'User A\'s Timer\'s description must remain the same')
self.assertEquals(self.timer_a1_p.count_from_date_time,
self.orig_count_time_a1_p,
'User A\'s Timer\'s count-from date time must be unchanged')
self.assertEquals(self.timer_b1_p.description, self.timer_b1_p_desc,
'User B\'s Timer Description must remain the same')
self.assertEquals(self.timer_b1_p.count_from_date_time,
self.orig_count_time_b1_p,
'User B\'s Timer count-from date time must remain the same')
def test_set_description_valid_stopped_b(self):
        # Set up a stopped (non-running) Timer for Test User A
timer_b2_pnr_desc = 'Test Timer B2 by Test User A (Public/Stopped)'
timer_b2_pnr_id= self.write_timer_helper_a.new(
timer_b2_pnr_desc, public=True, running=False)
timer_b2_pnr = PADSTimer.objects.get(pk=timer_b2_pnr_id)
orig_count_time_b2_pnr = timer_b2_pnr.count_from_date_time
# Attempt to change Historical Timer's description
new_desc_b2_pnr = 'Test Timer B2 New Description'
desc_changed = self.write_timer_helper_a.set_description(
timer_b2_pnr_id, new_desc_b2_pnr)
timer_b2_pnr_rel = PADSTimer.objects.get(pk=timer_b2_pnr_id) # Reload
# Assertions
self.assertTrue(desc_changed)
self.assertFalse(timer_b2_pnr.running)
self.assertEquals(timer_b2_pnr_desc, timer_b2_pnr.description)
self.assertEquals(orig_count_time_b2_pnr,
timer_b2_pnr_rel.count_from_date_time)
def test_set_description_bad_description(self):
# Multi-Assertion
for i in bad_str_inputs.values():
desc_changed = self.write_timer_helper_a.set_description(
self.timer_q1_p_id, i)
self.assertFalse(desc_changed,
'Timer helper must indicate failure to set a bad description')
        self.assertTrue(self.test_timers_cfdts_unchanged(),
            'Count-from date times of all Test Timers must not change')
        self.assertTrue(self.test_timers_descs_unchanged(),
            'Descriptions of all Test Timers must remain unchanged')
def test_set_description_historical(self):
# Set up a Historical Timer for Test User A
timer_a2_ph_desc = 'Test Timer A2 by Test User A (Public/Historical)'
timer_a2_ph_id= self.write_timer_helper_a.new(
timer_a2_ph_desc, public=True, historical=True)
timer_a2_ph = PADSTimer.objects.get(pk=timer_a2_ph_id)
orig_count_time_a2_ph = timer_a2_ph.count_from_date_time
# Attempt to change Historical Timer's description
new_desc_a2_ph = 'Test User A editing own historical Timer'
desc_changed = self.write_timer_helper_a.set_description(
timer_a2_ph_id, new_desc_a2_ph)
# Assertions
self.assertFalse(desc_changed)
self.assertEqual(timer_a2_ph.description, timer_a2_ph_desc)
self.assertEqual(timer_a2_ph.count_from_date_time,
orig_count_time_a2_ph)
def test_set_description_invalid_id(self):
new_desc_b1_a = 'User A attempting to modify non-existent Timer'
desc_changed = self.write_timer_helper_a.set_description(
-9999, new_desc_b1_a)
self.assertFalse(desc_changed,
'Timer helper must indicate failure to change description')
        self.assertTrue(self.test_timers_cfdts_unchanged(),
            'Count-from date times of all Test Timers must not change')
        self.assertTrue(self.test_timers_descs_unchanged(),
            'Descriptions of all Test Timers must remain unchanged')
def test_set_description_signed_out(self):
write_timer_helper = PADSWriteTimerHelper()
new_desc_a1_s = 'Signed out User attempting to modify User A\'s Timer'
desc_changed = write_timer_helper.set_description(
self.timer_a1_p_id, new_desc_a1_s)
self.assertFalse(desc_changed,
'Timer helper must indicate failure to change description')
        self.assertTrue(self.test_timers_cfdts_unchanged(),
            'Count-from date times of all Test Timers must not change')
        self.assertTrue(self.test_timers_descs_unchanged(),
            'Descriptions of all Test Timers must remain unchanged')
def test_set_description_wrong_user_a(self):
new_desc_b1_a = 'User A attempting to modify User B\'s Timer'
desc_changed = self.write_timer_helper_a.set_description(
self.timer_b1_p_id, new_desc_b1_a)
self.assertFalse(desc_changed,
'Timer helper must indicate failure to change description')
        self.assertTrue(self.test_timers_cfdts_unchanged(),
            'Count-from date times of all Test Timers must not change')
        self.assertTrue(self.test_timers_descs_unchanged(),
            'Descriptions of all Test Timers must remain unchanged')
class PADSWriteTimerHelperResetByIdTests(TestCase):
@classmethod
def setUpTestData(cls):
# Set up test Users and Timer Helpers
# Set Up Test Users and Write Timer Helpers
# Test User A
username_a = 'test-jess-thrbi'
password_a = ' <PASSWORD>'
cls.user_a = write_user_helper.prepare_user_in_db(
username_a, password_a)
cls.user_a.save()
cls.write_timer_helper_a = PADSWriteTimerHelper(cls.user_a.id)
# Test User B
username_b = 'not-jess'
        password_b = '<PASSWORD>'
cls.user_b = write_user_helper.prepare_user_in_db(
username_b, password_b)
cls.user_b.save()
cls.write_timer_helper_b = PADSWriteTimerHelper(cls.user_b.id)
# Test Quick List User
cls.user_q = write_user_helper.prepare_ql_user_in_db()[0]
cls.user_q.save()
cls.write_timer_helper_q = PADSWriteTimerHelper(cls.user_q.id)
# Set Up Test Timers, remember original count-from date time
# Public Timers are used in this test to ensure that the Helpers
# are able to tell between granting write and read access.
# Test User A's Timer
timer_a1_p_desc = 'Test Timer A1 by Test User A (Public)'
cls.timer_a1_p_id = cls.write_timer_helper_a.new(
timer_a1_p_desc, public=True)
cls.timer_a1_p = PADSTimer.objects.get(pk=cls.timer_a1_p_id)
cls.orig_count_time_a1_p = cls.timer_a1_p.count_from_date_time
# Test User B's Timer
timer_b1_p_desc = 'Test Timer B1 by Test User B (Public)'
cls.timer_b1_p_id = cls.write_timer_helper_b.new(
timer_b1_p_desc, public=True)
cls.timer_b1_p = PADSTimer.objects.get(pk=cls.timer_b1_p_id)
cls.orig_count_time_b1_p = cls.timer_b1_p.count_from_date_time
# Test QL User's Timer
timer_q1_p_desc = 'Test Timer Q1 by Test QL User (Public)'
cls.timer_q1_p_id = cls.write_timer_helper_q.new(
timer_q1_p_desc, public=True)
cls.timer_q1_p = PADSTimer.objects.get(pk=cls.timer_q1_p_id)
cls.orig_count_time_q1_p = cls.timer_q1_p.count_from_date_time
def timers_cfdts_unchanged(self):
        '''Returns True if the count-from date times of all Test Timers in the
        Timer Write Helper Reset By Id Test Case have remained unchanged.
        '''
timer_rel_a1 = PADSTimer.objects.get(pk=self.timer_a1_p_id)
timer_a_cfdt_same = (
self.orig_count_time_a1_p == timer_rel_a1.count_from_date_time)
timer_rel_b1 = PADSTimer.objects.get(pk=self.timer_b1_p_id)
timer_b_cfdt_same = (
self.orig_count_time_b1_p == timer_rel_b1.count_from_date_time)
timer_rel_q1 = PADSTimer.objects.get(pk=self.timer_q1_p_id)
timer_q_cfdt_same = (
self.orig_count_time_q1_p == timer_rel_q1.count_from_date_time)
return (timer_a_cfdt_same & timer_b_cfdt_same & timer_q_cfdt_same)
def test_reset_by_id_valid_a(self):
reset_reason = 'Test User A Resetting Timer A1'
timer_reset = self.write_timer_helper_a.reset_by_id(
self.timer_a1_p_id, reset_reason)
# Beginner's PROTIP: Timers must be reloaded after reset in order to
# get the new count-from date times.
timer_a1_p_rel = PADSTimer.objects.get(pk=self.timer_a1_p_id)
count_time_a = timer_a1_p_rel.count_from_date_time
log_entry_a = PADSTimerReset.objects.filter(
timer_id=self.timer_a1_p_id).order_by('-date_time')[0]
timer_b1_p_rel = PADSTimer.objects.get(pk=self.timer_b1_p_id)
count_time_b = timer_b1_p_rel.count_from_date_time
timer_q1_p_rel = PADSTimer.objects.get(pk=self.timer_q1_p_id)
count_time_q = timer_q1_p_rel.count_from_date_time
ref_timestamp = timezone.now().timestamp()
# Assertions
self.assertTrue(timer_reset,
'Helper must indicate success resetting timer')
self.assertTrue(timer_a1_p_rel.running,
'Timer A1 must be running after reset')
self.assertIn(reset_reason, log_entry_a.reason,
'Timer A1\'s reset must be logged')
self.assertAlmostEqual(count_time_a.timestamp(),
ref_timestamp,
delta=1.0,
msg='Timer A1 count-from date time must be advanced after reset')
self.assertEquals(count_time_b, self.orig_count_time_b1_p,
'Timer B1 count-from date time must remain the same')
self.assertEquals(count_time_q, self.orig_count_time_q1_p,
'Timer Q1 count-from date time must remain the same')
def test_reset_by_id_valid_b(self):
reset_reason = 'Test User B Resetting Timer B1'
timer_reset = self.write_timer_helper_b.reset_by_id(
self.timer_b1_p_id, reset_reason)
# Reload Timers
timer_b1_p_rel = PADSTimer.objects.get(pk=self.timer_b1_p_id)
count_time_b = timer_b1_p_rel.count_from_date_time
log_entry_b = PADSTimerReset.objects.filter(
timer_id=self.timer_b1_p_id).order_by('-date_time')[0]
timer_a1_p_rel = PADSTimer.objects.get(pk=self.timer_a1_p_id)
count_time_a = timer_a1_p_rel.count_from_date_time
timer_q1_p_rel = PADSTimer.objects.get(pk=self.timer_q1_p_id)
count_time_q = timer_q1_p_rel.count_from_date_time
ref_timestamp = timezone.now().timestamp()
# Assertions
self.assertTrue(timer_reset,
'Helper must indicate success resetting timer')
self.assertTrue(timer_b1_p_rel.running,
'Timer B1 must be running after reset')
self.assertIn(reset_reason, log_entry_b.reason,
'Timer B1\'s reset must be logged')
self.assertAlmostEqual(count_time_b.timestamp(),
ref_timestamp,
delta=1.0,
msg='Timer B1 count-from date time must be advanced after reset')
self.assertEquals(count_time_a, self.orig_count_time_a1_p,
'Timer A1 count-from date time must remain the same')
self.assertEquals(count_time_q, self.orig_count_time_q1_p,
'Timer Q1 count-from date time must remain the same')
def test_reset_by_id_wrong_user_q(self):
reset_reason = 'Test User Q Resetting Timer A1'
timer_reset = self.write_timer_helper_q.reset_by_id(
self.timer_a1_p_id, reset_reason)
reset_logged = PADSTimerReset.objects.filter(
timer_id=self.timer_a1_p_id,
reason__icontains=reset_reason).exists()
# Assertions
self.assertFalse(timer_reset,
'Helper must indicate failure to reset other Users\' Timers')
self.assertTrue(self.timers_cfdts_unchanged(),
'Count-from date times of all Test Timers must not change')
self.assertFalse(reset_logged, 'Failed Timer reset must not be logged')
def test_reset_by_id_signed_out(self):
write_timer_helper = PADSWriteTimerHelper()
reset_reason = 'Signed Out User Resetting Timer A1'
timer_reset = write_timer_helper.reset_by_id(
self.timer_a1_p_id, reset_reason)
reset_logged = PADSTimerReset.objects.filter(
reason__icontains=reset_reason).exists()
# Assertions
self.assertFalse(timer_reset,
'Helper must indicate failure to reset Timer')
self.assertTrue(self.timers_cfdts_unchanged(),
'Count-from date times of all Test Timers must not change')
self.assertFalse(reset_logged, 'Failed Timer reset must not be logged')
def test_reset_by_id_bad_reason_a(self):
# First-stage Assertions
for i in bad_str_inputs.values():
timer_reset = self.write_timer_helper_a.reset_by_id(
self.timer_a1_p_id, i)
self.assertFalse(timer_reset,
'Helper must indicate failed Timer reset with invalid reason')
# Second-stage Assertions
self.assertTrue(self.timers_cfdts_unchanged(),
'Count-from date times of all Test Timers must not change')
def test_reset_by_id_historical(self):
# Create new historical Timer for Test QL User
timer_q2_ph_desc = 'Test Timer Q2 by QL User (Pub/Historical)'
reset_reason = 'Test QL User attempting to reset historical Timer'
timer_q2_id = self.write_timer_helper_q.new(
timer_q2_ph_desc, historical=True)
timer_q2 = PADSTimer.objects.get(pk=timer_q2_id)
timer_reset = self.write_timer_helper_q.reset_by_id(
timer_q2_id, reset_reason)
orig_count_time_q2 = timer_q2.count_from_date_time
timer_q2_rel = PADSTimer.objects.get(pk=timer_q2_id) # Reload
reset_logged = PADSTimerReset.objects.filter(
timer_id=timer_q2_id, reason__icontains=reset_reason).exists()
# Assertions
self.assertFalse(timer_reset)
self.assertEquals(timer_q2_rel.count_from_date_time,
orig_count_time_q2)
self.assertFalse(reset_logged,'Failed Timer resets must not be logged')
def test_reset_by_id_invalid_id(self):
reset_reason = 'Test User A resetting non-existent Timer'
# First-stage Assertions
for i in bad_str_inputs.values():
timer_reset = self.write_timer_helper_a.reset_by_id(
i, reset_reason)
self.assertFalse(timer_reset,
'Helper must indicate failed reset with invalid Timer id')
reset_logged = PADSTimerReset.objects.filter(
reason__icontains=reset_reason).exists()
# Second-stage Assertions
self.assertTrue(self.timers_cfdts_unchanged(),
'Count-from date times of all Test Timers must not change')
self.assertFalse(reset_logged,
'Failed Timer resets must not be logged')
class PADSWriteTimerHelperStopByIdTests(TestCase):
@classmethod
def setUpTestData(cls):
# Set up test Users and Timer Helpers
# Set Up Test Users and Write Timer Helpers
# Test User A
username_a = 'test-jess-thsbi'
password_a = ' <PASSWORD>'
cls.user_a = write_user_helper.prepare_user_in_db(
username_a, password_a)
cls.user_a.save()
cls.write_timer_helper_a = PADSWriteTimerHelper(cls.user_a.id)
# Test User B
username_b = 'not-jess'
        password_b = '<PASSWORD>'
cls.user_b = write_user_helper.prepare_user_in_db(
username_b, password_b)
cls.user_b.save()
cls.write_timer_helper_b = PADSWriteTimerHelper(cls.user_b.id)
# Test Quick List User
cls.user_q = write_user_helper.prepare_ql_user_in_db()[0]
cls.user_q.save()
cls.write_timer_helper_q = PADSWriteTimerHelper(cls.user_q.id)
# Set Up Test Timers, remember original count-from date time
# Public Timers are used in this test to ensure that the Helpers
# are able to tell between granting write and read access.
# Test User A's Timer
        timer_a1_p_desc
ccs_seq[i] for i in reads if i is not None]
jobs.append(pool.apply_async(scan_ccs_chunk, (chunk, is_canonical)))
pool.close()
prog = ProgressBar()
finished_cnt = 0
reads_count = defaultdict(int)
short_reads = []
with open('{}/{}.cand_circ.fa'.format(out_dir, prefix), 'w') as out:
for job in jobs:
finished_cnt += 1
tmp_cnt, tmp_short, ret = job.get()
for key, value in tmp_cnt.items():
reads_count[key] += value
short_reads += tmp_short
for read_id, circ_id, strand, cir_exon_tag, ss_id, clip_info, segments, circ_seq in ret:
out.write('>{}\t{}\t{}\t{}\t{}\t{}\t{}\n{}\n'.format(
read_id, circ_id, strand, cir_exon_tag, ss_id, clip_info, segments, circ_seq
))
prog.update(100 * finished_cnt / len(jobs))
pool.join()
prog.update(100)
return reads_count, short_reads
def recover_ccs_chunk(chunk, is_canonical):
reads_cnt = defaultdict(int)
ret = []
for read_id, segments, ccs, raw in chunk:
        # Skip reads whose other mapped regions intersect with the CCS
seg_st = int(segments.split(';')[0].split('-')[0])
seg_en = int(segments.split(';')[-1].split('-')[1])
ccs_hit = get_primary_alignment(env.ALIGNER.map(ccs * 2))
if ccs_hit is None or seg_en - seg_st < ccs_hit.q_en - ccs_hit.q_st:
continue
reads_cnt['ccs_mapped'] += 1
# Find back-spliced junction site
circ, junc = find_bsj(ccs)
        # Require a candidate alignment covering most of the CCS (clipped bases below ~15%)
circ_hit = get_primary_alignment(env.ALIGNER.map(circ))
if circ_hit is None:
continue
clipped_circ, circ_start, circ_end, clip_info = align_clip_segments(circ, circ_hit)
if circ_start is None or circ_end is None:
continue
clip_base = clip_info[2]
if clip_base > 0.15 * len(ccs) or clip_base > 20:
continue
reads_cnt['bsj'] += 1
        # Retrieve circRNA positions, converting minimap2 coordinates to genomic positions
host_strand = find_host_gene(circ_hit.ctg, circ_start, circ_end)
ss_site, us_free, ds_free, tmp_signal = find_annotated_signal(circ_hit.ctg, circ_start, circ_end, clip_base, clip_base + 10)
if ss_site is None:
ss_site = find_denovo_signal(circ_hit.ctg, circ_start, circ_end, host_strand, tmp_signal,
us_free, ds_free, clip_base, clip_base + 10, 3, True)
if ss_site is None:
ss_id = 'NA'
strand = 'NA'
correction_shift = 0
else:
reads_cnt['signal'] += 1
ss_id, strand, us_shift, ds_shift = ss_site
circ_start += us_shift
circ_end += ds_shift
correction_shift = min(max(us_shift, us_free), ds_free)
circ_id = '{}:{}-{}'.format(circ_hit.ctg, circ_start + 1, circ_end)
# Get Cirexons
cir_exons = get_blocks(circ_hit)
cir_exons = merge_clip_exon(cir_exons, clip_info)
cir_exons[0][0] = circ_start
cir_exons[-1][1] = circ_end
cir_exon_tag = []
for cir_exon_start, cir_exon_end, cir_exon_length in cir_exons:
cir_exon_tag.append('{}-{}|{}'.format(cir_exon_start + 1, cir_exon_end, cir_exon_length))
        # BSJ correction for the 5' region
circ_seq = clipped_circ if circ_hit.strand > 0 else revcomp(clipped_circ)
circ_seq = circ_seq[correction_shift:] + circ_seq[:correction_shift]
ret.append((
read_id, circ_id, strand, ','.join(cir_exon_tag), ss_id,
'{}|{}-{}'.format(junc, clip_base, len(circ)), segments, circ_seq
))
return reads_cnt, ret
def recover_ccs_reads(short_reads, ref_fasta, ss_index, gtf_index, intron_index, is_canonical, out_dir, prefix, threads):
from bwapy import BwaAligner
# Second scanning of short reads
genome = Fasta(ref_fasta)
options = '-x ont2d -T 19'
bwa_aligner = Aligner(BwaAligner(ref_fasta, options=options))
chunk_size = 250
jobs = []
pool = Pool(threads, env.initializer, (bwa_aligner, genome.contig_len, genome, gtf_index, intron_index, ss_index))
for reads in grouper(short_reads, chunk_size):
chunk = [i for i in reads if i is not None]
jobs.append(pool.apply_async(recover_ccs_chunk, (chunk, is_canonical)))
pool.close()
prog = ProgressBar()
prog.update(0)
finished_cnt = 0
reads_count = defaultdict(int)
with open('{}/{}.cand_circ.fa'.format(out_dir, prefix), 'a') as out:
for job in jobs:
finished_cnt += 1
tmp_cnt, ret = job.get()
for key, value in tmp_cnt.items():
reads_count[key] += value
for read_id, circ_id, strand, cir_exon_tag, ss_id, clip_info, segments, circ_seq in ret:
out.write('>{}\t{}\t{}\t{}\t{}\t{}\t{}\n{}\n'.format(
read_id, circ_id, strand, cir_exon_tag, ss_id, clip_info, segments, circ_seq
))
prog.update(100 * finished_cnt / len(jobs))
pool.join()
prog.update(100)
return reads_count
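# Hedged sketch (assumption): the `grouper` helper used above is not shown in this excerpt;
# callers filter out None entries, which matches the standard itertools recipe of yielding
# fixed-size chunks padded with a fillvalue:
#     from itertools import zip_longest
#     def grouper(iterable, n, fillvalue=None):
#         args = [iter(iterable)] * n
#         return zip_longest(*args, fillvalue=fillvalue)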
def check_read(segments, seq):
from spoa import poa
fasta = [seq[int(i.split('-')[0]):int(i.split('-')[1])] for i in segments.split(';')]
poa(fasta, 1, True, -1, -1, -1, -1, -1)
def scan_raw_chunk(chunk, is_canonical, circ_reads):
reads_cnt = defaultdict(int)
ret = []
short_reads = []
for read_id, seq in chunk:
if read_id in circ_reads:
continue
        # Defer short reads (< 300 bp) to the separate short-read recovery pass
if len(seq) < 300:
short_reads.append((read_id, seq))
continue
# Remove reads that have ambiguous mapping
raw_hits = sorted([i for i in env.ALIGNER.map(seq) if i.is_primary], key=lambda x: [x.q_st, x.q_en])
if len(raw_hits) == 0:
continue
elif len(raw_hits) == 1:
raw_hit = remove_long_insert(raw_hits[0])
if raw_hit.mlen < len(seq) * .45 or raw_hit.mlen > len(seq) - 50:
continue
if raw_hit.q_st < 50 and raw_hit.q_en > len(seq) - 50:
continue
circ, junc = find_bsj(seq)
if junc is None:
continue
elif len(raw_hits) == 2:
head, tail = remove_long_insert(raw_hits[0]), remove_long_insert(raw_hits[1])
if head.ctg != tail.ctg:
continue
if not head.q_st + head.mlen * 0.45 < tail.q_st:
continue
if head.r_en - 20 < tail.r_st:
continue
if head.q_en < tail.q_st - 50:
continue
circ, junc = find_bsj(seq)
if junc is None or junc < head.q_en - 10 or junc > tail.q_st + 10:
continue
else:
continue
circ_hits = sorted([remove_long_insert(i) for i in env.ALIGNER.map(circ) if i.is_primary], key=lambda x: [x.q_st, x.q_en])
if len(circ_hits) == 0:
continue
elif len(circ_hits) == 1:
circ_hit = circ_hits[0]
if circ_hit.mlen <= max([i.mlen for i in raw_hits]):
continue
if min(junc, len(seq) - junc) < 30:
continue
if not junc + circ_hit.q_st < len(seq) < junc + circ_hit.q_en:
continue
circ_ctg, circ_start, circ_end, circ_strand = circ_hit.ctg, circ_hit.r_st, circ_hit.r_en, circ_hit.strand
clip_base = circ_hit.q_st + len(seq) - circ_hit.q_en
cir_exons = get_parital_blocks(circ_hit, len(seq) - junc)
elif len(circ_hits) == 2:
head, tail = circ_hits[0], circ_hits[1]
if head.ctg != tail.ctg or head.strand != tail.strand:
continue
if not head.q_st + (head.q_en - head.q_st) * 0.5 < tail.q_st:
continue
if head.r_en - 20 < tail.r_st:
continue
if head.q_en < tail.q_st - 20:
continue
circ_ctg, circ_start, circ_end, circ_strand = head.ctg, tail.r_st, head.r_en, head.strand
clip_base = abs(tail.q_st - head.q_en)
head_exons = get_blocks(head)
tail_exons = get_blocks(tail)
cir_exons = merge_exons(tail_exons, head_exons)
circ = circ[tail.q_st:] + circ[:tail.q_st]
else:
continue
if clip_base > 20:
continue
        # Retrieve circRNA positions and convert minimap2 coordinates to real positions
host_strand = find_host_gene(circ_ctg, circ_start, circ_end)
try:
ss_site, us_free, ds_free, tmp_signal = find_annotated_signal(circ_ctg, circ_start, circ_end, clip_base, clip_base + 10)
except Exception as e:
            # Skip this read if the splice-signal lookup fails; otherwise the
            # variables set in the try block would be undefined below.
            print(e)
            continue
if ss_site is None:
ss_site = find_denovo_signal(circ_ctg, circ_start, circ_end, host_strand, tmp_signal,
us_free, ds_free, clip_base, clip_base + 10, 3, True)
if ss_site is None:
strand = 'NA'
ss_id = 'NA'
correction_shift = 0
else:
ss_id, strand, us_shift, ds_shift = ss_site
circ_start += us_shift
circ_end += ds_shift
correction_shift = min(max(us_shift, -us_free), ds_free)
circ_id = '{}:{}-{}'.format(circ_ctg, circ_start + 1, circ_end)
cir_exons[0][0] = circ_start
cir_exons[-1][1] = circ_end
cir_exon_tag = []
for cir_exon_start, cir_exon_end, cir_exon_len in cir_exons:
cir_exon_tag.append('{}-{}|{}'.format(cir_exon_start, cir_exon_end, cir_exon_len))
circ_seq = circ if circ_strand > 0 else revcomp(circ)
circ_seq = circ_seq[correction_shift:] + circ_seq[:correction_shift]
ret.append((
read_id, circ_id, strand, ','.join(cir_exon_tag), ss_id, '{}|{}-NA'.format(junc, clip_base), 'partial', circ_seq
))
reads_cnt['partial'] += 1
return reads_cnt, ret, short_reads
def scan_raw_reads(in_file, ref_fasta, gtf_index, intron_index, ss_index, is_canonical, out_dir, prefix, threads):
import gzip
import mappy as mp
from CIRI_long.utils import to_str
circ_reads = {}
with open('{}/{}.cand_circ.fa'.format(out_dir, prefix), 'r') as f:
for line in f:
read_id = line.rstrip().split()[0].lstrip('>')
circ_reads[read_id] = 1
f.readline()
# Auto detect input format
is_fastq = 1
is_gz = 1
if in_file.endswith('.fa') or in_file.endswith('.fasta'):
is_fastq = 0
is_gz = 0
fq = open(in_file, 'r')
elif in_file.endswith('.fa.gz') or in_file.endswith('.fasta.gz'):
is_fastq = 0
is_gz = 1
fq = gzip.open(in_file, 'rb')
elif in_file.endswith('.fq') or in_file.endswith('.fastq'):
is_gz = 0
fq = open(in_file, 'r')
elif in_file.endswith('.fq.gz') or in_file.endswith('.fastq.gz'):
fq = gzip.open(in_file, 'rb')
else:
sys.exit('Wrong format of input')
# Prepare aligners
faidx = Faidx(ref_fasta)
contig_len = faidx.contig_len
faidx.close()
aligner = mp.Aligner(ref_fasta, n_threads=threads, preset='splice')
jobs = []
pool = Pool(threads, env.initializer, (aligner, contig_len, aligner, gtf_index, intron_index, ss_index))
# Init jobs
chunk = []
chunk_size = 1000
for line in fq:
if is_gz:
header = to_str(line).rstrip().split(' ')[0]
seq = to_str(fq.readline()).rstrip()
else:
header = line.rstrip().split(' ')[0]
seq = fq.readline().rstrip()
if is_fastq:
header = header.lstrip('@')
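            # Skip the '+' separator line and the quality line of the FASTQ record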
fq.readline()
fq.readline()
else:
header = header.lstrip('>')
# Split into chunks
chunk.append((header, seq))
if len(chunk) == chunk_size:
jobs.append(pool.apply_async(scan_raw_chunk, (chunk, is_canonical, circ_reads)))
chunk = []
if len(chunk) > 0:
jobs.append(pool.apply_async(scan_raw_chunk, (chunk, is_canonical, circ_reads)))
pool.close()
# Receive results
reads_cnt = defaultdict(int)
prog = ProgressBar()
prog.update(0)
finished_job = 0
short_reads = []
with open('{}/{}.low_confidence.fa'.format(out_dir, prefix), 'w') as out:
for job in jobs:
tmp_cnt, tmp_ret, tmp_short = job.get()
for key, value in tmp_cnt.items():
reads_cnt[key] += value
short_reads += tmp_short
for read_id, circ_id, strand, cir_exon_tag, ss_id, clip_info, segments, circ_seq in tmp_ret:
out.write('>{}\t{}\t{}\t{}\t{}\t{}\t{}\n{}\n'.format(
read_id, circ_id, strand, cir_exon_tag, ss_id, clip_info, segments, circ_seq
))
finished_job += 1
prog.update(100 * finished_job // len(jobs))
prog.update(100)
pool.join()
return reads_cnt, short_reads
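# Hedged usage sketch (illustrative, not part of the original pipeline): how
# scan_raw_reads might be driven once the annotation indexes have been built;
# the file names and parameter values below are assumptions.
def _example_scan_raw_reads(gtf_index, intron_index, ss_index):
    reads_cnt, short_reads = scan_raw_reads(
        'sample.fq.gz', 'genome.fa', gtf_index, intron_index, ss_index,
        is_canonical=True, out_dir='./out', prefix='sample', threads=4)
    return reads_cnt, short_reads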
# def recover_raw_chunk(chunk, is_canonical):
# reads_cnt = defaultdict(int)
#
# ret = []
# for read_id, seq in chunk:
# raw_hits = ALIGNER.map(seq)
# if raw_hits is None:
# continue
# raw_hits = sorted([i for i in raw_hits if i.is_primary], key=lambda x: [x.q_st, x.q_en])
# if len(raw_hits) == 0:
# continue
#
# if len(raw_hits) == 1:
# raw_hit = raw_hits[0]
# if raw_hit.q_en | |
matplotlib axes containing a plot of the surface.
Parameters
==========
ax : Axes
An existing matplotlib axes to plot to.
plot_kwargs : dict
Arguments to be passed to Axes.plot().
"""
if ax is None:
fig, ax = plt.subplots(1, 1)
ax.set_ylabel('Vertical Position [m]')
ax.set_xlabel('Horizontal Position [m]')
ax.plot(self.x, self.y, **plot_kwargs)
# TODO : These two lines probably only need to be set if ax is None.
ax.set_aspect('equal')
ax.grid()
return ax
class HorizontalSurface(Surface):
def __init__(self, height, length, start=0.0, num_points=100):
"""Instantiates a class that represents a horizontal surface at a
        height above the x axis.
Parameters
==========
height : float
The height of the surface above the horizontal x axis in meters.
length : float
The length of the surface in meters.
start : float, optional
The x location of the start of the left most point of the surface.
num_points : integer, optional
The number of (x,y) coordinates.
"""
x = np.linspace(start, start + length, num=num_points)
y = height * np.ones_like(x)
super(HorizontalSurface, self).__init__(x, y)
def distance_from(self, xp, yp):
"""Returns the shortest distance from point (xp, yp) to the surface.
Parameters
==========
xp : float
The horizontal, x, coordinate of the point.
yp : float
The vertical, y, coordinate of the point.
Returns
=======
distance : float
The shortest distance from the point to the surface. If the point
is above the surface a positive distance is returned, else a
negative distance.
"""
return yp - self.y[0]
class FlatSurface(Surface):
"""Class that represents a flat surface angled relative to the
horizontal."""
def __init__(self, angle, length, init_pos=(0.0, 0.0), num_points=100):
"""Instantiates a flat surface that is oriented at a counterclockwise
angle from the horizontal.
Parameters
==========
angle : float
The angle of the surface in radians. Counterclockwise (about z) is
positive, clockwise is negative.
length : float
The distance in meters along the surface from the initial position.
init_pos : 2-tuple of floats, optional
The x and y coordinates in meters that locate the start of the
surface.
num_points : integer, optional
The number of points used to define the surface coordinates.
"""
if angle >= np.pi / 2.0 or angle <= -np.pi / 2.0:
raise InvalidJumpError('Angle must be between -90 and 90 degrees')
self._angle = angle
x = np.linspace(init_pos[0], init_pos[0] + length * np.cos(angle),
num=num_points)
y = np.linspace(init_pos[1], init_pos[1] + length * np.sin(angle),
num=num_points)
super(FlatSurface, self).__init__(x, y)
@property
def angle(self):
"""Returns the angle wrt to horizontal in radians of the surface."""
return self._angle
def distance_from(self, xp, yp):
"""Returns the shortest distance from point (xp, yp) to the surface.
Parameters
==========
xp : float
The horizontal, x, coordinate of the point.
yp : float
The vertical, y, coordinate of the point.
Returns
=======
distance : float
The shortest distance from the point to the surface. If the point
is above the surface a positive distance is returned, else a
negative distance.
"""
if compute_dist_from_flat is None:
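            # Signed perpendicular distance from (xp, yp) to the line
            # y = x * tan(angle) through the origin: (yp - m*xp) / sqrt(1 + m**2),
            # which simplifies to (yp - m*xp) * cos(angle).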
m = np.tan(self.angle)
d = (yp - m * xp) * np.cos(self.angle)
return d
else:
return compute_dist_from_flat(self.angle, xp, yp)
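# Hedged usage sketch (illustrative, not part of the original module): the signed
# distance from a point to a 10 m flat surface inclined at 10 degrees; a positive
# value means the point lies above the surface.
def _example_flat_surface_distance():
    surf = FlatSurface(np.deg2rad(10.0), 10.0)
    return surf.distance_from(1.0, 2.0)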
class ClothoidCircleSurface(Surface):
"""Class that represents a surface made up of a circle bounded by two
clothoids."""
def __init__(self, entry_angle, exit_angle, entry_speed, tolerable_acc,
init_pos=(0.0, 0.0), gamma=0.99, num_points=200):
"""Instantiates a clothoid-circle-clothoid curve.
Parameters
==========
entry_angle : float
The entry angle tangent to the start of the left clothoid in
radians.
exit_angle : float
The exit angle tangent to the end of the right clothoid in radians.
entry_speed : float
The magnitude of the skier's velocity in meters per second as they
            enter the left clothoid.
tolerable_acc : float
The tolerable normal acceleration of the skier in G's.
init_pos : 2-tuple of floats
The x and y coordinates of the start of the left clothoid.
gamma : float
Fraction of circular section.
num_points : integer, optional
The number of points in each of the three sections of the curve.
"""
self.entry_angle = entry_angle
self.exit_angle = exit_angle
self.entry_speed = entry_speed
self.tolerable_acc = tolerable_acc
self.init_pos = init_pos
self.gamma = gamma
self.num_points = num_points
X, Y = self._create_surface()
super(ClothoidCircleSurface, self).__init__(X, Y)
def _create_surface(self):
# TODO : Break this function into smaller functions.
lam = -self.entry_angle
beta = self.exit_angle
rotation_clothoid = (lam - beta) / 2
        # used to rotate the symmetric clothoid so that the left side is at lam
        # and the right side is at beta
# radius_min is the radius of the circular part of the transition.
        # Every other radius along the clothoid will be longer than that, which
        # ensures the g-force felt by the skier is always less than the desired
        # value. This code ASSUMES that the velocity at the minimum radius is
        # equal to the velocity at the end of the approach.
radius_min = self.entry_speed**2 / (self.tolerable_acc * GRAV_ACC)
# x,y data for circle
thetaCir = 0.5 * self.gamma * (lam + beta)
xCirBound = radius_min * np.sin(thetaCir)
xCirSt = -radius_min * np.sin(thetaCir)
xCir = np.linspace(xCirSt, xCirBound, num=self.num_points)
# x,y data for one clothoid
A_squared = radius_min**2 * (1 - self.gamma) * (lam + beta)
A = np.sqrt(A_squared)
clothoid_length = A * np.sqrt((1 - self.gamma) * (lam + beta))
# generates arc length points for one clothoid
s = np.linspace(clothoid_length, 0, num=self.num_points)
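        # X1, Y1 are truncated series expansions of the clothoid's Fresnel
        # integrals x(s) = int_0^s cos(t**2 / (2*A**2)) dt and
        # y(s) = int_0^s sin(t**2 / (2*A**2)) dt.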
X1 = s - (s**5) / (40*A**4) + (s**9) / (3456*A**8)
Y1 = (s**3) / (6*A**2) - (s**7) / (336*A**6) + (s**11) / (42240*A**10)
X2 = X1 - X1[0]
Y2 = Y1 - Y1[0]
theta = (lam + beta) / 2
X3 = np.cos(theta)*X2 + np.sin(theta)*Y2
Y3 = -np.sin(theta)*X2 + np.cos(theta)*Y2
X4 = X3
Y4 = Y3
X5 = -X4 + 2*X4[0]
Y5 = Y4
X4 = X4 - radius_min*np.sin(thetaCir)
Y4 = Y4 + radius_min*(1 - np.cos(thetaCir))
X4 = X4[::-1]
Y4 = Y4[::-1]
X5 = X5 + radius_min*np.sin(thetaCir)
Y5 = Y5 + radius_min*(1 - np.cos(thetaCir))
# stitching together clothoid and circular data
xLCir = xCir[xCir <= 0]
yLCir = radius_min - np.sqrt(radius_min**2 - xLCir**2)
xRCir = xCir[xCir >= 0]
yRCir = radius_min - np.sqrt(radius_min**2 - xRCir**2)
X4 = np.hstack((X4, xLCir[1:-1]))
Y4 = np.hstack((Y4, yLCir[1:-1]))
X5 = np.hstack((xRCir[0:-2], X5))
Y5 = np.hstack((yRCir[0:-2], Y5))
X6 = np.cos(rotation_clothoid)*X4 + np.sin(rotation_clothoid)*Y4
Y6 = -np.sin(rotation_clothoid)*X4 + np.cos(rotation_clothoid)*Y4
X7 = np.cos(rotation_clothoid)*X5 + np.sin(rotation_clothoid)*Y5
Y7 = -np.sin(rotation_clothoid)*X5 + np.cos(rotation_clothoid)*Y5
X = np.hstack((X6, X7))
Y = np.hstack((Y6, Y7))
# Shift the entry point of the curve to be at X=0, Y=0.
X -= np.min(X)
Y -= Y[np.argmin(X)]
# Shift the entry point of the curve to be at the end of the flat
# surface.
X += self.init_pos[0]
Y += self.init_pos[1]
return X, Y
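# Hedged usage sketch (illustrative, the values are assumptions): a transition
# curve for a 10 degree downhill approach into a 15 degree takeoff ramp at
# 10 m/s with a tolerable normal acceleration of 1.5 g.
def _example_clothoid_circle_surface():
    return ClothoidCircleSurface(np.deg2rad(-10.0), np.deg2rad(15.0),
                                 entry_speed=10.0, tolerable_acc=1.5)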
class TakeoffSurface(Surface):
"""Class that represents a surface made up of a circle bounded by two
clothoids with a flat exit surface."""
def __init__(self, skier, entry_angle, exit_angle, entry_speed,
time_on_ramp=0.25, gamma=0.99, init_pos=(0.0, 0.0),
num_points=200):
"""Instantiates the takeoff curve with the flat takeoff ramp added to
the terminus of the clothoid-circle-clothoid curve.
Parameters
==========
skier : Skier
A skier instance.
entry_angle : float
The entry angle tangent to the start of the left clothoid in
radians.
exit_angle : float
The exit angle tangent to the end of the right clothoid in radians.
entry_speed : float
The magnitude of the skier's velocity in meters per second as they
            enter the left clothoid.
time_on_ramp : float, optional
The time in seconds that the skier should be on the takeoff ramp
before launch.
gamma : float, optional
Fraction of circular section.
init_pos : 2-tuple of floats, optional
The x and y coordinates of the start of the left clothoid.
num_points : integer, optional
The number of points in each of the three sections of the curve.
"""
self.skier = skier
self.entry_angle = entry_angle
self.exit_angle = exit_angle
self.entry_speed = entry_speed
self.time_on_ramp = time_on_ramp
self.gamma = gamma
self.init_pos = init_pos
self.num_points = num_points
clt_cir_clt = ClothoidCircleSurface(entry_angle, exit_angle,
entry_speed,
skier.tolerable_sliding_acc,
init_pos=init_pos, gamma=gamma,
num_points=num_points)
ramp_entry_speed = skier.end_speed_on(clt_cir_clt,
init_speed=self.entry_speed)
ramp_len = time_on_ramp * ramp_entry_speed # meters
start_x = clt_cir_clt.x[-1]
start_y = clt_cir_clt.y[-1]
points_per_meter = | |
# Repository: psuchand/capstone-taxi
##############################################
#Helper functions
#Plot profit for given cabbies
##############################################
import pandas as pd
import numpy as np
def plot_profit_for_drivers(drivers):
"""
    If `drivers` is a list (or array), plot the profit (calculated from the
    global `rides` dataframe) for the drivers in that list.
    If `drivers` is not a list, it is assumed to be a dataframe whose index is
    the required list of drivers.
"""
import numpy as np
drivers_list = []
if type(drivers) in [list,np.ndarray] :
drivers_list = drivers
else:
drivers_list = drivers.index
#Calculate profit per hack_license
df = rides[rides.hack_license.isin(drivers_list)]
df = df.groupby('hack_license')['profit'].sum()
print("Mean profit = %.2f"%df.mean())
print("Median profit = %.2f"%df.median())
print("Profit 10 percent quantile = %.2f\nProfit 90 percent quantile = %.2f\n"%(df.quantile(.1), df.quantile(.9)))
#Histogram. X-axis is profit, Y is number of cabbies
    df.hist(bins=40, density=True)
def plot_wage_for_drivers(drivers):
"""
    If `drivers` is a list (or array), plot the hourly wage for the drivers in
    that list.
    If `drivers` is not a list, it is assumed to be a dataframe whose index is
    the required list of drivers.
"""
drivers_list = []
if type(drivers) in [list,np.ndarray] :
drivers_list = drivers
else:
drivers_list = drivers.index
    #Calculate hourly wage per hack_license, using hourly_wage_df (defined
    #later in this module) on the subset of rides for these drivers
    df = hourly_wage_df(rides[rides.hack_license.isin(drivers_list)])['hourly_wage']
    print("Mean hourly wage = %.2f"%df.mean())
    print("Median hourly wage = %.2f"%df.median())
    print("Hourly wage 10 percent quantile = %.2f\nHourly wage 90 percent quantile = %.2f\n"%(df.quantile(.1), df.quantile(.9)))
    #Histogram. X-axis is hourly wage, Y is number of cabbies
    df.hist(bins=40, density=True)
#Minimum number of rides in a location, before it can be considered frequented
MIN_CLUSTER = 5
def frequented_pickup_locations(df, top_percent = .9):
"""
    Given a dataframe, return the most frequently occurring pickup locations,
    as determined by the given quantile (positions with fewer than MIN_CLUSTER
    pickups are ignored).
"""
X = df[['pos']].groupby('pos').size()
X = X[X > MIN_CLUSTER]
upper_quantile = X.quantile(top_percent)
X = X[X >= upper_quantile]
#Print statistics
print("Statistics for table. X = position frequented by driver, Y = #pickups.")
    print(X.describe())
print("\n")
return X
def locations_frequented_by_drivers(df, drivers, top_percent = .9):
"""
This function determines locations frequented by the given top_percent of drivers.
rides is the main dataset of all drivers.
"""
import numpy as np
drivers_list = []
if type(drivers) in [list,np.ndarray] :
drivers_list = drivers
else:
drivers_list = drivers.index
return frequented_pickup_locations(df[df.hack_license.isin(drivers_list)], top_percent=top_percent)
def locations_frequented_by_most_profitable_cabbies(df):
"""
Return locations frequented by the most profitable cabbies.
"""
profit_by_rider = rides[['hack_license', 'profit']].groupby('hack_license').sum()
upper_quantile = profit_by_rider.quantile(.9)
most_profitable_riders = profit_by_rider[profit_by_rider >= upper_quantile]
return frequented_pickup_locations(df[df.hack_license.isin(most_profitable_riders.index)])
def locations_frequented_by_least_profitable_cabbies(df):
"""
Return locations frequented by the least profitable cabbies.
"""
profit_by_rider = rides[['hack_license', 'profit']].groupby('hack_license').sum()
upper_quantile = profit_by_rider.quantile(.1)
least_profitable_riders = profit_by_rider[profit_by_rider <= upper_quantile]
return frequented_pickup_locations(df[df.hack_license.isin(least_profitable_riders.index)])
#Determine the fraction of a driver's fares that come from a given set of locations
def percent_fares_from_given_positions(X, good_positions):
"""
    Determine the percent of each driver's fares that come from the given
    collection of good positions.
    Details:
    X is a dataframe with columns
        hack_license, pos
This function does NOT round gps coordinates before processing.
"""
df = X[['hack_license', 'pos']]
gb = df.groupby('hack_license')
df = gb.apply(lambda z: z['pos'].isin(good_positions.index))
df = df.reset_index()
del df['level_1']
return df.groupby('hack_license').apply(lambda z: z.mean())
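# Hedged usage sketch (illustrative): fraction of each driver's pickups that fall
# in the fleet's most frequented locations, assuming the global `rides` table
# carries the rounded-GPS 'pos' column used above.
def _example_percent_fares_from_hot_spots():
    hot_spots = frequented_pickup_locations(rides, top_percent=0.9)
    return percent_fares_from_given_positions(rides, hot_spots)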
#A threshold value used for data cleanup
MIN_PICKUPS = 1
def cleanup(df):
"""
    1) Remove all cabbies that haven't made at least MIN_PICKUPS pickups.
    2) Only keep drivers whose number of pickups is within 2 standard deviations
       of the mean.
    3) Drop rides with non-positive or extreme (above the 99th percentile)
       trip times, tips, and fares.
"""
riders = df['hack_license'].value_counts()
mean = riders.mean()
std = riders.std()
riders = riders[riders <= (mean + 2*std)]
riders = riders[riders >= (mean - 2*std)]
riders = riders[riders >= MIN_PICKUPS]
riders = riders.index
rides = df[df.hack_license.isin(riders)]
#Clean up by fare amount and tip amount.
MAX_RIDE_TIME = rides.trip_time_in_secs.quantile(.99)
rides = rides[(rides.trip_time_in_secs < MAX_RIDE_TIME) & (rides.trip_time_in_secs > 0)]
MAX_TIP = rides.tip_amount.quantile(.99)
    rides = rides[(rides.tip_amount < MAX_TIP) & (rides.tip_amount > 0)]
MAX_FARE = rides.fare_amount.quantile(.99)
    rides = rides[(rides.fare_amount < MAX_FARE) & (rides.fare_amount > 0)]
print "Returned %d rows"%len(rides.index)
return rides
#Plot profit for drivers that frequent good positions
FREQUENTING_THRESHOLD = .5
NOT_FREQUENTING_THRESHOLD = .2
def plot_profit_for_riders_frequenting_and_not_frequenting_good_positions(rides, good_positions):
"""
Plot profit for riders frequenting, and not frequenting good positions
"""
df = percent_fares_from_given_positions(rides, good_positions)
#Plot profit for drivers that frequent good positions
    print(df.head())
    drivers_frequenting = df[df.pos >= FREQUENTING_THRESHOLD]
    drivers_not_frequenting = df[df.pos <= NOT_FREQUENTING_THRESHOLD]
    print("drivers_frequenting")
    print(drivers_frequenting.describe())
    plot_profit_for_drivers(drivers_frequenting)
    print("drivers_not_frequenting")
    print(drivers_not_frequenting.describe())
plot_profit_for_drivers(drivers_not_frequenting)
##############################################
#Initialization
def plot_points(coords):
"""
Given a collection of points, plot them.
"""
#Plot a given set of gps coordinates on the map
    import matplotlib
    import matplotlib.pyplot as plt
    from mpl_toolkits.basemap import Basemap
    matplotlib.rcParams['figure.figsize'] = (40,30)
#Wall Street and Broadway
lat_0 = 40.707854
lon_0 = -74.011536
GPS_COORDS_LONGS, GPS_COORDS_LATS = zip(*coords.tolist())
GPS_COORDS_LONGS = [float(z) for z in GPS_COORDS_LONGS]
GPS_COORDS_LATS = [float(z) for z in GPS_COORDS_LATS]
my_map = Basemap(projection='merc', lat_0=lat_0, lon_0=lon_0,
resolution = 'h', area_thresh = .1,
llcrnrlon = llcrnrlon, llcrnrlat = llcrnrlat,
urcrnrlon = urcrnrlon, urcrnrlat = urcrnrlat)
longs, lats = my_map(GPS_COORDS_LONGS, GPS_COORDS_LATS)
print "Number of points: ", len(longs)
my_map.drawmapboundary()
my_map.readshapefile(DATA_DIR + r"gadm-us/NewYork-shp/shape/roads", "osm-nyc")
my_map.plot(longs, lats, 'ro', markersize = 10, alpha = 1, label = "Positions with least waiting time (<= 1 min)")
plt.legend(fontsize = 'xx-large')
plt.title("Locations for Taxi drivers to pick up customers with least waiting time (near 106th and Broadway)")
    plt.show()
from bokeh.io import output_file, show
from bokeh.models import (GMapPlot, GMapOptions, ColumnDataSource, Circle, DataRange1d, PanTool, WheelZoomTool, BoxSelectTool
)
def coord_from_string(coord_string):
"""
Coordinates are encoded as strings, convert
back to coordinates.
"""
s = coord_string
try:
return [float(z) for z in s]
    except Exception as e:
replace = list("(,)\'\"")
for t in replace:
s = s.replace(t, " ")
return [float(z) for z in s.strip().split()]
def extract_longs_lats(coords):
"""
Convert coordinates as above to a list of
longitude, latitude pairs.
"""
a = np.array([coord_from_string(z) for z in coords])
a = a.transpose()
longs = a[0]
lats = a[1]
return longs, lats
def plot_points_gmaps(coords_blue, coords_red, filename = "gmap_plot.html"):
"""
Plot a collection of points via google maps
"""
def coords_to_ColumnDataSource(coords):
"""
Convert coordinates as above to a column data source as required by Google's API
"""
longs, lats = extract_longs_lats(coords)
return ColumnDataSource(
data= dict(
lon=longs,
lat=lats
)
)
center = coord_from_string(coords_blue[0])
map_options = GMapOptions(lng=center[0], lat=center[1], map_type="roadmap", zoom=11)
plot = GMapPlot(
x_range=DataRange1d(), y_range=DataRange1d(), map_options=map_options,
api_key = "<KEY>"
)
source_blue = coords_to_ColumnDataSource(coords_blue)
source_red = coords_to_ColumnDataSource(coords_red)
circle_blue = Circle(x="lon", y="lat", size=5, fill_color="blue", fill_alpha=0.8, line_color=None)
circle_red = Circle(x="lon", y="lat", size=5, fill_color="red", fill_alpha=0.8, line_color=None)
plot.add_glyph(source_blue, circle_blue)
plot.add_glyph(source_red, circle_red)
plot.add_tools(PanTool(), WheelZoomTool(), BoxSelectTool())
output_file(filename)
show(plot)
#Maximum number of hours between taxi rides within one shift.
MAX_BREAK = 3
def hourly_wage_df(rides):
"""
Calculate an hourly wage for each driver
"""
#Load data, make sure it is in chronological order
wage = rides.loc[:,('hack_license','pickup_datetime')]
print("Starting with num rows = ", len(wage.index))
grouped = wage.groupby('hack_license')
#Put elements of group in chronological order, then shift
f = lambda z: z.sort_values().shift(-1)
print("Calculating idle time ...")
shifted_pickup = grouped.transform(f)
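    #shifted_pickup lines each trip up with the driver's next pickup time, so
    #the idle time for a trip is (next pickup) - (this dropoff)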
#Load data, make sure it is in chronological order
wage = rides.loc[:,('hack_license','dropoff_datetime','trip_time_in_secs')]
wage['shifted_pickup'] = shifted_pickup
wage['idle_time'] = wage.shifted_pickup - wage.dropoff_datetime
#Convert idle time to seconds
print("Converting times to seconds...")
wage.loc[:,"idle_time"] = wage.loc[:,"idle_time"].apply(lambda z: float(z.total_seconds()))
#If the next trip that this driver took is before the previous dropoff, there is an error. Replace these values with 0.
wage.loc[ wage.idle_time < 0,("idle_time")] = 0
#These trips correspond to the last trip of the driver
wage.loc[wage.idle_time.isnull(), "idle_time"] = 0
    #If the next pickup is more than MAX_BREAK hours after the previous dropoff, assume that the driver went off shift and don't count the gap as idle time
print("Determining when drivers went on break...")
wage.loc[wage.idle_time > MAX_BREAK*60*60, "idle_time"] = 0
#Return the wage dataset
wage = wage[['hack_license','idle_time','trip_time_in_secs']]
print("Calculating percent idle time, profit, hourly wage, ...")
wage = wage.groupby('hack_license').sum()
wage['percent_time_idle'] = 100*wage.idle_time/(wage.trip_time_in_secs + wage.idle_time)
wage['hours_worked'] = (wage['idle_time'] + wage['trip_time_in_secs'])/float(60*60)
print("Adding profit column")
df = rides[['hack_license', 'profit']].groupby('hack_license')['profit'].sum()
wage = pd.concat([df,wage], axis =1)
wage['hourly_wage'] = wage.profit/wage.hours_worked
print("Ending with num rows = ", len(wage.index))
return wage
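# Hedged usage sketch (illustrative, not part of the original notebook): compute
# per-driver wages from the global `rides` table and inspect the distribution.
def _example_hourly_wage_summary():
    wage = hourly_wage_df(rides)
    print(wage[['hourly_wage', 'percent_time_idle', 'hours_worked']].describe())
    return wage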
def distance_to_dollars(x):
"""
    Given a distance in miles, return the estimated cost of driving it
    (assumes fuel at roughly $3.60 per gallon and 29 miles per gallon).
    """
    return 3.6 * x / 29.0
def set_difference(A,B):
"""
Return elements of A not in B and elements of B not in A
"""
try:
return list(set(A) - set(B)), list(set(B) - set(A))
except Exception:
print ("Not hashable, trying again ... ")
Ahashable = [tuple(z) for z in A]
|