{
"source": "jlr-academy/MP-Ashley-Robinson",
"score": 3
}
#### File: MP-Ashley-Robinson/src/database_utils.py
```python
import pymysql
import os
from dotenv import load_dotenv
from utilities import select_from_list
# Establishes a connection to the database described in the .env file
# and returns pymysql connection and cursor objects
def initialise_db():
# Load environment variables from .env file
load_dotenv()
host = os.environ.get("mysql_host")
user = os.environ.get("mysql_user")
    password = os.environ.get("mysql_pass")
database = os.environ.get("mysql_db")
# Establish a database connection
    connection = pymysql.connect(
        host=host,
        user=user,
        password=password,
        database=database
    )
#create cursor object that manages db operations
cursor = connection.cursor()
return connection, cursor
# Creates a new table in the database named in .env file.
# adds fields *table_name*_id as PK,
# adds field *table_name*_name as varchar
def create_new_table(table_name:str):
#initialise - gets env vars and creates a connection & cursor
connection, cursor = initialise_db()
#check if tables exist already and create if not
cursor.execute("SHOW TABLES")
table_exists = False
for x in cursor:
if table_name == str(x[0]):
print("The table already exists")
table_exists = True
break
#create required tables
if table_exists == False:
sql = f"CREATE TABLE {table_name}({table_name}_id INT AUTO_INCREMENT PRIMARY KEY, {table_name}_name VARCHAR(255));"
cursor.execute(sql)
    #commit changes and close
connection.commit()
cursor.close()
connection.close()
#Add specified db item to named table
#takes a list of fields and list of values
def add_to_db_table(table_name:str,fields:list,vals:list):
#initialise - gets env vars and creates a connection & cursor
connection, cursor = initialise_db()
#create fields string & values string
converted_list = [str(element) for element in fields]
fields_str = ",".join(converted_list)
converted_list = ["'" + str(element) + "'" for element in vals]
vals_str = ",".join(converted_list)
#Create SQL commands from strings
sql = f"INSERT INTO {table_name}({fields_str}) VALUES({vals_str});"
#Execute changes
cursor.execute(sql)
    #commit changes and close
connection.commit()
cursor.close()
connection.close()
#updates the db named in the .env file with the given SQL syntax
def db_update_SQL_syntax(SQL_syntax:str):
#initialise - gets env vars and creates a connection & cursor
connection, cursor = initialise_db()
#Create SQL commands from strings
sql = str(SQL_syntax)
#Execute changes
cursor.execute(sql)
    #commit changes and close
connection.commit()
cursor.close()
connection.close()
# returns a list of dictionaries from a specified table
def db_table_to_list_of_dics(db_table:str):
#initialise - gets env vars and creates a connection & cursor
connection, cursor = initialise_db()
#Create SQL commands from strings
sql = str(f"SELECT * FROM {db_table}")
#Execute changes
cursor.execute(sql)
#get field names from db_table
columns = cursor.description
list_of_dics = [{columns[index][0]:column for index, column in enumerate(value)} for value in cursor.fetchall()]
    #commit changes and close
connection.commit()
cursor.close()
connection.close()
return list_of_dics
def list_of_dics_to_db_table(db_table:str, list_to_upload): ### wip - may not be required
#initialise - gets env vars and creates a connection & cursor
connection, cursor = initialise_db()
#Create SQL commands from strings
sql = str(f"SELECT * FROM {db_table}")
#Execute query
cursor.execute(sql)
#get all field names from db table
field_names = [i[0] for i in cursor.description]
    #iterate through list of dics and add to db if fields match
    #NOTE: placeholder logic - the SQL below is only an example and is never executed
    for i in list_to_upload:
        for field in i:
            if field in field_names:
                sql = "UPDATE customers SET address = 'Canyon 123' WHERE address = 'Valley 345'"
    cursor.close()
    connection.close()
def get_field_names(db_table):
#initialise - gets env vars and creates a connection & cursor
connection, cursor = initialise_db()
#Create SQL commands from strings
sql = str(f"SELECT * FROM {db_table}")
#Execute query
cursor.execute(sql)
#get all field names from db table
field_names = [i[0] for i in cursor.description]
connection.commit()
cursor.close()
connection.close()
return field_names
def add_new_record_in_db(table_name):
#get all field names from db table
field_names = get_field_names(table_name)
#remove auto inc primary key from list
field_names_less_pk = []
#ask for input of values for each field
vals_list = []
print("Please enter the value for each required field. ")
for i in field_names:
if i[-3:] != "_id":
field_names_less_pk.append(i)
vals_list.append(str(input(f"{i}: ")))
#Add to db table
add_to_db_table(table_name,field_names_less_pk,vals_list)
def print_db_table(table_name):
#get all field names from db table
field_names = get_field_names(table_name)
#initialise - gets env vars and creates a connection & cursor
connection, cursor = initialise_db()
#Create SQL commands from strings
sql = str(f"SELECT * FROM {table_name}")
#Execute query
cursor.execute(sql)
#iterate through table and print each row
rows = cursor.fetchall()
for row in rows:
j=0
print_str = ""
for i in row:
### calculate white space requirement for print -to do
print_str = print_str + f"{field_names[j]}: {i}, "
j=j+1
print(print_str)
#commit changes and close
connection.commit()
cursor.close()
connection.close()
#deletes a record from named table
def delete_db_record(table_name):
#Ask which item requires deleting
delete_id = int(input("Enter id number of the item you would like to delete: "))
#initialise - gets env vars and creates a connection & cursor
connection, cursor = initialise_db()
#Create SQL commands from strings
sql = str(f"DELETE FROM {table_name} WHERE {table_name}_id={delete_id};")
#Execute query
cursor.execute(sql)
#commit changes and close
connection.commit()
cursor.close()
connection.close()
def amend_db_record(table_name):
#get the item that requires updating
amend_id = input("\n Enter id number of the item you would like to amend: ")
#get the field that requires updating
field_names = get_field_names(table_name)
field_selected = select_from_list(field_names)
amend_field_str = field_names[field_selected] # to do - stop from trying to change the id number
#get the new required value
new_value = input(f"\n What would you like the new value for {amend_field_str} to be? ")
#Create SQL syntax
SQL_syntax = f"UPDATE {table_name} SET {amend_field_str} = '{new_value}' WHERE {table_name}_id = {amend_id};"
#update the database
db_update_SQL_syntax(SQL_syntax)
### below used to create dummy data
''' create_new_table("couriers")
create_new_table("products")
courier_fields_list =["couriers_name", "contact"]
product_field_list =["products_name","price"]
add_to_db("couriers",courier_fields_list,["DHL","07563355888"])
add_to_db("couriers",courier_fields_list,["Parcelforce","07563355999"])
add_to_db("products",product_field_list,["Pepsi","£0.75"])
add_to_db("products",product_field_list,["Sandwich","£2.50"])
add_to_db("products",product_field_list,["Crisps","£0.90"]) '''
```
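The helpers above build their SQL by interpolating user-supplied values directly into f-strings, which leaves them open to SQL injection. As a hedged sketch only (not the repository's code), `add_to_db_table` could instead pass values through pymysql's parameter binding; the `add_to_db_table_safe` name is hypothetical and `initialise_db` is assumed to be importable from the module above:
```python
# Hedged sketch: a parameterised variant of add_to_db_table (illustrative only).
# Table and field names cannot be bound as parameters, so they must still come
# from trusted code; the values themselves are bound safely by pymysql.
from database_utils import initialise_db  # assumes src/ is on the import path


def add_to_db_table_safe(table_name: str, fields: list, vals: list):
    connection, cursor = initialise_db()
    try:
        fields_str = ",".join(fields)
        placeholders = ",".join(["%s"] * len(vals))
        sql = f"INSERT INTO {table_name}({fields_str}) VALUES({placeholders});"
        cursor.execute(sql, vals)  # pymysql substitutes the %s placeholders
        connection.commit()
    finally:
        cursor.close()
        connection.close()


# Illustrative call, mirroring the dummy data at the bottom of the module:
# add_to_db_table_safe("products", ["products_name", "price"], ["Pepsi", "£0.75"])
```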
#### File: MP-Ashley-Robinson/src/menus.py
```python
from utilities import print_list, create_order, del_list_item, amend_list_item, amend_order_status, print_db_table, add_new_record_in_db, delete_db_record, amend_db_record, display_orders
from datetime import datetime, time
import os
from cafe_ASCII_art import cafe_banner
##### MENUS #####
#welcome note
def welcome_note(): #####WIP
    now = datetime.now().time()
    if now < time(12, 0):
        return "Good Morning"
    elif now > time(16, 30):
        return "Good Evening"
    else:
        return "Good Afternoon"
#Start menu
def start_menu_choice():
clear_screen()
    print('\nWhat would you like to review? ')
print('''
1 Inventory
2 Couriers
3 Customers
4 Orders
0 Exit app''')
choicemade = int(input())
return choicemade
#product menu
def products_menu_choice():
choicemade = 1
while choicemade !=0:
clear_screen()
print("What would you like to do with your products?")
print('''
1 See Products
2 Amend Products
3 Create New products
0 Exit to Main Menu
''')
choicemade = int(input())
if choicemade == 1: # see list
clear_screen()
print_db_table("products")
input('Press any key to continue ')
elif choicemade == 2: # amend list item
clear_screen()
print('This is how your product list currently looks')
print_db_table("products")
ans = input('Would you like to amend (A) or delete (D) a product? ')
if ans.upper() == "D":
#del_list_item(products_list)
delete_db_record("products")
else:
#amend_product()
amend_db_record("products")
elif choicemade == 3: # append list
add_new_record_in_db("products")
elif choicemade == 0:
return
else:
print('Invalid choice. Try again')
#Courier menu
def couriers_menu_choice():
choicemade = 1
while choicemade !=0:
clear_screen()
print("What would you like to do with Couriers?")
print('''
1 See Couriers
2 Amend Couriers
3 Create new Courier
0 Exit to Main Menu
''')
choicemade = int(input())
if choicemade == 1:
clear_screen()
print_db_table("couriers")
input('Press any key to continue ')
elif choicemade == 2:
clear_screen()
print('This is how your courier list currently looks')
print_db_table("couriers")
ans = input('Would you like to amend (A) or delete (D) a Courier? ')
if ans.upper() == "D":
#del_list_item(courier_list)
delete_db_record("couriers")
else:
amend_db_record("couriers")
elif choicemade == 3:
add_new_record_in_db("couriers")
elif choicemade == 0:
return
else:
print('Invalid choice. Try again')
#Customers Menu
def customers_menu_choice():
choicemade = 1
while choicemade !=0:
clear_screen()
print("What would you like to do with Customers?")
print('''
1 See Customers
2 Amend Customers
3 Create new Customer
0 Exit to Main Menu
''')
choicemade = int(input())
if choicemade == 1:
clear_screen()
print_db_table("customers")
input('Press any key to continue ')
elif choicemade == 2:
clear_screen()
            print('This is how your customers currently look')
print_db_table("customers")
ans = input('Would you like to amend (A) or delete (D) a Customer? ')
if ans.upper() == "D":
#del_list_item(courier_list)
delete_db_record("customers")
else:
amend_db_record("customers")
elif choicemade == 3:
add_new_record_in_db("customers")
elif choicemade == 0:
return
else:
print('Invalid choice. Try again')
#Orders Menu
def orders_menu_choice(orders_list):
clear_screen()
choicemade = 1
while choicemade !=0:
print("What would you like to do with Orders?")
print('''
1 See Orders
2 Amend Orders
3 Create New Order
4 Update Customer Fields
5 Update Order Status
0 Exit to Main Menu
''')
choicemade = int(input())
if choicemade == 1:
clear_screen()
display_orders()
input('Press any key to continue ')
elif choicemade == 2:
print('These are your current orders ')
display_orders()
ans = input('Would you like to amend (A) or delete (D) an Order? ')
if ans.upper() == "D":
del_list_item(orders_list)
else:
amend_list_item(orders_list)
elif choicemade == 3:
create_order()
elif choicemade == 4:
print("This functionality not available on your current subscription level. \n Consider upgrading to Premium.") #### update required to add
elif choicemade == 5:
amend_order_status(orders_list)
elif choicemade == 0:
return
else:
print('Invalid choice. Try again')
def clear_screen():
os.system('cls' if os.name=='nt' else 'clear')
print(cafe_banner)
print('{:^48s}'.format('Welcome to CAFE APP\n'))
print('{:^48s}'.format("A Lazy Pig application\n"))
```
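The extract does not include the application's entry point. Below is a minimal sketch, assuming only the menu functions shown above, of how they could be wired together; `run_app` and the in-memory `orders_list` are hypothetical names, not the repository's actual code:
```python
# Hedged sketch of a possible entry point; not the repository's actual main module.
from menus import (start_menu_choice, products_menu_choice, couriers_menu_choice,
                   customers_menu_choice, orders_menu_choice, welcome_note)


def run_app():
    print(welcome_note())
    orders_list = []  # hypothetical in-memory orders store passed to the orders menu
    while True:
        choice = start_menu_choice()
        if choice == 1:
            products_menu_choice()
        elif choice == 2:
            couriers_menu_choice()
        elif choice == 3:
            customers_menu_choice()
        elif choice == 4:
            orders_menu_choice(orders_list)
        elif choice == 0:
            break
        else:
            print('Invalid choice. Try again')


if __name__ == "__main__":
    run_app()
```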
{
"source": "jlrainbolt/MG5_v2_6_1",
"score": 2
} |
#### File: madgraph/iolibs/files.py
```python
import logging
import os
import shutil
logger = logging.getLogger('madgraph.files')
#===============================================================================
# read_from_file
#===============================================================================
def read_from_file(filename, myfunct, *args, **opt):
"""Open a file, apply the function myfunct (with sock as an arg)
on its content and return the result. Deals properly with errors and
returns None if something goes wrong.
"""
try:
sock = open(filename, 'r')
try:
ret_value = myfunct(sock, *args)
finally:
sock.close()
except IOError, (errno, strerror):
if opt.has_key('print_error'):
if not opt['print_error']:
return None
logger.error("I/O error on file %s (%s): %s" % (filename,errno, strerror))
return None
return ret_value
#===============================================================================
# write_to_file
#===============================================================================
def write_to_file(filename, myfunct, *args, **opts):
"""Open a file for writing, apply the function myfunct (with sock as an arg)
on its content and return the result. Deals properly with errors and
returns None if something goes wrong.
"""
try:
sock = open(filename, 'w')
try:
ret_value = myfunct(sock, *args)
finally:
sock.close()
except IOError, (errno, strerror):
if 'log' not in opts or opts['log']:
logger.error("I/O error (%s): %s" % (errno, strerror))
return None
return ret_value
#===============================================================================
# append_to_file
#===============================================================================
def append_to_file(filename, myfunct, *args):
"""Open a file for appending, apply the function myfunct (with
sock as an arg) on its content and return the result. Deals
properly with errors and returns None if something goes wrong.
"""
try:
sock = open(filename, 'a')
try:
ret_value = myfunct(sock, *args)
finally:
sock.close()
except IOError, (errno, strerror):
logger.error("I/O error (%s): %s" % (errno, strerror))
return None
return ret_value
#===============================================================================
# check pickle validity
#===============================================================================
def is_uptodate(picklefile, path_list=None, min_time=1343682423):
"""Check if the pickle files is uptodate compare to a list of files.
If no files are given, the pickle files is checked against it\' current
directory"""
if not os.path.exists(picklefile):
return False
if path_list is None:
dirpath = os.path.dirname(picklefile)
path_list = [ os.path.join(dirpath, file) for file in \
os.listdir(dirpath)]
    assert type(path_list) == list, 'is_uptodate expects a list of files'
pickle_date = os.path.getctime(picklefile)
if pickle_date < min_time:
return False
for path in path_list:
try:
if os.path.getmtime(path) > pickle_date:
return False
except Exception:
continue
#all pass
return True
################################################################################
## helper function for universal file treatment
################################################################################
def format_path(path):
"""Format the path in local format taking in entry a unix format"""
if path[0] != '/':
return os.path.join(*path.split('/'))
else:
return os.path.sep + os.path.join(*path.split('/'))
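# Illustrative example (not part of the original source): format_path converts a
# unix-style path into the local os.path convention, e.g.
#   format_path('Cards/param_card.dat')  ->  os.path.join('Cards', 'param_card.dat')
# while absolute paths keep their leading separator.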
def cp(path1, path2, log=True, error=False):
""" simple cp taking linux or mix entry"""
path1 = format_path(path1)
path2 = format_path(path2)
try:
shutil.copy(path1, path2)
except IOError, why:
try:
if os.path.exists(path2):
path2 = os.path.join(path2, os.path.split(path1)[1])
shutil.copytree(path1, path2)
except IOError, why:
if error:
raise
if log:
logger.warning(why)
except shutil.Error:
        # identical file
pass
def rm(path, log=True):
"""removes path, that can be a single element or a list"""
if type(path) == list:
for p in path:
rm(p, log)
else:
path = format_path(path)
try:
os.remove(path)
except OSError:
shutil.rmtree(path, ignore_errors = True)
def mv(path1, path2):
"""simple mv taking linux or mix format entry"""
path1 = format_path(path1)
path2 = format_path(path2)
try:
shutil.move(path1, path2)
except Exception:
# An error can occur if the files exist at final destination
if os.path.isfile(path2):
os.remove(path2)
shutil.move(path1, path2)
return
elif os.path.isdir(path2) and os.path.exists(
os.path.join(path2, os.path.basename(path1))):
path2 = os.path.join(path2, os.path.basename(path1))
os.remove(path2)
shutil.move(path1, path2)
else:
raise
def put_at_end(src, *add):
with open(src,'ab') as wfd:
for f in add:
with open(f,'rb') as fd:
shutil.copyfileobj(fd, wfd, 1024*1024*100)
#100Mb chunk to avoid memory issue
def ln(file_pos, starting_dir='.', name='', log=True, cwd=None, abspath=False):
"""a simple way to have a symbolic link without to have to change directory
starting_point is the directory where to write the link
file_pos is the file to link
WARNING: not the linux convention
"""
file_pos = format_path(file_pos)
starting_dir = format_path(starting_dir)
if not name:
name = os.path.split(file_pos)[1]
if cwd:
if not os.path.isabs(file_pos):
file_pos = os.path.join(cwd, file_pos)
if not os.path.isabs(starting_dir):
starting_dir = os.path.join(cwd, starting_dir)
# Remove existing link if necessary
path = os.path.join(starting_dir, name)
if os.path.exists(path):
if os.path.realpath(path) != os.path.realpath(file_pos):
os.remove(os.path.join(starting_dir, name))
else:
return
if not abspath:
target = os.path.relpath(file_pos, starting_dir)
else:
target = file_pos
try:
os.symlink(target, os.path.join(starting_dir, name))
except Exception, error:
if log:
logger.warning('Could not link %s at position: %s' % (file_pos, \
os.path.realpath(starting_dir)))
def copytree(src, dst):
if not os.path.exists(dst):
os.makedirs(dst)
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
            copytree(s, d)
else:
shutil.copy2(s, d)
```
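Both `read_from_file` and `write_to_file` follow a callback pattern: the caller supplies a function that receives the open file object (named `sock` in the docstrings) plus any extra positional arguments, and `None` is returned if an I/O error occurs. A hedged usage sketch follows; the callbacks and file names below are illustrative, not part of MadGraph:
```python
# Hedged usage sketch for madgraph.iolibs.files; callbacks and paths are illustrative.
from madgraph.iolibs.files import read_from_file, write_to_file


def count_lines(sock):
    """Callback receiving the open file object."""
    return len(sock.readlines())


def dump_options(sock, options):
    """Callback writing a simple key=value listing."""
    for key, value in sorted(options.items()):
        sock.write("%s=%s\n" % (key, value))
    return True


n_lines = read_from_file('run_card.dat', count_lines, print_error=False)
ok = write_to_file('options.txt', dump_options, {'nevents': 10000, 'seed': 42})
```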
#### File: madgraph/various/process_checks.py
```python
from __future__ import division
import array
import copy
import fractions
import itertools
import logging
import math
import os
import sys
import re
import shutil
import random
import glob
import subprocess
import time
import datetime
import errno
import pickle
# If psutil becomes standard, the RAM check can be performed with it instead
#import psutil
import aloha
import aloha.aloha_writers as aloha_writers
import aloha.create_aloha as create_aloha
import madgraph.iolibs.export_python as export_python
import madgraph.iolibs.helas_call_writers as helas_call_writers
import models.import_ufo as import_ufo
import madgraph.iolibs.save_load_object as save_load_object
import madgraph.iolibs.file_writers as writers
import madgraph.core.base_objects as base_objects
import madgraph.core.color_algebra as color
import madgraph.core.color_amp as color_amp
import madgraph.core.helas_objects as helas_objects
import madgraph.core.diagram_generation as diagram_generation
import madgraph.various.rambo as rambo
import madgraph.various.misc as misc
import madgraph.various.progressbar as pbar
import madgraph.various.banner as bannermod
import madgraph.loop.loop_diagram_generation as loop_diagram_generation
import madgraph.loop.loop_helas_objects as loop_helas_objects
import madgraph.loop.loop_base_objects as loop_base_objects
import models.check_param_card as check_param_card
from madgraph.interface.madevent_interface import MadLoopInitializer
from madgraph.interface.common_run_interface import AskforEditCard
from madgraph import MG5DIR, InvalidCmd, MadGraph5Error
from madgraph.iolibs.files import cp
import StringIO
import models.model_reader as model_reader
import aloha.template_files.wavefunctions as wavefunctions
from aloha.template_files.wavefunctions import \
ixxxxx, oxxxxx, vxxxxx, sxxxxx, txxxxx, irxxxx, orxxxx
ADDED_GLOBAL = []
temp_dir_prefix = "TMP_CHECK"
pjoin = os.path.join
def clean_added_globals(to_clean):
for value in list(to_clean):
del globals()[value]
to_clean.remove(value)
#===============================================================================
# Fake interface to be instantiated when using process_checks from tests instead.
#===============================================================================
class FakeInterface(object):
""" Just an 'option container' to mimick the interface which is passed to the
tests. We put in only what is now used from interface by the test:
cmd.options['fortran_compiler']
cmd.options['complex_mass_scheme']
cmd._mgme_dir"""
def __init__(self, mgme_dir = "", complex_mass_scheme = False,
fortran_compiler = 'gfortran' ):
self._mgme_dir = mgme_dir
self.options = {}
self.options['complex_mass_scheme']=complex_mass_scheme
self.options['fortran_compiler']=fortran_compiler
#===============================================================================
# Logger for process_checks
#===============================================================================
logger = logging.getLogger('madgraph.various.process_checks')
# Helper function to boost momentum
def boost_momenta(p, boost_direction=1, beta=0.5):
"""boost the set momenta in the 'boost direction' by the 'beta'
factor"""
boost_p = []
gamma = 1/ math.sqrt(1 - beta**2)
for imp in p:
        boost_comp = imp[boost_direction]
E, px, py, pz = imp
boost_imp = []
# Energy:
        boost_imp.append(gamma * E - gamma * beta * boost_comp)
# PX
if boost_direction == 1:
boost_imp.append(-gamma * beta * E + gamma * px)
else:
boost_imp.append(px)
# PY
if boost_direction == 2:
boost_imp.append(-gamma * beta * E + gamma * py)
else:
boost_imp.append(py)
# PZ
if boost_direction == 3:
boost_imp.append(-gamma * beta * E + gamma * pz)
else:
boost_imp.append(pz)
#Add the momenta to the list
boost_p.append(boost_imp)
return boost_p
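# Illustrative example (not part of the original source): boosting a 2 -> 2
# phase-space point along the z axis (boost_direction=3) with beta=0.5. Each
# momentum is given as [E, px, py, pz], the same convention as get_momenta:
#
#   p = [[500., 0., 0., 500.], [500., 0., 0., -500.],
#        [500., 0., 250., 433.0127], [500., 0., -250., -433.0127]]
#   boosted = boost_momenta(p, boost_direction=3, beta=0.5)
#
# The boosted list keeps the ordering of the input and leaves the invariant
# mass of each individual momentum unchanged.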
#===============================================================================
# Helper class MatrixElementEvaluator
#===============================================================================
class MatrixElementEvaluator(object):
"""Class taking care of matrix element evaluation, storing
relevant quantities for speedup."""
def __init__(self, model , param_card = None,
auth_skipping = False, reuse = True, cmd = FakeInterface()):
"""Initialize object with stored_quantities, helas_writer,
model, etc.
auth_skipping = True means that any identical matrix element will be
evaluated only once
reuse = True means that the matrix element corresponding to a
given process can be reused (turn off if you are using
different models for the same process)"""
self.cmd = cmd
# Writer for the Python matrix elements
self.helas_writer = helas_call_writers.PythonUFOHelasCallWriter(model)
# Read a param_card and calculate couplings
self.full_model = model_reader.ModelReader(model)
try:
self.full_model.set_parameters_and_couplings(param_card)
except MadGraph5Error:
if isinstance(param_card, (str,file)):
raise
logger.warning('param_card present in the event file not compatible.'+
' We will use the default one.')
self.full_model.set_parameters_and_couplings()
self.auth_skipping = auth_skipping
self.reuse = reuse
self.cmass_scheme = cmd.options['complex_mass_scheme']
self.store_aloha = []
self.stored_quantities = {}
#===============================================================================
# Helper function evaluate_matrix_element
#===============================================================================
def evaluate_matrix_element(self, matrix_element, p=None, full_model=None,
gauge_check=False, auth_skipping=None, output='m2',
options=None):
"""Calculate the matrix element and evaluate it for a phase space point
output is either m2, amp, jamp
"""
if full_model:
self.full_model = full_model
process = matrix_element.get('processes')[0]
model = process.get('model')
if "matrix_elements" not in self.stored_quantities:
self.stored_quantities['matrix_elements'] = []
matrix_methods = {}
if self.reuse and "Matrix_%s" % process.shell_string() in globals() and p:
# Evaluate the matrix element for the momenta p
matrix = eval("Matrix_%s()" % process.shell_string())
me_value = matrix.smatrix(p, self.full_model)
if output == "m2":
return matrix.smatrix(p, self.full_model), matrix.amp2
else:
m2 = matrix.smatrix(p, self.full_model)
return {'m2': m2, output:getattr(matrix, output)}
if (auth_skipping or self.auth_skipping) and matrix_element in \
self.stored_quantities['matrix_elements']:
# Exactly the same matrix element has been tested
logger.info("Skipping %s, " % process.nice_string() + \
"identical matrix element already tested" \
)
return None
self.stored_quantities['matrix_elements'].append(matrix_element)
# Create an empty color basis, and the list of raw
# colorize objects (before simplification) associated
# with amplitude
if "list_colorize" not in self.stored_quantities:
self.stored_quantities["list_colorize"] = []
if "list_color_basis" not in self.stored_quantities:
self.stored_quantities["list_color_basis"] = []
if "list_color_matrices" not in self.stored_quantities:
self.stored_quantities["list_color_matrices"] = []
col_basis = color_amp.ColorBasis()
new_amp = matrix_element.get_base_amplitude()
matrix_element.set('base_amplitude', new_amp)
colorize_obj = col_basis.create_color_dict_list(new_amp)
try:
# If the color configuration of the ME has
# already been considered before, recycle
# the information
col_index = self.stored_quantities["list_colorize"].index(colorize_obj)
except ValueError:
# If not, create color basis and color
# matrix accordingly
self.stored_quantities['list_colorize'].append(colorize_obj)
col_basis.build()
self.stored_quantities['list_color_basis'].append(col_basis)
col_matrix = color_amp.ColorMatrix(col_basis)
self.stored_quantities['list_color_matrices'].append(col_matrix)
col_index = -1
# Set the color for the matrix element
matrix_element.set('color_basis',
self.stored_quantities['list_color_basis'][col_index])
matrix_element.set('color_matrix',
self.stored_quantities['list_color_matrices'][col_index])
# Create the needed aloha routines
if "used_lorentz" not in self.stored_quantities:
self.stored_quantities["used_lorentz"] = []
me_used_lorentz = set(matrix_element.get_used_lorentz())
me_used_lorentz = [lorentz for lorentz in me_used_lorentz \
if lorentz not in self.store_aloha]
aloha_model = create_aloha.AbstractALOHAModel(model.get('name'))
aloha_model.add_Lorentz_object(model.get('lorentz'))
aloha_model.compute_subset(me_used_lorentz)
# Write out the routines in Python
aloha_routines = []
for routine in aloha_model.values():
aloha_routines.append(routine.write(output_dir = None,
mode='mg5',
language = 'Python'))
for routine in aloha_model.external_routines:
aloha_routines.append(
open(aloha_model.locate_external(routine, 'Python')).read())
# Define the routines to be available globally
previous_globals = list(globals().keys())
for routine in aloha_routines:
exec(routine, globals())
for key in globals().keys():
if key not in previous_globals:
ADDED_GLOBAL.append(key)
# Add the defined Aloha routines to used_lorentz
self.store_aloha.extend(me_used_lorentz)
# Export the matrix element to Python calls
exporter = export_python.ProcessExporterPython(matrix_element,
self.helas_writer)
try:
matrix_methods = exporter.get_python_matrix_methods(\
gauge_check=gauge_check)
# print "I got matrix_methods=",str(matrix_methods.items()[0][1])
except helas_call_writers.HelasWriterError, error:
logger.info(error)
return None
# If one wants to output the python code generated for the computation
# of these matrix elements, it is possible to run the following cmd
# open('output_path','w').write(matrix_methods[process.shell_string()])
if self.reuse:
# Define the routines (globally)
exec(matrix_methods[process.shell_string()], globals())
ADDED_GLOBAL.append('Matrix_%s' % process.shell_string())
else:
# Define the routines (locally is enough)
exec(matrix_methods[process.shell_string()])
# Generate phase space point to use
if not p:
p, w_rambo = self.get_momenta(process, options)
# Evaluate the matrix element for the momenta p
exec("data = Matrix_%s()" % process.shell_string())
if output == "m2":
return data.smatrix(p, self.full_model), data.amp2
else:
m2 = data.smatrix(p,self.full_model)
return {'m2': m2, output:getattr(data, output)}
@staticmethod
def pass_isolation_cuts(pmoms, ptcut=50.0, drcut=0.5):
""" Check whether the specified kinematic point passes isolation cuts
"""
def Pt(pmom):
""" Computes the pt of a 4-momentum"""
return math.sqrt(pmom[1]**2+pmom[2]**2)
def DeltaR(p1,p2):
""" Computes the DeltaR between two 4-momenta"""
# First compute pseudo-rapidities
p1_vec=math.sqrt(p1[1]**2+p1[2]**2+p1[3]**2)
p2_vec=math.sqrt(p2[1]**2+p2[2]**2+p2[3]**2)
eta1=0.5*math.log((p1_vec+p1[3])/(p1_vec-p1[3]))
eta2=0.5*math.log((p2_vec+p2[3])/(p2_vec-p2[3]))
            # Then azimuthal angle phi
phi1=math.atan2(p1[2],p1[1])
phi2=math.atan2(p2[2],p2[1])
dphi=abs(phi2-phi1)
# Take the wraparound factor into account
dphi=abs(abs(dphi-math.pi)-math.pi)
# Now return deltaR
return math.sqrt(dphi**2+(eta2-eta1)**2)
for i, pmom in enumerate(pmoms[2:]):
# Pt > 50 GeV
if Pt(pmom)<ptcut:
return False
# Delta_R ij > 0.5
for pmom2 in pmoms[3+i:]:
if DeltaR(pmom,pmom2)<drcut:
return False
return True
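    # Illustrative example (not part of the original source): pass_isolation_cuts
    # takes the full list of external momenta, skips the two initial states
    # (pmoms[2:]) and requires pT > ptcut for every final-state momentum and
    # DeltaR > drcut for every final-state pair, e.g.
    #
    #   passes = MatrixElementEvaluator.pass_isolation_cuts(
    #       [[500., 0., 0., 500.], [500., 0., 0., -500.],
    #        [500., 0., 250., 433.0127], [500., 0., -250., -433.0127]],
    #       ptcut=50.0, drcut=0.5)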
#===============================================================================
# Helper function get_momenta
#===============================================================================
def get_momenta(self, process, options=None, special_mass=None):
"""Get a point in phase space for the external states in the given
process, with the CM energy given. The incoming particles are
assumed to be oriented along the z axis, with particle 1 along the
positive z axis.
        For the CMS check, one must be able to choose the mass of the special
        resonance particle with id = -1, and the special_mass option allows one
        to specify it."""
if not options:
energy=1000
events=None
else:
energy = options['energy']
events = options['events']
to_skip = 0
if not (isinstance(process, base_objects.Process) and \
isinstance(energy, (float,int))):
raise rambo.RAMBOError, "Not correct type for arguments to get_momenta"
sorted_legs = sorted(process.get('legs'), lambda l1, l2:\
l1.get('number') - l2.get('number'))
# If an events file is given use it for getting the momentum
if events:
ids = [l.get('id') for l in sorted_legs]
import MadSpin.decay as madspin
if not hasattr(self, 'event_file'):
fsock = open(events)
self.event_file = madspin.Event(fsock)
skip = 0
while self.event_file.get_next_event() != 'no_event':
event = self.event_file.particle
#check if the event is compatible
event_ids = [p['pid'] for p in event.values()]
if event_ids == ids:
skip += 1
if skip > to_skip:
break
else:
raise MadGraph5Error, 'No compatible events for %s' % ids
p = []
for part in event.values():
m = part['momentum']
p.append([m.E, m.px, m.py, m.pz])
return p, 1
nincoming = len([leg for leg in sorted_legs if leg.get('state') == False])
nfinal = len(sorted_legs) - nincoming
# Find masses of particles
mass = []
for l in sorted_legs:
if l.get('id') != 0:
mass_string = self.full_model.get_particle(l.get('id')).get('mass')
mass.append(self.full_model.get('parameter_dict')[mass_string].real)
else:
if isinstance(special_mass, float):
mass.append(special_mass)
else:
raise Exception, "A 'special_mass' option must be specified"+\
" in get_momenta when a leg with id=-10 is present (for CMS check)"
#mass = [math.sqrt(m.real) for m in mass]
# Make sure energy is large enough for incoming and outgoing particles,
# # Keep the special_mass case separate to be sure that nothing interferes
# # with the regular usage of get_momenta.
# if not (any(l.get('id')==0 for l in sorted_legs) and \
# isinstance(special_mass, float)):
energy = max(energy, sum(mass[:nincoming])*1.2,sum(mass[nincoming:])*1.2)
# else:
# incoming_mass = sum([mass[i] for i, leg in enumerate(sorted_legs) \
# if leg.get('state') == False and leg.get('id')!=0])
# outcoming_mass = sum([mass[i] for i, leg in enumerate(sorted_legs) \
# if leg.get('state') == True and leg.get('id')!=0])
# energy = max(energy, incoming_mass*1.2, outcoming_mass*1.2)
if nfinal == 1:
p = []
energy = mass[-1]
p.append([energy/2,0,0,energy/2])
p.append([energy/2,0,0,-energy/2])
p.append([mass[-1],0,0,0])
return p, 1.0
e2 = energy**2
m1 = mass[0]
p = []
masses = rambo.FortranList(nfinal)
for i in range(nfinal):
masses[i+1] = mass[nincoming + i]
if nincoming == 1:
# Momenta for the incoming particle
p.append([abs(m1), 0., 0., 0.])
p_rambo, w_rambo = rambo.RAMBO(nfinal, abs(m1), masses)
# Reorder momenta from px,py,pz,E to E,px,py,pz scheme
for i in range(1, nfinal+1):
momi = [p_rambo[(4,i)], p_rambo[(1,i)],
p_rambo[(2,i)], p_rambo[(3,i)]]
p.append(momi)
return p, w_rambo
if nincoming != 2:
raise rambo.RAMBOError('Need 1 or 2 incoming particles')
if nfinal == 1:
energy = masses[1]
if masses[1] == 0.0:
raise rambo.RAMBOError('The kinematic 2 > 1 with the final'+\
' state particle massless is invalid')
e2 = energy**2
m2 = mass[1]
mom = math.sqrt((e2**2 - 2*e2*m1**2 + m1**4 - 2*e2*m2**2 - \
2*m1**2*m2**2 + m2**4) / (4*e2))
e1 = math.sqrt(mom**2+m1**2)
e2 = math.sqrt(mom**2+m2**2)
# Set momenta for incoming particles
p.append([e1, 0., 0., mom])
p.append([e2, 0., 0., -mom])
if nfinal == 1:
p.append([energy, 0., 0., 0.])
return p, 1.
p_rambo, w_rambo = rambo.RAMBO(nfinal, energy, masses)
# Reorder momenta from px,py,pz,E to E,px,py,pz scheme
for i in range(1, nfinal+1):
momi = [p_rambo[(4,i)], p_rambo[(1,i)],
p_rambo[(2,i)], p_rambo[(3,i)]]
p.append(momi)
return p, w_rambo
#===============================================================================
# Helper class LoopMatrixElementEvaluator
#===============================================================================
class LoopMatrixElementEvaluator(MatrixElementEvaluator):
"""Class taking care of matrix element evaluation for loop processes."""
def __init__(self,cuttools_dir=None, output_path=None, tir_dir={},
cmd=FakeInterface(),*args,**kwargs):
"""Allow for initializing the MG5 root where the temporary fortran
output for checks is placed."""
super(LoopMatrixElementEvaluator,self).__init__(*args,cmd=cmd,**kwargs)
self.mg_root=self.cmd._mgme_dir
# If no specific output path is specified, then write in MG5 root directory
if output_path is None:
self.output_path = self.cmd._mgme_dir
else:
self.output_path = output_path
self.cuttools_dir=cuttools_dir
self.tir_dir=tir_dir
self.loop_optimized_output = cmd.options['loop_optimized_output']
        # Set proliferate to true if you want to keep the produced directories
        # and possibly reuse them later
self.proliferate=True
#===============================================================================
# Helper function evaluate_matrix_element for loops
#===============================================================================
def evaluate_matrix_element(self, matrix_element, p=None, options=None,
gauge_check=False, auth_skipping=None, output='m2',
PS_name = None, MLOptions={}):
"""Calculate the matrix element and evaluate it for a phase space point
        Output can only be 'm2'. The 'jamp' and 'amp' returned values are just
empty lists at this point.
If PS_name is not none the written out PS.input will be saved in
the file PS.input_<PS_name> as well."""
process = matrix_element.get('processes')[0]
model = process.get('model')
if options and 'split_orders' in options.keys():
split_orders = options['split_orders']
else:
split_orders = -1
if "loop_matrix_elements" not in self.stored_quantities:
self.stored_quantities['loop_matrix_elements'] = []
if (auth_skipping or self.auth_skipping) and matrix_element in \
[el[0] for el in self.stored_quantities['loop_matrix_elements']]:
# Exactly the same matrix element has been tested
logger.info("Skipping %s, " % process.nice_string() + \
"identical matrix element already tested" )
return None
# Generate phase space point to use
if not p:
p, w_rambo = self.get_momenta(process, options=options)
if matrix_element in [el[0] for el in \
self.stored_quantities['loop_matrix_elements']]:
export_dir=self.stored_quantities['loop_matrix_elements'][\
[el[0] for el in self.stored_quantities['loop_matrix_elements']\
].index(matrix_element)][1]
logger.debug("Reusing generated output %s"%str(export_dir))
else:
export_dir=pjoin(self.output_path,temp_dir_prefix)
if os.path.isdir(export_dir):
if not self.proliferate:
raise InvalidCmd("The directory %s already exist. Please remove it."%str(export_dir))
else:
id=1
while os.path.isdir(pjoin(self.output_path,\
'%s_%i'%(temp_dir_prefix,id))):
id+=1
export_dir=pjoin(self.output_path,'%s_%i'%(temp_dir_prefix,id))
if self.proliferate:
self.stored_quantities['loop_matrix_elements'].append(\
(matrix_element,export_dir))
# I do the import here because there is some cyclic import of export_v4
# otherwise
import madgraph.loop.loop_exporters as loop_exporters
if self.loop_optimized_output:
exporter_class=loop_exporters.LoopProcessOptimizedExporterFortranSA
else:
exporter_class=loop_exporters.LoopProcessExporterFortranSA
MLoptions = {'clean': True,
'complex_mass': self.cmass_scheme,
'export_format':'madloop',
'mp':True,
'SubProc_prefix':'P',
'compute_color_flows': not process.get('has_born'),
'loop_dir': pjoin(self.mg_root,'Template','loop_material'),
'cuttools_dir': self.cuttools_dir,
'fortran_compiler': self.cmd.options['fortran_compiler'],
'output_dependencies': self.cmd.options['output_dependencies']}
MLoptions.update(self.tir_dir)
FortranExporter = exporter_class(export_dir, MLoptions)
FortranModel = helas_call_writers.FortranUFOHelasCallWriter(model)
FortranExporter.copy_template(model)
FortranExporter.generate_subprocess_directory(matrix_element, FortranModel)
wanted_lorentz = list(set(matrix_element.get_used_lorentz()))
wanted_couplings = list(set([c for l in matrix_element.get_used_couplings() \
for c in l]))
FortranExporter.convert_model(model,wanted_lorentz,wanted_couplings)
FortranExporter.finalize(matrix_element,"",self.cmd.options, ['nojpeg'])
MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
split_orders=split_orders)
self.fix_MadLoopParamCard(pjoin(export_dir,'Cards'),
mp = gauge_check and self.loop_optimized_output, MLOptions=MLOptions)
if gauge_check:
file_path, orig_file_content, new_file_content = \
self.setup_ward_check(pjoin(export_dir,'SubProcesses'),
['helas_calls_ampb_1.f','loop_matrix.f'])
file = open(file_path,'w')
file.write(new_file_content)
file.close()
if self.loop_optimized_output:
mp_file_path, mp_orig_file_content, mp_new_file_content = \
self.setup_ward_check(pjoin(export_dir,'SubProcesses'),
['mp_helas_calls_ampb_1.f','mp_compute_loop_coefs.f'],mp=True)
mp_file = open(mp_file_path,'w')
mp_file.write(mp_new_file_content)
mp_file.close()
# Evaluate the matrix element for the momenta p
finite_m2 = self.get_me_value(process.shell_string_v4(), 0,\
export_dir, p, PS_name = PS_name, verbose=False)[0][0]
# Restore the original loop_matrix.f code so that it could be reused
if gauge_check:
file = open(file_path,'w')
file.write(orig_file_content)
file.close()
if self.loop_optimized_output:
mp_file = open(mp_file_path,'w')
mp_file.write(mp_orig_file_content)
mp_file.close()
# Now erase the output directory
if not self.proliferate:
shutil.rmtree(export_dir)
if output == "m2":
# We do not provide details (i.e. amps and Jamps) of the computed
# amplitudes, hence the []
return finite_m2, []
else:
return {'m2': finite_m2, output:[]}
def fix_MadLoopParamCard(self,dir_name, mp=False, loop_filter=False,
DoubleCheckHelicityFilter=False, MLOptions={}):
""" Set parameters in MadLoopParams.dat suited for these checks.MP
stands for multiple precision and can either be a bool or an integer
to specify the mode."""
        # Instantiate a MadLoopParam card
file = open(pjoin(dir_name,'MadLoopParams.dat'), 'r')
MLCard = bannermod.MadLoopParam(file)
if isinstance(mp,bool):
mode = 4 if mp else 1
else:
mode = mp
for key, value in MLOptions.items():
if key == "MLReductionLib":
if isinstance(value, int):
ml_reds = str(value)
if isinstance(value,list):
if len(value)==0:
ml_reds = '1'
else:
ml_reds="|".join([str(vl) for vl in value])
elif isinstance(value, str):
ml_reds = value
elif isinstance(value, int):
ml_reds = str(value)
else:
raise MadGraph5Error, 'The argument %s '%str(value)+\
' in fix_MadLoopParamCard must be a string, integer'+\
' or a list.'
MLCard.set("MLReductionLib",ml_reds)
elif key == 'ImprovePS':
MLCard.set('ImprovePSPoint',2 if value else -1)
elif key == 'ForceMP':
mode = 4
elif key in MLCard:
MLCard.set(key,value)
else:
raise Exception, 'The MadLoop options %s specified in function'%key+\
' fix_MadLoopParamCard does not correspond to an option defined'+\
' MadLoop nor is it specially handled in this function.'
        if mode is not None:
MLCard.set('CTModeRun',mode)
MLCard.set('CTModeInit',mode)
MLCard.set('UseLoopFilter',loop_filter)
MLCard.set('DoubleCheckHelicityFilter',DoubleCheckHelicityFilter)
MLCard.write(pjoin(dir_name,os.pardir,'SubProcesses','MadLoopParams.dat'))
@classmethod
def get_me_value(cls, proc, proc_id, working_dir, PSpoint=[], PS_name = None,
verbose=True, format='tuple', skip_compilation=False):
"""Compile and run ./check, then parse the output and return the result
for process with id = proc_id and PSpoint if specified.
If PS_name is not none the written out PS.input will be saved in
the file PS.input_<PS_name> as well"""
if verbose:
sys.stdout.write('.')
sys.stdout.flush()
shell_name = None
directories = misc.glob('P%i_*' % proc_id, pjoin(working_dir, 'SubProcesses'))
if directories and os.path.isdir(directories[0]):
shell_name = os.path.basename(directories[0])
# If directory doesn't exist, skip and return 0
if not shell_name:
logging.info("Directory hasn't been created for process %s" %proc)
return ((0.0, 0.0, 0.0, 0.0, 0), [])
if verbose: logging.debug("Working on process %s in dir %s" % (proc, shell_name))
dir_name = pjoin(working_dir, 'SubProcesses', shell_name)
if not skip_compilation:
# Make sure to recreate the executable and modified sources
if os.path.isfile(pjoin(dir_name,'check')):
os.remove(pjoin(dir_name,'check'))
try:
os.remove(pjoin(dir_name,'check_sa.o'))
os.remove(pjoin(dir_name,'loop_matrix.o'))
except OSError:
pass
# Now run make
devnull = open(os.devnull, 'w')
retcode = subprocess.call(['make','check'],
cwd=dir_name, stdout=devnull, stderr=devnull)
devnull.close()
if retcode != 0:
logging.info("Error while executing make in %s" % shell_name)
return ((0.0, 0.0, 0.0, 0.0, 0), [])
# If a PS point is specified, write out the corresponding PS.input
if PSpoint:
misc.write_PS_input(pjoin(dir_name, 'PS.input'),PSpoint)
# Also save the PS point used in PS.input_<PS_name> if the user
# wanted so. It is used for the lorentz check.
if not PS_name is None:
misc.write_PS_input(pjoin(dir_name, \
'PS.input_%s'%PS_name),PSpoint)
# Run ./check
try:
output = subprocess.Popen('./check',
cwd=dir_name,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout
output.read()
output.close()
if os.path.exists(pjoin(dir_name,'result.dat')):
return cls.parse_check_output(file(pjoin(dir_name,\
'result.dat')),format=format)
else:
logging.warning("Error while looking for file %s"%str(os.path\
.join(dir_name,'result.dat')))
return ((0.0, 0.0, 0.0, 0.0, 0), [])
except IOError:
logging.warning("Error while executing ./check in %s" % shell_name)
return ((0.0, 0.0, 0.0, 0.0, 0), [])
@classmethod
def parse_check_output(cls,output,format='tuple'):
"""Parse the output string and return a pair where first four values are
the finite, born, single and double pole of the ME and the fourth is the
GeV exponent and the second value is a list of 4 momenta for all particles
involved. Return the answer in two possible formats, 'tuple' or 'dict'."""
res_dict = {'res_p':[],
'born':0.0,
'finite':0.0,
'1eps':0.0,
'2eps':0.0,
'gev_pow':0,
'export_format':'Default',
'accuracy':0.0,
'return_code':0,
'Split_Orders_Names':[],
'Loop_SO_Results':[],
'Born_SO_Results':[],
'Born_kept':[],
'Loop_kept':[]
}
res_p = []
        # output is supposed to be a file; if it is instead the content itself,
        # convert it to a list of lines.
if isinstance(output,file) or isinstance(output,list):
text=output
elif isinstance(output,str):
text=output.split('\n')
else:
raise MadGraph5Error, 'Type for argument output not supported in'+\
' parse_check_output.'
for line in text:
splitline=line.split()
if len(splitline)==0:
continue
elif splitline[0]=='PS':
res_p.append([float(s) for s in splitline[1:]])
elif splitline[0]=='ASO2PI':
res_dict['alphaS_over_2pi']=float(splitline[1])
elif splitline[0]=='BORN':
res_dict['born']=float(splitline[1])
elif splitline[0]=='FIN':
res_dict['finite']=float(splitline[1])
elif splitline[0]=='1EPS':
res_dict['1eps']=float(splitline[1])
elif splitline[0]=='2EPS':
res_dict['2eps']=float(splitline[1])
elif splitline[0]=='EXP':
res_dict['gev_pow']=int(splitline[1])
elif splitline[0]=='Export_Format':
res_dict['export_format']=splitline[1]
elif splitline[0]=='ACC':
res_dict['accuracy']=float(splitline[1])
elif splitline[0]=='RETCODE':
res_dict['return_code']=int(splitline[1])
elif splitline[0]=='Split_Orders_Names':
res_dict['Split_Orders_Names']=splitline[1:]
elif splitline[0] in ['Born_kept', 'Loop_kept']:
res_dict[splitline[0]] = [kept=='T' for kept in splitline[1:]]
elif splitline[0] in ['Loop_SO_Results', 'Born_SO_Results']:
# The value for this key of this dictionary is a list of elements
# with format ([],{}) where the first list specifies the split
# orders to which the dictionary in the second position corresponds
# to.
res_dict[splitline[0]].append(\
([int(el) for el in splitline[1:]],{}))
elif splitline[0]=='SO_Loop':
res_dict['Loop_SO_Results'][-1][1][splitline[1]]=\
float(splitline[2])
elif splitline[0]=='SO_Born':
res_dict['Born_SO_Results'][-1][1][splitline[1]]=\
float(splitline[2])
res_dict['res_p'] = res_p
if format=='tuple':
return ((res_dict['finite'],res_dict['born'],res_dict['1eps'],
res_dict['2eps'],res_dict['gev_pow']), res_dict['res_p'])
else:
return res_dict
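    # Illustrative example (not part of the original source): parse_check_output
    # accepts an open file, a string or a list of lines in the format written by
    # ./check to result.dat, e.g. (values made up):
    #
    #   lines = ['PS 500.0 0.0 0.0 500.0', 'BORN 1.2e-3', 'FIN 4.5e-5',
    #            '1EPS 0.0', '2EPS 0.0', 'EXP -4']
    #   (finite, born, eps1, eps2, gev_pow), res_p = \
    #       LoopMatrixElementEvaluator.parse_check_output(lines, format='tuple')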
@staticmethod
def apply_log_tweak(proc_path, mode):
""" Changes the file model_functions.f in the SOURCE of the process output
so as to change how logarithms are analytically continued and see how
it impacts the CMS check."""
valid_modes = ['default','recompile']
if not (mode in valid_modes or (isinstance(mode, list) and
len(mode)==2 and all(m in ['logp','logm','log'] for m in mode))):
raise MadGraph5Error("Mode '%s' not reckonized"%mode+
" in function apply_log_tweak.")
model_path = pjoin(proc_path,'Source','MODEL')
directories = misc.glob('P0_*', pjoin(proc_path,'SubProcesses'))
if directories and os.path.isdir(directories[0]):
exe_path = directories[0]
else:
raise MadGraph5Error, 'Could not find a process executable '+\
                'directory in %s'%proc_path
bu_path = pjoin(model_path, 'model_functions.f__backUp__')
if mode=='default':
# Restore the default source file model_function.f
if not os.path.isfile(bu_path):
raise MadGraph5Error, 'Back up file %s could not be found.'%bu_path
shutil.move(bu_path, pjoin(model_path, 'model_functions.f'))
return
if mode=='recompile':
try:
os.remove(pjoin(model_path,'model_functions.o'))
os.remove(pjoin(proc_path,'lib','libmodel.a'))
except:
pass
misc.compile(cwd=model_path)
            # Remove the executable to ensure proper recompilation
try:
os.remove(pjoin(exe_path,'check'))
except:
pass
misc.compile(arg=['check'], cwd=exe_path)
return
if mode[0]==mode[1]:
return
# Now change the logs
mp_prefix = 'MP_'
target_line = 'FUNCTION %%sREG%s(ARG)'%mode[0].lower()
# Make sure to create a backup
if not os.path.isfile(bu_path):
shutil.copy(pjoin(model_path, 'model_functions.f'), bu_path)
model_functions = open(pjoin(model_path,'model_functions.f'),'r')
new_model_functions = []
has_replaced = False
just_replaced = False
find_one_replacement= False
mp_mode = None
suffix = {'log':'','logp':r'\s*\+\s*TWOPII','logm':r'\s*\-\s*TWOPII'}
replace_regex=r'^\s*%%sREG%s\s*=\s*LOG\(ARG\)%s'%(mode[0],suffix[mode[0]])
for line in model_functions:
# Make sure to skip split lines after the replacement
if just_replaced:
if not re.match(r'\s{6}', line):
continue
else:
just_replaced = False
if mp_mode is None:
# We are looking for the start of the function
new_model_functions.append(line)
if (target_line%mp_prefix).lower() in line.lower():
mp_mode = mp_prefix
elif (target_line%'').lower() in line.lower():
mp_mode = ''
else:
# Now apply the substitution
if not has_replaced and re.match(replace_regex%mp_mode,line,
re.IGNORECASE):
# Apply the replacement
if mode[0]=='log':
if mp_mode=='':
new_line =\
""" if(dble(arg).lt.0.0d0.and.dimag(arg).gt.0.0d0)then
reg%s=log(arg) %s TWOPII
else
reg%s=log(arg)
endif\n"""%(mode[0],'+' if mode[1]=='logp' else '-',mode[0])
else:
new_line =\
""" if(real(arg,kind=16).lt.0.0e0_16.and.imagpart(arg).lt.0.0e0_16)then
mp_reg%s=log(arg) %s TWOPII
else
mp_reg%s=log(arg)
endif\n"""%(mode[0],'+' if mode[1]=='logp' else '-',mode[0])
else:
new_line = ' '*6+"%sreg%s=log(arg) %s\n"%(mp_mode,mode[0],
('' if mode[1]=='log' else ('+TWOPII' if mode[1]=='logp' else '-TWOPII')))
new_model_functions.append(new_line)
just_replaced = True
has_replaced = True
find_one_replacement = True
else:
new_model_functions.append(line)
if re.match(r'^\s*END\s*$',line,re.IGNORECASE):
mp_mode = None
has_replaced = False
if not find_one_replacement:
logger.warning('No replacement was found/performed for token '+
"'%s->%s'."%(mode[0],mode[1]))
else:
open(pjoin(model_path,'model_functions.f'),'w').\
write(''.join(new_model_functions))
return
def setup_ward_check(self, working_dir, file_names, mp = False):
""" Modify loop_matrix.f so to have one external massless gauge boson
polarization vector turned into its momentum. It is not a pretty and
flexible solution but it works for this particular case."""
shell_name = None
directories = misc.glob('P0_*', working_dir)
if directories and os.path.isdir(directories[0]):
shell_name = os.path.basename(directories[0])
dir_name = pjoin(working_dir, shell_name)
# Look, in order, for all the possible file names provided.
ind=0
while ind<len(file_names) and not os.path.isfile(pjoin(dir_name,
file_names[ind])):
ind += 1
if ind==len(file_names):
raise Exception, "No helas calls output file found."
helas_file_name=pjoin(dir_name,file_names[ind])
file = open(pjoin(dir_name,helas_file_name), 'r')
helas_calls_out=""
original_file=""
gaugeVectorRegExp=re.compile(\
r"CALL (MP\_)?VXXXXX\(P\(0,(?P<p_id>\d+)\),((D)?CMPLX\()?ZERO((,KIND\=16)?\))?,"+
r"NHEL\(\d+\),[\+\-]1\*IC\(\d+\),W\(1,(?P<wf_id>\d+(,H)?)\)\)")
foundGauge=False
# Now we modify the first massless gauge vector wavefunction
for line in file:
helas_calls_out+=line
original_file+=line
if line.find("INCLUDE 'coupl.inc'") != -1 or \
line.find("INCLUDE 'mp_coupl_same_name.inc'") !=-1:
helas_calls_out+=" INTEGER WARDINT\n"
if not foundGauge:
res=gaugeVectorRegExp.search(line)
if res!=None:
foundGauge=True
helas_calls_out+=" DO WARDINT=1,4\n"
helas_calls_out+=" W(WARDINT+4,"+res.group('wf_id')+")="
if not mp:
helas_calls_out+=\
"DCMPLX(P(WARDINT-1,"+res.group('p_id')+"),0.0D0)\n"
else:
helas_calls_out+="CMPLX(P(WARDINT-1,"+\
res.group('p_id')+"),0.0E0_16,KIND=16)\n"
helas_calls_out+=" ENDDO\n"
file.close()
return pjoin(dir_name,helas_file_name), original_file, helas_calls_out
#===============================================================================
# Helper class LoopMatrixElementEvaluator
#===============================================================================
class LoopMatrixElementTimer(LoopMatrixElementEvaluator):
"""Class taking care of matrix element evaluation and running timing for
loop processes."""
def __init__(self, *args, **kwargs):
""" Same as the mother for now """
LoopMatrixElementEvaluator.__init__(self,*args, **kwargs)
@classmethod
def get_MadLoop_Params(cls,MLCardPath):
""" Return a dictionary of the parameter of the MadLoopParamCard.
The key is the name of the parameter and the value is the corresponding
string read from the card."""
return bannermod.MadLoopParam(MLCardPath)
@classmethod
def set_MadLoop_Params(cls,MLCardPath,params):
""" Set the parameters in MadLoopParamCard to the values specified in
the dictionary params.
The key is the name of the parameter and the value is the corresponding
string to write in the card."""
MLcard = bannermod.MadLoopParam(MLCardPath)
for key,value in params.items():
MLcard.set(key, value, changeifuserset=False)
MLcard.write(MLCardPath, commentdefault=True)
def skip_loop_evaluation_setup(self, dir_name, skip=True):
""" Edit loop_matrix.f in order to skip the loop evaluation phase.
Notice this only affects the double precision evaluation which is
normally fine as we do not make the timing check on mp."""
file = open(pjoin(dir_name,'loop_matrix.f'), 'r')
loop_matrix = file.read()
file.close()
file = open(pjoin(dir_name,'loop_matrix.f'), 'w')
loop_matrix = re.sub(r"SKIPLOOPEVAL=\S+\)","SKIPLOOPEVAL=%s)"%('.TRUE.'
if skip else '.FALSE.'), loop_matrix)
file.write(loop_matrix)
file.close()
def boot_time_setup(self, dir_name, bootandstop=True):
""" Edit loop_matrix.f in order to set the flag which stops the
execution after booting the program (i.e. reading the color data)."""
file = open(pjoin(dir_name,'loop_matrix.f'), 'r')
loop_matrix = file.read()
file.close()
file = open(pjoin(dir_name,'loop_matrix.f'), 'w')
loop_matrix = re.sub(r"BOOTANDSTOP=\S+\)","BOOTANDSTOP=%s)"%('.TRUE.'
if bootandstop else '.FALSE.'), loop_matrix)
file.write(loop_matrix)
file.close()
def setup_process(self, matrix_element, export_dir, reusing = False,
param_card = None, MLOptions={},clean=True):
""" Output the matrix_element in argument and perform the initialization
while providing some details about the output in the dictionary returned.
Returns None if anything fails"""
infos={'Process_output': None,
'HELAS_MODEL_compilation' : None,
'dir_path' : None,
'Initialization' : None,
'Process_compilation' : None}
if not reusing and clean:
if os.path.isdir(export_dir):
clean_up(self.output_path)
if os.path.isdir(export_dir):
raise InvalidCmd(\
"The directory %s already exist. Please remove it."\
%str(export_dir))
else:
if not os.path.isdir(export_dir):
raise InvalidCmd(\
"Could not find the directory %s to reuse."%str(export_dir))
if not reusing and clean:
model = matrix_element['processes'][0].get('model')
# I do the import here because there is some cyclic import of export_v4
# otherwise
import madgraph.loop.loop_exporters as loop_exporters
if self.loop_optimized_output:
exporter_class=loop_exporters.LoopProcessOptimizedExporterFortranSA
else:
exporter_class=loop_exporters.LoopProcessExporterFortranSA
MLoptions = {'clean': True,
'complex_mass': self.cmass_scheme,
'export_format':'madloop',
'mp':True,
'SubProc_prefix':'P',
'compute_color_flows':not matrix_element['processes'][0].get('has_born'),
'loop_dir': pjoin(self.mg_root,'Template','loop_material'),
'cuttools_dir': self.cuttools_dir,
'fortran_compiler':self.cmd.options['fortran_compiler'],
'output_dependencies':self.cmd.options['output_dependencies']}
MLoptions.update(self.tir_dir)
start=time.time()
FortranExporter = exporter_class(export_dir, MLoptions)
FortranModel = helas_call_writers.FortranUFOHelasCallWriter(model)
FortranExporter.copy_template(model)
FortranExporter.generate_subprocess_directory(matrix_element, FortranModel)
wanted_lorentz = list(set(matrix_element.get_used_lorentz()))
wanted_couplings = list(set([c for l in matrix_element.get_used_couplings() \
for c in l]))
FortranExporter.convert_model(self.full_model,wanted_lorentz,wanted_couplings)
infos['Process_output'] = time.time()-start
start=time.time()
FortranExporter.finalize(matrix_element,"",self.cmd.options, ['nojpeg'])
infos['HELAS_MODEL_compilation'] = time.time()-start
# Copy the parameter card if provided
        if param_card is not None:
if isinstance(param_card, str):
cp(pjoin(param_card),\
pjoin(export_dir,'Cards','param_card.dat'))
else:
param_card.write(pjoin(export_dir,'Cards','param_card.dat'))
        # First initialize the filters (in later versions this will hopefully
        # be done at generation time, so that this step can be skipped)
MadLoopInitializer.fix_PSPoint_in_check(
pjoin(export_dir,'SubProcesses'), read_ps = False, npoints = 4)
self.fix_MadLoopParamCard(pjoin(export_dir,'Cards'),
mp = False, loop_filter = True,MLOptions=MLOptions)
shell_name = None
directories = misc.glob('P0_*', pjoin(export_dir, 'SubProcesses'))
if directories and os.path.isdir(directories[0]):
shell_name = os.path.basename(directories[0])
dir_name = pjoin(export_dir, 'SubProcesses', shell_name)
infos['dir_path']=dir_name
# Do not refresh the filter automatically as this is very often a waste
# of time
if not MadLoopInitializer.need_MadLoopInit(
export_dir, subproc_prefix='P'):
return infos
attempts = [3,15]
# remove check and check_sa.o for running initialization again
try:
os.remove(pjoin(dir_name,'check'))
os.remove(pjoin(dir_name,'check_sa.o'))
except OSError:
pass
nPS_necessary = MadLoopInitializer.run_initialization(dir_name,
pjoin(export_dir,'SubProcesses'),infos,\
req_files = ['HelFilter.dat','LoopFilter.dat'],
attempts = attempts)
if attempts is None:
logger.error("Could not compile the process %s,"%shell_name+\
" try to generate it via the 'generate' command.")
return None
if nPS_necessary is None:
logger.error("Could not initialize the process %s"%shell_name+\
" with %s PS points."%max(attempts))
return None
elif nPS_necessary > min(attempts):
logger.warning("Could not initialize the process %s"%shell_name+\
" with %d PS points. It needed %d."%(min(attempts),nPS_necessary))
return infos
def time_matrix_element(self, matrix_element, reusing = False,
param_card = None, keep_folder = False, options=None,
MLOptions = {}):
""" Output the matrix_element in argument and give detail information
about the timing for its output and running"""
# If True, then force three PS points only and skip the test on
# unpolarized PS point
make_it_quick=False
if options and 'split_orders' in options.keys():
split_orders = options['split_orders']
else:
split_orders = -1
assert ((not reusing and isinstance(matrix_element, \
helas_objects.HelasMatrixElement)) or (reusing and
isinstance(matrix_element, base_objects.Process)))
if not reusing:
proc_name = matrix_element['processes'][0].shell_string()[2:]
else:
proc_name = matrix_element.shell_string()[2:]
export_dir=pjoin(self.output_path,('SAVED' if keep_folder else '')+\
temp_dir_prefix+"_%s"%proc_name)
res_timings = self.setup_process(matrix_element,export_dir, \
reusing, param_card,MLOptions = MLOptions,clean=True)
if res_timings == None:
return None
dir_name=res_timings['dir_path']
def check_disk_usage(path):
return subprocess.Popen("du -shc -L "+str(path), \
stdout=subprocess.PIPE, shell=True).communicate()[0].split()[-2]
# The above is compatible with python 2.6, not the neater version below
#return subprocess.check_output(["du -shc %s"%path],shell=True).\
# split()[-2]
res_timings['du_source']=check_disk_usage(pjoin(\
export_dir,'Source','*','*.f'))
res_timings['du_process']=check_disk_usage(pjoin(dir_name,'*.f'))
res_timings['du_color']=check_disk_usage(pjoin(dir_name,
'MadLoop5_resources','*.dat'))
res_timings['du_exe']=check_disk_usage(pjoin(dir_name,'check'))
if not res_timings['Initialization']==None:
time_per_ps_estimate = (res_timings['Initialization']/4.0)/2.0
elif make_it_quick:
time_per_ps_estimate = -1.0
else:
# We cannot estimate it from the initialization, so we run just a
# 3-PS-point run to evaluate it.
MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
read_ps = False, npoints = 3, hel_config = -1,
split_orders=split_orders)
compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
time_per_ps_estimate = run_time/3.0
self.boot_time_setup(dir_name,bootandstop=True)
compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
res_timings['Booting_time'] = run_time
self.boot_time_setup(dir_name,bootandstop=False)
# Detect one contributing helicity
contributing_hel=0
n_contrib_hel=0
proc_prefix_file = open(pjoin(dir_name,'proc_prefix.txt'),'r')
proc_prefix = proc_prefix_file.read()
proc_prefix_file.close()
helicities = file(pjoin(dir_name,'MadLoop5_resources',
'%sHelFilter.dat'%proc_prefix)).read().split()
for i, hel in enumerate(helicities):
if (self.loop_optimized_output and int(hel)>-10000) or hel=='T':
if contributing_hel==0:
contributing_hel=i+1
n_contrib_hel += 1
if contributing_hel==0:
logger.error("Could not find a contributing helicity "+\
"configuration for process %s."%proc_name)
return None
res_timings['n_contrib_hel']=n_contrib_hel
res_timings['n_tot_hel']=len(helicities)
# We aim at a 30 sec run
if not make_it_quick:
target_pspoints_number = max(int(30.0/time_per_ps_estimate)+1,50)
else:
target_pspoints_number = 10
logger.info("Checking timing for process %s "%proc_name+\
"with %d PS points."%target_pspoints_number)
MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
read_ps = False, npoints = target_pspoints_number*2, \
hel_config = contributing_hel, split_orders=split_orders)
compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
if compile_time == None: return None
res_timings['run_polarized_total']=\
(run_time-res_timings['Booting_time'])/(target_pspoints_number*2)
if make_it_quick:
res_timings['run_unpolarized_total'] = 1.0
res_timings['ram_usage'] = 0.0
else:
MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
read_ps = False, npoints = target_pspoints_number, hel_config = -1,
split_orders=split_orders)
compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name,
checkRam=True)
if compile_time == None: return None
res_timings['run_unpolarized_total']=\
(run_time-res_timings['Booting_time'])/target_pspoints_number
res_timings['ram_usage'] = ram_usage
if not self.loop_optimized_output:
return res_timings
# For the loop optimized output, we also check the time spent in
# computing the coefficients of the loop numerator polynomials.
# So we modify loop_matrix.f in order to skip the loop evaluation phase.
self.skip_loop_evaluation_setup(dir_name,skip=True)
if make_it_quick:
res_timings['run_unpolarized_coefs'] = 1.0
else:
MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
read_ps = False, npoints = target_pspoints_number, hel_config = -1,
split_orders=split_orders)
compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
if compile_time == None: return None
res_timings['run_unpolarized_coefs']=\
(run_time-res_timings['Booting_time'])/target_pspoints_number
MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
read_ps = False, npoints = target_pspoints_number*2, \
hel_config = contributing_hel, split_orders=split_orders)
compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
if compile_time == None: return None
res_timings['run_polarized_coefs']=\
(run_time-res_timings['Booting_time'])/(target_pspoints_number*2)
# Restore the original file.
self.skip_loop_evaluation_setup(dir_name,skip=False)
return res_timings
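# Illustrative sketch (hedged, commented out so as not to alter the module):
# how the dictionary returned by time_matrix_element above could be consumed.
# 'my_timer' and 'my_me' are hypothetical stand-ins for a LoopMatrixElementTimer
# instance and a HelasMatrixElement; the keys used are the ones filled above.
#
#   res = my_timer.time_matrix_element(my_me, reusing=False)
#   if res is not None:
#       print "Helicity-summed time/PS point: %.3g ms" % \
#                                        (res['run_unpolarized_total']*1000.0)
#       print "Contributing helicities: %d/%d" % (res['n_contrib_hel'],
#                                                 res['n_tot_hel'])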
#===============================================================================
# check_matrix_element_stability
#===============================================================================
def check_matrix_element_stability(self, matrix_element,options=None,
infos_IN = None, param_card = None, keep_folder = False,
MLOptions = {}):
""" Output the matrix_element in argument, run in for nPoints and return
a dictionary containing the stability information on each of these points.
If infos are provided, then the matrix element output is skipped and
reused from a previous run and the content of infos.
"""
if not options:
reusing = False
nPoints = 100
split_orders = -1
else:
reusing = options['reuse']
nPoints = options['npoints']
split_orders = options['split_orders']
assert ((not reusing and isinstance(matrix_element, \
helas_objects.HelasMatrixElement)) or (reusing and
isinstance(matrix_element, base_objects.Process)))
# Helper functions
def format_PS_point(ps, rotation=0):
""" Write out the specified PS point to the file dir_path/PS.input
            while rotating it if rotation!=0. We consider only a few fixed
            rotations and boosts, but one could think of rotations of arbitrary
            angle too. The first three possibilities (1, 2 and 3) are rotations
            and a boost along the z-axis, so that improve_ps can still work.
rotation=0 => No rotation
rotation=1 => Z-axis pi/2 rotation
rotation=2 => Z-axis pi/4 rotation
rotation=3 => Z-axis boost
rotation=4 => (x'=z,y'=-x,z'=-y)
rotation=5 => (x'=-z,y'=y,z'=x)"""
if rotation==0:
p_out=copy.copy(ps)
elif rotation==1:
p_out = [[pm[0],-pm[2],pm[1],pm[3]] for pm in ps]
elif rotation==2:
sq2 = math.sqrt(2.0)
p_out = [[pm[0],(pm[1]-pm[2])/sq2,(pm[1]+pm[2])/sq2,pm[3]] for pm in ps]
elif rotation==3:
p_out = boost_momenta(ps, 3)
# From this point on, the transformations will prevent the
# improve_ps script of MadLoop from working.
elif rotation==4:
p_out=[[pm[0],pm[3],-pm[1],-pm[2]] for pm in ps]
elif rotation==5:
p_out=[[pm[0],-pm[3],pm[2],pm[1]] for pm in ps]
else:
raise MadGraph5Error("Rotation id %i not implemented"%rotation)
return '\n'.join([' '.join(['%.16E'%pi for pi in p]) for p in p_out])
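# Worked example of the conventions above (hedged, comments only): for the
# massless momenta p1=[1.,0.,0.,1.] and p2=[1.,0.,0.,-1.],
#
#   PS_str = format_PS_point([[1.,0.,0.,1.],[1.,0.,0.,-1.]], 4)
#
# rotation=4 maps [E,px,py,pz] to [E,pz,-px,-py], so p1 becomes
# [1.,1.,0.,0.], and the returned string holds one momentum per line with
# '%.16E' components, which is the format expected in PS.input.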
def pick_PS_point(proc, options):
""" Randomly generate a PS point and make sure it is eligible. Then
return it. Users can edit the cuts here if they want."""
p, w_rambo = self.get_momenta(proc, options)
if options['events']:
return p
# For 2>1 process, we don't check the cuts of course
while (not MatrixElementEvaluator.pass_isolation_cuts(p) and len(p)>3):
p, w_rambo = self.get_momenta(proc, options)
# For a 2>1 process, it would always be the same PS point,
# so here we bring in a boost along the z-axis, just for the sake
# of it.
if len(p)==3:
p = boost_momenta(p,3,random.uniform(0.0,0.99))
return p
# Start loop on loop libraries
# Accuracy threshold of double precision evaluations above which the
# PS points is also evaluated in quadruple precision
accuracy_threshold=1.0e-1
# Number of lorentz transformations to consider for the stability test
# (along with the loop direction test which is performed by default)
num_rotations = 1
if "MLReductionLib" not in MLOptions:
tools=[1]
else:
tools=MLOptions["MLReductionLib"]
        tools=list(set(tools)) # remove duplicates
        # TIR libraries that are not self-contained
tool_var={'pjfry':2,'golem':4,'samurai':5,'ninja':6,'collier':7}
for tool in ['pjfry','golem','samurai','ninja','collier']:
tool_dir='%s_dir'%tool
if not tool_dir in self.tir_dir:
continue
tool_libpath=self.tir_dir[tool_dir]
tool_libname="lib%s.a"%tool
if (not isinstance(tool_libpath,str)) or (not os.path.exists(tool_libpath)) \
or (not os.path.isfile(pjoin(tool_libpath,tool_libname))):
if tool_var[tool] in tools:
tools.remove(tool_var[tool])
if not tools:
return None
# Normally, this should work for loop-induced processes as well
if not reusing:
process = matrix_element['processes'][0]
else:
process = matrix_element
proc_name = process.shell_string()[2:]
export_dir=pjoin(self.mg_root,("SAVED" if keep_folder else "")+\
temp_dir_prefix+"_%s"%proc_name)
tools_name=bannermod.MadLoopParam._ID_reduction_tool_map
return_dict={}
return_dict['Stability']={}
infos_save={'Process_output': None,
'HELAS_MODEL_compilation' : None,
'dir_path' : None,
'Initialization' : None,
'Process_compilation' : None}
for tool in tools:
tool_name=tools_name[tool]
# Each evaluation is performed in different ways to assess its stability.
# There are two dictionaries, one for the double precision evaluation
# and the second one for quadruple precision (if it was needed).
# The keys are the name of the evaluation method and the value is the
# float returned.
DP_stability = []
QP_stability = []
# The unstable point encountered are stored in this list
Unstable_PS_points = []
# The exceptional PS points are those which stay unstable in quad prec.
Exceptional_PS_points = []
MLoptions=MLOptions
MLoptions["MLReductionLib"]=tool
clean = (tool==tools[0]) and not nPoints==0
if infos_IN==None or (tool_name not in infos_IN):
infos=infos_IN
else:
infos=infos_IN[tool_name]
if not infos:
infos = self.setup_process(matrix_element,export_dir, \
reusing, param_card,MLoptions,clean)
if not infos:
return None
if clean:
infos_save['Process_output']=infos['Process_output']
infos_save['HELAS_MODEL_compilation']=infos['HELAS_MODEL_compilation']
infos_save['dir_path']=infos['dir_path']
infos_save['Process_compilation']=infos['Process_compilation']
else:
if not infos['Process_output']:
infos['Process_output']=infos_save['Process_output']
if not infos['HELAS_MODEL_compilation']:
infos['HELAS_MODEL_compilation']=infos_save['HELAS_MODEL_compilation']
if not infos['dir_path']:
infos['dir_path']=infos_save['dir_path']
if not infos['Process_compilation']:
infos['Process_compilation']=infos_save['Process_compilation']
dir_path=infos['dir_path']
# Reuse old stability runs if present
savefile='SavedStabilityRun_%s%%s.pkl'%tools_name[tool]
data_i = 0
if reusing:
# Possibly load additional data beyond the main one stored in slot 0
data_i=0
while os.path.isfile(pjoin(dir_path,savefile%('_%d'%data_i))):
pickle_path = pjoin(dir_path,savefile%('_%d'%data_i))
saved_run = save_load_object.load_from_file(pickle_path)
if data_i>0:
logger.info("Loading additional data stored in %s."%
str(pickle_path))
logger.info("Loaded data moved to %s."%str(pjoin(
dir_path,'LOADED_'+savefile%('_%d'%data_i))))
shutil.move(pickle_path,
                                pjoin(dir_path,'LOADED_'+savefile%('_%d'%data_i)))
DP_stability.extend(saved_run['DP_stability'])
QP_stability.extend(saved_run['QP_stability'])
Unstable_PS_points.extend(saved_run['Unstable_PS_points'])
Exceptional_PS_points.extend(saved_run['Exceptional_PS_points'])
data_i += 1
return_dict['Stability'][tool_name] = {'DP_stability':DP_stability,
'QP_stability':QP_stability,
'Unstable_PS_points':Unstable_PS_points,
'Exceptional_PS_points':Exceptional_PS_points}
if nPoints==0:
if len(return_dict['Stability'][tool_name]['DP_stability'])!=0:
# In case some data was combined, overwrite the pickle
if data_i>1:
save_load_object.save_to_file(pjoin(dir_path,
savefile%'_0'),return_dict['Stability'][tool_name])
continue
else:
logger.info("ERROR: Not reusing a directory or any pickled"+
" result for tool %s and the number"%tool_name+\
" of point for the check is zero.")
return None
logger.info("Checking stability of process %s "%proc_name+\
"with %d PS points by %s."%(nPoints,tool_name))
if infos['Initialization'] != None:
time_per_ps_estimate = (infos['Initialization']/4.0)/2.0
sec_needed = int(time_per_ps_estimate*nPoints*4)
else:
sec_needed = 0
progress_bar = None
time_info = False
if sec_needed>5:
time_info = True
logger.info("This check should take about "+\
"%s to run. Started on %s."%(\
str(datetime.timedelta(seconds=sec_needed)),\
datetime.datetime.now().strftime("%d-%m-%Y %H:%M")))
if logger.getEffectiveLevel()<logging.WARNING and \
(sec_needed>5 or infos['Initialization'] == None):
widgets = ['Stability check:', pbar.Percentage(), ' ',
pbar.Bar(),' ', pbar.ETA(), ' ']
progress_bar = pbar.ProgressBar(widgets=widgets, maxval=nPoints,
fd=sys.stdout)
MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
read_ps = True, npoints = 1, hel_config = -1, split_orders=split_orders)
# Recompile (Notice that the recompilation is only necessary once) for
# the change above to take effect.
# Make sure to recreate the executable and modified sources
try:
os.remove(pjoin(dir_path,'check'))
os.remove(pjoin(dir_path,'check_sa.o'))
except OSError:
pass
# Now run make
devnull = open(os.devnull, 'w')
retcode = subprocess.call(['make','check'],
cwd=dir_path, stdout=devnull, stderr=devnull)
devnull.close()
if retcode != 0:
logging.info("Error while executing make in %s" % dir_path)
return None
# First create the stability check fortran driver executable if not
# already present.
if not os.path.isfile(pjoin(dir_path,'StabilityCheckDriver.f')):
# Use the presence of the file born_matrix.f to check if this output
# is a loop_induced one or not.
if os.path.isfile(pjoin(dir_path,'born_matrix.f')):
checkerName = 'StabilityCheckDriver.f'
else:
checkerName = 'StabilityCheckDriver_loop_induced.f'
with open(pjoin(self.mg_root,'Template','loop_material','Checks',
checkerName),'r') as checkerFile:
with open(pjoin(dir_path,'proc_prefix.txt')) as proc_prefix:
checkerToWrite = checkerFile.read()%{'proc_prefix':
proc_prefix.read()}
checkerFile = open(pjoin(dir_path,'StabilityCheckDriver.f'),'w')
checkerFile.write(checkerToWrite)
checkerFile.close()
#cp(pjoin(self.mg_root,'Template','loop_material','Checks',\
# checkerName),pjoin(dir_path,'StabilityCheckDriver.f'))
# Make sure to recompile the possibly modified files (time stamps can be
# off).
if os.path.isfile(pjoin(dir_path,'StabilityCheckDriver')):
os.remove(pjoin(dir_path,'StabilityCheckDriver'))
if os.path.isfile(pjoin(dir_path,'loop_matrix.o')):
os.remove(pjoin(dir_path,'loop_matrix.o'))
misc.compile(arg=['StabilityCheckDriver'], cwd=dir_path, \
mode='fortran', job_specs = False)
# Now for 2>1 processes, because the HelFilter was set up for always
# identical PS points with vec(p_1)=-vec(p_2), it is best not to remove
# the helicity filter double check
if len(process['legs'])==3:
self.fix_MadLoopParamCard(dir_path, mp=False,
loop_filter=False, DoubleCheckHelicityFilter=True)
StabChecker = subprocess.Popen([pjoin(dir_path,'StabilityCheckDriver')],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=dir_path)
start_index = len(DP_stability)
if progress_bar!=None:
progress_bar.start()
# Flag to know if the run was interrupted or not
interrupted = False
# Flag to know whether the run for one specific PS point got an IOError
# and must be retried
retry = 0
# We do not use a for loop because we want to manipulate the updater.
i=start_index
if options and 'events' in options and options['events']:
# it is necessary to reuse the events from lhe file
import MadSpin.decay as madspin
fsock = open(options['events'])
self.event_file = madspin.Event(fsock)
while i<(start_index+nPoints):
# To be added to the returned statistics
qp_dict={}
dp_dict={}
UPS = None
EPS = None
# Pick an eligible PS point with rambo, if not already done
if retry==0:
p = pick_PS_point(process, options)
# print "I use P_%i="%i,p
try:
if progress_bar!=None:
progress_bar.update(i+1-start_index)
# Write it in the input file
PSPoint = format_PS_point(p,0)
dp_res=[]
dp_res.append(self.get_me_value(StabChecker,PSPoint,1,
split_orders=split_orders))
dp_dict['CTModeA']=dp_res[-1]
dp_res.append(self.get_me_value(StabChecker,PSPoint,2,
split_orders=split_orders))
dp_dict['CTModeB']=dp_res[-1]
for rotation in range(1,num_rotations+1):
PSPoint = format_PS_point(p,rotation)
dp_res.append(self.get_me_value(StabChecker,PSPoint,1,
split_orders=split_orders))
dp_dict['Rotation%i'%rotation]=dp_res[-1]
# Make sure all results make sense
if any([not res for res in dp_res]):
return None
dp_accuracy =((max(dp_res)-min(dp_res))/
abs(sum(dp_res)/len(dp_res)))
dp_dict['Accuracy'] = dp_accuracy
if dp_accuracy>accuracy_threshold:
if tool in [1,6]:
# Only CutTools or Ninja can use QP
UPS = [i,p]
qp_res=[]
PSPoint = format_PS_point(p,0)
qp_res.append(self.get_me_value(StabChecker,PSPoint,4,
split_orders=split_orders))
qp_dict['CTModeA']=qp_res[-1]
qp_res.append(self.get_me_value(StabChecker,PSPoint,5,
split_orders=split_orders))
qp_dict['CTModeB']=qp_res[-1]
for rotation in range(1,num_rotations+1):
PSPoint = format_PS_point(p,rotation)
qp_res.append(self.get_me_value(StabChecker,PSPoint,4,
split_orders=split_orders))
qp_dict['Rotation%i'%rotation]=qp_res[-1]
# Make sure all results make sense
if any([not res for res in qp_res]):
return None
qp_accuracy = ((max(qp_res)-min(qp_res))/
abs(sum(qp_res)/len(qp_res)))
qp_dict['Accuracy']=qp_accuracy
if qp_accuracy>accuracy_threshold:
EPS = [i,p]
else:
# Simply consider the point as a UPS when not using
# CutTools or Ninja (no QP rescue is available then)
UPS = [i,p]
except KeyboardInterrupt:
interrupted = True
break
except IOError, e:
if e.errno == errno.EINTR:
if retry==100:
logger.error("Failed hundred times consecutively because"+
" of system call interruptions.")
raise
else:
logger.debug("Recovered from a system call interruption."+\
"PSpoint #%i, Attempt #%i."%(i,retry+1))
# Sleep for half a second. Safety measure.
time.sleep(0.5)
# We will retry this PS point
retry = retry+1
# Make sure the MadLoop process is properly killed
try:
StabChecker.kill()
except Exception:
pass
StabChecker = subprocess.Popen(\
[pjoin(dir_path,'StabilityCheckDriver')],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=dir_path)
continue
else:
raise
# Successfully processed a PS point so,
# > reset retry
retry = 0
# > Update the while loop counter variable
i=i+1
# Update the returned statistics
DP_stability.append(dp_dict)
QP_stability.append(qp_dict)
if not EPS is None:
Exceptional_PS_points.append(EPS)
if not UPS is None:
Unstable_PS_points.append(UPS)
if progress_bar!=None:
progress_bar.finish()
if time_info:
logger.info('Finished check on %s.'%datetime.datetime.now().strftime(\
"%d-%m-%Y %H:%M"))
# Close the StabChecker process.
if not interrupted:
StabChecker.stdin.write('y\n')
else:
StabChecker.kill()
#return_dict = {'DP_stability':DP_stability,
# 'QP_stability':QP_stability,
# 'Unstable_PS_points':Unstable_PS_points,
# 'Exceptional_PS_points':Exceptional_PS_points}
# Save the run for possible future use
save_load_object.save_to_file(pjoin(dir_path,savefile%'_0'),\
return_dict['Stability'][tool_name])
if interrupted:
break
return_dict['Process'] = matrix_element.get('processes')[0] if not \
reusing else matrix_element
return return_dict
@classmethod
def get_me_value(cls, StabChecker, PSpoint, mode, hel=-1, mu_r=-1.0,
split_orders=-1):
""" This version of get_me_value is simplified for the purpose of this
class. No compilation is necessary. The CT mode can be specified."""
# Reset the stdin with EOF character without closing it.
StabChecker.stdin.write('\x1a')
StabChecker.stdin.write('1\n')
StabChecker.stdin.write('%d\n'%mode)
StabChecker.stdin.write('%s\n'%PSpoint)
StabChecker.stdin.write('%.16E\n'%mu_r)
StabChecker.stdin.write('%d\n'%hel)
StabChecker.stdin.write('%d\n'%split_orders)
try:
while True:
output = StabChecker.stdout.readline()
if output != '':
last_non_empty = output
if output==' ##TAG#RESULT_START#TAG##\n':
break
# Break if the checker has crashed for some reason.
ret_code = StabChecker.poll()
if not ret_code is None:
output = StabChecker.stdout.readline()
if output != '':
last_non_empty = output
error = StabChecker.stderr.readline()
raise MadGraph5Error, \
"The MadLoop stability checker crashed with return code = %d, and last output:\n\nstdout: %s\nstderr: %s\n"%\
(ret_code, last_non_empty, error)
res = ""
while True:
output = StabChecker.stdout.readline()
if output != '':
last_non_empty = output
if output==' ##TAG#RESULT_STOP#TAG##\n':
break
else:
res += output
ret_code = StabChecker.poll()
if not ret_code is None:
output = StabChecker.stdout.readline()
if output != '':
last_non_empty = output
error = StabChecker.stderr.readline()
raise MadGraph5Error, \
"The MadLoop stability checker crashed with return code = %d, and last output:\n\nstdout: %s\nstderr: %s\n"%\
(ret_code, last_non_empty, error)
return cls.parse_check_output(res,format='tuple')[0][0]
except IOError as e:
logging.warning("Error while running MadLoop. Exception = %s"%str(e))
raise e
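# Illustrative sketch (hedged) of the protocol implemented by get_me_value,
# assuming the enclosing class is LoopMatrixElementTimer as instantiated in
# check_profile below, and that the StabilityCheckDriver executable has been
# compiled as done in check_matrix_element_stability above:
#
#   checker = subprocess.Popen([pjoin(dir_path,'StabilityCheckDriver')],
#                   stdin=subprocess.PIPE, stdout=subprocess.PIPE,
#                   stderr=subprocess.PIPE, cwd=dir_path)
#   me_value = LoopMatrixElementTimer.get_me_value(checker, PS_str, 1,
#                                                  split_orders=-1)
#
# The driver is fed, in order: an EOF character, the flag '1', the CT mode,
# the PS point, mu_r, the helicity and split_orders, and the result is read
# back between the ##TAG#RESULT_START#TAG## / ##TAG#RESULT_STOP#TAG## markers.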
def evaluate_helicities(process, param_card = None, mg_root="",
cmass_scheme = False):
""" Perform a python evaluation of the matrix element independently for
all possible helicity configurations for a fixed number of points N and
returns the average for each in the format [[hel_config, eval],...].
This is used to determine what are the vanishing and dependent helicity
configurations at generation time and accordingly setup the output.
This is not yet implemented at LO."""
# Make sure this function is employed with a single process at LO
assert isinstance(process,base_objects.Process)
assert process.get('perturbation_couplings')==[]
N_eval=50
evaluator = MatrixElementEvaluator(process.get('model'), param_card,
auth_skipping = False, reuse = True)
amplitude = diagram_generation.Amplitude(process)
matrix_element = helas_objects.HelasMatrixElement(amplitude,gen_color=False)
cumulative_helEvals = []
# Fill cumulative hel progressively with several evaluations of the ME.
for i in range(N_eval):
p, w_rambo = evaluator.get_momenta(process)
helEvals = evaluator.evaluate_matrix_element(\
matrix_element, p = p, output = 'helEvals')['helEvals']
if cumulative_helEvals==[]:
cumulative_helEvals=copy.copy(helEvals)
else:
cumulative_helEvals = [[h[0],h[1]+helEvals[i][1]] for i, h in \
enumerate(cumulative_helEvals)]
# Now normalize with the total number of evaluations
cumulative_helEvals = [[h[0],h[1]/N_eval] for h in cumulative_helEvals]
# As we are not in the context of a check command, we clean the added
# globals right away
clean_added_globals(ADDED_GLOBAL)
return cumulative_helEvals
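# Illustrative sketch (hedged): 'my_process' stands for any LO
# base_objects.Process. The returned list pairs each helicity configuration
# with its matrix element value averaged over the N_eval sampled PS points,
# so vanishing entries flag helicity configurations that could be skipped.
#
#   hel_avgs = evaluate_helicities(my_process)
#   vanishing = [h for h, val in hel_avgs if val == 0.0]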
def run_multiprocs_no_crossings(function, multiprocess, stored_quantities,
opt=None, options=None):
"""A wrapper function for running an iteration of a function over
a multiprocess, without having to first create a process list
(which makes a big difference for very large multiprocesses).
stored_quantities is a dictionary for any quantities that we want
to reuse between runs."""
model = multiprocess.get('model')
isids = [leg.get('ids') for leg in multiprocess.get('legs') \
if not leg.get('state')]
fsids = [leg.get('ids') for leg in multiprocess.get('legs') \
if leg.get('state')]
# Create dictionary between isids and antiids, to speed up lookup
id_anti_id_dict = {}
for id in set(tuple(sum(isids+fsids, []))):
id_anti_id_dict[id] = model.get_particle(id).get_anti_pdg_code()
id_anti_id_dict[model.get_particle(id).get_anti_pdg_code()] = id
sorted_ids = []
results = []
for is_prod in apply(itertools.product, isids):
for fs_prod in apply(itertools.product, fsids):
# Check if we have already checked the process
if check_already_checked(is_prod, fs_prod, sorted_ids,
multiprocess, model, id_anti_id_dict):
continue
# Generate process based on the selected ids
process = multiprocess.get_process_with_legs(base_objects.LegList(\
[base_objects.Leg({'id': id, 'state':False}) for \
id in is_prod] + \
[base_objects.Leg({'id': id, 'state':True}) for \
id in fs_prod]))
if opt is not None:
if isinstance(opt, dict):
try:
value = opt[process.base_string()]
except Exception:
continue
result = function(process, stored_quantities, value, options=options)
else:
result = function(process, stored_quantities, opt, options=options)
else:
result = function(process, stored_quantities, options=options)
if result:
results.append(result)
return results
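# Illustrative sketch (hedged) of how this wrapper is driven, mirroring its
# use in check_processes further down: 'check_process' is the per-process
# callable and the evaluator plays the role of stored_quantities, while
# 'my_multiprocess' is a hypothetical ProcessDefinition.
#
#   results = run_multiprocs_no_crossings(check_process, my_multiprocess,
#                                         evaluator, quick, options)
#
# Each call of 'function' receives one concrete process built from the
# multiprocess leg ids; crossings already covered are skipped through
# check_already_checked.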
#===============================================================================
# Helper function check_already_checked
#===============================================================================
def check_already_checked(is_ids, fs_ids, sorted_ids, process, model,
id_anti_id_dict = {}):
"""Check if process already checked, if so return True, otherwise add
process and antiprocess to sorted_ids."""
# Check if process is already checked
if id_anti_id_dict:
is_ids = [id_anti_id_dict[id] for id in \
is_ids]
else:
is_ids = [model.get_particle(id).get_anti_pdg_code() for id in \
is_ids]
ids = array.array('i', sorted(is_ids + list(fs_ids)) + \
[process.get('id')])
if ids in sorted_ids:
# We have already checked (a crossing of) this process
return True
# Add this process to tested_processes
sorted_ids.append(ids)
# Skip adding the antiprocess below, since it might be relevant too
return False
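# Illustrative sketch (hedged): the crossing key built above is just the
# sorted list of (initial-state ids turned into anti-ids) plus final-state
# ids plus the process id, packed into an integer array, e.g.
#
#   key = array.array('i', sorted([11, -11] + [13, -13]) + [0])
#
# so that a process and all its crossings map onto the same key and only get
# checked once.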
#===============================================================================
# Generate a loop matrix element
#===============================================================================
def generate_loop_matrix_element(process_definition, reuse, output_path=None,
cmd = FakeInterface(), proc_name=None, loop_filter=None):
""" Generate a loop matrix element from the process definition, and returns
it along with the timing information dictionary.
If reuse is True, it reuses the already output directory if found.
There is the possibility of specifying the proc_name."""
assert isinstance(process_definition,
(base_objects.ProcessDefinition,base_objects.Process))
assert process_definition.get('perturbation_couplings')!=[]
if isinstance(process_definition,base_objects.ProcessDefinition):
if any(len(l.get('ids'))>1 for l in process_definition.get('legs')):
raise InvalidCmd("This check can only be performed on single "+
" processes. (i.e. without multiparticle labels).")
isids = [leg.get('ids')[0] for leg in process_definition.get('legs') \
if not leg.get('state')]
fsids = [leg.get('ids')[0] for leg in process_definition.get('legs') \
if leg.get('state')]
# Now generate a process based on the ProcessDefinition given in argument.
process = process_definition.get_process(isids,fsids)
else:
process = process_definition
if not output_path is None:
root_path = output_path
else:
root_path = cmd._mgme_dir
# By default, set all entries to None
timing = {'Diagrams_generation': None,
'n_loops': None,
'HelasDiagrams_generation': None,
'n_loop_groups': None,
'n_loop_wfs': None,
'loop_wfs_ranks': None}
if proc_name:
proc_dir = pjoin(root_path,proc_name)
else:
proc_dir = pjoin(root_path,"SAVED"+temp_dir_prefix+"_%s"%(
'_'.join(process.shell_string().split('_')[1:])))
if reuse and os.path.isdir(proc_dir):
logger.info("Reusing directory %s"%str(proc_dir))
# If reusing, return process instead of matrix element
return timing, process
logger.info("Generating p%s"%process_definition.nice_string()[1:])
start=time.time()
try:
amplitude = loop_diagram_generation.LoopAmplitude(process,
loop_filter=loop_filter)
except InvalidCmd:
# An error about the sanity of the process can be thrown, in which case
# we return nothing
return time.time()-start, None
if not amplitude.get('diagrams'):
# No matrix element for this process
return time.time()-start, None
# Make sure to disable loop_optimized_output when considering loop induced
# processes
loop_optimized_output = cmd.options['loop_optimized_output']
timing['Diagrams_generation']=time.time()-start
timing['n_loops']=len(amplitude.get('loop_diagrams'))
start=time.time()
matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
optimized_output = loop_optimized_output,gen_color=True)
# Here, the alohaModel used for analytical computations and for the aloha
# subroutine output will be different, so that some optimization is lost.
# But that is ok for the check functionality.
matrix_element.compute_all_analytic_information()
timing['HelasDiagrams_generation']=time.time()-start
if loop_optimized_output:
timing['n_loop_groups']=len(matrix_element.get('loop_groups'))
lwfs=[l for ldiag in matrix_element.get_loop_diagrams() for l in \
ldiag.get('loop_wavefunctions')]
timing['n_loop_wfs']=len(lwfs)
timing['loop_wfs_ranks']=[]
for rank in range(0,max([l.get_analytic_info('wavefunction_rank') \
for l in lwfs])+1):
timing['loop_wfs_ranks'].append(\
len([1 for l in lwfs if \
l.get_analytic_info('wavefunction_rank')==rank]))
return timing, matrix_element
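# Illustrative sketch (hedged) of a typical call, mirroring its use in
# check_timing and check_stability below; 'my_procdef' stands for a
# base_objects.ProcessDefinition with non-empty perturbation_couplings and
# the output path is a placeholder:
#
#   timing, matrix_element = generate_loop_matrix_element(my_procdef, reuse,
#                               output_path='/tmp/checks', cmd=FakeInterface())
#   reusing = isinstance(matrix_element, base_objects.Process)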
#===============================================================================
# check profile for loop process (timings + stability in one go)
#===============================================================================
def check_profile(process_definition, param_card = None,cuttools="",tir={},
options = {}, cmd = FakeInterface(),output_path=None,MLOptions={}):
"""For a single loop process, check both its timings and then its stability
in one go without regenerating it."""
if 'reuse' not in options:
keep_folder=False
else:
keep_folder = options['reuse']
model=process_definition.get('model')
timing1, matrix_element = generate_loop_matrix_element(process_definition,
keep_folder,output_path=output_path,cmd=cmd)
reusing = isinstance(matrix_element, base_objects.Process)
options['reuse'] = reusing
myProfiler = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
model=model, output_path=output_path, cmd=cmd)
if not myProfiler.loop_optimized_output:
MLoptions={}
else:
MLoptions=MLOptions
timing2 = myProfiler.time_matrix_element(matrix_element, reusing,
param_card, keep_folder=keep_folder,options=options,
MLOptions = MLoptions)
    if timing2 == None:
        return None, None
    timing2['reduction_tool'] = MLoptions['MLReductionLib'][0]
# The timing info is made of the merged two dictionaries
timing = dict(timing1.items()+timing2.items())
stability = myProfiler.check_matrix_element_stability(matrix_element,
options=options, infos_IN=timing,param_card=param_card,
keep_folder = keep_folder,
MLOptions = MLoptions)
if stability == None:
return None, None
else:
timing['loop_optimized_output']=myProfiler.loop_optimized_output
stability['loop_optimized_output']=myProfiler.loop_optimized_output
return timing, stability
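# Illustrative sketch (hedged): the two dictionaries returned above are the
# ones consumed by output_profile further down. The paths and option values
# are placeholders; the option keys follow the usage in the functions above.
#
#   timing, stability = check_profile(my_procdef, cuttools=my_cuttools_dir,
#                           options={'reuse':False, 'npoints':100,
#                                    'split_orders':-1, 'events':None},
#                           output_path='/tmp/checks')
#   if timing is not None:
#       print output_profile(my_procdef, stability, timing, '/tmp/checks')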
#===============================================================================
# check_stability for loop processes
#===============================================================================
def check_stability(process_definition, param_card = None,cuttools="",tir={},
options=None,nPoints=100, output_path=None,
cmd = FakeInterface(), MLOptions = {}):
"""For a single loop process, give a detailed summary of the generation and
execution timing."""
if "reuse" in options:
reuse=options['reuse']
else:
reuse=False
keep_folder = reuse
model=process_definition.get('model')
timing, matrix_element = generate_loop_matrix_element(process_definition,
reuse, output_path=output_path, cmd=cmd)
reusing = isinstance(matrix_element, base_objects.Process)
options['reuse'] = reusing
myStabilityChecker = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
output_path=output_path,model=model,cmd=cmd)
if not myStabilityChecker.loop_optimized_output:
MLoptions = {}
else:
MLoptions = MLOptions
# Make sure that the poles computation is disabled for COLLIER
if 'COLLIERComputeUVpoles' not in MLoptions:
MLoptions['COLLIERComputeUVpoles']=False
if 'COLLIERComputeIRpoles' not in MLoptions:
MLoptions['COLLIERComputeIRpoles']=False
# Use a high required accuracy for COLLIER if not specified
if 'COLLIERRequiredAccuracy' not in MLoptions:
MLoptions['COLLIERRequiredAccuracy']=1e-13
# Use loop-direction switching as the stability test if not specified (more reliable)
if 'COLLIERUseInternalStabilityTest' not in MLoptions:
MLoptions['COLLIERUseInternalStabilityTest']=False
# Finally we *must* forbid the use of the COLLIER global cache here, because
# it does not work with the way we independently call CTModeRun 1 and 2
# with the StabilityChecker.
MLoptions['COLLIERGlobalCache'] = 0
if "MLReductionLib" not in MLOptions:
MLoptions["MLReductionLib"] = []
if cuttools:
MLoptions["MLReductionLib"].extend([1])
if "iregi_dir" in tir:
MLoptions["MLReductionLib"].extend([3])
if "pjfry_dir" in tir:
MLoptions["MLReductionLib"].extend([2])
if "golem_dir" in tir:
MLoptions["MLReductionLib"].extend([4])
if "samurai_dir" in tir:
MLoptions["MLReductionLib"].extend([5])
if "ninja_dir" in tir:
MLoptions["MLReductionLib"].extend([6])
if "collier_dir" in tir:
MLoptions["MLReductionLib"].extend([7])
stability = myStabilityChecker.check_matrix_element_stability(matrix_element,
options=options,param_card=param_card,
keep_folder=keep_folder,
MLOptions=MLoptions)
if stability == None:
return None
else:
stability['loop_optimized_output']=myStabilityChecker.loop_optimized_output
return stability
#===============================================================================
# check_timing for loop processes
#===============================================================================
def check_timing(process_definition, param_card= None, cuttools="",tir={},
output_path=None, options={}, cmd = FakeInterface(),
MLOptions = {}):
"""For a single loop process, give a detailed summary of the generation and
execution timing."""
if 'reuse' not in options:
keep_folder = False
else:
keep_folder = options['reuse']
model=process_definition.get('model')
timing1, matrix_element = generate_loop_matrix_element(process_definition,
keep_folder, output_path=output_path, cmd=cmd)
reusing = isinstance(matrix_element, base_objects.Process)
options['reuse'] = reusing
myTimer = LoopMatrixElementTimer(cuttools_dir=cuttools,model=model,tir_dir=tir,
output_path=output_path, cmd=cmd)
if not myTimer.loop_optimized_output:
MLoptions = {}
else:
MLoptions = MLOptions
# Make sure that the poles computation is disabled for COLLIER
if 'COLLIERComputeUVpoles' not in MLoptions:
MLoptions['COLLIERComputeUVpoles']=False
if 'COLLIERComputeIRpoles' not in MLoptions:
MLoptions['COLLIERComputeIRpoles']=False
# And the COLLIER global cache is active, if not specified
if 'COLLIERGlobalCache' not in MLoptions:
MLoptions['COLLIERGlobalCache']=-1
# And time NINJA by default if not specified:
if 'MLReductionLib' not in MLoptions or \
len(MLoptions['MLReductionLib'])==0:
MLoptions['MLReductionLib'] = [6]
timing2 = myTimer.time_matrix_element(matrix_element, reusing, param_card,
keep_folder = keep_folder, options=options,
MLOptions = MLoptions)
if timing2 == None:
return None
else:
# Return the merged two dictionaries
res = dict(timing1.items()+timing2.items())
res['loop_optimized_output']=myTimer.loop_optimized_output
res['reduction_tool'] = MLoptions['MLReductionLib'][0]
return res
#===============================================================================
# check_processes
#===============================================================================
def check_processes(processes, param_card = None, quick = [],cuttools="",tir={},
options=None, reuse = False, output_path=None, cmd = FakeInterface()):
"""Check processes by generating them with all possible orderings
of particles (which means different diagram building and Helas
calls), and comparing the resulting matrix element values."""
cmass_scheme = cmd.options['complex_mass_scheme']
if isinstance(processes, base_objects.ProcessDefinition):
# Generate a list of unique processes
# Extract IS and FS ids
multiprocess = processes
model = multiprocess.get('model')
# Initialize matrix element evaluation
if multiprocess.get('perturbation_couplings')==[]:
evaluator = MatrixElementEvaluator(model,
auth_skipping = True, reuse = False, cmd = cmd)
else:
evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
model=model, auth_skipping = True,
reuse = False, output_path=output_path, cmd = cmd)
results = run_multiprocs_no_crossings(check_process,
multiprocess,
evaluator,
quick,
options)
if "used_lorentz" not in evaluator.stored_quantities:
evaluator.stored_quantities["used_lorentz"] = []
if multiprocess.get('perturbation_couplings')!=[] and not reuse:
# Clean temporary folders created for the running of the loop processes
clean_up(output_path)
return results, evaluator.stored_quantities["used_lorentz"]
elif isinstance(processes, base_objects.Process):
processes = base_objects.ProcessList([processes])
elif isinstance(processes, base_objects.ProcessList):
pass
else:
raise InvalidCmd("processes is of non-supported format")
if not processes:
raise InvalidCmd("No processes given")
model = processes[0].get('model')
# Initialize matrix element evaluation
if processes[0].get('perturbation_couplings')==[]:
evaluator = MatrixElementEvaluator(model, param_card,
auth_skipping = True, reuse = False, cmd = cmd)
else:
evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools, tir_dir=tir,
model=model,param_card=param_card,
auth_skipping = True, reuse = False,
output_path=output_path, cmd = cmd)
# Keep track of tested processes, matrix elements, color and already
# initiated Lorentz routines, to reuse as much as possible
sorted_ids = []
comparison_results = []
# Check process by process
for process in processes:
# Check if we already checked process
if check_already_checked([l.get('id') for l in process.get('legs') if \
not l.get('state')],
[l.get('id') for l in process.get('legs') if \
l.get('state')],
sorted_ids, process, model):
continue
# Get process result
res = check_process(process, evaluator, quick, options)
if res:
comparison_results.append(res)
if "used_lorentz" not in evaluator.stored_quantities:
evaluator.stored_quantities["used_lorentz"] = []
if processes[0].get('perturbation_couplings')!=[] and not reuse:
# Clean temporary folders created for the running of the loop processes
clean_up(output_path)
return comparison_results, evaluator.stored_quantities["used_lorentz"]
def check_process(process, evaluator, quick, options):
"""Check the helas calls for a process by generating the process
using all different permutations of the process legs (or, if
quick, use a subset of permutations), and check that the matrix
element is invariant under this."""
model = process.get('model')
# Ensure that leg numbers are set
for i, leg in enumerate(process.get('legs')):
leg.set('number', i+1)
logger.info("Checking crossings of %s" % \
process.nice_string().replace('Process:', 'process'))
process_matrix_elements = []
# For quick checks, only test two permutations with leg "1" in
# each position
if quick:
leg_positions = [[] for leg in process.get('legs')]
quick = range(1,len(process.get('legs')) + 1)
values = []
# Now, generate all possible permutations of the legs
number_checked=0
for legs in itertools.permutations(process.get('legs')):
order = [l.get('number') for l in legs]
if quick:
found_leg = True
for num in quick:
# Only test one permutation for each position of the
# specified legs
leg_position = legs.index([l for l in legs if \
l.get('number') == num][0])
if not leg_position in leg_positions[num-1]:
found_leg = False
leg_positions[num-1].append(leg_position)
if found_leg:
continue
# Further limit the total number of permutations checked to 3 for
# loop processes.
if quick and process.get('perturbation_couplings') and number_checked >3:
continue
legs = base_objects.LegList(legs)
if order != range(1,len(legs) + 1):
logger.info("Testing permutation: %s" % \
order)
newproc = copy.copy(process)
newproc.set('legs',legs)
# Generate the amplitude for this process
try:
if newproc.get('perturbation_couplings')==[]:
amplitude = diagram_generation.Amplitude(newproc)
else:
# Alternate the cutting method every other permutation.
loop_base_objects.cutting_method = 'optimal' if \
number_checked%2 == 0 else 'default'
amplitude = loop_diagram_generation.LoopAmplitude(newproc)
except InvalidCmd:
result=False
else:
result = amplitude.get('diagrams')
# Make sure to re-initialize the cutting method to the original one.
loop_base_objects.cutting_method = 'optimal'
if not result:
# This process has no diagrams; go to next process
logging.info("No diagrams for %s" % \
process.nice_string().replace('Process', 'process'))
break
if order == range(1,len(legs) + 1):
# Generate phase space point to use
p, w_rambo = evaluator.get_momenta(process, options)
# Generate the HelasMatrixElement for the process
if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
matrix_element = helas_objects.HelasMatrixElement(amplitude,
gen_color=False)
else:
matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
optimized_output=evaluator.loop_optimized_output)
# The loop diagrams are always written in the same basis, so that the
# LoopHelasMatrixElements always look alike. One therefore needs to
# consider the crossing no matter what.
if amplitude.get('process').get('has_born'):
# But the born diagrams will change depending on the order of the
# particles in the process definition
if matrix_element in process_matrix_elements:
# Exactly the same matrix element has been tested
# for other permutation of same process
continue
process_matrix_elements.append(matrix_element)
res = evaluator.evaluate_matrix_element(matrix_element, p = p,
options=options)
if res == None:
break
values.append(res[0])
number_checked += 1
# Check if we failed badly (1% is already bad) - in that
# case done for this process
if abs(max(values)) + abs(min(values)) > 0 and \
2 * abs(max(values) - min(values)) / \
(abs(max(values)) + abs(min(values))) > 0.01:
break
# Check if process was interrupted
if not values:
return None
# Done with this process. Collect values, and store
# process and momenta
diff = 0
if abs(max(values)) + abs(min(values)) > 0:
diff = 2* abs(max(values) - min(values)) / \
(abs(max(values)) + abs(min(values)))
# be more tolerant with loop processes
if process.get('perturbation_couplings'):
passed = diff < 1.e-5
else:
passed = diff < 1.e-8
return {"process": process,
"momenta": p,
"values": values,
"difference": diff,
"passed": passed}
def clean_up(mg_root):
"""Clean-up the possible left-over outputs from 'evaluate_matrix element' of
the LoopMatrixEvaluator (when its argument proliferate is set to true). """
if mg_root is None:
pass
directories = misc.glob('%s*' % temp_dir_prefix, mg_root)
if directories != []:
logger.debug("Cleaning temporary %s* check runs."%temp_dir_prefix)
for dir in directories:
# For safety make sure that the directory contains a folder SubProcesses
if os.path.isdir(pjoin(dir,'SubProcesses')):
shutil.rmtree(dir)
def format_output(output,format):
""" Return a string for 'output' with the specified format. If output is
None, it returns 'NA'."""
if output!=None:
return format%output
else:
return 'NA'
def output_profile(myprocdef, stability, timing, output_path, reusing=False):
"""Present the results from a timing and stability consecutive check"""
opt = timing['loop_optimized_output']
text = 'Timing result for the '+('optimized' if opt else 'default')+\
' output:\n'
text += output_timings(myprocdef,timing)
text += '\nStability result for the '+('optimized' if opt else 'default')+\
' output:\n'
text += output_stability(stability,output_path, reusing=reusing)
mode = 'optimized' if opt else 'default'
logFilePath = pjoin(output_path, 'profile_%s_%s.log'\
%(mode,stability['Process'].shell_string()))
logFile = open(logFilePath, 'w')
logFile.write(text)
logFile.close()
logger.info('Log of this profile check was output to file %s'\
%str(logFilePath))
return text
def output_stability(stability, output_path, reusing=False):
"""Present the result of a stability check in a nice format.
The full info is printed out in 'Stability_result_<proc_shell_string>.dat'
under the MadGraph5_aMC@NLO root folder (output_path)"""
def accuracy(eval_list):
""" Compute the accuracy from different evaluations."""
return (2.0*(max(eval_list)-min(eval_list))/
abs(max(eval_list)+min(eval_list)))
def best_estimate(eval_list):
""" Returns the best estimate from different evaluations."""
return (max(eval_list)+min(eval_list))/2.0
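# Worked toy example (hedged, comments only): for the evaluations
# [1.000, 1.002, 0.998] the accuracy defined above is
# 2*(1.002-0.998)/|1.002+0.998| = 4.0e-3 and the best estimate is
# (1.002+0.998)/2 = 1.000.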
def loop_direction_test_power(eval_list):
""" Computes the loop direction test power P is computed as follow:
P = accuracy(loop_dir_test) / accuracy(all_test)
So that P is large if the loop direction test is effective.
The tuple returned is (log(median(P)),log(min(P)),frac)
where frac is the fraction of events with powers smaller than -3
which means events for which the reading direction test shows an
accuracy three digits higher than it really is according to the other
tests."""
powers=[]
for eval in eval_list:
loop_dir_evals = [eval['CTModeA'],eval['CTModeB']]
# CTModeA is the reference so we keep it in too
other_evals = [eval[key] for key in eval.keys() if key not in \
['CTModeB','Accuracy']]
if accuracy(other_evals)!=0.0 and accuracy(loop_dir_evals)!=0.0:
powers.append(accuracy(loop_dir_evals)/accuracy(other_evals))
n_fail=0
for p in powers:
if (math.log(p)/math.log(10))<-3:
n_fail+=1
if len(powers)==0:
return (None,None,None)
return (math.log(median(powers))/math.log(10),
math.log(min(powers))/math.log(10),
                float(n_fail)/len(powers))
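# Worked toy example (hedged, comments only): with a single evaluation
# {'CTModeA':1.000, 'CTModeB':1.001, 'Rotation1':0.999, 'Accuracy':2.0e-3}
# the loop-direction accuracy is computed from [CTModeA,CTModeB] (~1.0e-3)
# and the reference accuracy from [CTModeA,Rotation1] (~1.0e-3), giving
# P ~ 1 and log10(P) ~ 0: the loop direction test is here exactly as
# discriminating as the rotation test.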
def test_consistency(dp_eval_list, qp_eval_list):
""" Computes the consistency test C from the DP and QP evaluations.
C = accuracy(all_DP_test) / abs(best_QP_eval-best_DP_eval)
So a consistent test would have C as close to one as possible.
The tuple returned is (log(median(C)),log(min(C)),log(max(C)))"""
consistencies = []
for dp_eval, qp_eval in zip(dp_eval_list,qp_eval_list):
dp_evals = [dp_eval[key] for key in dp_eval.keys() \
if key!='Accuracy']
qp_evals = [qp_eval[key] for key in qp_eval.keys() \
if key!='Accuracy']
if (abs(best_estimate(qp_evals)-best_estimate(dp_evals)))!=0.0 and \
accuracy(dp_evals)!=0.0:
consistencies.append(accuracy(dp_evals)/(abs(\
best_estimate(qp_evals)-best_estimate(dp_evals))))
if len(consistencies)==0:
return (None,None,None)
return (math.log(median(consistencies))/math.log(10),
math.log(min(consistencies))/math.log(10),
math.log(max(consistencies))/math.log(10))
def median(orig_list):
""" Find the median of a sorted float list. """
list=copy.copy(orig_list)
list.sort()
if len(list)%2==0:
return (list[int((len(list)/2)-1)]+list[int(len(list)/2)])/2.0
else:
return list[int((len(list)-1)/2)]
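# Toy example (hedged, comments only): median([0.3, 0.1, 0.2]) returns 0.2
# and median([0.1, 0.2, 0.3, 0.4]) returns 0.25 (the average of the two
# middle values).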
# Define shortcut
f = format_output
opt = stability['loop_optimized_output']
mode = 'optimized' if opt else 'default'
process = stability['Process']
res_str = "Stability checking for %s (%s mode)\n"\
%(process.nice_string()[9:],mode)
logFile = open(pjoin(output_path, 'stability_%s_%s.log'\
%(mode,process.shell_string())), 'w')
logFile.write('Stability check results\n\n')
logFile.write(res_str)
data_plot_dict={}
accuracy_dict={}
nPSmax=0
max_acc=0.0
min_acc=1.0
if stability['Stability']:
toolnames= stability['Stability'].keys()
toolnamestr=" | ".join(tn+
''.join([' ']*(10-len(tn))) for tn in toolnames)
DP_stability = [[eval['Accuracy'] for eval in stab['DP_stability']] \
for key,stab in stability['Stability'].items()]
med_dp_stab_str=" | ".join([f(median(dp_stab),'%.2e ') for dp_stab in DP_stability])
min_dp_stab_str=" | ".join([f(min(dp_stab),'%.2e ') for dp_stab in DP_stability])
max_dp_stab_str=" | ".join([f(max(dp_stab),'%.2e ') for dp_stab in DP_stability])
UPS = [stab['Unstable_PS_points'] for key,stab in stability['Stability'].items()]
res_str_i = "\n= Tool (DoublePrec for CT)....... %s\n"%toolnamestr
len_PS=["%i"%len(evals)+\
''.join([' ']*(10-len("%i"%len(evals)))) for evals in DP_stability]
len_PS_str=" | ".join(len_PS)
res_str_i += "|= Number of PS points considered %s\n"%len_PS_str
res_str_i += "|= Median accuracy............... %s\n"%med_dp_stab_str
res_str_i += "|= Max accuracy.................. %s\n"%min_dp_stab_str
res_str_i += "|= Min accuracy.................. %s\n"%max_dp_stab_str
pmedminlist=[]
pfraclist=[]
for key,stab in stability['Stability'].items():
(pmed,pmin,pfrac)=loop_direction_test_power(stab['DP_stability'])
ldtest_str = "%s,%s"%(f(pmed,'%.1f'),f(pmin,'%.1f'))
pfrac_str = f(pfrac,'%.2e')
pmedminlist.append(ldtest_str+''.join([' ']*(10-len(ldtest_str))))
pfraclist.append(pfrac_str+''.join([' ']*(10-len(pfrac_str))))
pmedminlist_str=" | ".join(pmedminlist)
pfraclist_str=" | ".join(pfraclist)
res_str_i += "|= Overall DP loop_dir test power %s\n"%pmedminlist_str
res_str_i += "|= Fraction of evts with power<-3 %s\n"%pfraclist_str
len_UPS=["%i"%len(upup)+\
''.join([' ']*(10-len("%i"%len(upup)))) for upup in UPS]
len_UPS_str=" | ".join(len_UPS)
res_str_i += "|= Number of Unstable PS points %s\n"%len_UPS_str
res_str_i += \
"""
= Legend for the statistics of the stability tests. (all logs below are log_10)
The loop direction test power P is computed as follows:
P = accuracy(loop_dir_test) / accuracy(all_other_test)
So that log(P) is positive if the loop direction test is effective.
The tuple printed out is (log(median(P)),log(min(P)))
The consistency test C is computed when QP evaluations are available:
C = accuracy(all_DP_test) / abs(best_QP_eval-best_DP_eval)
So a consistent test would have log(C) as close to zero as possible.
The tuple printed out is (log(median(C)),log(min(C)),log(max(C)))\n"""
res_str+=res_str_i
for key in stability['Stability'].keys():
toolname=key
stab=stability['Stability'][key]
DP_stability = [eval['Accuracy'] for eval in stab['DP_stability']]
# Remember that an evaluation which did not require QP has an empty dictionary
QP_stability = [eval['Accuracy'] if eval!={} else -1.0 for eval in \
stab['QP_stability']]
nPS = len(DP_stability)
if nPS>nPSmax:nPSmax=nPS
UPS = stab['Unstable_PS_points']
UPS_stability_DP = [DP_stability[U[0]] for U in UPS]
UPS_stability_QP = [QP_stability[U[0]] for U in UPS]
EPS = stab['Exceptional_PS_points']
EPS_stability_DP = [DP_stability[E[0]] for E in EPS]
EPS_stability_QP = [QP_stability[E[0]] for E in EPS]
res_str_i = ""
# Use nicer name for the XML tag in the log file
xml_toolname = {'GOLEM95':'GOLEM','IREGI':'IREGI',
'CUTTOOLS':'CUTTOOLS','PJFRY++':'PJFRY',
'NINJA':'NINJA','SAMURAI':'SAMURAI',
'COLLIER':'COLLIER'}[toolname.upper()]
if len(UPS)>0:
res_str_i = "\nDetails of the %d/%d UPS encountered by %s\n"\
%(len(UPS),nPS,toolname)
prefix = 'DP' if toolname=='CutTools' else ''
res_str_i += "|= %s Median inaccuracy.......... %s\n"\
%(prefix,f(median(UPS_stability_DP),'%.2e'))
res_str_i += "|= %s Max accuracy............... %s\n"\
%(prefix,f(min(UPS_stability_DP),'%.2e'))
res_str_i += "|= %s Min accuracy............... %s\n"\
%(prefix,f(max(UPS_stability_DP),'%.2e'))
(pmed,pmin,pfrac)=loop_direction_test_power(\
[stab['DP_stability'][U[0]] for U in UPS])
if toolname=='CutTools':
res_str_i += "|= UPS DP loop_dir test power.... %s,%s\n"\
%(f(pmed,'%.1f'),f(pmin,'%.1f'))
res_str_i += "|= UPS DP fraction with power<-3. %s\n"\
%f(pfrac,'%.2e')
res_str_i += "|= QP Median accuracy............ %s\n"\
%f(median(UPS_stability_QP),'%.2e')
res_str_i += "|= QP Max accuracy............... %s\n"\
%f(min(UPS_stability_QP),'%.2e')
res_str_i += "|= QP Min accuracy............... %s\n"\
%f(max(UPS_stability_QP),'%.2e')
(pmed,pmin,pfrac)=loop_direction_test_power(\
[stab['QP_stability'][U[0]] for U in UPS])
res_str_i += "|= UPS QP loop_dir test power.... %s,%s\n"\
%(f(pmed,'%.1f'),f(pmin,'%.1f'))
res_str_i += "|= UPS QP fraction with power<-3. %s\n"%f(pfrac,'%.2e')
(pmed,pmin,pmax)=test_consistency(\
[stab['DP_stability'][U[0]] for U in UPS],
[stab['QP_stability'][U[0]] for U in UPS])
res_str_i += "|= DP vs QP stab test consistency %s,%s,%s\n"\
%(f(pmed,'%.1f'),f(pmin,'%.1f'),f(pmax,'%.1f'))
if len(EPS)==0:
res_str_i += "= Number of Exceptional PS points : 0\n"
if len(EPS)>0:
res_str_i = "\nDetails of the %d/%d EPS encountered by %s\n"\
%(len(EPS),nPS,toolname)
res_str_i += "|= DP Median accuracy............ %s\n"\
%f(median(EPS_stability_DP),'%.2e')
res_str_i += "|= DP Max accuracy............... %s\n"\
%f(min(EPS_stability_DP),'%.2e')
res_str_i += "|= DP Min accuracy............... %s\n"\
%f(max(EPS_stability_DP),'%.2e')
pmed,pmin,pfrac=loop_direction_test_power(\
[stab['DP_stability'][E[0]] for E in EPS])
res_str_i += "|= EPS DP loop_dir test power.... %s,%s\n"\
%(f(pmed,'%.1f'),f(pmin,'%.1f'))
res_str_i += "|= EPS DP fraction with power<-3. %s\n"\
%f(pfrac,'%.2e')
res_str_i += "|= QP Median accuracy............ %s\n"\
%f(median(EPS_stability_QP),'%.2e')
res_str_i += "|= QP Max accuracy............... %s\n"\
%f(min(EPS_stability_QP),'%.2e')
res_str_i += "|= QP Min accuracy............... %s\n"\
%f(max(EPS_stability_QP),'%.2e')
pmed,pmin,pfrac=loop_direction_test_power(\
[stab['QP_stability'][E[0]] for E in EPS])
res_str_i += "|= EPS QP loop_dir test power.... %s,%s\n"\
%(f(pmed,'%.1f'),f(pmin,'%.1f'))
res_str_i += "|= EPS QP fraction with power<-3. %s\n"%f(pfrac,'%.2e')
logFile.write(res_str_i)
if len(EPS)>0:
logFile.write('\nFull details of the %i EPS encountered by %s.\n'\
%(len(EPS),toolname))
logFile.write('<EPS_data reduction=%s>\n'%xml_toolname.upper())
for i, eps in enumerate(EPS):
logFile.write('\nEPS #%i\n'%(i+1))
logFile.write('\n'.join([' '+' '.join(['%.16E'%pi for pi in p]) \
for p in eps[1]]))
logFile.write('\n DP accuracy : %.4e\n'%DP_stability[eps[0]])
logFile.write(' QP accuracy : %.4e\n'%QP_stability[eps[0]])
logFile.write('</EPS_data>\n')
if len(UPS)>0:
logFile.write('\nFull details of the %i UPS encountered by %s.\n'\
%(len(UPS),toolname))
logFile.write('<UPS_data reduction=%s>\n'%xml_toolname.upper())
for i, ups in enumerate(UPS):
logFile.write('\nUPS #%i\n'%(i+1))
logFile.write('\n'.join([' '+' '.join(['%.16E'%pi for pi in p]) \
for p in ups[1]]))
logFile.write('\n DP accuracy : %.4e\n'%DP_stability[ups[0]])
logFile.write(' QP accuracy : %.4e\n'%QP_stability[ups[0]])
logFile.write('</UPS_data>\n')
logFile.write('\nData entries for the stability plot.\n')
logFile.write('First row is a maximal accuracy delta, second is the '+\
'fraction of events with DP accuracy worse than delta.\n')
logFile.write('<plot_data reduction=%s>\n'%xml_toolname.upper())
# Set the x-range so that it spans [10**-17,10**(min_digit_accuracy)]
if max(DP_stability)>0.0:
min_digit_acc=int(math.log(max(DP_stability))/math.log(10))
if min_digit_acc>=0:
min_digit_acc = min_digit_acc+1
accuracies=[10**(-17+(i/5.0)) for i in range(5*(17+min_digit_acc)+1)]
else:
logFile.writelines('%.4e %.4e\n'%(accuracies[i], 0.0) for i in \
range(len(accuracies)))
logFile.write('</plot_data>\n')
res_str_i += '\nPerfect accuracy over all the trial PS points. No plot'+\
' is output then.'
logFile.write('Perfect accuracy over all the trial PS points.')
res_str +=res_str_i
continue
accuracy_dict[toolname]=accuracies
if max(accuracies) > max_acc: max_acc=max(accuracies)
if min(accuracies) < min_acc: min_acc=min(accuracies)
data_plot=[]
for acc in accuracies:
data_plot.append(float(len([d for d in DP_stability if d>acc]))\
/float(len(DP_stability)))
data_plot_dict[toolname]=data_plot
logFile.writelines('%.4e %.4e\n'%(accuracies[i], data_plot[i]) for i in \
range(len(accuracies)))
logFile.write('</plot_data>\n')
logFile.write('\nList of accuracies recorded for the %i evaluations with %s\n'\
%(nPS,toolname))
logFile.write('First row is DP, second is QP (if available).\n\n')
logFile.write('<accuracies reduction=%s>\n'%xml_toolname.upper())
logFile.writelines('%.4e '%DP_stability[i]+('NA\n' if QP_stability[i]==-1.0 \
else '%.4e\n'%QP_stability[i]) for i in range(nPS))
logFile.write('</accuracies>\n')
res_str+=res_str_i
logFile.close()
res_str += "\n= Stability details of the run are output to the file"+\
" stability_%s_%s.log\n"%(mode,process.shell_string())
# Bypass the plotting if the madgraph logger has a FileHandler (like it is
# done in the check command acceptance test) because in this case it makes
# no sense to plot anything.
if any(isinstance(handler,logging.FileHandler) for handler in \
logging.getLogger('madgraph').handlers):
return res_str
try:
import matplotlib.pyplot as plt
colorlist=['b','r','g','y','m','c','k']
for i,key in enumerate(data_plot_dict.keys()):
color=colorlist[i]
data_plot=data_plot_dict[key]
accuracies=accuracy_dict[key]
plt.plot(accuracies, data_plot, color=color, marker='', linestyle='-',\
label=key)
plt.axis([min_acc,max_acc,\
10**(-int(math.log(nPSmax-0.5)/math.log(10))-1), 1])
plt.yscale('log')
plt.xscale('log')
plt.title('Stability plot for %s (%s mode, %d points)'%\
(process.nice_string()[9:],mode,nPSmax))
plt.ylabel('Fraction of events')
plt.xlabel('Maximal precision')
plt.legend()
if not reusing:
logger.info('Some stability statistics will be displayed once you '+\
'close the plot window')
plt.show()
else:
fig_output_file = str(pjoin(output_path,
'stability_plot_%s_%s.png'%(mode,process.shell_string())))
logger.info('Stability plot output to file %s. '%fig_output_file)
plt.savefig(fig_output_file)
return res_str
except Exception as e:
if isinstance(e, ImportError):
res_str += "\n= Install matplotlib to get a "+\
"graphical display of the results of this check."
else:
res_str += "\n= Could not produce the stability plot because of "+\
"the following error: %s"%str(e)
return res_str
def output_timings(process, timings):
"""Present the result of a timings check in a nice format """
# Define shortcut
f = format_output
loop_optimized_output = timings['loop_optimized_output']
reduction_tool = bannermod.MadLoopParam._ID_reduction_tool_map[
timings['reduction_tool']]
res_str = "%s \n"%process.nice_string()
try:
gen_total = timings['HELAS_MODEL_compilation']+\
timings['HelasDiagrams_generation']+\
timings['Process_output']+\
timings['Diagrams_generation']+\
timings['Process_compilation']+\
timings['Initialization']
except TypeError:
gen_total = None
res_str += "\n= Generation time total...... ========== %s\n"%f(gen_total,'%.3gs')
res_str += "|= Diagrams generation....... %s\n"\
%f(timings['Diagrams_generation'],'%.3gs')
res_str += "|= Helas Diagrams generation. %s\n"\
%f(timings['HelasDiagrams_generation'],'%.3gs')
res_str += "|= Process output............ %s\n"\
%f(timings['Process_output'],'%.3gs')
res_str += "|= HELAS+model compilation... %s\n"\
%f(timings['HELAS_MODEL_compilation'],'%.3gs')
res_str += "|= Process compilation....... %s\n"\
%f(timings['Process_compilation'],'%.3gs')
res_str += "|= Initialization............ %s\n"\
%f(timings['Initialization'],'%.3gs')
res_str += "\n= Reduction tool tested...... %s\n"%reduction_tool
res_str += "\n= Helicity sum time / PSpoint ========== %.3gms\n"\
%(timings['run_unpolarized_total']*1000.0)
if loop_optimized_output:
coef_time=timings['run_unpolarized_coefs']*1000.0
loop_time=(timings['run_unpolarized_total']-\
timings['run_unpolarized_coefs'])*1000.0
total=coef_time+loop_time
res_str += "|= Coefs. computation time... %.3gms (%d%%)\n"\
%(coef_time,int(round(100.0*coef_time/total)))
res_str += "|= Loop evaluation time...... %.3gms (%d%%)\n"\
%(loop_time,int(round(100.0*loop_time/total)))
res_str += "\n= One helicity time / PSpoint ========== %.3gms\n"\
%(timings['run_polarized_total']*1000.0)
if loop_optimized_output:
coef_time=timings['run_polarized_coefs']*1000.0
loop_time=(timings['run_polarized_total']-\
timings['run_polarized_coefs'])*1000.0
total=coef_time+loop_time
res_str += "|= Coefs. computation time... %.3gms (%d%%)\n"\
%(coef_time,int(round(100.0*coef_time/total)))
res_str += "|= Loop evaluation time...... %.3gms (%d%%)\n"\
%(loop_time,int(round(100.0*loop_time/total)))
res_str += "\n= Miscellaneous ========================\n"
res_str += "|= Number of hel. computed... %s/%s\n"\
%(f(timings['n_contrib_hel'],'%d'),f(timings['n_tot_hel'],'%d'))
res_str += "|= Number of loop diagrams... %s\n"%f(timings['n_loops'],'%d')
if loop_optimized_output:
res_str += "|= Number of loop groups..... %s\n"\
%f(timings['n_loop_groups'],'%d')
res_str += "|= Number of loop wfs........ %s\n"\
%f(timings['n_loop_wfs'],'%d')
if timings['loop_wfs_ranks']!=None:
for i, r in enumerate(timings['loop_wfs_ranks']):
res_str += "||= # of loop wfs of rank %d.. %d\n"%(i,r)
res_str += "|= Loading time (Color data). ~%.3gms\n"\
%(timings['Booting_time']*1000.0)
res_str += "|= Maximum RAM usage (rss)... %s\n"\
%f(float(timings['ram_usage']/1000.0),'%.3gMb')
res_str += "\n= Output disk size =====================\n"
res_str += "|= Source directory sources.. %s\n"%f(timings['du_source'],'%sb')
res_str += "|= Process sources........... %s\n"%f(timings['du_process'],'%sb')
res_str += "|= Color and helicity data... %s\n"%f(timings['du_color'],'%sb')
res_str += "|= Executable size........... %s\n"%f(timings['du_exe'],'%sb')
return res_str
def output_comparisons(comparison_results):
"""Present the results of a comparison in a nice list format
       In short mode, return the number of failed processes.
"""
proc_col_size = 17
pert_coupl = comparison_results[0]['process']['perturbation_couplings']
if pert_coupl:
process_header = "Process [virt="+" ".join(pert_coupl)+"]"
else:
process_header = "Process"
if len(process_header) + 1 > proc_col_size:
proc_col_size = len(process_header) + 1
for proc in comparison_results:
if len(proc['process'].base_string()) + 1 > proc_col_size:
proc_col_size = len(proc['process'].base_string()) + 1
col_size = 18
pass_proc = 0
fail_proc = 0
no_check_proc = 0
failed_proc_list = []
no_check_proc_list = []
res_str = fixed_string_length(process_header, proc_col_size) + \
fixed_string_length("Min element", col_size) + \
fixed_string_length("Max element", col_size) + \
fixed_string_length("Relative diff.", col_size) + \
"Result"
for result in comparison_results:
proc = result['process'].base_string()
values = result['values']
if len(values) <= 1:
res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
" * No permutations, process not checked *"
no_check_proc += 1
no_check_proc_list.append(result['process'].nice_string())
continue
passed = result['passed']
res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
fixed_string_length("%1.10e" % min(values), col_size) + \
fixed_string_length("%1.10e" % max(values), col_size) + \
fixed_string_length("%1.10e" % result['difference'],
col_size)
if passed:
pass_proc += 1
res_str += "Passed"
else:
fail_proc += 1
failed_proc_list.append(result['process'].nice_string())
res_str += "Failed"
res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
(pass_proc, pass_proc + fail_proc,
fail_proc, pass_proc + fail_proc)
if fail_proc != 0:
res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
if no_check_proc != 0:
res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
return res_str
def fixed_string_length(mystr, length):
"""Helper function to fix the length of a string by cutting it
or adding extra space."""
if len(mystr) > length:
return mystr[0:length]
else:
return mystr + " " * (length - len(mystr))
#===============================================================================
# check_gauge
#===============================================================================
def check_gauge(processes, param_card = None,cuttools="", tir={}, reuse = False,
options=None, output_path=None, cmd = FakeInterface()):
"""Check gauge invariance of the processes by using the BRS check.
For one of the massless external bosons (e.g. gluon or photon),
replace the polarization vector (epsilon_mu) with its momentum (p_mu)
"""
cmass_scheme = cmd.options['complex_mass_scheme']
if isinstance(processes, base_objects.ProcessDefinition):
# Generate a list of unique processes
# Extract IS and FS ids
multiprocess = processes
model = multiprocess.get('model')
# Initialize matrix element evaluation
if multiprocess.get('perturbation_couplings')==[]:
evaluator = MatrixElementEvaluator(model, param_card,cmd= cmd,
auth_skipping = True, reuse = False)
else:
evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
cmd=cmd,model=model, param_card=param_card,
auth_skipping = False, reuse = False,
output_path=output_path)
if not cmass_scheme and multiprocess.get('perturbation_couplings')==[]:
# Set all widths to zero for gauge check
            logger.info('Set all widths to zero for non-complex-mass-scheme checks')
for particle in evaluator.full_model.get('particles'):
if particle.get('width') != 'ZERO':
evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
results = run_multiprocs_no_crossings(check_gauge_process,
multiprocess,
evaluator,
options=options
)
if multiprocess.get('perturbation_couplings')!=[] and not reuse:
# Clean temporary folders created for the running of the loop processes
clean_up(output_path)
return results
elif isinstance(processes, base_objects.Process):
processes = base_objects.ProcessList([processes])
elif isinstance(processes, base_objects.ProcessList):
pass
else:
raise InvalidCmd("processes is of non-supported format")
assert processes, "No processes given"
model = processes[0].get('model')
# Initialize matrix element evaluation
if processes[0].get('perturbation_couplings')==[]:
evaluator = MatrixElementEvaluator(model, param_card,
auth_skipping = True, reuse = False,
cmd = cmd)
else:
evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
model=model, param_card=param_card,
auth_skipping = False, reuse = False,
output_path=output_path, cmd = cmd)
comparison_results = []
comparison_explicit_flip = []
# For each process, make sure we have set up leg numbers:
for process in processes:
# Check if we already checked process
#if check_already_checked([l.get('id') for l in process.get('legs') if \
# not l.get('state')],
## [l.get('id') for l in process.get('legs') if \
# l.get('state')],
# sorted_ids, process, model):
# continue
# Get process result
result = check_gauge_process(process, evaluator,options=options)
if result:
comparison_results.append(result)
if processes[0].get('perturbation_couplings')!=[] and not reuse:
# Clean temporary folders created for the running of the loop processes
clean_up(output_path)
return comparison_results
def check_gauge_process(process, evaluator, options=None):
"""Check gauge invariance for the process, unless it is already done."""
model = process.get('model')
# Check that there are massless vector bosons in the process
found_gauge = False
for i, leg in enumerate(process.get('legs')):
part = model.get_particle(leg.get('id'))
if part.get('spin') == 3 and part.get('mass').lower() == 'zero':
found_gauge = True
break
if not found_gauge:
logger.info("No ward identity for %s" % \
process.nice_string().replace('Process', 'process'))
# This process can't be checked
return None
for i, leg in enumerate(process.get('legs')):
leg.set('number', i+1)
logger.info("Checking ward identities for %s" % \
process.nice_string().replace('Process', 'process'))
legs = process.get('legs')
# Generate a process with these legs
# Generate the amplitude for this process
try:
if process.get('perturbation_couplings')==[]:
amplitude = diagram_generation.Amplitude(process)
else:
amplitude = loop_diagram_generation.LoopAmplitude(process)
except InvalidCmd:
logging.info("No diagrams for %s" % \
process.nice_string().replace('Process', 'process'))
return None
if not amplitude.get('diagrams'):
# This process has no diagrams; go to next process
logging.info("No diagrams for %s" % \
process.nice_string().replace('Process', 'process'))
return None
# Generate the HelasMatrixElement for the process
if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
matrix_element = helas_objects.HelasMatrixElement(amplitude,
gen_color = False)
else:
matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
optimized_output=evaluator.loop_optimized_output)
#p, w_rambo = evaluator.get_momenta(process)
# MLOptions = {'ImprovePS':True,'ForceMP':True}
# brsvalue = evaluator.evaluate_matrix_element(matrix_element, gauge_check = True,
# output='jamp',MLOptions=MLOptions, options=options)
brsvalue = evaluator.evaluate_matrix_element(matrix_element, gauge_check = True,
output='jamp', options=options)
if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
matrix_element = helas_objects.HelasMatrixElement(amplitude,
gen_color = False)
mvalue = evaluator.evaluate_matrix_element(matrix_element, gauge_check = False,
output='jamp', options=options)
if mvalue and mvalue['m2']:
return {'process':process,'value':mvalue,'brs':brsvalue}
def output_gauge(comparison_results, output='text'):
"""Present the results of a comparison in a nice list format"""
proc_col_size = 17
pert_coupl = comparison_results[0]['process']['perturbation_couplings']
# Of course, be more tolerant for loop processes
if pert_coupl:
threshold=1e-5
else:
threshold=1e-10
if pert_coupl:
process_header = "Process [virt="+" ".join(pert_coupl)+"]"
else:
process_header = "Process"
if len(process_header) + 1 > proc_col_size:
proc_col_size = len(process_header) + 1
for one_comp in comparison_results:
proc = one_comp['process'].base_string()
mvalue = one_comp['value']
brsvalue = one_comp['brs']
if len(proc) + 1 > proc_col_size:
proc_col_size = len(proc) + 1
col_size = 18
pass_proc = 0
fail_proc = 0
failed_proc_list = []
no_check_proc_list = []
res_str = fixed_string_length(process_header, proc_col_size) + \
fixed_string_length("matrix", col_size) + \
fixed_string_length("BRS", col_size) + \
fixed_string_length("ratio", col_size) + \
"Result"
for one_comp in comparison_results:
proc = one_comp['process'].base_string()
mvalue = one_comp['value']
brsvalue = one_comp['brs']
ratio = (abs(brsvalue['m2'])/abs(mvalue['m2']))
res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
fixed_string_length("%1.10e" % mvalue['m2'], col_size)+ \
fixed_string_length("%1.10e" % brsvalue['m2'], col_size)+ \
fixed_string_length("%1.10e" % ratio, col_size)
if ratio > threshold:
fail_proc += 1
proc_succeed = False
failed_proc_list.append(proc)
res_str += "Failed"
else:
pass_proc += 1
proc_succeed = True
res_str += "Passed"
#check all the JAMP
# loop over jamp
# This is not available for loop processes where the jamp list returned
# is empty.
if len(mvalue['jamp'])!=0:
for k in range(len(mvalue['jamp'][0])):
m_sum = 0
brs_sum = 0
# loop over helicity
for j in range(len(mvalue['jamp'])):
                # accumulate |jamp|^2 summed over helicities
m_sum += abs(mvalue['jamp'][j][k])**2
brs_sum += abs(brsvalue['jamp'][j][k])**2
# Compare the different helicity
if not m_sum:
continue
ratio = abs(brs_sum) / abs(m_sum)
tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , proc_col_size) + \
fixed_string_length("%1.10e" % m_sum, col_size) + \
fixed_string_length("%1.10e" % brs_sum, col_size) + \
fixed_string_length("%1.10e" % ratio, col_size)
if ratio > 1e-15:
if not len(failed_proc_list) or failed_proc_list[-1] != proc:
fail_proc += 1
pass_proc -= 1
failed_proc_list.append(proc)
res_str += tmp_str + "Failed"
elif not proc_succeed:
res_str += tmp_str + "Passed"
res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
(pass_proc, pass_proc + fail_proc,
fail_proc, pass_proc + fail_proc)
if fail_proc != 0:
res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
if output=='text':
return res_str
else:
return fail_proc
#===============================================================================
# check_lorentz
#===============================================================================
def check_lorentz(processes, param_card = None,cuttools="", tir={}, options=None, \
reuse = False, output_path=None, cmd = FakeInterface()):
""" Check if the square matrix element (sum over helicity) is lorentz
invariant by boosting the momenta with different value."""
cmass_scheme = cmd.options['complex_mass_scheme']
if isinstance(processes, base_objects.ProcessDefinition):
# Generate a list of unique processes
# Extract IS and FS ids
multiprocess = processes
model = multiprocess.get('model')
# Initialize matrix element evaluation
if multiprocess.get('perturbation_couplings')==[]:
evaluator = MatrixElementEvaluator(model,
cmd= cmd, auth_skipping = False, reuse = True)
else:
evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
model=model, auth_skipping = False, reuse = True,
output_path=output_path, cmd = cmd)
if not cmass_scheme and processes.get('perturbation_couplings')==[]:
# Set all widths to zero for lorentz check
            logger.info('Set all widths to zero for non-complex-mass-scheme checks')
for particle in evaluator.full_model.get('particles'):
if particle.get('width') != 'ZERO':
evaluator.full_model.get('parameter_dict')[\
particle.get('width')] = 0.
results = run_multiprocs_no_crossings(check_lorentz_process,
multiprocess,
evaluator,
options=options)
if multiprocess.get('perturbation_couplings')!=[] and not reuse:
# Clean temporary folders created for the running of the loop processes
clean_up(output_path)
return results
elif isinstance(processes, base_objects.Process):
processes = base_objects.ProcessList([processes])
elif isinstance(processes, base_objects.ProcessList):
pass
else:
raise InvalidCmd("processes is of non-supported format")
assert processes, "No processes given"
model = processes[0].get('model')
# Initialize matrix element evaluation
if processes[0].get('perturbation_couplings')==[]:
evaluator = MatrixElementEvaluator(model, param_card,
auth_skipping = False, reuse = True,
cmd=cmd)
else:
evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools, tir_dir=tir,
model=model,param_card=param_card,
auth_skipping = False, reuse = True,
output_path=output_path, cmd = cmd)
comparison_results = []
# For each process, make sure we have set up leg numbers:
for process in processes:
# Check if we already checked process
#if check_already_checked([l.get('id') for l in process.get('legs') if \
# not l.get('state')],
# [l.get('id') for l in process.get('legs') if \
# l.get('state')],
# sorted_ids, process, model):
# continue
# Get process result
result = check_lorentz_process(process, evaluator,options=options)
if result:
comparison_results.append(result)
if processes[0].get('perturbation_couplings')!=[] and not reuse:
# Clean temporary folders created for the running of the loop processes
clean_up(output_path)
return comparison_results
def check_lorentz_process(process, evaluator,options=None):
"""Check gauge invariance for the process, unless it is already done."""
amp_results = []
model = process.get('model')
for i, leg in enumerate(process.get('legs')):
leg.set('number', i+1)
logger.info("Checking lorentz transformations for %s" % \
process.nice_string().replace('Process:', 'process'))
legs = process.get('legs')
# Generate a process with these legs
# Generate the amplitude for this process
try:
if process.get('perturbation_couplings')==[]:
amplitude = diagram_generation.Amplitude(process)
else:
amplitude = loop_diagram_generation.LoopAmplitude(process)
except InvalidCmd:
logging.info("No diagrams for %s" % \
process.nice_string().replace('Process', 'process'))
return None
if not amplitude.get('diagrams'):
# This process has no diagrams; go to next process
logging.info("No diagrams for %s" % \
process.nice_string().replace('Process', 'process'))
return None
    # Generate a phase-space point for the process
p, w_rambo = evaluator.get_momenta(process, options)
# Generate the HelasMatrixElement for the process
if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
matrix_element = helas_objects.HelasMatrixElement(amplitude,
gen_color = True)
else:
matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
optimized_output = evaluator.loop_optimized_output)
MLOptions = {'ImprovePS':True,'ForceMP':True}
if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
data = evaluator.evaluate_matrix_element(matrix_element, p=p, output='jamp',
auth_skipping = True, options=options)
else:
data = evaluator.evaluate_matrix_element(matrix_element, p=p, output='jamp',
auth_skipping = True, PS_name = 'original', MLOptions=MLOptions,
options = options)
if data and data['m2']:
if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
results = [data]
else:
results = [('Original evaluation',data)]
else:
return {'process':process, 'results':'pass'}
    # The boosts are not precise enough for the loop evaluations and one needs
    # the fortran improve_ps function of MadLoop to work. So, for loops, we only
    # consider boosts along the z direction or simple rotations.
if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
for boost in range(1,4):
boost_p = boost_momenta(p, boost)
results.append(evaluator.evaluate_matrix_element(matrix_element,
p=boost_p,output='jamp'))
else:
        # We only consider the boost along the z axis so as to have the
        # improve_ps fortran routine work.
boost_p = boost_momenta(p, 3)
results.append(('Z-axis boost',
evaluator.evaluate_matrix_element(matrix_element, options=options,
p=boost_p, PS_name='zBoost', output='jamp',MLOptions = MLOptions)))
        # We also add the boosts along x and y here for reference. In the output
        # of the check, it is clearly stated that the MadLoop improve_ps script
        # will not work for them. Momenta read from an event file are not
        # precise enough, so in that case these x/yBoost checks are omitted.
if not options['events']:
boost_p = boost_momenta(p, 1)
results.append(('X-axis boost',
evaluator.evaluate_matrix_element(matrix_element, options=options,
p=boost_p, PS_name='xBoost', output='jamp',MLOptions = MLOptions)))
boost_p = boost_momenta(p, 2)
results.append(('Y-axis boost',
evaluator.evaluate_matrix_element(matrix_element,options=options,
p=boost_p, PS_name='yBoost', output='jamp',MLOptions = MLOptions)))
        # We only consider rotations around the z axis so as to have the
        # improve_ps fortran routine work.
rot_p = [[pm[0],-pm[2],pm[1],pm[3]] for pm in p]
results.append(('Z-axis pi/2 rotation',
evaluator.evaluate_matrix_element(matrix_element,options=options,
p=rot_p, PS_name='Rotation1', output='jamp',MLOptions = MLOptions)))
# Now a pi/4 rotation around the z-axis
sq2 = math.sqrt(2.0)
rot_p = [[pm[0],(pm[1]-pm[2])/sq2,(pm[1]+pm[2])/sq2,pm[3]] for pm in p]
results.append(('Z-axis pi/4 rotation',
evaluator.evaluate_matrix_element(matrix_element,options=options,
p=rot_p, PS_name='Rotation2', output='jamp',MLOptions = MLOptions)))
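        # Note: both transformations above are standard rotations about the z
        # axis: pi/2 maps (px,py) -> (-py,px) and pi/4 maps
        # (px,py) -> ((px-py)/sqrt(2), (px+py)/sqrt(2)), leaving E and pz
        # untouched, so the summed |M|^2 must agree up to numerical precision.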
return {'process': process, 'results': results}
#===============================================================================
# check_unitary_feynman
#===============================================================================
def check_unitary_feynman(processes_unit, processes_feynm, param_card=None,
options=None, tir={}, output_path=None,
cuttools="", reuse=False, cmd = FakeInterface()):
"""Check gauge invariance of the processes by flipping
the gauge of the model
"""
mg_root = cmd._mgme_dir
cmass_scheme = cmd.options['complex_mass_scheme']
if isinstance(processes_unit, base_objects.ProcessDefinition):
# Generate a list of unique processes
# Extract IS and FS ids
multiprocess_unit = processes_unit
model = multiprocess_unit.get('model')
# Initialize matrix element evaluation
# For the unitary gauge, open loops should not be used
loop_optimized_bu = cmd.options['loop_optimized_output']
if processes_unit.get('squared_orders'):
if processes_unit.get('perturbation_couplings') in [[],['QCD']]:
cmd.options['loop_optimized_output'] = True
else:
raise InvalidCmd("The gauge test cannot be performed for "+
" a process with more than QCD corrections and which"+
" specifies squared order constraints.")
else:
cmd.options['loop_optimized_output'] = False
aloha.unitary_gauge = True
if processes_unit.get('perturbation_couplings')==[]:
evaluator = MatrixElementEvaluator(model, param_card,
cmd=cmd,auth_skipping = False, reuse = True)
else:
evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
cmd=cmd, model=model,
param_card=param_card,
auth_skipping = False,
output_path=output_path,
reuse = False)
if not cmass_scheme and multiprocess_unit.get('perturbation_couplings')==[]:
            logger.info('Set all widths to zero for non-complex-mass-scheme checks')
for particle in evaluator.full_model.get('particles'):
if particle.get('width') != 'ZERO':
evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
output_u = run_multiprocs_no_crossings(get_value,
multiprocess_unit,
evaluator,
options=options)
clean_added_globals(ADDED_GLOBAL)
# Clear up previous run if checking loop output
if processes_unit.get('perturbation_couplings')!=[]:
clean_up(output_path)
momentum = {}
for data in output_u:
momentum[data['process']] = data['p']
multiprocess_feynm = processes_feynm
model = multiprocess_feynm.get('model')
# Initialize matrix element evaluation
aloha.unitary_gauge = False
# We could use the default output as well for Feynman, but it provides
# an additional check
cmd.options['loop_optimized_output'] = True
if processes_feynm.get('perturbation_couplings')==[]:
evaluator = MatrixElementEvaluator(model, param_card,
cmd= cmd, auth_skipping = False, reuse = False)
else:
evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
cmd= cmd, model=model,
param_card=param_card,
auth_skipping = False,
output_path=output_path,
reuse = False)
if not cmass_scheme and multiprocess_feynm.get('perturbation_couplings')==[]:
# Set all widths to zero for gauge check
for particle in evaluator.full_model.get('particles'):
if particle.get('width') != 'ZERO':
evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
output_f = run_multiprocs_no_crossings(get_value, multiprocess_feynm,
evaluator, momentum,
options=options)
output = [processes_unit]
for data in output_f:
local_dico = {}
local_dico['process'] = data['process']
local_dico['value_feynm'] = data['value']
local_dico['value_unit'] = [d['value'] for d in output_u
if d['process'] == data['process']][0]
output.append(local_dico)
if processes_feynm.get('perturbation_couplings')!=[] and not reuse:
# Clean temporary folders created for the running of the loop processes
clean_up(output_path)
# Reset the original global variable loop_optimized_output.
cmd.options['loop_optimized_output'] = loop_optimized_bu
return output
# elif isinstance(processes, base_objects.Process):
# processes = base_objects.ProcessList([processes])
# elif isinstance(processes, base_objects.ProcessList):
# pass
else:
raise InvalidCmd("processes is of non-supported format")
#===============================================================================
# check_cms
#===============================================================================
def check_complex_mass_scheme(process_line, param_card=None, cuttools="",tir={},
cmd = FakeInterface(), output_path=None, MLOptions = {}, options={}):
"""Check complex mass scheme consistency in the offshell region of s-channels
detected for this process, by varying the expansion paramer consistently
with the corresponding width and making sure that the difference between
the complex mass-scheme and the narrow-width approximation is higher order.
"""
if not isinstance(process_line, str):
raise InvalidCmd("Proces definition must be given as a stirng for this check")
# Generate a list of unique processes in the NWA scheme
cmd.do_set('complex_mass_scheme False', log=False)
#cmd.do_import('model loop_qcd_qed_sm-NWA')
multiprocess_nwa = cmd.extract_process(process_line)
# Change the option 'recompute_width' to the optimal value if set to 'auto'.
has_FRdecay = os.path.isfile(pjoin(cmd._curr_model.get('modelpath'),
'decays.py'))
# Proceed with some warning
missing_perturbations = cmd._curr_model.get_coupling_orders()-\
set(multiprocess_nwa.get('perturbation_couplings'))
if len(multiprocess_nwa.get('perturbation_couplings'))>0 and \
len(missing_perturbations)>0:
logger.warning("------------------------------------------------------")
logger.warning("The process considered does not specify the following "+
"type of loops to be included : %s"%str(list(missing_perturbations)))
logger.warning("Consequently, the CMS check will be unsuccessful if the"+
" process involves any resonating particle whose LO decay is "+
"mediated by one of these orders.")
logger.warning("You can use the syntax '[virt=all]' to automatically"+
" include all loops supported by the model.")
logger.warning("------------------------------------------------------")
if len(multiprocess_nwa.get('perturbation_couplings'))>0 and \
len(multiprocess_nwa.get('legs'))<=4:
logger.warning("------------------------------------------------------")
logger.warning("Processes with four or less external states are typically not"+\
" sensitive to incorrect Complex Mass Scheme implementations.")
logger.warning("You can test this sensitivity by making sure that the"+
" same check on the leading-order counterpart of this process *fails*"+
" when using the option '--diff_lambda_power=2'.")
logger.warning("If it does not, then consider adding a massless "+
"gauge vector to the external states.")
logger.warning("------------------------------------------------------")
if options['recompute_width']=='auto':
if multiprocess_nwa.get('perturbation_couplings')!=[]:
# NLO, so it is necessary to have the correct LO width for the check
options['recompute_width'] = 'first_time'
else:
options['recompute_width'] = 'never'
# Some warnings
if options['recompute_width'] in ['first_time', 'always'] and \
not has_FRdecay and not 'cached_widths' in options:
logger.info('The LO widths will need to be recomputed but the '+
'model considered does not appear to have a decay module.\nThe widths'+
' will need to be computed numerically and it will slow down the test.\n'+
'Consider using a param_card already specifying correct LO widths and'+
" adding the option --recompute_width=never when doing this check.")
if options['recompute_width']=='never' and \
any(order in multiprocess_nwa.get('perturbation_couplings') for order in
options['expansion_orders']):
logger.warning('You chose not to recompute the widths while including'+
' loop corrections. The check will be successful only if the width'+\
' specified in the default param_card is LO accurate (Remember that'+\
' the default values of alpha_s and awem1 are set to 0.1 and 10.0'+\
' respectively by default).')
    # Reload the model including decay.py so as to have an efficient MadWidth
    # if possible (this model will be directly given to MadWidth). Notice that
    # this is not needed for the CMS run because MadWidth is not supposed to be
    # used there (the widths should be recycled from those of the NWA run).
if options['recompute_width'] in ['first_time', 'always'] and has_FRdecay:
modelname = cmd._curr_model.get('modelpath+restriction')
with misc.MuteLogger(['madgraph'], ['INFO']):
model = import_ufo.import_model(modelname, decay=True,
complex_mass_scheme=False)
multiprocess_nwa.set('model', model)
run_options = copy.deepcopy(options)
# Set the seed if chosen by user
if options['seed'] > 0:
random.seed(options['seed'])
# Add useful entries
run_options['param_card'] = param_card
if isinstance(cmd, FakeInterface):
raise MadGraph5Error, "Check CMS cannot be run with a FakeInterface."
run_options['cmd'] = cmd
run_options['MLOptions'] = MLOptions
if output_path:
run_options['output_path'] = output_path
else:
run_options['output_path'] = cmd._mgme_dir
# Add the information regarding FR decay for optimal log information
run_options['has_FRdecay'] = has_FRdecay
# And one for caching the widths computed along the way
if 'cached_widths' not in run_options:
run_options['cached_widths'] = {}
# Cached param_cards, first is param_card instance, second is
# param_name dictionary
run_options['cached_param_card'] = {'NWA':[None,None],'CMS':[None,None]}
if options['tweak']['name']:
logger.info("Now running the CMS check for tweak '%s'"\
%options['tweak']['name'])
model = multiprocess_nwa.get('model')
# Make sure all masses are defined as external
for particle in model.get('particles'):
mass_param = model.get_parameter(particle.get('mass'))
if particle.get('mass')!='ZERO' and 'external' not in mass_param.depend:
if model.get('name') not in ['sm','loop_sm']:
logger.warning("The mass '%s' of particle '%s' is not an external"%\
(model.get_parameter(particle.get('mass')).name,particle.get('name'))+\
" parameter as required by this check. \nMG5_aMC will try to"+\
" modify the model to remedy the situation. No guarantee.")
status = model.change_electroweak_mode(set(['mz','mw','alpha']))
if not status:
raise InvalidCmd('The EW scheme could apparently not be changed'+\
' so as to have the W-boson mass external. The check cannot'+\
' proceed.')
break
veto_orders = [order for order in model.get('coupling_orders') if \
order not in options['expansion_orders']]
if len(veto_orders)>0:
logger.warning('You did not define any parameter scaling rule for the'+\
" coupling orders %s. They will be "%','.join(veto_orders)+\
"forced to zero in the tests. Consider adding the scaling rule to"+\
"avoid this. (see option '--cms' in 'help check')")
for order in veto_orders:
            multiprocess_nwa.get('orders')[order] = 0
multiprocess_nwa.set('perturbation_couplings', [order for order in
multiprocess_nwa['perturbation_couplings'] if order not in veto_orders])
if multiprocess_nwa.get('perturbation_couplings')==[]:
evaluator = MatrixElementEvaluator(model, param_card,
cmd=cmd,auth_skipping = False, reuse = True)
else:
evaluator = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
cmd=cmd, model=model,
param_card=param_card,
auth_skipping = False,
output_path=output_path,
reuse = False)
cached_information = []
output_nwa = run_multiprocs_no_crossings(check_complex_mass_scheme_process,
multiprocess_nwa,
evaluator,
# This empty list 'opt' will be passed to the check_complex_mass_scheme_process
# function which will fill it with the specification of the particle for which
    # the complex mass scheme must be checked. The fact that it is a list
# at this stage tells the function check_complex_mass_scheme_process that
# we are doing nwa. It will then be converted to a dictionary when doing cms.
opt = cached_information,
options=run_options)
# Make sure to start from fresh for LO runs
clean_added_globals(ADDED_GLOBAL)
# Generate a list of unique processes in the CMS scheme
cmd.do_set('complex_mass_scheme True', log=False)
#cmd.do_import('model loop_qcd_qed_sm__CMS__-CMS')
multiprocess_cms = cmd.extract_process(process_line)
model = multiprocess_cms.get('model')
# Apply veto
if len(veto_orders)>0:
for order in veto_orders:
            multiprocess_cms.get('orders')[order] = 0
multiprocess_cms.set('perturbation_couplings', [order for order in
multiprocess_cms['perturbation_couplings'] if order not in veto_orders])
if multiprocess_cms.get('perturbation_couplings')==[]:
evaluator = MatrixElementEvaluator(model, param_card,
cmd=cmd,auth_skipping = False, reuse = True)
else:
evaluator = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
cmd=cmd, model=model,
param_card=param_card,
auth_skipping = False,
output_path=output_path,
reuse = False)
output_cms = run_multiprocs_no_crossings(check_complex_mass_scheme_process,
multiprocess_cms,
evaluator,
# We now substituted the cached information
opt = dict(cached_information),
options=run_options)
if multiprocess_cms.get('perturbation_couplings')!=[] and not options['reuse']:
# Clean temporary folders created for the running of the loop processes
clean_up(output_path)
    # Now reformat the output a bit by putting the CMS and NWA results together
    # as values of a dictionary with the process name as key.
    # Also add an 'ordered_processes' key listing all processes in their order
    # of appearance.
result = {'ordered_processes':[],'lambdaCMS':options['lambdaCMS']}
# Recall what perturbation orders were used
result['perturbation_orders']=multiprocess_nwa.get('perturbation_couplings')
for i, proc_res in enumerate(output_nwa):
result['ordered_processes'].append(proc_res[0])
result[proc_res[0]] = {
'NWA':proc_res[1]['resonances_result'],
'CMS':output_cms[i][1]['resonances_result'],
'born_order':proc_res[1]['born_order'],
'loop_order':proc_res[1]['loop_order']}
# As an optimization we propagate the widths as they could be reused when
# using several tweaks
options['cached_widths'] = run_options['cached_widths']
# Add widths information to the result
result['recompute_width'] = options['recompute_width']
result['has_FRdecay'] = has_FRdecay
result['widths_computed'] = []
cached_widths = sorted(options['cached_widths'].items(), key=lambda el: \
abs(el[0][0]))
for (pdg, lambda_value), width in cached_widths:
if lambda_value != 1.0:
continue
result['widths_computed'].append((model.get_particle(pdg).get_name(),
width))
# Make sure to clear the python ME definitions generated in LO runs
clean_added_globals(ADDED_GLOBAL)
return result
# Check CMS for a given process
def check_complex_mass_scheme_process(process, evaluator, opt = [],
options=None):
"""Check CMS for the process in argument. The options 'opt' is quite important.
When opt is a list, it means that we are doing NWA and we are filling the
list with the following tuple
('proc_name',({'ParticlePDG':ParticlePDG,
                   'FSMothersNumbers':set([]),
'PS_point_used':[]},...))
When opt is a dictionary, we are in the CMS mode and it will be reused then.
"""
# a useful logical to check if we are in LO (python on the flight) or
# NLO (output and compilation) mode
NLO = process.get('perturbation_couplings') != []
def glue_momenta(production, decay):
""" Merge together the kinematics for the production of particle
positioned last in the 'production' array with the 1>N 'decay' kinematic'
provided where the decay particle is first."""
from MadSpin.decay import momentum
full = production[:-1]
# Consistency check:
# target = production[decay_number-1]
# boosted = momentum(decay[0][0],decay[0][1],decay[0][2],decay[0][3])
# print 'Consistency check ',target==boosted
for p in decay[1:]:
bp = momentum(*p).boost(momentum(*production[-1]))
full.append([bp.E,bp.px,bp.py,bp.pz])
return full
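        # Note: glue_momenta drops the placeholder resonance leg from 'production',
        # then boosts every decay product by the resonance momentum production[-1]
        # (via MadSpin's momentum.boost, presumably taking the decay kinematics
        # from the resonance rest frame to the lab frame) and appends them, giving
        # the full 2 -> N kinematic configuration.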
def find_resonances(diagrams):
""" Find all the resonances in the matrix element in argument """
model = process['model']
resonances_found = []
for ll, diag in enumerate(diagrams):
for amp in diag.get('amplitudes'):
# 0 specifies the PDG given to the fake s-channels from
# vertices with more than four legs
s_channels, t_channels = amp.\
get_s_and_t_channels(process.get_ninitial(), model, 0)
                # The s-channels are given from the outermost ones going inward
                # as vertices, so we must replace parent legs with the outermost ones
replacement_dict = {}
for s_channel in s_channels:
new_resonance = {
'ParticlePDG':s_channel.get('legs')[-1].get('id'),
'FSMothersNumbers':[],
'PS_point_used':[]}
for leg in s_channel.get('legs')[:-1]:
if leg.get('number')>0:
new_resonance['FSMothersNumbers'].append(
leg.get('number'))
else:
try:
new_resonance['FSMothersNumbers'].extend(
replacement_dict[leg.get('number')])
except KeyError:
raise Exception, 'The following diagram '+\
'is malformed:'+diag.nice_string()
replacement_dict[s_channel.get('legs')[-1].get('number')] = \
new_resonance['FSMothersNumbers']
new_resonance['FSMothersNumbers'] = set(
new_resonance['FSMothersNumbers'])
if new_resonance not in resonances_found:
resonances_found.append(new_resonance)
# Now we setup the phase-space point for each resonance found
kept_resonances = []
for resonance in resonances_found:
# Discard fake s-channels
if resonance['ParticlePDG'] == 0:
continue
# Discard if the particle appears in the final state
if abs(resonance['ParticlePDG']) in \
[abs(l.get('id')) for l in process.get('legs')]:
continue
mass_string = evaluator.full_model.get_particle(
resonance['ParticlePDG']).get('mass')
mass = evaluator.full_model.get('parameter_dict')[mass_string].real
# Discard massless s-channels
if mass==0.0:
continue
width_string = evaluator.full_model.get_particle(
resonance['ParticlePDG']).get('width')
width = evaluator.full_model.get('parameter_dict')[width_string].real
# Discard stable s-channels
if width==0.0:
continue
final_state_energy = sum(
evaluator.full_model.get('parameter_dict')[
evaluator.full_model.get_particle(l.get('id')).get('mass')].real
for l in process.get('legs') if l.get('number') in
resonance['FSMothersNumbers'])
# Choose the offshellness
special_mass = (1.0 + options['offshellness'])*mass
# Discard impossible kinematics
if special_mass<final_state_energy:
raise InvalidCmd('The offshellness specified (%s) is such'\
%options['offshellness']+' that the resulting kinematic is '+\
'impossible for resonance %s %s.'%(evaluator.full_model.
get_particle(resonance['ParticlePDG']).get_name(),
str(list(resonance['FSMothersNumbers']))))
continue
# Add it to the list of accepted resonances
kept_resonances.append(resonance)
for resonance in kept_resonances:
# Chose the PS point for the resonance
set_PSpoint(resonance, force_other_res_offshell=kept_resonances)
# misc.sprint(kept_resonances)
# misc.sprint(len(kept_resonances))
return tuple(kept_resonances)
def set_PSpoint(resonance, force_other_res_offshell=[],
allow_energy_increase=1.5, isolation_cuts=True):
""" Starting from the specified resonance, construct a phase space point
for it and possibly also enforce other resonances to be onshell. Possibly
allow to progressively increase enregy by steps of the integer specified
(negative float to forbid it) and possible enforce default isolation cuts
as well."""
def invmass(momenta):
""" Computes the invariant mass of a list of momenta."""
ptot = [sum(p[i] for p in momenta) for i in range(4)]
return math.sqrt(ptot[0]**2-ptot[1]**2-ptot[2]**2-ptot[3]**2)
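            # Note: this is the Minkowski norm with metric (+,-,-,-); for a single
            # momentum [E,px,py,pz] it returns sqrt(E**2-px**2-py**2-pz**2).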
model = evaluator.full_model
def getmass(pdg):
""" Returns the mass of a particle given the current model and its
pdg given in argument."""
return model.get('parameter_dict')[
model.get_particle(pdg).get('mass')].real
N_trials = 0
max_trial = 1e4
nstep_for_energy_increase = 1e3
PS_point_found = None
if options['offshellness'] > 0.0:
offshellness = options['offshellness']
else:
# We must undershoot the offshellness since one needs more
# energy than the target mass to have a valid PS point. So we
# start with an offshellness 4 times larger, and progressively reduce
# it later
offshellness = (0.25*(options['offshellness']+1.0))-1.0
# When offshellness is negative, it is progressively decreased every
# nstep_for_energy_increase attempts (not increased!), so it is more
# dangerous, and we therefore want the steps to be smaller
if options['offshellness'] < 0.0:
energy_increase = math.sqrt(allow_energy_increase)
else:
energy_increase = allow_energy_increase
# Make sure to remove the resonance itself from force_other_res_offshell
other_res_offshell = [res for res in force_other_res_offshell if
res!=resonance]
# Now play it smart on finding starting energy and offshellness and
# register all resonance masses
all_other_res_masses = [getmass(res['ParticlePDG'])
for res in other_res_offshell]
resonance_mass = getmass(resonance['ParticlePDG'])
str_res = '%s %s'%(model.get_particle(
resonance['ParticlePDG']).get_name(),
str(list(resonance['FSMothersNumbers'])))
leg_number_to_leg = dict((l.get('number'),l) for l in process.get('legs'))
# Find what is the minimum possible offshellness given
# the mass of the daughters of this resonance.
# This will only be relevant when options['offshellness'] is negative
daughter_masses = sum(getmass(leg_number_to_leg[\
number].get('id')) for number in resonance['FSMothersNumbers'])
min_offshellnes = 4.0*((daughter_masses*1.2)/resonance_mass)-1.0
# Compute the minimal energy given the external states, add 20% to leave
# enough phase-space
min_energy = max(sum(getmass(l.get('id')) for l in \
process.get('legs') if l.get('state')==True),
sum(getmass(l.get('id')) for l in \
process.get('legs') if l.get('state')==False))
# List all other offshellnesses of the potential daughters of this
# resonance
daughter_offshellnesses = [(1.0+options['offshellness'])*mass
for i, mass in enumerate(all_other_res_masses) if
other_res_offshell[i]['FSMothersNumbers'].issubset(
resonance['FSMothersNumbers'])]
if options['offshellness'] >= 0.0:
if len(daughter_offshellnesses)>0:
max_mass = max(daughter_offshellnesses)
# A factor two to have enough phase-space
offshellness = max(2.0*(max_mass/resonance_mass)-1.0,
options['offshellness'])
max_mass = max([(1.0+options['offshellness'])*mass for mass in \
all_other_res_masses]+[(1.0+offshellness)*resonance_mass])
# Account for external_masses too
# A factor two to have enough phase-space open
target = max(min_energy*1.2,max_mass*2.0)
if target > options['energy']:
logger.warning("The user-defined energy %f seems "%options['energy']+
" insufficient to reach the minimum propagator invariant mass "+
"%f required for the chosen offshellness %f."%(max_mass,
options['offshellness']) + " Energy reset to %f."%target)
options['energy'] = target
else:
if len(daughter_offshellnesses) > 0:
min_mass = min(daughter_offshellnesses)
# A factor one half to have enough phase-space
offshellness = min(0.25*(min_mass/resonance_mass)-1.0,
options['offshellness'])
# Make sure the chosen offshellness leaves enough energy to produce
# the daughter masses
if (1.0+offshellness)*resonance_mass < daughter_masses*1.2:
                msg = 'The resonance %s cannot accommodate'%str_res+\
' an offshellness of %f because the daughter'%options['offshellness']+\
' masses are %f.'%daughter_masses
if options['offshellness']<min_offshellnes:
msg += ' Try again with an offshellness'+\
' smaller (in absolute value) of at least %f.'%min_offshellnes
else:
                    msg += ' Try again with a smaller offshellness (in absolute value).'
raise InvalidCmd(msg)
min_mass = min([(1.0+options['offshellness'])*mass for mass in \
all_other_res_masses]+[(1.0+offshellness)*resonance_mass])
# Account for external_masses too
# A factor two to have enough phase-space open
if 2.0*min_mass < options['energy']:
new_energy = max(min_energy*1.2, 2.0*min_mass)
logger.warning("The user-defined energy %f seems "%options['energy']+
" too large to not overshoot the maximum propagator invariant mass "+
"%f required for the chosen offshellness %f."%(min_mass,
options['offshellness']) + " Energy reset to %f."%new_energy)
options['energy'] = new_energy
if options['offshellness'] < 0.0 and options['energy'] >= min_mass:
logger.debug("The target energy is not compatible with the mass"+
" of the external states for this process (%f). It is "%min_mass+
"unlikely that a valid kinematic configuration will be found.")
if options['offshellness']<0.0 and offshellness<options['offshellness'] or \
options['offshellness']>0.0 and offshellness>options['offshellness']:
logger.debug("Offshellness increased to %f"%offshellness+
" so as to try to find a kinematical configuration with"+
" offshellness at least equal to %f"%options['offshellness']+
" for all resonances.")
start_energy = options['energy']
while N_trials<max_trial:
N_trials += 1
if N_trials%nstep_for_energy_increase==0:
if allow_energy_increase > 0.0:
old_offshellness = offshellness
if offshellness > 0.0:
options['energy'] *= energy_increase
offshellness *= energy_increase
else:
options['energy'] = max(options['energy']/energy_increase,
min_energy*1.2)
offshellness = max(min_offshellnes,
((offshellness+1.0)/energy_increase)-1.0)
if old_offshellness!=offshellness:
logger.debug('Trying to find a valid kinematic'+\
" configuration for resonance '%s'"%str_res+\
' with increased offshellness %f'%offshellness)
candidate = get_PSpoint_for_resonance(resonance, offshellness)
pass_offshell_test = True
for i, res in enumerate(other_res_offshell):
# Make sure other resonances are sufficiently offshell too
if offshellness > 0.0:
if invmass([candidate[j-1] for j in res['FSMothersNumbers']]) <\
((1.0+options['offshellness'])*all_other_res_masses[i]):
pass_offshell_test = False
break
else:
if invmass([candidate[j-1] for j in res['FSMothersNumbers']]) >\
((1.0+options['offshellness'])*all_other_res_masses[i]):
pass_offshell_test = False
break
if not pass_offshell_test:
continue
# Make sure it is isolated
if isolation_cuts:
# Set ptcut to 5% of total energy
if not evaluator.pass_isolation_cuts(candidate,
ptcut=0.05*invmass([candidate[0],candidate[1]]), drcut=0.4):
continue
PS_point_found = candidate
break
# Restore the initial energy setup
options['energy'] = start_energy
if PS_point_found is None:
err_msg = 'Could not find a valid PS point in %d'%max_trial+\
              ' trials. Try increasing the energy, modifying the offshellness, '+\
              'or relaxing some constraints.'
if options['offshellness']<0.0:
                err_msg += ' Try with a positive offshellness instead (or a '+\
                  'negative one of smaller absolute value).'
raise InvalidCmd, err_msg
else:
# misc.sprint('PS point found in %s trials.'%N_trials)
# misc.sprint(PS_point_found)
resonance['offshellnesses'] = []
all_other_res_masses = [resonance_mass] + all_other_res_masses
other_res_offshell = [resonance] + other_res_offshell
for i, res in enumerate(other_res_offshell):
if i==0:
res_str = 'self'
else:
res_str = '%s %s'%(model.get_particle(
res['ParticlePDG']).get_name(),
str(list(res['FSMothersNumbers'])))
resonance['offshellnesses'].append((res_str,(
(invmass([PS_point_found[j-1] for j in
res['FSMothersNumbers']])/all_other_res_masses[i])-1.0)))
resonance['PS_point_used'] = PS_point_found
def get_PSpoint_for_resonance(resonance, offshellness = options['offshellness']):
""" Assigns a kinematic configuration to the resonance dictionary
given in argument."""
# Get the particle mass
mass_string = evaluator.full_model.get_particle(
resonance['ParticlePDG']).get('mass')
mass = evaluator.full_model.get('parameter_dict')[mass_string].real
# Choose the offshellness
special_mass = (1.0 + offshellness)*mass
# Create a fake production and decay process
prod_proc = base_objects.Process({'legs':base_objects.LegList(
copy.copy(leg) for leg in process.get('legs') if
leg.get('number') not in resonance['FSMothersNumbers'])})
# Add the resonant particle as a final state
# ID set to 0 since its mass will be forced
# Number set so as to be first in the list in get_momenta
prod_proc.get('legs').append(base_objects.Leg({
'number':max(l.get('number') for l in process.get('legs'))+1,
'state':True,
'id':0}))
# now the decay process
decay_proc = base_objects.Process({'legs':base_objects.LegList(
copy.copy(leg) for leg in process.get('legs') if leg.get('number')
in resonance['FSMothersNumbers'] and not leg.get('state')==False)})
# Add the resonant particle as an initial state
# ID set to 0 since its mass will be forced
# Number set to -1 as well so as to be sure it appears first in
# get_momenta
decay_proc.get('legs').insert(0,base_objects.Leg({
'number':-1,
'state':False,
'id':0}))
prod_kinematic = evaluator.get_momenta(prod_proc, options=options,
special_mass=special_mass)[0]
decay_kinematic = evaluator.get_momenta(decay_proc, options=options,
special_mass=special_mass)[0]
momenta = glue_momenta(prod_kinematic,decay_kinematic)
# Reshuffle the momentum so as to put it back in the order specified
# in the process definition.
# First the production momenta, without the special decayed particle
ordered_momenta = [(prod_proc.get('legs')[i].get('number'),momenta[i])
for i in range(len(prod_proc.get('legs'))-1)]
# And then the decay ones.
ordered_momenta += [(decay_proc.get('legs')[-i].get('number'),
momenta[-i]) for i in range(1,len(decay_proc.get('legs')))]
# Return the PSpoint found in the right order
return [m[1] for m in sorted(ordered_momenta, key = lambda el: el[0])]
# misc.sprint(resonance['PS_point_used'])
@misc.mute_logger()
def get_width(PDG, lambdaCMS, param_card):
""" Returns the width to use for particle with absolute PDG 'PDG' and
        for the lambdaCMS value 'lambdaCMS', using the cache if possible."""
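        # Note: widths are cached per (PDG, lambdaCMS) pair in
        # options['cached_widths']; for recompute_width in ['never','first_time']
        # the cache is filled for all lambdaCMS values assuming the width scales
        # linearly with lambdaCMS (see the end of this function).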
# If an unstable particle is in the external state, then set its width
# to zero and don't cache the result of course.
if abs(PDG) in [abs(leg.get('id')) for leg in process.get('legs')]:
return 0.0
particle = evaluator.full_model.get_particle(PDG)
# If it is a goldstone or a ghost, return zero as its width should anyway
# not be independent.
if particle.get('ghost') or particle.get('goldstone'):
return 0.0
# If its width is analytically set to zero, then return zero right away
if particle.get('width')=='ZERO':
return 0.0
if (PDG,lambdaCMS) in options['cached_widths']:
return options['cached_widths'][(PDG,lambdaCMS)]
if options['recompute_width'] == 'never':
width = evaluator.full_model.\
get('parameter_dict')[particle.get('width')].real
else:
# Crash if we are doing CMS and the width was not found and recycled above
if aloha.complex_mass:
raise MadGraph5Error, "The width for particle with PDG %d and"%PDG+\
" lambdaCMS=%f should have already been "%lambdaCMS+\
"computed during the NWA run."
            # Use MadWidth
if options['recompute_width'] in ['always','first_time']:
particle_name = particle.get_name()
with misc.TMP_directory(dir=options['output_path']) as path:
param_card.write(pjoin(path,'tmp.dat'))
# 2-body decay is the maximum that should be considered for NLO check.
# The default 1% accuracy is not enough when pushing to small
# lambdaCMS values, we need 1 per mil at least.
command = '%s --output=%s'%(particle_name,pjoin(path,'tmp.dat'))+\
' --path=%s --body_decay=2'%pjoin(path,'tmp.dat')+\
' --precision_channel=0.001'
# misc.sprint(command)
param_card.write(pjoin(options['output_path'],'tmp.dat'))
                    # The MG5 command compute_widths will change cmd._curr_model
                    # and cmd._curr_helas_model from what we specified, so we
                    # must make sure to restore them after it finishes.
orig_model = options['cmd']._curr_model
orig_helas_model = options['cmd']._curr_helas_model
options['cmd'].do_compute_widths(command, evaluator.full_model)
# Restore the models
options['cmd']._curr_model = orig_model
options['cmd']._curr_helas_model = orig_helas_model
# Restore the width of the model passed in argument since
# MadWidth will automatically update the width
evaluator.full_model.set_parameters_and_couplings(
param_card=param_card)
try:
tmp_param_card = check_param_card.ParamCard(pjoin(path,'tmp.dat'))
except:
raise MadGraph5Error, 'Error occured during width '+\
'computation with command:\n compute_widths %s'%command
width = tmp_param_card['decay'].get(PDG).value
# misc.sprint('lambdaCMS checked is', lambdaCMS,
# 'for particle',particle_name)
# misc.sprint('Width obtained :', width)
# if lambdaCMS != 1.0:
# misc.sprint('Naively expected (lin. scaling) :',
# options['cached_widths'][(PDG,1.0)]*lambdaCMS)
if options['recompute_width'] in ['never','first_time']:
# Assume linear scaling of the width
for lam in options['lambdaCMS']:
options['cached_widths'][(PDG,lam)]=width*(lam/lambdaCMS)
else:
options['cached_widths'][(PDG,lambdaCMS)] = width
return options['cached_widths'][(PDG,lambdaCMS)]
def get_order(diagrams, diagsName):
"""Compute the common summed of coupling orders used for this cms check
in the diagrams specified. When inconsistency occurs, use orderName
in the warning message if throwm."""
orders = set([])
for diag in diagrams:
diag_orders = diag.calculate_orders()
orders.add(sum((diag_orders[order] if order in diag_orders else 0)
for order in options['expansion_orders']))
if len(orders)>1:
logger.warning(msg%('%s '%diagsName,str(orders)))
return min(list(orders))
else:
return list(orders)[0]
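        # Example: with options['expansion_orders']=['QED','QCD'], a diagram whose
        # calculate_orders() returns {'QED':2,'QCD':1} contributes 3 to the set;
        # if the diagrams disagree, the warning above is issued and the minimum
        # sum is kept.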
MLoptions = copy.copy(options['MLOptions'])
# Make sure double-check helicities is set to False
MLoptions['DoubleCheckHelicityFilter'] = False
# Apply the seed tweak if present
for tweak in options['tweak']['custom']:
if tweak.startswith('seed'):
try:
new_seed = int(tweak[4:])
except ValueError:
raise MadGraph5Error, "Seed '%s' is not of the right format 'seed<int>'."%tweak
random.seed(new_seed)
mode = 'CMS' if aloha.complex_mass else 'NWA'
for i, leg in enumerate(process.get('legs')):
leg.set('number', i+1)
logger.info("Running CMS check for process %s (now doing %s scheme)" % \
( process.nice_string().replace('Process:', 'process'), mode))
proc_dir = None
resonances = None
warning_msg = "All %sdiagrams do not share the same sum of orders "+\
"%s; found %%s."%(','.join(options['expansion_orders']))+\
" This potentially problematic for the CMS check."
if NLO:
# We must first create the matrix element, export it and set it up.
# If the reuse option is specified, it will be recycled.
if options['name']=='auto':
proc_name = "%s%s_%s%s__%s__"%(('SAVED' if options['reuse'] else ''),
temp_dir_prefix, '_'.join(process.shell_string().split('_')[1:]),
('_' if process.get('perturbation_couplings') else '')+
'_'.join(process.get('perturbation_couplings')),mode)
else:
proc_name = "%s%s_%s__%s__"%(('SAVED' if options['reuse'] else ''),
temp_dir_prefix,options['name'], mode)
# Generate the ME
timing, matrix_element = generate_loop_matrix_element(process,
options['reuse'], output_path=options['output_path'],
cmd = options['cmd'], proc_name=proc_name,
loop_filter=options['loop_filter'])
if matrix_element is None:
# No diagrams for this process
return None
reusing = isinstance(matrix_element, base_objects.Process)
proc_dir = pjoin(options['output_path'],proc_name)
# Export the ME
infos = evaluator.setup_process(matrix_element, proc_dir,
reusing = reusing, param_card = options['param_card'],
MLOptions=MLoptions)
# Make sure the right MLoptions are set
evaluator.fix_MadLoopParamCard(pjoin(proc_dir,'Cards'),
mp = None, loop_filter = True,MLOptions=MLoptions)
# Make sure to start from fresh if previous run was stopped
tmp_card_backup = pjoin(proc_dir,'Cards','param_card.dat__TemporaryBackup__')
if os.path.isfile(tmp_card_backup):
# Run was stopped mid-way, we must then restore the original card
logger.info("Last run in process '%s' apparently aborted."%proc_dir+\
" Now reverting 'param_card.dat' to its original value.")
shutil.copy(tmp_card_backup, pjoin(proc_dir, 'Cards','param_card.dat'))
else:
# Create a temporary backup which will be cleaned if the run ends properly
shutil.copy(pjoin(proc_dir,'Cards','param_card.dat'), tmp_card_backup)
# Now do the same with model_functions.f
tmp_modelfunc_backup = pjoin(proc_dir,'Source','MODEL',
'model_functions.f__TemporaryBackup__')
if os.path.isfile(tmp_modelfunc_backup):
# Run was stopped mid-way, we must then restore the model functions
logger.info("Last run in process '%s' apparently aborted."%proc_dir+\
" Now reverting 'model_functions.f' to its original value.")
shutil.copy(tmp_modelfunc_backup, pjoin(proc_dir,'Source','MODEL',
'model_functions.f'))
evaluator.apply_log_tweak(proc_dir, 'recompile')
else:
# Create a temporary backup which will be cleaned if the run ends properly
shutil.copy(pjoin(proc_dir,'Source','MODEL','model_functions.f'),
tmp_modelfunc_backup)
# Make sure to setup correctly the helicity
MadLoopInitializer.fix_PSPoint_in_check(pjoin(proc_dir,'SubProcesses'),
read_ps = True, npoints = 1, hel_config = options['helicity'],
split_orders=options['split_orders'])
# And recompile while making sure to recreate the executable and
# modified sources
for dir in misc.glob('P*_*', pjoin(proc_dir,'SubProcesses')):
if not (re.search(r'.*P\d+_\w*$', dir) or not os.path.isdir(dir)):
continue
try:
os.remove(pjoin(dir,'check'))
os.remove(pjoin(dir,'check_sa.o'))
except OSError:
pass
# Now run make
with open(os.devnull, 'w') as devnull:
retcode = subprocess.call(['make','check'],
cwd=dir, stdout=devnull, stderr=devnull)
if retcode != 0:
raise MadGraph5Error, "Compilation error with "+\
"'make check' in %s"%dir
# Now find all the resonances of the ME, if not saved from a previous run
pkl_path = pjoin(proc_dir,'resonance_specs.pkl')
if reusing:
# We recover the information from the pickle dumped during the
# original run
if not os.path.isfile(pkl_path):
raise InvalidCmd('The folder %s could'%proc_dir+\
" not be reused because the resonance specification file "+
"'resonance_specs.pkl' is missing.")
else:
proc_name, born_order, loop_order, resonances = \
save_load_object.load_from_file(pkl_path)
# Make sure to rederive the phase-space point since parameters
# such as masses, seed, offshellness could have affected it
for res in resonances:
set_PSpoint(res, force_other_res_offshell=resonances)
# Second run (CMS): if opt is a list, append the info for later reuse; otherwise opt already holds the resonances to use.
if isinstance(opt, list):
opt.append((proc_name, resonances))
else:
resonances = opt
else:
helas_born_diagrams = matrix_element.get_born_diagrams()
if len(helas_born_diagrams)==0:
logger.warning('The CMS check for loop-induced process is '+\
'not yet available (nor is it very interesting).')
return None
born_order = get_order(helas_born_diagrams,'Born')
loop_order = get_order(matrix_element.get_loop_diagrams(),'loop')
# Second run (CMS): if opt is a list, append the info for later reuse; otherwise opt already holds the resonances to use.
if isinstance(opt, list):
opt.append((process.base_string(),find_resonances(helas_born_diagrams)))
resonances = opt[-1][1]
else:
resonances = opt
# Save the resonances to a pickle file in the output directory so that
# it can potentially be reused.
save_load_object.save_to_file(pkl_path, (process.base_string(),
born_order, loop_order,resonances))
else:
# The LO equivalent
try:
amplitude = diagram_generation.Amplitude(process)
except InvalidCmd:
logging.info("No diagrams for %s" % \
process.nice_string().replace('Process', 'process'))
return None
if not amplitude.get('diagrams'):
# This process has no diagrams; go to next process
logging.info("No diagrams for %s" % \
process.nice_string().replace('Process', 'process'))
return None
matrix_element = helas_objects.HelasMatrixElement(amplitude,
gen_color=True)
diagrams = matrix_element.get('diagrams')
born_order = get_order(diagrams,'Born')
# Loop order set to -1 indicates an LO result
loop_order = -1
# Find all the resonances of the ME, if not already given in opt
if isinstance(opt, list):
opt.append((process.base_string(),find_resonances(diagrams)))
resonances = opt[-1][1]
else:
resonances= opt
if len(resonances)==0:
logger.info("No resonance found for process %s."\
%process.base_string())
return None
# Cache the default param_card for NLO
if not options['cached_param_card'][mode][0]:
if NLO:
param_card = check_param_card.ParamCard(
pjoin(proc_dir,'Cards','param_card.dat'))
else:
param_card = check_param_card.ParamCard(
StringIO.StringIO(evaluator.full_model.write_param_card()))
options['cached_param_card'][mode][0] = param_card
name2block, _ = param_card.analyze_param_card()
options['cached_param_card'][mode][1] = name2block
else:
param_card = options['cached_param_card'][mode][0]
name2block = options['cached_param_card'][mode][1]
# Already add the coupling order for this squared ME.
if loop_order != -1 and (loop_order+born_order)%2 != 0:
raise MadGraph5Error, 'The summed squared matrix element '+\
" order '%d' is not even."%(loop_order+born_order)
result = {'born_order':born_order,
'loop_order': (-1 if loop_order==-1 else (loop_order+born_order)/2),
'resonances_result':[]}
# Create a physical backup of the param_card
if NLO:
try:
shutil.copy(pjoin(proc_dir,'Cards','param_card.dat'),
pjoin(proc_dir,'Cards','param_card.dat__backUp__'))
except:
pass
# Apply custom tweaks
had_log_tweaks=False
if NLO:
for tweak in options['tweak']['custom']:
if tweak.startswith('seed'):
continue
try:
logstart, logend = tweak.split('->')
except:
raise Madgraph5Error, "Tweak '%s' not reckognized."%tweak
if logstart in ['logp','logm', 'log'] and \
logend in ['logp','logm', 'log']:
if NLO:
evaluator.apply_log_tweak(proc_dir, [logstart, logend])
had_log_tweaks = True
else:
raise Madgraph5Error, "Tweak '%s' not reckognized."%tweak
if had_log_tweaks:
evaluator.apply_log_tweak(proc_dir, 'recompile')
# Select what resonances should be run
if options['resonances']=='all':
resonances_to_run = resonances
elif isinstance(options['resonances'],int):
resonances_to_run = resonances[:options['resonances']]
elif isinstance(options['resonances'],list):
resonances_to_run = []
for res in resonances:
for res_selection in options['resonances']:
if abs(res['ParticlePDG'])==res_selection[0] and \
res['FSMothersNumbers']==set(res_selection[1]):
resonances_to_run.append(res)
break
else:
raise InvalidCmd("Resonance selection '%s' not reckognized"%\
str(options['resonances']))
# Display progressbar both for LO and NLO for now but not when not showing
# the plots
if NLO and options['show_plot']:
widgets = ['ME evaluations:', pbar.Percentage(), ' ',
pbar.Bar(),' ', pbar.ETA(), ' ']
progress_bar = pbar.ProgressBar(widgets=widgets,
maxval=len(options['lambdaCMS'])*len(resonances_to_run), fd=sys.stdout)
progress_bar.update(0)
# Flush stdout to force the progress_bar to appear
sys.stdout.flush()
else:
progress_bar = None
for resNumber, res in enumerate(resonances_to_run):
# First add a dictionary for this resonance to the result with already
# one key specifying the resonance
result['resonances_result'].append({'resonance':res,'born':[]})
if NLO:
result['resonances_result'][-1]['finite'] = []
# Now scan the different lambdaCMS values
for lambdaNumber, lambdaCMS in enumerate(options['lambdaCMS']):
# Setup the model for that value of lambdaCMS
# The copy constructor below creates a deep copy
new_param_card = check_param_card.ParamCard(param_card)
# Change all specified parameters
for param, replacement in options['expansion_parameters'].items():
# Replace the temporary prefix used for evaluation of the
# substitution expression
orig_param = param.replace('__tmpprefix__','')
if orig_param not in name2block:
# It can be that some parameters are in the NWA model but not
# in the CMS one, such as the Yukawas for example.
# logger.warning("Unknown parameter '%s' in mode '%s'."%(param,mode))
continue
for block, lhaid in name2block[orig_param]:
orig_value = float(param_card[block].get(lhaid).value)
new_value = eval(replacement,
{param:orig_value,'lambdacms':lambdaCMS})
new_param_card[block].get(lhaid).value=new_value
# Apply these changes already, for the purpose of the width computation
# (optional, since the new_param_card is now passed directly to the
# width computation function). In principle this matters only in the
# CMS, and there the widths would be reused from their prior
# computation within NWA with zero widths. So, all in all, the line
# below is not really crucial, but semantically it ought to be there.
evaluator.full_model.set_parameters_and_couplings(
param_card=new_param_card)
# Now compute or recyle all widths
for decay in new_param_card['decay'].keys():
if mode=='CMS':
new_width = get_width(abs(decay[0]), lambdaCMS,
new_param_card)
else:
new_width = 0.0
new_param_card['decay'].get(decay).value= new_width
# Apply these changes for the purpose of the final computation
evaluator.full_model.set_parameters_and_couplings(
param_card=new_param_card)
if NLO:
new_param_card.write(pjoin(proc_dir,'Cards','param_card.dat'))
# Write the recomputed widths so that they can potentially be
# used for future runs (here for the model in the CMS format)
if lambdaCMS==1.0 and mode=='CMS' and \
options['recompute_width'] in ['always','first_time']:
new_param_card.write(pjoin(proc_dir,
'Cards','param_card.dat_recomputed_widths'))
# If recomputing widths with MadWidth, we want to do it within
# the NWA model with zero widths.
if mode=='NWA' and (options['recompute_width']=='always' or (
options['recompute_width']=='first_time' and lambdaCMS==1.0)):
# The copy constructor below creates a deep copy
tmp_param_card = check_param_card.ParamCard(new_param_card)
# We don't use the result here, it is just so that it is put
# in the cache and reused in the CMS run that follows.
for decay in new_param_card['decay'].keys():
particle_name = evaluator.full_model.get_particle(\
abs(decay[0])).get_name()
new_width = get_width(abs(decay[0]),lambdaCMS,new_param_card)
tmp_param_card['decay'].get(decay).value = new_width
if not options['has_FRdecay'] and new_width != 0.0 and \
(abs(decay[0]),lambdaCMS) not in options['cached_widths']:
logger.info('Numerically computed width of particle'+\
' %s for lambda=%.4g : %-9.6gGeV'%
(particle_name,lambdaCMS,new_width))
# Write the recomputed widths so that they can potentially be
# used for future runs (here the model in the NWA format)
if lambdaCMS==1.0 and NLO:
tmp_param_card.write(pjoin(proc_dir,
'Cards','param_card.dat_recomputed_widths'))
# Apply the params tweaks
for param, replacement in options['tweak']['params'].items():
# Replace the temporary prefix used for evaluation of the
# substitution expression
orig_param = param.replace('__tmpprefix__','')
# Treat the special keyword 'allwidths'
if orig_param.lower() == 'allwidths':
# Apply the rule to all widths
for decay in new_param_card['decay'].keys():
orig_value = float(new_param_card['decay'].get(decay).value)
new_value = eval(replacement,
{param:orig_value,'lambdacms':lambdaCMS})
new_param_card['decay'].get(decay).value = new_value
continue
if orig_param not in name2block:
# It can be that some parameters are in the NWA model but not
# in the CMS one, such as the Yukawas for example.
continue
for block, lhaid in name2block[orig_param]:
orig_value = float(new_param_card[block].get(lhaid).value)
new_value = eval(replacement,
{param:orig_value,'lambdacms':lambdaCMS})
new_param_card[block].get(lhaid).value=new_value
if options['tweak']['params']:
# Apply the tweaked param_card one last time
evaluator.full_model.set_parameters_and_couplings(
param_card=new_param_card)
if NLO:
new_param_card.write(pjoin(proc_dir,'Cards','param_card.dat'))
# Finally ready to compute the matrix element
if NLO:
ME_res = LoopMatrixElementEvaluator.get_me_value(process, 0,
proc_dir, PSpoint=res['PS_point_used'], verbose=False,
format='dict', skip_compilation=True)
# Notice that there is much more information in ME_res. It can
# be forwarded to check_complex_mass_scheme in this result
# dictionary if necessary for the analysis. (or even the full
# dictionary ME_res can be added).
result['resonances_result'][-1]['born'].append(ME_res['born'])
result['resonances_result'][-1]['finite'].append(
ME_res['finite']*ME_res['born']*ME_res['alphaS_over_2pi'])
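# The product above presumably converts MadLoop's 'finite' entry, which is
# normalised to the Born and expressed in units of alpha_S/(2 pi), back into
# an absolute virtual contribution before it is stored.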
else:
ME_res = evaluator.evaluate_matrix_element(matrix_element,
p=res['PS_point_used'], auth_skipping=False, output='m2')[0]
result['resonances_result'][-1]['born'].append(ME_res)
if not progress_bar is None:
progress_bar.update(resNumber*len(options['lambdaCMS'])+\
(lambdaNumber+1))
# Flush to force the printout of the progress_bar to be updated
sys.stdout.flush()
# Restore the original continued log definition if necessary
log_reversed = False
for tweak in options['tweak']['custom']:
if tweak.startswith('log') and had_log_tweaks:
if log_reversed:
continue
if NLO:
evaluator.apply_log_tweak(proc_dir, 'default')
evaluator.apply_log_tweak(proc_dir, 'recompile')
log_reversed = True
# Restore the original model parameters
evaluator.full_model.set_parameters_and_couplings(param_card=param_card)
if NLO:
try:
shutil.copy(pjoin(proc_dir,'Cards','param_card.dat__backUp__'),
pjoin(proc_dir,'Cards','param_card.dat'))
except:
param_card.write(pjoin(proc_dir,'Cards','param_card.dat'))
# All should have been restored properly, so we can now clean the temporary
# backups
try:
os.remove(pjoin(proc_dir,'Cards','param_card.dat__TemporaryBackup__'))
os.remove(pjoin(proc_dir,'Source','MODEL',
'model_functions.f__TemporaryBackup__'))
except:
pass
return (process.nice_string().replace('Process:', '').strip(),result)
def get_value(process, evaluator, p=None, options=None):
"""Return the value/momentum for a phase space point"""
for i, leg in enumerate(process.get('legs')):
leg.set('number', i+1)
logger.info("Checking %s in %s gauge" % \
( process.nice_string().replace('Process:', 'process'),
'unitary' if aloha.unitary_gauge else 'feynman'))
legs = process.get('legs')
# Generate a process with these legs
# Generate the amplitude for this process
try:
if process.get('perturbation_couplings')==[]:
amplitude = diagram_generation.Amplitude(process)
else:
amplitude = loop_diagram_generation.LoopAmplitude(process)
except InvalidCmd:
logging.info("No diagrams for %s" % \
process.nice_string().replace('Process', 'process'))
return None
if not amplitude.get('diagrams'):
# This process has no diagrams; go to next process
logging.info("No diagrams for %s" % \
process.nice_string().replace('Process', 'process'))
return None
if not p:
# Generate phase space point to use
p, w_rambo = evaluator.get_momenta(process, options)
# Generate the HelasMatrixElement for the process
if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
matrix_element = helas_objects.HelasMatrixElement(amplitude,
gen_color = True)
else:
matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
gen_color = True, optimized_output = evaluator.loop_optimized_output)
mvalue = evaluator.evaluate_matrix_element(matrix_element, p=p,
output='jamp',options=options)
if mvalue and mvalue['m2']:
return {'process':process.base_string(),'value':mvalue,'p':p}
def output_lorentz_inv_loop(comparison_results, output='text'):
"""Present the results of a comparison in a nice list format for loop
processes. It details the results from each Lorentz transformation performed.
"""
process = comparison_results[0]['process']
results = comparison_results[0]['results']
# Rotations do not change the reference vector for helicity projection,
# the loop MEs are invariant under them with relatively good accuracy.
threshold_rotations = 1e-6
# This is typically not the case for the boosts when one cannot really
# expect better than 1e-5. It turns out that this is even true in
# quadruple precision, for an unknown reason so far.
threshold_boosts = 1e-3
res_str = "%s" % process.base_string()
transfo_col_size = 17
col_size = 18
transfo_name_header = 'Transformation name'
if len(transfo_name_header) + 1 > transfo_col_size:
transfo_col_size = len(transfo_name_header) + 1
misc.sprint(results)
for transfo_name, value in results:
if len(transfo_name) + 1 > transfo_col_size:
transfo_col_size = len(transfo_name) + 1
res_str += '\n' + fixed_string_length(transfo_name_header, transfo_col_size) + \
fixed_string_length("Value", col_size) + \
fixed_string_length("Relative diff.", col_size) + "Result"
ref_value = results[0]
res_str += '\n' + fixed_string_length(ref_value[0], transfo_col_size) + \
fixed_string_length("%1.10e" % ref_value[1]['m2'], col_size)
# Now that the reference value has been retrieved, we can scan all the
# other evaluations
all_pass = True
for res in results[1:]:
threshold = threshold_boosts if 'BOOST' in res[0].upper() else \
threshold_rotations
rel_diff = abs((ref_value[1]['m2']-res[1]['m2'])\
/((ref_value[1]['m2']+res[1]['m2'])/2.0))
this_pass = rel_diff <= threshold
if not this_pass:
all_pass = False
res_str += '\n' + fixed_string_length(res[0], transfo_col_size) + \
fixed_string_length("%1.10e" % res[1]['m2'], col_size) + \
fixed_string_length("%1.10e" % rel_diff, col_size) + \
("Passed" if this_pass else "Failed")
if all_pass:
res_str += '\n' + 'Summary: passed'
else:
res_str += '\n' + 'Summary: failed'
return res_str
def output_lorentz_inv(comparison_results, output='text'):
"""Present the results of a comparison in a nice list format
if output='fail' return the number of failed processes -- for tests --
"""
# Special output for loop processes
if comparison_results[0]['process']['perturbation_couplings']!=[]:
return output_lorentz_inv_loop(comparison_results, output)
proc_col_size = 17
threshold=1e-10
process_header = "Process"
if len(process_header) + 1 > proc_col_size:
proc_col_size = len(process_header) + 1
for proc, values in comparison_results:
if len(proc) + 1 > proc_col_size:
proc_col_size = len(proc) + 1
col_size = 18
pass_proc = 0
fail_proc = 0
no_check_proc = 0
failed_proc_list = []
no_check_proc_list = []
res_str = fixed_string_length(process_header, proc_col_size) + \
fixed_string_length("Min element", col_size) + \
fixed_string_length("Max element", col_size) + \
fixed_string_length("Relative diff.", col_size) + \
"Result"
for one_comp in comparison_results:
proc = one_comp['process'].base_string()
data = one_comp['results']
if data == 'pass':
no_check_proc += 1
no_check_proc_list.append(proc)
continue
values = [data[i]['m2'] for i in range(len(data))]
min_val = min(values)
max_val = max(values)
diff = (max_val - min_val) / abs(max_val)
res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
fixed_string_length("%1.10e" % min_val, col_size) + \
fixed_string_length("%1.10e" % max_val, col_size) + \
fixed_string_length("%1.10e" % diff, col_size)
if diff < threshold:
pass_proc += 1
proc_succeed = True
res_str += "Passed"
else:
fail_proc += 1
proc_succeed = False
failed_proc_list.append(proc)
res_str += "Failed"
#check all the JAMP
# loop over jamp
# Keep in mind that this is not available for loop processes where the
# jamp list is empty
if len(data[0]['jamp'])!=0:
for k in range(len(data[0]['jamp'][0])):
sum = [0] * len(data)
# loop over helicity
for j in range(len(data[0]['jamp'])):
#values for the different lorentz boost
values = [abs(data[i]['jamp'][j][k])**2 for i in range(len(data))]
sum = [sum[i] + values[i] for i in range(len(values))]
# Compare the different lorentz boost
min_val = min(sum)
max_val = max(sum)
if not max_val:
continue
diff = (max_val - min_val) / max_val
tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , proc_col_size) + \
fixed_string_length("%1.10e" % min_val, col_size) + \
fixed_string_length("%1.10e" % max_val, col_size) + \
fixed_string_length("%1.10e" % diff, col_size)
if diff > 1e-10:
if not len(failed_proc_list) or failed_proc_list[-1] != proc:
fail_proc += 1
pass_proc -= 1
failed_proc_list.append(proc)
res_str += tmp_str + "Failed"
elif not proc_succeed:
res_str += tmp_str + "Passed"
res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
(pass_proc, pass_proc + fail_proc,
fail_proc, pass_proc + fail_proc)
if fail_proc != 0:
res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
if no_check_proc:
res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
if output == 'text':
return res_str
else:
return fail_proc
def output_unitary_feynman(comparison_results, output='text'):
"""Present the results of a comparison in a nice list format
if output='fail' return the number of failed processes -- for tests --
"""
proc_col_size = 17
# We use the first element of the comparison_result list to store the
# process definition object
pert_coupl = comparison_results[0]['perturbation_couplings']
comparison_results = comparison_results[1:]
if pert_coupl:
process_header = "Process [virt="+" ".join(pert_coupl)+"]"
else:
process_header = "Process"
if len(process_header) + 1 > proc_col_size:
proc_col_size = len(process_header) + 1
for data in comparison_results:
proc = data['process']
if len(proc) + 1 > proc_col_size:
proc_col_size = len(proc) + 1
pass_proc = 0
fail_proc = 0
no_check_proc = 0
failed_proc_list = []
no_check_proc_list = []
col_size = 18
res_str = fixed_string_length(process_header, proc_col_size) + \
fixed_string_length("Unitary", col_size) + \
fixed_string_length("Feynman", col_size) + \
fixed_string_length("Relative diff.", col_size) + \
"Result"
for one_comp in comparison_results:
proc = one_comp['process']
data = [one_comp['value_unit'], one_comp['value_feynm']]
if data[0] == 'pass':
no_check_proc += 1
no_check_proc_list.append(proc)
continue
values = [data[i]['m2'] for i in range(len(data))]
min_val = min(values)
max_val = max(values)
# when max_val is also negative
# diff will be negative if there is no abs
diff = (max_val - min_val) / abs(max_val)
res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
fixed_string_length("%1.10e" % values[0], col_size) + \
fixed_string_length("%1.10e" % values[1], col_size) + \
fixed_string_length("%1.10e" % diff, col_size)
if diff < 1e-8:
pass_proc += 1
proc_succeed = True
res_str += "Passed"
else:
fail_proc += 1
proc_succeed = False
failed_proc_list.append(proc)
res_str += "Failed"
#check all the JAMP
# loop over jamp
# This is not available for loop processes where the jamp list returned
# is empty.
if len(data[0]['jamp'])>0:
for k in range(len(data[0]['jamp'][0])):
sum = [0, 0]
# loop over helicity
for j in range(len(data[0]['jamp'])):
#values for the different lorentz boost
values = [abs(data[i]['jamp'][j][k])**2 for i in range(len(data))]
sum = [sum[i] + values[i] for i in range(len(values))]
# Compare the different lorentz boost
min_val = min(sum)
max_val = max(sum)
if not max_val:
continue
diff = (max_val - min_val) / max_val
tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , col_size) + \
fixed_string_length("%1.10e" % sum[0], col_size) + \
fixed_string_length("%1.10e" % sum[1], col_size) + \
fixed_string_length("%1.10e" % diff, col_size)
if diff > 1e-10:
if not len(failed_proc_list) or failed_proc_list[-1] != proc:
fail_proc += 1
pass_proc -= 1
failed_proc_list.append(proc)
res_str += tmp_str + "Failed"
elif not proc_succeed:
res_str += tmp_str + "Passed"
res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
(pass_proc, pass_proc + fail_proc,
fail_proc, pass_proc + fail_proc)
if fail_proc != 0:
res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
if no_check_proc:
res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
if output == 'text':
return res_str
else:
return fail_proc
def CMS_save_path(extension, cms_res, used_model, opts, output_path=None):
"""Creates a suitable filename for saving these results."""
if opts['name']=='auto' and opts['analyze']!='None':
# Reuse the same name then
return '%s.%s'%(os.path.splitext(opts['analyze'].split(',')[0])\
[0],extension)
# if a name is specified, use it
if opts['name']!='auto':
basename = opts['name']
else:
prefix = 'cms_check_'
# Use process name if there is only one process
if len(cms_res['ordered_processes'])==1:
proc = cms_res['ordered_processes'][0]
replacements = [('=>','gt'),('<=','lt'),('/','_no_'),
(' ',''),('+','p'),('-','m'),
('~','x'), ('>','_'),('=','eq'),('^2','squared')]
# Remove the perturbation couplings:
try:
proc=proc[:proc.index('[')]
except ValueError:
pass
for key, value in replacements:
proc = proc.replace(key,value)
basename =prefix+proc+'_%s_'%used_model.get('name')+\
( ('_'+'_'.join(cms_res['perturbation_orders'])) if \
cms_res['perturbation_orders']!=[] else '')
# Use timestamp otherwise
else:
basename = prefix+datetime.datetime.now().strftime("%Y_%m_%d_%Hh%Mm%Ss")
suffix = '_%s'%opts['tweak']['name'] if opts['tweak']['name']!='' else ''
if output_path:
return pjoin(output_path,'%s%s.%s'%(basename,suffix,extension))
else:
return '%s%s.%s'%(basename,suffix,extension)
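# As an illustration of the naming scheme above, a single process 'e+ e- > w+ w-'
# checked in the 'sm' model with no tweak name would give something like
# 'cms_check_epem_wpwm_sm_.pkl' (the exact string follows the replacement list).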
def output_complex_mass_scheme(result,output_path, options, model, output='text'):
""" Outputs nicely the outcome of the complex mass scheme check performed
by varying the width in the offshell region of resonances found for eahc process.
Output just specifies whether text should be returned or a list of failed
processes. Use 'concise_text' for a consise report of the results."""
pert_orders=result['perturbation_orders']
######## CHECK PARAMETERS #########
#
# diff_lambda_power chooses the power by which one should divide the difference
# curve. The test should only work with 1, but it is useful for the LO
# check to see whether the difference has an O(\lambda) contribution by setting this
# parameter to 2. If the Born does not have O(\lambda) contributions
# (i.e. if the test still passes with diff_lambda_power=2) then the NLO test
# will not be sensitive to the CMS implementation details.
diff_lambda_power = options['diff_lambda_power']
# DISCLAIMER:
# The CMS check is non-trivial to automate and it is actually best done
# manually by looking at plots for various implementation of the CMS.
# The automatic check performed here with the default parameters below
# should typically capture the main features of the CMS implementation.
# There will always be exceptions however.
#
if 'has_FRdecay' in result:
has_FRdecay = result['has_FRdecay']
else:
has_FRdecay = False
# be tighter at LO
if not pert_orders:
CMS_test_threshold = 1e-3
else:
# AT NLO, a correct cancellation is typically of the order of 2% with
# a lowest lambda value of 10^-4. It is clear that the threshold should
# scale with the minimum lambda value because any little offset in the
# LO width value for example (acceptable when less than 1% if the
# widths were computed numerically) will lead to an inaccuracy of the
# cancellation scaling with lambda.
if not has_FRdecay and ('recomputed_with' not in result or \
result['recompute_width'] in ['always','first_time']):
CMS_test_threshold = 2e-2*(1.0e-4/min(result['lambdaCMS']))
else:
# If the widths were not computed numerically, then the accuracy of
# the cancellation should be better.
CMS_test_threshold = 2e-2*(1.0e-5/min(result['lambdaCMS']))
# This threshold sets how flat the diff line must be when approaching it from
# the right to start considering its value. Notice that it cannot be larger
# than the CMS_test_threshold
consideration_threshold = min(CMS_test_threshold/10.0, 0.05)
# Number of values grouped with the median technique to avoid being
# sensitive to instabilities
group_val = 3
# Starting from which value, relative to the averaged diff, one should consider
# the asymptotic diff median to be exactly 0.0, in which case one would use this
# average instead of the asymptotic median. The LO process u d~ > e+ ve, for
# example, exhibits a difference at zero.
diff_zero_threshold = 1e-3
# Plotting parameters. Specify the lambda range to plot.
# lambda_range = [-1,-1] returns the default automatic setup
lambda_range = options['lambda_plot_range']
##################################
# One can print out the raw results by uncommenting the line below
# misc.sprint(result)
# for i, res in enumerate(result['a e- > e- ve ve~ [ virt = QCD QED ]']['CMS']):
# for i, res in enumerate(result['u d~ > e+ ve a [ virt = QCD QED ]']['CMS']):
# if res['resonance']['FSMothersNumbers'] == set([3, 4]):
# misc.sprint(res['resonance']['PS_point_used'])
# stop
res_str = ''
# Variables for the concise report
concise_str = ''
concise_data = '%%(process)-%ds%%(asymptot)-15s%%(cms_check)-25s%%(status)-25s\n'
concise_repl_dict = {'Header':{'process':'Process',
'asymptot':'Asymptot',
'cms_check':'Deviation to asymptot',
'status':'Result'}}
####### BEGIN helper functions
# Choose here whether to use LaTeX particle names or not
# Possible values are 'none', 'model' or 'built-in'
useLatexParticleName = 'built-in'
name2tex = {'e+':r'e^+','w+':r'W^+','a':r'\gamma','g':'g',
'e-':r'e^-','w-':r'W^-','z':'Z','h':'H',
'mu+':r'\mu^+',
'mu-':r'\mu^-',
'ta+':r'\tau^+',
'ta-':r'\tau^-'}
for p in ['e','m','t']:
d = {'e':'e','m':r'\mu','t':r'\tau'}
name2tex['v%s'%p]=r'\nu_{%s}'%d[p]
name2tex['v%s~'%p]=r'\bar{\nu_{%s}}'%d[p]
for p in ['u','d','c','s','b','t']:
name2tex[p]=p
name2tex['%s~'%p]=r'\bar{%s}'%p
def format_particle_name(particle, latex=useLatexParticleName):
p_name = particle
if latex=='model':
try:
texname = model.get_particle(particle).get('texname')
if texname and texname!='none':
p_name = r'$\displaystyle %s$'%texname
except:
pass
elif latex=='built-in':
try:
p_name = r'$\displaystyle %s$'%name2tex[particle]
except:
pass
return p_name
def resonance_str(resonance, latex=useLatexParticleName):
""" Provides a concise string to characterize the resonance """
particle_name = model.get_particle(resonance['ParticlePDG']).get_name()
mothersID=['%d'%n for n in sorted(resonance['FSMothersNumbers'])]
return r"%s [%s]"%(format_particle_name(particle_name,latex=latex),
','.join(mothersID))
def format_title(process, resonance):
""" Format the plot title given the process and resonance """
process_string = []
for particle in process.split():
if '<=' in particle:
particle = particle.replace('<=',r'$\displaystyle <=$')
if '^2' in particle:
particle = particle.replace('^2',r'$\displaystyle ^2$')
if particle=='$$':
process_string.append(r'\$\$')
continue
if particle=='>':
process_string.append(r'$\displaystyle \rightarrow$')
continue
if particle=='/':
process_string.append(r'$\displaystyle /$')
continue
process_string.append(format_particle_name(particle))
if resonance=='':
return r'CMS check for %s' %(' '.join(process_string))
else:
return r'CMS check for %s ( resonance %s )'\
%(' '.join(process_string),resonance)
def guess_lambdaorder(ME_values_list, lambda_values, expected=None,
proc=None, res=None):
""" Guess the lambda scaling from a list of ME values and return it.
Also compare with the expected result if specified and trigger a
warning if not in agreement."""
# guess the lambdaCMS power in the amplitude squared
bpowers = []
for i, lambdaCMS in enumerate(lambda_values[1:]):
bpowers.append(round(math.log(ME_values_list[0]/ME_values_list[i+1],\
lambda_values[0]/lambdaCMS)))
# Pick the most representative power
bpower = sorted([(el, bpowers.count(el)) for el in set(bpowers)],
key = lambda elem: elem[1], reverse=True)[0][0]
if not expected:
return bpower
if bpower != expected:
logger.warning('The apparent scaling of the squared amplitude '+
'seems inconsistent w.r.t. the detected value '+
'(%i vs %i). %i will be used.'%(expected,bpower,bpower)+
' This happened for process %s and resonance %s'%(proc, res))
return bpower
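# Quick numerical illustration of the scaling guess above: if the squared ME
# behaves as lambda^2, then for lambda_values=[1.0, 0.1] one gets
# round(log(ME(1.0)/ME(0.1), 1.0/0.1)) = round(log(100, 10)) = 2.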
def check_stability(ME_values, lambda_values, lambda_scaling, values_name):
""" Checks if the values passed in argument are stable and return the
stability check outcome warning if it is not precise enough. """
values = sorted([
abs(val*(lambda_values[0]/lambda_values[i])**lambda_scaling) for \
i, val in enumerate(ME_values)])
median = values[len(values)//2]
max_diff = max(abs(values[0]-median),abs(values[-1]-median))
stability = max_diff/median
stab_threshold = 1e-2
if stability >= stab_threshold:
return "== WARNING: Stability check failed for '%s' with stability %.2e.\n"\
%(values_name, stability)
else:
return None
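# For instance, with lambda_scaling=2 the last points are rescaled by
# (lambda_0/lambda_i)**2 so that they should all sit on top of each other;
# a spread of more than 1% around their median triggers the warning above.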
####### END helper functions
if options['analyze']=='None':
if options['reuse']:
save_path = CMS_save_path('pkl', result, model, options,
output_path=output_path)
buff = "\nThe results of this check have been stored on disk and its "+\
"analysis can be rerun at anytime with the MG5aMC command:\n "+\
" check cms --analyze=%s\n"%save_path
res_str += buff
concise_str += buff
save_load_object.save_to_file(save_path, result)
elif len(result['ordered_processes'])>0:
buff = "\nUse the following synthax if you want to store "+\
"the raw results on disk.\n"+\
" check cms -reuse <proc_def> <options>\n"
res_str += buff
concise_str += buff
############################
# Numerical check first #
############################
checks = []
for process in result['ordered_processes']:
checks.extend([(process,resID) for resID in \
range(len(result[process]['CMS']))])
if options['reuse']:
logFile = open(CMS_save_path(
'log', result, model, options, output_path=output_path),'w')
lambdaCMS_list=result['lambdaCMS']
# List of failed processes
failed_procs = []
# A bar printing function helper. Change the length here for esthetics
bar = lambda char: char*47
# Write out the widths used if information is present:
if 'widths_computed' in result:
res_str += '\n%s%s%s\n'%(bar('='),' Widths ',bar('='))
if result['recompute_width'] == 'never':
res_str += '| Widths extracted from the param_card.dat'
else:
res_str += '| Widths computed %s'%('analytically' if has_FRdecay
else 'numerically')
if result['recompute_width'] == 'first_time':
res_str += ' for \lambda = 1'
elif result['recompute_width'] == 'always':
res_str += ' for all \lambda values'
res_str += " using mode '--recompute_width=%s'.\n"%result['recompute_width']
for particle_name, width in result['widths_computed']:
res_str += '| %-10s = %-11.6gGeV\n'%('Width(%s)'%particle_name,width)
res_str += '%s%s%s\n'%(bar('='),'='*8,bar('='))
# Doing the analysis to printout to the MG5 interface and determine whether
# the test is passed or not
# Number of last points to consider for the stability test
nstab_points=group_val
# Store here the asymptot detected for each difference curve
differences_target = {}
for process, resID in checks:
# Reinitialize the concise result replacement dictionary
# (only one resonance is indicated in this one, no matter what.)
concise_repl_dict[process] = {'process':process,
'asymptot':'N/A',
'cms_check':'N/A',
'status':'N/A'}
proc_res = result[process]
cms_res = proc_res['CMS'][resID]
nwa_res = proc_res['NWA'][resID]
resonance = resonance_str(cms_res['resonance'], latex='none')
cms_born=cms_res['born']
nwa_born=nwa_res['born']
# Starting top thick bar
res_str += '\n%s%s%s\n'%(bar('='),'='*8,bar('='))
# Centered process and resonance title
proc_title = "%s (resonance %s)"%(process,resonance)
centering = (bar(2)+8-len(proc_title))//2
res_str += "%s%s\n"%(' '*centering,proc_title)
# Starting bottom thin bar
res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
# Reminder if diff_lambda_power is not 1
if diff_lambda_power!=1:
res_str += "== WARNING diff_lambda_power is not 1 but = %g\n"%diff_lambda_power
res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
born_power = guess_lambdaorder(nwa_born,lambdaCMS_list,
expected=proc_res['born_order'], proc=process, res=resonance)
stab_cms_born = check_stability(cms_born[-nstab_points:],
lambdaCMS_list[-nstab_points:], born_power, 'CMS Born')
if stab_cms_born:
res_str += stab_cms_born
stab_nwa_born = check_stability(nwa_born[-nstab_points:],
lambdaCMS_list[-nstab_points:], born_power, 'NWA Born')
if stab_nwa_born:
res_str += stab_nwa_born
# Write out the phase-space point
res_str += "== Kinematic configuration in GeV (E,px,pypz)\n"
for i, p in enumerate(cms_res['resonance']['PS_point_used']):
res_str += " | p%-2.d = "%(i+1)
for pi in p:
res_str += '%-24.17g'%pi if pi<0.0 else ' %-23.17g'%pi
res_str += "\n"
# Write out the offshellnesses specification
res_str += "== Offshellnesses of all detected resonances\n"
for res_name, offshellness in cms_res['resonance']['offshellnesses']:
res_str += " | %-15s = %f\n"%(res_name, offshellness)
res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
if not pert_orders:
res_str += "== Born scaling lambda^n_born. nborn = %d\n"%born_power
else:
cms_finite=cms_res['finite']
nwa_finite=nwa_res['finite']
loop_power = guess_lambdaorder(nwa_finite,lambdaCMS_list,
expected=proc_res['loop_order'], proc=process, res=resonance)
res_str += "== Scaling lambda^n. nborn, nloop = %d, %d\n"\
%(born_power,loop_power)
stab_cms_finite = check_stability(cms_finite[-nstab_points:],
lambdaCMS_list[-nstab_points:], loop_power, 'CMS finite')
if stab_cms_finite:
res_str += stab_cms_finite
stab_nwa_finite = check_stability(nwa_finite[-nstab_points:],
lambdaCMS_list[-nstab_points:], loop_power, 'NWA finite')
if stab_nwa_finite:
res_str += stab_nwa_finite
# Now organize data
CMSData = []
NWAData = []
DiffData = []
for idata, lam in enumerate(lambdaCMS_list):
if not pert_orders:
new_cms=cms_born[idata]/(lam**born_power)
new_nwa=nwa_born[idata]/(lam**born_power)
else:
new_cms=(cms_finite[idata]+cms_born[idata]-nwa_born[idata])/(lam*nwa_born[idata])
new_nwa=nwa_finite[idata]/(lam*nwa_born[idata])
new_diff=(new_cms-new_nwa)/(lam**diff_lambda_power)
CMSData.append(new_cms)
NWAData.append(new_nwa)
DiffData.append(new_diff)
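# In summary (cf. the plot labels further down): at LO the curves compared are
# M^(0)_CMS/lambda^n and M^(0)_NWA/lambda^n, while at NLO they are
# (M^(1)_CMS + M^(0)_CMS - M^(0)_NWA)/(lambda*M^(0)_NWA) and
# M^(1)_NWA/(lambda*M^(0)_NWA); the difference is further divided by
# lambda**diff_lambda_power.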
# NWA Born median
# Find which values to start the test at by looking at the CMSdata scaling
# First compute the median of the middle 60% of entries in the plot
trim_range=int(((1.0-0.6)/2.0)*len(DiffData))
low_diff_median = sorted(DiffData[trim_range:-trim_range])\
[(len(DiffData)-2*trim_range)//2]
# Now walk the values from the right of the diff plot until we reach
# values stable with respect to low_diff_median. This value will be the
# limit of the range considered for the CMS test. Do it in a way which
# is insensitive to instabilities, by considering medians of group_val
# consecutive points.
current_median = 0
# We really want to select only the very stable region
scan_index = 0
reference = abs(sorted(NWAData)[len(NWAData)//2])
if low_diff_median!= 0.0:
if abs(reference/low_diff_median)<diff_zero_threshold:
reference = abs(low_diff_median)
while True:
scanner = DiffData[scan_index:group_val+scan_index]
current_median = sorted(scanner)[len(scanner)//2]
# Useful for debugging
#misc.sprint(scanner,current_median,abs(current_median-low_diff_median)/reference,reference,consideration_threshold)
if abs(current_median-low_diff_median)/reference<\
consideration_threshold:
break;
scan_index += 1
if (group_val+scan_index)>=len(DiffData):
# this should not happen, but in this case we arbitrarily take
# half of the data
logger.warning('The median scanning failed during the CMS check '+
'for process %s. '%proc_title+\
'This means that the difference plot has no stable '+\
'intermediate region and MG5_aMC will arbitrarily consider the '+\
'left half of the values.')
scan_index = -1
break;
if scan_index == -1:
cms_check_data_range = len(DiffData)//2
else:
cms_check_data_range = scan_index + group_val
res_str += "== Data range considered (min, max, n_val) = (%.1e, %.1e, %d)\n"\
%(lambdaCMS_list[-1],lambdaCMS_list[scan_index],
len(lambdaCMS_list)-scan_index)
# Now setup the list of values affecting the CMScheck
CMScheck_values = DiffData[cms_check_data_range:]
# For the purpose of checking the stability of the tail, we now do
# the consideration_threshold scan from the *left* and if we finish
# before the end, it means that there is an unstable region.
if scan_index >= 0:
# try to find the numerical instability region
scan_index = len(CMScheck_values)
used_group_val = max(3,group_val)
unstability_found = True
while True:
scanner = CMScheck_values[scan_index-used_group_val:scan_index]
maxdiff = max(abs(scan-low_diff_median) for scan in scanner)
if maxdiff/reference<consideration_threshold:
break;
if (scan_index-used_group_val)==0:
# this only happens when no stable intermediate region can be found
# in that case, do not issue an instability warning below
unstability_found = False
break;
# Proceed to the next block of data
scan_index -= 1
# Now report here the instability found
if unstability_found:
unstab_check=CMScheck_values[scan_index:]
relative_array = [val > CMScheck_values[scan_index-1] for
val in unstab_check]
upper = relative_array.count(True)
lower = relative_array.count(False)
if not ((lower==0 and upper>=0) or (lower>=0 and upper==0)):
logger.warning(
"""For process %s, a numerically unstable region was detected starting from lambda < %.1e.
Look at the plot in this region (and possibly throw more points using the option --lambdaCMS).
If this is indeed a stability issue, then either decrease MLStabThreshold in MadLoop or decrease the
minimum value of lambda to be considered in the CMS check."""\
%(proc_title, lambdaCMS_list[cms_check_data_range+scan_index-1]))
# Now apply the same technique as above, but to the difference plot.
# We will now use low_diff_median instead of diff_tail_median
#diff_tail_median = sorted(CMScheck_values)[len(CMScheck_values)//2]
scan_index = 0
max_diff = 0.0
res_str += "== Ref. value used in the ratios (Born NWA) = %s\n"\
%('%.3g'%reference)
res_str += "== Asymptotic difference value detected = %s\n"\
%('%.3g'%low_diff_median)
concise_repl_dict[process]['asymptot'] = '%.3e'%low_diff_median
# Pass information to the plotter for the difference target
differences_target[(process,resID)]= low_diff_median
# misc.sprint('Now doing resonance %s.'%res_str)
while True:
current_vals = CMScheck_values[scan_index:scan_index+group_val]
max_diff = max(max_diff, abs(low_diff_median-
sorted(current_vals)[len(current_vals)//2])/reference)
if (scan_index+group_val)>=len(CMScheck_values):
break
scan_index += 1
# Now use the CMS check result
cms_check = (max_diff*100.0, '>' if max_diff>CMS_test_threshold else '<',
CMS_test_threshold*100.0)
res_str += "== CMS check result (threshold) = %.3g%% (%s%.3g%%)\n"%cms_check
concise_repl_dict[process]['cms_check'] = \
"%-10s (%s%.3g%%)"%('%.3g%%'%cms_check[0],cms_check[1],cms_check[2])
if max_diff>CMS_test_threshold:
failed_procs.append((process,resonance))
res_str += "%s %s %s\n"%(bar('='),
'FAILED' if max_diff>CMS_test_threshold else 'PASSED',bar('='))
concise_repl_dict[process]['status'] = 'Failed' if max_diff>CMS_test_threshold \
else 'Passed'
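# To recap the criterion: the check passes when the largest deviation of the
# grouped medians from the detected asymptotic difference, normalised to the
# reference value, stays below CMS_test_threshold.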
if output=='concise_text':
# Find what is the maximum size taken by the process string
max_proc_size = max(
[len(process) for process in result['ordered_processes']]+[10])
# Re-initialize the res_str so as to contain only the minimal report
res_str = concise_str
res_str += '\n'+concise_data%(max_proc_size+4)%concise_repl_dict['Header']
for process in result['ordered_processes']:
res_str += (concise_data%(max_proc_size+4)%concise_repl_dict[process])
if len(checks):
res_str += "Summary: %i/%i passed"%(len(checks)-len(failed_procs),len(checks))+\
('.\n' if not failed_procs else ', failed checks are for:\n')
else:
return "\nNo CMS check to perform, the process either has no diagram or does not "+\
"not feature any massive s-channel resonance."
for process, resonance in failed_procs:
res_str += "> %s, %s\n"%(process, resonance)
if output=='concise_text':
res_str += '\nMore detailed information on this check available with the command:\n'
res_str += ' MG5_aMC>display checks\n'
############################
# Now we turn to the plots #
############################
if not options['show_plot']:
if options['reuse']:
logFile.write(res_str)
logFile.close()
if output.endswith('text'):
return res_str
else:
return failed_procs
fig_output_file = CMS_save_path('pdf', result, model, options,
output_path=output_path)
base_fig_name = fig_output_file[:-4]
suffix = 1
while os.path.isfile(fig_output_file):
fig_output_file = '%s__%d__.pdf'%(base_fig_name,suffix)
suffix+=1
process_data_plot_dict={}
# load possible additional results. The second element of the tuple is
# the dataset name.
all_res = [(result, None)]
for i, add_res in enumerate(options['analyze'].split(',')[1:]):
specs = re.match(r'^(?P<filename>.*)\((?P<title>.*)\)$', add_res)
if specs:
filename = specs.group('filename')
title = specs.group('title')
else:
filename = add_res
title = '#%d'%(i+1)
new_result = save_load_object.load_from_file(filename)
if new_result is None:
raise InvalidCmd('The complex mass scheme check result'+
" file below could not be read.\n %s"%filename)
if len(new_result['ordered_processes'])!=len(result['ordered_processes']) \
or len(new_result['lambdaCMS'])!=len(result['lambdaCMS']):
raise InvalidCmd('The complex mass scheme check result'+
" file below does not seem compatible.\n %s"%filename)
all_res.append((new_result,title))
# Prepare the data
for process, resID in checks:
data1=[] # for subplot 1,i.e. CMS and NWA
data2=[] # for subplot 2,i.e. diff
info ={} # info to be passed to the plotter
for res in all_res:
proc_res = res[0][process]
cms_res = proc_res['CMS'][resID]
nwa_res = proc_res['NWA'][resID]
resonance = resonance_str(cms_res['resonance'])
if options['resonances']!=1:
info['title'] = format_title(process, resonance)
else:
info['title'] = format_title(process, '')
# Born result
cms_born=cms_res['born']
nwa_born=nwa_res['born']
if len(cms_born) != len(lambdaCMS_list) or\
len(nwa_born) != len(lambdaCMS_list):
raise MadGraph5Error, 'Inconsistent list of results w.r.t. the'+\
' lambdaCMS values specified for process %s'%process
if pert_orders:
cms_finite=cms_res['finite']
nwa_finite=nwa_res['finite']
if len(cms_finite) != len(lambdaCMS_list) or\
len(nwa_finite) != len(lambdaCMS_list):
raise MadGraph5Error, 'Inconsistent list of results w.r.t. the'+\
' lambdaCMS values specified for process %s'%process
bpower = guess_lambdaorder(nwa_born,lambdaCMS_list,
expected=proc_res['born_order'], proc=process, res=resonance)
CMSData = []
NWAData = []
DiffData = []
for idata, lam in enumerate(lambdaCMS_list):
if not pert_orders:
new_cms = cms_born[idata]/lam**bpower
new_nwa = nwa_born[idata]/lam**bpower
else:
new_cms=cms_finite[idata]+cms_born[idata]-nwa_born[idata]
new_nwa=nwa_finite[idata]
new_cms /= lam*nwa_born[idata]
new_nwa /= lam*nwa_born[idata]
new_diff=(new_cms-new_nwa)/(lam**diff_lambda_power)
CMSData.append(new_cms)
NWAData.append(new_nwa)
DiffData.append(new_diff)
if res[1] is None:
if not pert_orders:
data1.append([r'$\displaystyle CMS\;=\;\mathcal{M}_{CMS}^{(0)}/\lambda^%d$'%bpower,CMSData])
data1.append([r'$\displaystyle NWA\;=\;\mathcal{M}_{NWA}^{(0)}/\lambda^%d$'%bpower,NWAData])
else:
data1.append([r'$\displaystyle CMS\;=\;(\mathcal{M}^{(1)}_{CMS}+\mathcal{M}_{CMS}^{(0)}-\mathcal{M}^{(0)}_{NWA})/(\lambda\cdot\mathcal{M}^{(0)}_{NWA})$',CMSData])
data1.append([r'$\displaystyle NWA\;=\;\mathcal{M}^{(1)}_{NWA}/(\lambda\cdot\mathcal{M}^{(0)}_{NWA})$',NWAData])
data2.append([r'$\displaystyle\Delta\;=\;(CMS-NWA)/\lambda%s$'\
%('' if diff_lambda_power==1 else r'^{%g}'%diff_lambda_power)
,DiffData])
data2.append([r'Detected asymptot',[differences_target[(process,resID)]
for i in range(len(lambdaCMS_list))]])
else:
data1.append([r'$\displaystyle CMS$ %s'%res[1].replace('_',' ').replace('#','\#'), CMSData])
data1.append([r'$\displaystyle NWA$ %s'%res[1].replace('_',' ').replace('#','\#'), NWAData])
data2.append([r'$\displaystyle\Delta$ %s'%res[1].replace('_',' ').replace('#','\#'), DiffData])
process_data_plot_dict[(process,resID)]=(data1,data2, info)
# Now turn to the actual plotting
try:
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
logger.info('Rendering plots... (this can take some time because of the latex labels)')
res_str += \
"""\n-----------------------------------------------------------------------------------------------
| In the plots, the Complex Mass Scheme check is successful if the normalized difference |
| between the CMS and NWA result (lower inset) tends to a constant when \lambda goes to zero. |
-----------------------------------------------------------------------------------------------\n"""
# output the figures
if lambda_range[1]>0:
min_lambda_index = -1
for i, lam in enumerate(lambdaCMS_list):
if lam<=lambda_range[1]:
min_lambda_index = i
break
else:
min_lambda_index = 0
if lambda_range[0]>0:
max_lambda_index = -1
for i, lam in enumerate(lambdaCMS_list):
if lam<=lambda_range[0]:
max_lambda_index=i-1
break
else:
max_lambda_index=len(lambdaCMS_list)-1
if max_lambda_index==-1 or min_lambda_index==-1 or \
min_lambda_index==max_lambda_index:
raise InvalidCmd('Invalid lambda plotting range: (%.1e,%.1e)'%\
(lambda_range[0],lambda_range[1]))
# Trim lambda values
if lambda_range[0]>0.0 or lambda_range[1]>0.0:
lambdaCMS_list = lambdaCMS_list[min_lambda_index:max_lambda_index+1]
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
pp=PdfPages(fig_output_file)
if len(checks)==0 or len(process_data_plot_dict[checks[0]][1])<=7:
colorlist=['b','r','g','k','c','m','y']
else:
import matplotlib.colors as colors
import matplotlib.cm as mplcm
# Nice color maps here are 'gist_rainbow'
cm = plt.get_cmap('gist_rainbow')
cNorm = colors.Normalize(vmin=0, vmax=(len(data2)-1))
scalarMap = mplcm.ScalarMappable(norm=cNorm, cmap=cm)
# use vmax=(len(data1)-1)*0.9 to remove pink at the end of the spectrum
colorlist = [scalarMap.to_rgba(i*0.9) for i in range(len(data2))]
# Or it is also possible to alternate colors so as to make them
# as distant as possible to one another
# colorlist = sum([
# [scalarMap.to_rgba(i),scalarMap.to_rgba(i+len(data2)//2)]
# for i in range(len(data2)//2)],[])
legend_size = 10
for iproc, (process, resID) in enumerate(checks):
data1,data2, info=process_data_plot_dict[(process,resID)]
# Trim dataplot if necessary
if lambda_range[0]>0.0 or lambda_range[1]>0.0:
for i in range(len(data1)):
data1[i][1]=data1[i][1][min_lambda_index:max_lambda_index+1]
for i in range(len(data2)):
data2[i][1]=data2[i][1][min_lambda_index:max_lambda_index+1]
plt.figure(iproc+1)
plt.subplot(211)
minvalue=1e+99
maxvalue=-1e+99
for i, d1 in enumerate(data1):
# Use the same color for NWA and CMS curve but different linestyle
color=colorlist[i//2]
data_plot=d1[1]
minvalue=min(min(data_plot),minvalue)
maxvalue=max(max(data_plot),maxvalue)
plt.plot(lambdaCMS_list, data_plot, color=color, marker='', \
linestyle=('-' if i%2==0 else '--'),
label=(d1[0] if (i%2==0 or i==1) else '_nolegend_'))
ymin = minvalue-(maxvalue-minvalue)/5.
ymax = maxvalue+(maxvalue-minvalue)/5.
plt.yscale('linear')
plt.xscale('log')
plt.title(info['title'],fontsize=12,y=1.08)
plt.ylabel(r'$\displaystyle \mathcal{M}$')
#plt.xlabel('lambdaCMS')
if ymax*len(data1)-sum(max(d1[1][-len(d1[1])//2:]) \
for d1 in data1) > 0.5*(ymax-ymin)*len(data1):
plt.legend(prop={'size':legend_size},loc='upper left', frameon=False)
else:
plt.legend(prop={'size':legend_size},loc='lower left', frameon=False)
plt.axis([min(lambdaCMS_list),max(lambdaCMS_list), ymin, ymax])
plt.subplot(212)
minvalue=1e+99
maxvalue=-1e+99
try:
asymptot_index = [d2[0] for d2 in data2].index('Detected asymptot')
plt.plot(lambdaCMS_list, data2[asymptot_index][1],
color='0.75', marker='', linestyle='-', label='')
except ValueError:
pass
color_ID = -1
for d2 in data2:
# Special setup for the reference asymptot straight line
if d2[0]=='Detected asymptot':
continue
color_ID += 1
color=colorlist[color_ID]
data_plot=d2[1]
minvalue=min(min(data_plot),minvalue)
maxvalue=max(max(data_plot),maxvalue)
plt.plot(lambdaCMS_list, data_plot, color=color, marker='',\
linestyle='-', label=d2[0])
ymin = minvalue-(maxvalue-minvalue)/5.
ymax = maxvalue+(maxvalue-minvalue)/5.
plt.yscale('linear')
plt.xscale('log')
plt.ylabel(r'$\displaystyle \Delta$')
plt.xlabel(r'$\displaystyle \lambda$')
# The unreadable stuff below is just to check if the left of the
# plot is stable or not
sd = [sorted(d2[1][-len(d2[1])//2:]) for d2 in data2]
left_stability = sum(abs(s[0]-s[-1]) for s in sd)
sd = [sorted(d2[1][:-len(d2[1])//2]) for d2 in data2]
right_stability = sum(abs(s[0]-s[-1]) for s in sd)
left_stable = False if right_stability==0.0 else \
(left_stability/right_stability)<0.1
if left_stable:
if ymax*len(data2)-sum(max(d2[1][-len(d2[1])//2:]) \
for d2 in data2) > 0.5*(ymax-ymin)*len(data2):
plt.legend(prop={'size':legend_size},loc='upper left', frameon=False)
else:
plt.legend(prop={'size':legend_size},loc='lower left', frameon=False)
else:
if ymax*len(data2)-sum(max(d2[1][:-len(d2[1])//2]) \
for d2 in data2) > 0.5*(ymax-ymin)*len(data2):
plt.legend(prop={'size':legend_size},loc='upper right', frameon=False)
else:
plt.legend(prop={'size':legend_size},loc='lower right', frameon=False)
plt.axis([min(lambdaCMS_list),max(lambdaCMS_list),\
minvalue-(maxvalue-minvalue)/5., maxvalue+(maxvalue-minvalue)/5.])
plt.savefig(pp,format='pdf')
pp.close()
if len(checks)>0:
logger.info('Complex Mass Scheme check plot output to file %s. '%fig_output_file)
if sys.platform.startswith('linux'):
misc.call(["xdg-open", fig_output_file])
elif sys.platform.startswith('darwin'):
misc.call(["open", fig_output_file])
plt.close("all")
except Exception as e:
if isinstance(e, ImportError):
res_str += "\n= Install matplotlib to get a "+\
"graphical display of the results of the cms check."
else:
general_error = "\n= Could not produce the cms check plot because of "+\
"the following error: %s"%str(e)
try:
import Tkinter
if isinstance(e, Tkinter.TclError):
res_str += "\n= Plots are not generated because your system"+\
" does not support graphical display."
else:
res_str += general_error
except:
res_str += general_error
if options['reuse']:
logFile.write(res_str)
logFile.close()
if output.endswith('text'):
return res_str
else:
return failed_procs
```
#### File: tests/parallel_tests/madevent_comparator.py
```python
import datetime
import glob
import itertools
import logging
import os
import re
import shutil
import subprocess
import sys
import time
pjoin = os.path.join
# Get the grand parent directory (mg5 root) of the module real path
# (tests/parallel_tests) and add it to the current PYTHONPATH to allow
# for easy import of MG5 tools
_file_path = os.path.dirname(os.path.realpath(__file__))
import madgraph.iolibs.template_files as template_files
import madgraph.iolibs.save_load_object as save_load_object
import madgraph.interface.master_interface as cmd_interface
import madgraph.various.misc as misc
from madgraph import MadGraph5Error, MG5DIR
import me_comparator
class MadEventComparator(me_comparator.MEComparator):
"""Base object to run comparison tests. Take standard Runner objects and
a list of proc as an input and return detailed comparison tables in various
formats."""
def run_comparison(self, proc_list, model='sm', orders={}):
"""Run the codes and store results."""
if isinstance(model, basestring):
model= [model] * len(self.me_runners)
self.results = []
self.proc_list = proc_list
logging.info(\
"Running on %i processes with order: %s, in model %s" % \
(len(proc_list),
me_comparator.MERunner.get_coupling_definitions(orders),
'/'.join([onemodel for onemodel in model])))
pass_proc = False
for i,runner in enumerate(self.me_runners):
cpu_time1 = time.time()
logging.info("Now running %s" % runner.name)
if pass_proc:
runner.pass_proc = pass_proc
self.results.append(runner.run(proc_list, model[i], orders))
cpu_time2 = time.time()
logging.info(" Done in %0.3f s" % (cpu_time2 - cpu_time1))
# logging.info(" (%i/%i with zero ME)" % \
# (len([res for res in self.results[-1] if res[0][0] == 0.0]),
# len(proc_list)))
def cleanup(self):
"""Call cleanup for each MERunner."""
for runner in self.me_runners:
logging.info("Cleaning code %s runner" % runner.name)
runner.cleanup()
def output_result(self, filename=None, tolerance=3e-02):
"""Output result as a nicely formated table. If filename is provided,
write it to the file, else to the screen. Tolerance can be adjusted."""
def detect_type(data):
"""check if the type is an integer/float/string"""
if data.isdigit():
return 'int'
elif len(data) and data[0] == '-' and data[1:].isdigit():
return 'int'
try:
float(data)
return 'float'
except:
return 'str'
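# e.g. detect_type('42') -> 'int', detect_type('-42') -> 'int',
# detect_type('3.14e-2') -> 'float', detect_type('not present') -> 'str'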
proc_col_size = 17
for proc in self.results[0]:
if len(proc) + 1 > proc_col_size:
proc_col_size = len(proc) + 1
col_size = 17
pass_test = 0
fail_test = 0
failed_prop_list = []
res_str = "\n" + self._fixed_string_length("Checked", proc_col_size) + \
''.join([self._fixed_string_length(runner.name, col_size) for \
runner in self.me_runners]) + \
self._fixed_string_length("Relative diff.", col_size) + \
"Result"
for prop in self.results[0]:
loc_results = []
succeed = True
for i in range(len(self.results)):
if not self.results[i].has_key(prop):
loc_results.append('not present')
succeed = False
else:
loc_results.append(self.results[i][prop])
res_str += '\n' + self._fixed_string_length(prop, proc_col_size)+ \
''.join([self._fixed_string_length(str(res),
col_size) for res in loc_results])
if not succeed:
res_str += self._fixed_string_length("NAN", col_size)
res_str += 'failed'
fail_test += 1
failed_prop_list.append(prop)
else:
# check the type (integer/float/string)
type = detect_type(loc_results[0])
if type == 'int':
if any(detect_type(loc)=='float' for loc in loc_results):
type = 'float'
if type == 'float':
if max(loc_results) == 0.0 and min(loc_results) == 0.0:
res_str += self._fixed_string_length("0", col_size)
res_str += 'passed'
pass_test +=1
else:
loc_results = [float(d) for d in loc_results]
diff = (max(loc_results) - min(loc_results)) / \
(max(loc_results) + min(loc_results))
res_str += self._fixed_string_length("%1.10e" % diff, col_size)
if diff >= tolerance:
res_str += 'failed'
failed_prop_list.append(prop)
fail_test += 1
else:
res_str += 'passed'
pass_test +=1
else:
for value in loc_results:
if value != loc_results[0]:
res_str += self._fixed_string_length("differ", col_size)
res_str += 'failed'
failed_prop_list.append(prop)
fail_test += 1
break
res_str += self._fixed_string_length("identical", col_size)
res_str += 'passed'
pass_test +=1
res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
(pass_test, pass_test + fail_test,
fail_test, pass_test + fail_test)
if fail_test != 0:
res_str += "\nFailed processes: %s" % ', '.join(failed_prop_list)
logging.info(res_str)
if filename:
file = open(filename, 'w')
file.write(res_str)
file.close()
return fail_test, failed_prop_list
def assert_processes(self, test_object, tolerance = 1e-06):
"""Run assert to check that all processes passed comparison"""
fail_test, fail_prop = self.output_result('', tolerance)
test_object.assertEqual(fail_test, 0, "Failed for processes: %s" % ', '.join(fail_prop))
class MadEventComparatorGauge(me_comparator.MEComparatorGauge):
"""Base object to run comparison tests. Take standard Runner objects and
a list of processes as input and return detailed comparison tables in various
formats."""
def run_comparison(self, proc_list, model='sm', orders={}):
"""Run the codes and store results."""
#if isinstance(model, basestring):
# model= [model] * len(self.me_runners)
self.results = []
self.proc_list = proc_list
logging.info(\
"Running on %i processes with order: %s, in model %s" % \
(len(proc_list),
' '.join(["%s=%i" % (k, v) for k, v in orders.items()]),
model))
pass_proc = False
for i,runner in enumerate(self.me_runners):
cpu_time1 = time.time()
logging.info("Now running %s" % runner.name)
if pass_proc:
runner.pass_proc = pass_proc
self.results.append(runner.run(proc_list, model, orders))
cpu_time2 = time.time()
logging.info(" Done in %0.3f s" % (cpu_time2 - cpu_time1))
# logging.info(" (%i/%i with zero ME)" % \
# (len([res for res in self.results[-1] if res[0][0] == 0.0]),
# len(proc_list)))
def cleanup(self):
"""Call cleanup for each MERunner."""
for runner in self.me_runners:
logging.info("Cleaning code %s runner" % runner.name)
runner.cleanup()
def output_result(self, filename=None, tolerance=3e-03):
"""Output result as a nicely formated table. If filename is provided,
write it to the file, else to the screen. Tolerance can be adjusted."""
def detect_type(data):
"""check if the type is an integer/float/string"""
if data.isdigit():
return 'int'
elif len(data) and data[0] == '-' and data[1:].isdigit():
return 'int'
try:
float(data)
return 'float'
except:
return 'str'
proc_col_size = 17
for proc in self.results[0]:
if len(proc) + 1 > proc_col_size:
proc_col_size = len(proc) + 1
col_size = 17
pass_test = 0
fail_test = 0
failed_proc_list = []
res_str = "\n" + self._fixed_string_length("Process", proc_col_size) + \
''.join([self._fixed_string_length(runner.name, col_size) for \
runner in self.me_runners]) + \
self._fixed_string_length("Diff both unit", col_size) + \
self._fixed_string_length("Diff both cms", col_size) + \
self._fixed_string_length("Diff both fixw", col_size) + \
self._fixed_string_length("Diff both feyn", col_size) + \
"Result"
for proc in self.results[0]:
loc_results = []
succeed = True
for i in range(len(self.results)):
if not self.results[i].has_key(proc):
loc_results.append('not present')
succeed = False
else:
loc_results.append(self.results[i][proc])
res_str += '\n' + self._fixed_string_length(proc, proc_col_size)+ \
''.join([self._fixed_string_length(str(res),
col_size) for res in loc_results])
if not succeed:
res_str += self._fixed_string_length("NAN", col_size)
res_str += 'failed'
fail_test += 1
failed_proc_list.append(proc)
else:
# check the type (integer/float/string)
type = detect_type(loc_results[0])
if type == 'float':
if max(loc_results) == 0.0 and min(loc_results) == 0.0:
res_str += self._fixed_string_length("0", col_size)
res_str += 'passed'
pass_test +=1
else:
loc_results = [float(d) for d in loc_results]
diff_feyn = abs(loc_results[1] - loc_results[2]) / \
(loc_results[1] + loc_results[2] + 1e-99)
diff_unit = abs(loc_results[0] - loc_results[3]) / \
(loc_results[0] + loc_results[3] + 1e-99)
diff_cms = abs(loc_results[0] - loc_results[1]) / \
(loc_results[0] + loc_results[1] + 1e-99)
diff_fixw = abs(loc_results[2] - loc_results[3]) / \
(loc_results[2] + loc_results[3] + 1e-99)
res_str += self._fixed_string_length("%1.10e" % diff_unit, col_size)
res_str += self._fixed_string_length("%1.10e" % diff_cms, col_size)
res_str += self._fixed_string_length("%1.10e" % diff_fixw, col_size)
res_str += self._fixed_string_length("%1.10e" % diff_feyn, col_size)
if diff_feyn < 4e-2 and diff_cms < 1e-2 and diff_fixw < 1e-2 and \
diff_unit < 4e-2:
pass_test += 1
res_str += "Pass"
else:
fail_test += 1
failed_proc_list.append(proc)
res_str += "Fail"
else:
for value in loc_results:
if value != loc_results[0]:
res_str += self._fixed_string_length("differ", col_size)
res_str += 'failed'
failed_proc_list.append(proc)
fail_test += 1
break
res_str += self._fixed_string_length("identical", col_size)
res_str += 'passed'
pass_test +=1
res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
(pass_test, pass_test + fail_test,
fail_test, pass_test + fail_test)
if fail_test != 0:
res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
logging.info(res_str)
if filename:
file = open(filename, 'w')
file.write(res_str)
file.close()
return fail_test, failed_proc_list
def assert_processes(self, test_object, tolerance = 1e-06):
"""Run assert to check that all processes passed comparison"""
fail_test, fail_prop = self.output_result('', tolerance)
test_object.assertEqual(fail_test, 0, "Failed for processes: %s" % ', '.join(fail_prop))
class FakeRunner(object):
temp_dir_name = ""
proc_list = []
res_list = []
setup_flag = False
name = 'Store'
type = 'Store'
model_dir = os.path.join(MG5DIR,'models')
def cleanup(self):
pass
class MadEventRunner(object):
"""Base class to containing default function to setup, run and access results
produced with a specific ME generator.
"""
temp_dir_name = ""
proc_list = []
res_list = []
setup_flag = False
name = 'None'
model_dir = os.path.join(MG5DIR,'models')
class MERunnerException(Exception):
"""Default Exception class for MERunner objects"""
def setup(self):
"""Empty method to define all warming up operations to be executed before
actually running the generator.
"""
pass
def run(self, proc_list, model, orders, energy):
"""Run the generator for a specific list of processes (see below for
conventions) and store the result.
"""
pass
def get_result(self, proc_id):
"""Return the result (i.e., ME value for a particular PS point) for a
specific process identified with its id."""
return self.proc_list[proc_id]
def cleanup(self):
"""Perform some clean up procedure to leave the ME code directory in
the same state as it was initially (e.g., remove temp dirs, ...)
"""
pass
class MG5Runner(MadEventRunner):
"""Runner object for the MG5 Matrix Element generator."""
mg5_path = ""
name = 'MadGraph v5'
type = 'v5'
def setup(self, mg5_path, temp_dir=None):
"""Wrapper for the mg4 setup, also initializing the mg5 path variable"""
self.mg5_path = os.path.abspath(mg5_path)
if not temp_dir:
i=0
while os.path.exists(os.path.join(mg5_path,
"p_ME_test_%s_%s" % (self.type, i))):
i += 1
temp_dir = "p_ME_test_%s_%s" % (self.type, i)
self.temp_dir_name = temp_dir
def run(self, proc_list, model, orders={}):
"""Execute MG5 on the list of processes mentioned in proc_list, using
the specified model, the specified maximal coupling orders and a certain
energy for incoming particles (for decay, incoming particle is at rest).
"""
self.res_list = [] # ensure the list is empty, and avoid reference problems
self.proc_list = proc_list
self.model = model
self.orders = orders
self.non_zero = 0
dir_name = os.path.join(self.mg5_path, self.temp_dir_name)
# Create a proc_card.dat in the v5 format
proc_card_location = os.path.join(self.mg5_path, 'proc_card_%s.dat' % \
self.temp_dir_name)
proc_card_file = open(proc_card_location, 'w')
proc_card_file.write(self.format_mg5_proc_card(proc_list, model, orders))
proc_card_file.close()
logging.info("proc_card.dat file for %i processes successfully created in %s" % \
(len(proc_list), os.path.join(dir_name, 'Cards')))
# Run mg5
logging.info("Running MG5")
#proc_card = open(proc_card_location, 'r').read()
new_proc_list = []
cmd = cmd_interface.MasterCmd()
cmd.no_notification()
cmd.exec_cmd('import command %s' %proc_card_location)
#for line in proc_card.split('\n'):
# cmd.exec_cmd(line, errorhandling=False)
os.remove(proc_card_location)
values = self.get_values()
self.res_list.append(values)
return values
def format_mg5_proc_card(self, proc_list, model, orders):
"""Create a proc_card.dat string following v5 conventions."""
if model != 'mssm':
v5_string = "import model %s\n" % os.path.join(self.model_dir, model)
else:
v5_string = "import model %s\n" % model
v5_string += "set automatic_html_opening False\n"
couplings = me_comparator.MERunner.get_coupling_definitions(orders)
for i, proc in enumerate(proc_list):
v5_string += 'add process ' + proc + ' ' + couplings + \
'@%i' % i + '\n'
v5_string += "output %s -f\n" % \
os.path.join(self.mg5_path, self.temp_dir_name)
v5_string += "launch -i --multicore\n"
v5_string += " set automatic_html_opening False\n"
v5_string += "edit_cards\n"
# v5_string += "set ickkw 0\n"
v5_string += "set LHC 13\n"
# v5_string += "set xqcut 0\n"
v5_string += "set auto_ptj_mjj True\n"
v5_string += "set cut_decays True\n"
v5_string += "set ickkw 0\n"
v5_string += "set xqcut 0\n"
v5_string += "survey run_01; refine 0.01; refine 0.01\n"
#v5_string += "print_results\n"
return v5_string
def get_values(self):
dir_name = os.path.join(self.mg5_path, self.temp_dir_name)
SubProc=[name for name in os.listdir(dir_name + '/SubProcesses')
if name[0]=='P' and
os.path.isdir(dir_name + '/SubProcesses/'+name) and \
name[1].isdigit()]
output = {}
#Part1: number of SubProcesses
numsubProc={}
for name in SubProc :
tag=name.split('_')[0][1:]
if numsubProc.has_key(tag):
numsubProc[tag]+=1
else: numsubProc[tag]=1
for key,value in numsubProc.items():
output['number_of_P'+key]=str(value)
#Part 2: cross section
for name in SubProc:
if os.path.exists(dir_name+'/SubProcesses/'+name+'/run_01_results.dat'):
filepath = dir_name+'/SubProcesses/'+name+'/run_01_results.dat'
else:
filepath = dir_name+'/SubProcesses/'+name+'/results.dat'
if not os.path.exists(filepath):
break
for line in file(filepath):
splitline=line.split()
#if len(splitline)==8:
output['cross_'+name]=splitline[0]
print "found %s %s" % (splitline[0], splitline[1])
else:
return output
filepath = dir_name+'/HTML/run_01/results.html'
text = open(filepath).read()
#id="#P1_qq_ll" href=#P1_qq_ll onClick="check_link('#P1_qq_ll','#P1_qq_ll','#P1_qq_ll')"> 842.9
info = re.findall('id="\#(?P<a1>\w*)" href=\#(?P=a1) onClick="check_link\(\'\#(?P=a1)\',\'\#(?P=a1)\',\'\#(?P=a1)\'\)">\s* ([\d.e+-]*)', text)
for name,value in info:
output['cross_'+name] = value
return output
class MG5OldRunner(MG5Runner):
"""Runner object for the MG5 Matrix Element generator."""
mg5_path = ""
name = 'v5 Ref'
type = 'v5_ref'
def format_mg5_proc_card(self, proc_list, model, orders):
"""Create a proc_card.dat string following v5 conventions."""
v5_string = "import model %s\n" % os.path.join(self.model_dir, model)
v5_string += "set automatic_html_opening False\n"
couplings = me_comparator.MERunner.get_coupling_definitions(orders)
for i, proc in enumerate(proc_list):
v5_string += 'add process ' + proc + ' ' + couplings + \
'@%i' % i + '\n'
v5_string += "output %s -f\n" % \
os.path.join(self.mg5_path, self.temp_dir_name)
v5_string += "launch -f \n"
return v5_string
def run(self, proc_list, model, orders={}):
"""Execute MG5 on the list of processes mentioned in proc_list, using
the specified model, the specified maximal coupling orders and a certain
energy for incoming particles (for decay, incoming particle is at rest).
"""
self.res_list = [] # ensure the list is empty, and avoid reference problems
self.proc_list = proc_list
self.model = model
self.orders = orders
self.non_zero = 0
dir_name = os.path.join(self.mg5_path, self.temp_dir_name)
# Create a proc_card.dat in the v5 format
proc_card_location = os.path.join(self.mg5_path, 'proc_card_%s.dat' % \
self.temp_dir_name)
proc_card_file = open(proc_card_location, 'w')
proc_card_file.write(self.format_mg5_proc_card(proc_list, model, orders))
proc_card_file.close()
logging.info("proc_card.dat file for %i processes successfully created in %s" % \
(len(proc_list), os.path.join(dir_name, 'Cards')))
# Run mg5
logging.info("Running MG5")
devnull = open(os.devnull,'w')
if logging.root.level >=20:
subprocess.call([pjoin(self.mg5_path,'bin','mg5'), proc_card_location],
stdout=devnull, stderr=devnull)
else:
subprocess.call([pjoin(self.mg5_path,'bin','mg5'), proc_card_location])
os.remove(proc_card_location)
values = self.get_values()
self.res_list.append(values)
return values
class MG5gaugeRunner(MG5Runner):
"""Runner object for the MG5 Matrix Element generator."""
def __init__(self, cms, gauge):
self.cms = cms
self.gauge = gauge
self.mg5_path = ""
self.name = 'MG_%s_%s' %(self.cms, self.gauge)
self.type = '%s_%s' %(self.cms, self.gauge)
def format_mg5_proc_card(self, proc_list, model, orders):
"""Create a proc_card.dat string following v5 conventions."""
v5_string = 'import model sm \n'
v5_string += 'set automatic_html_opening False\n'
v5_string += 'set complex_mass_scheme %s \n' % self.cms
v5_string += 'set gauge %s \n' % self.gauge
v5_string += "import model %s \n" % os.path.join(self.model_dir, model)
couplings = me_comparator.MERunner.get_coupling_definitions(orders)
for i, proc in enumerate(proc_list):
v5_string += 'add process ' + proc + ' ' + couplings + \
'@%i' % i + '\n'
v5_string += "output %s -f\n" % \
os.path.join(self.mg5_path, self.temp_dir_name)
v5_string += "launch -f \n"
v5_string += 'set complex_mass_scheme False \n'
v5_string += 'set gauge unitary'
return v5_string
``` |
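The pass/fail decision in `output_result` above boils down to a symmetric relative difference between the values reported by the different runners, compared against the tolerance (3e-02 by default). The sketch below reproduces just that criterion outside of MadGraph; the helper name and the sample cross sections are illustrative only.
```python
def relative_difference(values):
    """Symmetric relative difference used for the comparison tables above."""
    values = [float(v) for v in values]
    if max(values) == 0.0 and min(values) == 0.0:
        return 0.0
    return (max(values) - min(values)) / (max(values) + min(values))

# e.g. the 'cross_P1_qq_ll' entry as reported by three different runners
cross_sections = ["842.9", "843.1", "841.7"]
diff = relative_difference(cross_sections)
print("%s %g" % ("passed" if diff < 3e-02 else "failed", diff))
```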
{
"source": "jlrainbolt/pyunfold",
"score": 2
} |
#### File: pyunfold/pyunfold/teststat.py
```python
from __future__ import division, print_function
import numpy as np
from scipy.special import gammaln as lgamma
from .utils import none_to_empty_list
class TestStat(object):
"""Common base class for test statistic methods
"""
def __init__(self, tol=None, num_causes=None, test_range=None, **kwargs):
test_range = none_to_empty_list(test_range)
self.tol = tol
if num_causes is None:
raise ValueError('Number of causes (num_causes) must be provided.')
cause_bin_edges = np.arange(num_causes + 1, dtype=float)
# Get bin midpoints
cause_axis = (cause_bin_edges[1:] + cause_bin_edges[:-1]) / 2
self.cause_axis = cause_axis
self.ts_range = test_range
self.has_ts_range = False
self.ts_bins = self.set_test_range_bins()
# Initialize Unnatural TS data
self.stat = np.inf
self.dof = -1
self.dofSet = False
def set_test_range_bins(self):
bins = [0, -1]
if self.ts_range != []:
lTSR = len(self.ts_range)
err_mess = ("***\n Test stat range can only have two elements. "
"This has {}. Exiting...***\n".format(lTSR))
assert lTSR == 2, err_mess
xlo = self.ts_range[0]
xhi = self.ts_range[1]
err_mess = "***\n Test stat limits reversed. xlo must be < xhi. Exiting...***\n"
assert xlo < xhi, err_mess
# Find the bins corresponding to the test range requested
lobin = np.searchsorted(self.cause_axis, xlo)
hibin = np.searchsorted(self.cause_axis, xhi)
bins = [lobin, hibin]
self.has_ts_range = True
return bins
def get_array_range(self, dist1, dist2):
if self.has_ts_range:
NR1 = dist1[self.ts_bins[0]:self.ts_bins[1]]
NR2 = dist2[self.ts_bins[0]:self.ts_bins[1]]
return NR1, NR2
else:
return dist1.copy(), dist2.copy()
def pass_tol(self):
"""Function testing whether TS < tol
"""
pass_tol = self.stat < self.tol
return pass_tol
def set_dof(self, dof):
"""Set degrees of freedom
"""
if self.dof == -1:
self.dof = dof
def check_lengths(self, dist1, dist2):
"""Test for equal length distributions
"""
ln1 = len(dist1)
ln2 = len(dist2)
err_mess = ("Test Statistic arrays are not equal length. "
"{} != {}. Exiting...\n".format(ln1, ln2))
assert ln1 == ln2, err_mess
if not self.dofSet:
self.set_dof(ln1)
self.dofSet = True
def calc(self, dist1, dist2):
"""Undefined test statistics calculator
"""
raise NotImplementedError()
class Chi2(TestStat):
"""Reduced chi-squared test statistic
"""
def calc(self, dist1, dist2):
"""Calculate the test statistic between two input distributions
Parameters
----------
dist1 : array_like
Input distribution.
dist2 : array_like
Input distribution.
Returns
-------
stat : float
Test statistic
"""
dist1, dist2 = self.get_array_range(dist1, dist2)
self.check_lengths(dist1, dist2)
n1 = np.sum(dist1)
n2 = np.sum(dist2)
h_sum = dist1 + dist2
# Don't divide by 0...
h_sum[(h_sum < 1)] = 1.
h_dif = n2 * dist1 - n1 * dist2
h_quot = h_dif * h_dif / h_sum
stat = np.sum(h_quot)/(n1*n2)/self.dof
self.stat = stat
return stat
class BF(TestStat):
"""Bayes factor test statistic
Notes
-----
For details related to the Bayes factor see [1]_.
References
----------
.. [1] <NAME> and <NAME> and <NAME> and <NAME>.
"A Bayesian Approach to Comparing Cosmic Ray Energy Spectra".
*The Astrophysical Journal* 738 (1):82.
`<https://doi.org/10.1088/0004-637X/738/1/82>`_.
"""
def calc(self, dist1, dist2):
"""Calculate the test statistic between two input distributions
Parameters
----------
dist1 : array_like
Input distribution.
dist2 : array_like
Input distribution.
Returns
-------
stat : float
Test statistic
"""
dist1, dist2 = self.get_array_range(dist1, dist2)
self.check_lengths(dist1, dist2)
lnB = 0
n1 = np.sum(dist1)
n2 = np.sum(dist2)
nFactor = lgamma(n1+n2+2) - lgamma(n1+1) - lgamma(n2+1)
lnB += nFactor
for i in range(0, len(dist1)):
lnB += lgamma(dist1[i]+1) + lgamma(dist2[i]+1) - lgamma(dist1[i]+dist2[i]+2)
self.stat = lnB
return lnB
class RMD(TestStat):
"""Maximum relative difference test statistic
"""
def calc(self, dist1, dist2):
"""Calculate the test statistic between two input distributions
Parameters
----------
dist1 : array_like
Input distribution.
dist2 : array_like
Input distribution.
Returns
-------
stat : float
Test statistic
"""
dist1, dist2 = self.get_array_range(dist1, dist2)
self.check_lengths(dist1, dist2)
h_sum = dist1+dist2
h_sum[(h_sum < 1)] = 1.
h_dif = np.abs(dist1 - dist2)
h_quot = h_dif / h_sum
stat = np.max(h_quot)
self.stat = stat
return stat
class KS(TestStat):
"""Kolmogorov-Smirnov (KS) two-sided test statistic
"""
def calc(self, dist1, dist2):
"""Calculate the test statistic between two input distributions
Parameters
----------
dist1 : array_like
Input distribution.
dist2 : array_like
Input distribution.
Returns
-------
stat : float
Test statistic
"""
dist1, dist2 = self.get_array_range(dist1, dist2)
self.check_lengths(dist1, dist2)
n1 = np.sum(dist1)
n2 = np.sum(dist2)
cs1 = np.cumsum(dist1)/n1
cs2 = np.cumsum(dist2)/n2
len1 = len(dist1)
self.en = np.sqrt(len1/2)
stat = np.max(np.abs(cs1-cs2))
self.stat = stat
return stat
TEST_STATISTICS = {"chi2": Chi2,
"bf": BF,
"rmd": RMD,
"ks": KS,
}
def get_ts(name='ks'):
"""Convenience function for retrieving test statisitc calculators
Parameters
----------
name : {'ks', 'chi2', 'bf', 'rmd'}
Name of test statistic.
Returns
-------
ts : TestStat
Test statistics calculator
"""
if name in TEST_STATISTICS:
ts = TEST_STATISTICS[name]
return ts
else:
raise ValueError('Invalid test statistic, {}, entered. Must be '
'in {}'.format(name, TEST_STATISTICS.keys()))
```
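As a usage illustration, the sketch below compares two toy binned distributions through the `get_ts` factory defined above. It assumes the module is importable as `pyunfold.teststat`; the histogram values and the tolerance are made up.
```python
import numpy as np
from pyunfold.teststat import get_ts  # assumed import path for this module

dist1 = np.array([100., 120., 80., 95.])
dist2 = np.array([102., 118., 85., 90.])

TS = get_ts(name='ks')                    # one of 'ks', 'chi2', 'bf', 'rmd'
ts = TS(tol=0.01, num_causes=len(dist1))  # num_causes is required by TestStat
stat = ts.calc(dist1, dist2)
print(stat, ts.pass_tol())                # pass_tol() is True once stat < tol
```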
#### File: pyunfold/tests/test_callbacks.py
```python
from __future__ import division, print_function
import numpy as np
import pytest
from scipy.interpolate import UnivariateSpline
from pyunfold.unfold import iterative_unfold
from pyunfold.callbacks import (Callback, CallbackList, Logger,
Regularizer, SplineRegularizer,
validate_callbacks, extract_regularizer,
setup_callbacks_regularizer)
@pytest.mark.parametrize('attr', ['on_unfolding_begin',
'on_unfolding_end',
'on_iteration_begin',
'on_iteration_end'])
def test_callback_attributes(attr):
assert hasattr(Callback(), attr)
@pytest.mark.parametrize('callbacks', [[Logger()], Logger()])
def test_logger(capsys, callbacks, example_dataset):
# Perform iterative unfolding
unfolded_results = iterative_unfold(data=example_dataset.data,
data_err=example_dataset.data_err,
response=example_dataset.response,
response_err=example_dataset.response_err,
efficiencies=example_dataset.efficiencies,
efficiencies_err=example_dataset.efficiencies_err,
return_iterations=True,
callbacks=callbacks)
# Get stdout and std err from iterative_unfold
out, err = capsys.readouterr()
# Build expected output
expected_output = ''
for row_index, row in unfolded_results.iterrows():
row_output = ('Iteration {}: ts = {:0.4f}, ts_stopping ='
' {}\n'.format(row_index + 1,
row['ts_iter'],
row['ts_stopping']))
expected_output += row_output
assert expected_output == out
def test_Logger_isinstance_Callback():
logger = Logger()
assert isinstance(logger, Callback)
def test_SplineRegularizer_isinstance_Regularizer():
spline_reg = SplineRegularizer()
assert isinstance(spline_reg, Regularizer)
def test_SplineRegularizer(example_dataset):
degree = 3
smooth = 20
spline_reg = SplineRegularizer(degree=degree, smooth=smooth)
unfolded_with_reg = iterative_unfold(data=example_dataset.data,
data_err=example_dataset.data_err,
response=example_dataset.response,
response_err=example_dataset.response_err,
efficiencies=example_dataset.efficiencies,
efficiencies_err=example_dataset.efficiencies_err,
return_iterations=True,
callbacks=[spline_reg])
unfolded_no_reg = iterative_unfold(data=example_dataset.data,
data_err=example_dataset.data_err,
response=example_dataset.response,
response_err=example_dataset.response_err,
efficiencies=example_dataset.efficiencies,
efficiencies_err=example_dataset.efficiencies_err,
return_iterations=True)
no_reg = unfolded_no_reg.iloc[0]['unfolded']
x = np.arange(len(no_reg), dtype=float)
spline = UnivariateSpline(x, no_reg, k=degree, s=smooth)
fitted_unfolded = spline(x)
np.testing.assert_allclose(unfolded_with_reg.iloc[0]['unfolded'],
fitted_unfolded)
def test_SplineRegularizer_groups(example_dataset):
degree = 3
smooth = 20
groups = np.empty_like(example_dataset.data)
groups[:len(groups) // 2] = 0
groups[len(groups) // 2:] = 1
spline_reg = SplineRegularizer(degree=degree, smooth=smooth, groups=groups)
unfolded_with_reg = iterative_unfold(data=example_dataset.data,
data_err=example_dataset.data_err,
response=example_dataset.response,
response_err=example_dataset.response_err,
efficiencies=example_dataset.efficiencies,
efficiencies_err=example_dataset.efficiencies_err,
return_iterations=True,
callbacks=[spline_reg])
unfolded_no_reg = iterative_unfold(data=example_dataset.data,
data_err=example_dataset.data_err,
response=example_dataset.response,
response_err=example_dataset.response_err,
efficiencies=example_dataset.efficiencies,
efficiencies_err=example_dataset.efficiencies_err,
return_iterations=True)
# Manually regularize each group independently
y_no_reg = unfolded_no_reg.iloc[0]['unfolded']
x = np.arange(len(y_no_reg), dtype=float)
fitted_unfolded_no_reg = np.empty(len(y_no_reg))
group_ids = np.unique(groups)
for group in group_ids:
group_mask = groups == group
x_group = x[group_mask]
y_group = y_no_reg[group_mask]
spline_group = UnivariateSpline(x_group, y_group, k=degree, s=smooth)
fitted_unfolded_group = spline_group(x_group)
fitted_unfolded_no_reg[group_mask] = fitted_unfolded_group
np.testing.assert_allclose(unfolded_with_reg.iloc[0]['unfolded'],
fitted_unfolded_no_reg)
def test_SplineRegularizer_groups_raises(example_dataset):
degree = 3
smooth = 20
groups = np.empty(len(example_dataset.data) - 1)
groups[:len(groups) // 2] = 0
groups[len(groups) // 2:] = 1
spline_reg = SplineRegularizer(degree=degree, smooth=smooth, groups=groups)
with pytest.raises(ValueError) as excinfo:
iterative_unfold(data=example_dataset.data,
data_err=example_dataset.data_err,
response=example_dataset.response,
response_err=example_dataset.response_err,
efficiencies=example_dataset.efficiencies,
efficiencies_err=example_dataset.efficiencies_err,
return_iterations=True,
callbacks=[spline_reg])
err_msg = ('Invalid groups array. There should be an entry '
'for each cause bin. However, got len(groups)={} '
'while there are {} cause bins.'.format(len(groups),
len(example_dataset.data)))
assert err_msg == str(excinfo.value)
def test_validate_callbacks():
callbacks = [Logger(), SplineRegularizer()]
assert validate_callbacks(callbacks) == callbacks
def test_validate_empty_callbacks():
assert validate_callbacks(None) == []
@pytest.mark.parametrize('callback', [Logger(), SplineRegularizer()])
def test_validate_callbacks_single_callback(callback):
assert validate_callbacks(callback) == [callback]
def test_validate_callbacks_raises():
callbacks = [Logger(), SplineRegularizer(), 'not a callback']
with pytest.raises(TypeError) as excinfo:
validate_callbacks(callbacks)
err_msg = 'Found non-callback object in callbacks: {}'.format(['not a callback'])
assert err_msg == str(excinfo.value)
def test_extract_regularizer_multiple_raises():
callbacks = [SplineRegularizer(), SplineRegularizer()]
with pytest.raises(NotImplementedError) as excinfo:
extract_regularizer(callbacks)
err_msg = 'Multiple regularizer callbacks where provided.'
assert err_msg == str(excinfo.value)
def test_extract_regularizer_no_regularizer():
callbacks = [Logger()]
assert extract_regularizer(callbacks) is None
@pytest.mark.parametrize('callback', [SplineRegularizer()])
def test_extract_regularizer(callback):
callbacks = [Logger(), callback]
assert extract_regularizer(callbacks) == callback
def test_setup_callbacks_regularizer():
callbacks = [Logger(), SplineRegularizer()]
c, r = setup_callbacks_regularizer(callbacks)
assert isinstance(c, CallbackList)
assert len(c) == 1
assert c.callbacks[0] is callbacks[0]
assert r is callbacks[1]
def test_callbacklist_empty():
c = CallbackList()
assert c.callbacks == []
def test_callbacklist_callbacks():
logger = Logger()
reg = SplineRegularizer()
callbacks = [logger, reg]
c = CallbackList(callbacks=callbacks)
assert len(c) == len(callbacks)
assert all(i is j for i, j in zip(c.callbacks, callbacks))
def test_callbacklist_method_calls():
class MethodChecker(Callback):
def __init__(self):
super(MethodChecker, self).__init__()
self.called_on_unfolding_begin = False
self.called_on_unfolding_end = False
self.called_on_iteration_begin = False
self.called_on_iteration_end = False
def on_unfolding_begin(self, status=None):
self.called_on_unfolding_begin = True
def on_unfolding_end(self, status=None):
self.called_on_unfolding_end = True
def on_iteration_begin(self, iteration, status=None):
self.called_on_iteration_begin = True
def on_iteration_end(self, iteration, status=None):
self.called_on_iteration_end = True
method_checker = MethodChecker()
c = CallbackList(method_checker)
c.on_iteration_begin(1)
assert method_checker.called_on_iteration_begin
c.on_iteration_end(1)
assert method_checker.called_on_iteration_end
c.on_unfolding_begin()
assert method_checker.called_on_unfolding_begin
c.on_unfolding_end()
assert method_checker.called_on_unfolding_end
```
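The `MethodChecker` test above also documents the hook interface a user-defined callback has to implement. The sketch below builds on that: a toy callback that records the last iteration number, passed to `iterative_unfold` together with placeholder arrays (in the tests these come from the `example_dataset` fixture).
```python
import numpy as np
from pyunfold.callbacks import Callback
from pyunfold.unfold import iterative_unfold

class IterationCounter(Callback):
    """Toy callback that remembers the last completed iteration."""
    def __init__(self):
        super(IterationCounter, self).__init__()
        self.last_iteration = 0

    def on_iteration_end(self, iteration, status=None):
        self.last_iteration = iteration

# Placeholder unfolding inputs (columns of the response sum to the efficiencies)
data = np.array([100., 150.])
data_err = np.sqrt(data)
response = np.array([[0.9, 0.1],
                     [0.1, 0.9]])
response_err = np.full_like(response, 0.01)
efficiencies = np.ones(2)
efficiencies_err = np.full(2, 0.01)

counter = IterationCounter()
iterative_unfold(data=data, data_err=data_err,
                 response=response, response_err=response_err,
                 efficiencies=efficiencies, efficiencies_err=efficiencies_err,
                 callbacks=[counter])
print(counter.last_iteration)
```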
#### File: pyunfold/tests/test_priors.py
```python
from __future__ import division, print_function
import pytest
import numpy as np
from pyunfold.priors import jeffreys_prior, setup_prior, uniform_prior
@pytest.mark.parametrize('prior', ['uniform',
123,
123.0])
def test_setup_prior_invalid_prior(prior):
with pytest.raises(TypeError) as excinfo:
setup_prior(prior)
expected_msg = ('prior must be either None or array_like, '
'but got {}'.format(type(prior)))
assert expected_msg == str(excinfo.value)
def test_setup_prior_non_normalized_raises():
prior = [1, 2, 3, 4]
with pytest.raises(ValueError) as excinfo:
setup_prior(prior)
expected_msg = ('Prior (which is an array of probabilities) does '
'not add to 1. sum(prior) = {}'.format(np.sum(prior)))
assert expected_msg == str(excinfo.value)
def test_setup_prior_negative_raises():
prior = [2, 0, -1]
with pytest.raises(ValueError) as excinfo:
setup_prior(prior)
expected_msg = ('Input prior has negative values. Since the values '
'of prior are interpreted as probabilities, they '
'cannot be negative.')
assert expected_msg == str(excinfo.value)
def test_jeffreys_prior():
causes = np.linspace(5, 10, 4)
# Calculate expected prior
ln_factor = np.log(causes.max() / causes.min())
prior = 1 / (ln_factor * causes)
prior = prior / np.sum(prior)
np.testing.assert_allclose(prior, jeffreys_prior(causes=causes))
def test_jeffreys_prior_normalized():
causes = np.array([0.5, 1.5])
prior = jeffreys_prior(causes=causes)
np.testing.assert_allclose(prior.sum(), 1)
@pytest.mark.parametrize('type_', [list, tuple, np.array])
def test_jeffreys_prior_array_like(type_):
causes = type_([1, 2, 3, 4, 5])
jeffreys_prior(causes=causes)
@pytest.mark.parametrize('num_causes', [1, 7, 100])
def test_uniform_prior(num_causes):
prior = uniform_prior(num_causes)
# Correct number of cause bins
assert len(prior) == num_causes
# Every bin has same probability
assert len(np.unique(prior)) == 1
# Sum of probabilities add to one
np.testing.assert_allclose(np.sum(prior), 1)
```
#### File: pyunfold/pyunfold/utils.py
```python
from __future__ import division, print_function
import numpy as np
def assert_same_shape(*arrays):
"""Checks that each input array_like objects are the same shape
"""
arrays = cast_to_array(*arrays)
shapes = [array.shape for array in arrays]
unique_shapes = set(shapes)
if not len(unique_shapes) == 1:
raise ValueError('Multiple shapes found: {}'.format(unique_shapes))
def cast_to_array(*arrays):
"""Casts input arrays to numpy.ndarray objects
Note that no copy is made if an input array is already a numpy.ndarray.
Parameters
----------
arrays : array_like
Input array_like objects to be cast to numpy arrays.
Returns
-------
output : list
List of casted numpy arrays.
Examples
--------
>>> import numpy as np
>>> a_original = [1, 2, 3]
>>> b_original = np.array([4.5, 2.1, 900])
>>> a, b = cast_to_array(a_original, b_original)
>>> a
array([1, 2, 3])
>>> b
array([ 4.5, 2.1, 900. ])
>>> b is b_original
True
"""
if len(arrays) == 1:
output = np.asarray(arrays[0])
else:
output = map(np.asarray, arrays)
return output
def none_to_empty_list(*args):
"""Replaces None inputs with an empty list
Examples
--------
Single input case
>>> none_to_empty_list(None)
[]
Multiple input case
>>> a, b, c = None, 'woo', 34
>>> none_to_empty_list(a, b, c)
[[], 'woo', 34]
"""
outputs = []
for arg in args:
outputs.append(arg if arg is not None else [])
if len(outputs) == 1:
return outputs[0]
else:
return outputs
def safe_inverse(x):
"""Safely inverts the elements in x
Parameters
----------
x : array_like
Input array to take the inverse of (i.e. 1 / x).
Returns
-------
inv : numpy.ndarray
Inverse of input array with inf set to zero.
Examples
--------
>>> a = [1, 2, 3, 0, 4]
>>> safe_inverse(a)
array([1. , 0.5 , 0.33333333, 0. , 0.25 ])
"""
x = np.asarray(x)
is_zero = x == 0
with np.errstate(divide='ignore'):
inv = 1 / x
inv[is_zero] = 0
return inv
``` |
{
"source": "jlramalheira/sudoku",
"score": 4
} |
#### File: sudoku/sudoku/coloring.py
```python
import networkx as nx
from math import sqrt
from queue import *
import random
def welsh_powell(graph):
"""Runs the Welsh-Powell algorithm to color a graph.
Arguments:
graph (networkx.Graph): a graph.
"""
def _welsh(graph):
for node in graph.node:
if not graph.node[node]['fixed']:
for neighbor in graph.neighbors(node):
if (graph.node[neighbor]['fixed']):
try:
graph.node[node]['label'].remove(
graph.node[neighbor]['label'])
except:
pass
def _update(graph):
for node in graph.node:
if (not graph.node[node]['fixed'] and
len(graph.node[node]['label']) == 1):
graph.node[node]['fixed'] = True
graph.node[node]['label'] = graph.node[node]['label'][0]
def _clear(graph):
for node in graph.node:
if (graph.node[node]['fixed'] and
type(graph.node[node]['label']) is not int):
graph.node[node]['fixed'] = False
def _engage(graph):
for i in range(size):
for j in range(size):
name = '{}{}'.format(i, j)
if not graph.node[name]['fixed']:
graph.node[name]['fixed'] = True
_welsh(graph)
_update(graph)
_clear(graph)
size = int(sqrt(len(graph.node)))
for node in graph.node:
if (graph.node[node]['label'] is None):
graph.node[node]['label'] = [(x + 1) for x in range(size)]
for i in range(size):
_engage(graph)
_welsh(graph)
_update(graph)
return graph
def sequencial_coloring(graph):
"""Runs the Sequencial Coloring algorithm to color a graph.
Arguments:
graph (networkx.Graph): a graph.
"""
def is_label_possible(graph, node, label):
for neighbor in graph.neighbors(node):
if graph.node[neighbor]['label'] == label:
return False
return True
size = int(sqrt(len(graph.node)))
labels = [(x + 1) for x in range(size)]
for label in labels:
for node in graph.node:
if is_label_possible(graph, node, label):
graph.node[node]['label'] = label
return graph
def class_coloring(graph):
"""Runs the Class Coloring algorithm to color a graph.
Arguments:
graph (networkx.Graph): a graph.
"""
def _init_classes_and_candidates(graph,classes,candidates):
for node in graph.node:
if graph.node[node]['fixed']:
classes[graph.node[node]['label'] - 1].add(node)
else:
candidates.append(node)
def _coloring(graph,candidates,classes):
while len(candidates) != 0:
v = candidates.pop()
for i in range(len(classes)):
neigh_set = set(graph.neighbors(v))
if len(classes[i].intersection(neigh_set)) == 0:
classes[i].add(v)
graph.node[v]['label'] = i + 1
break
size = int(sqrt(len(graph.node)))
classes = [set() for x in range(size)]
candidates = []
_init_classes_and_candidates(graph,classes,candidates)
_coloring(graph,candidates,classes)
return graph
def class_coloring_backtracking(graph):
"""Runs the Class Coloring Backtracking algorithm to color a graph.
Arguments:
graph (networkx.Graph): a graph.
"""
def _candidate_was_colored(graph,node):
return graph.node[node]['label'] != None
def _remove_from_class(last_colored,classes):
for c in classes:
if last_colored in c:
c.remove(last_colored)
def _init_classes_and_candidates(graph, classes, candidates):
for node in graph.node:
if graph.node[node]['fixed']:
classes[graph.node[node]['label'] - 1].add(node)
else:
candidates.append(node)
def _coloring(graph, candidates, classes, colored_stack):
while len(candidates) != 0:
v = candidates.pop()
init_index = 0 if type(graph.node[v]['label']) != int else graph.node[v]['label']
graph.node[v]['label'] = None
for i in range(init_index,len(classes)):
neigh_set = set(graph.neighbors(v))
if len(classes[i].intersection(neigh_set)) == 0:
classes[i].add(v)
graph.node[v]['label'] = i + 1
colored_stack.append(v)
break
if not _candidate_was_colored(graph,v):
candidates.append(v)
last_colored = colored_stack.pop()
_remove_from_class(last_colored,classes)
candidates.append(last_colored)
size = int(sqrt(len(graph.node)))
classes = [set() for x in range(size)]
candidates = []
colored_stack = []
_init_classes_and_candidates(graph,classes,candidates)
_coloring(graph,candidates,classes,colored_stack)
#raise NotImplementedError('')
return graph
def dsatur(graph):
"""Runs the Degree of Saturation heuristic algorithm to color a graph.
Arguments:
graph (networkx.Graph): a graph.
"""
def _saturation(graph):
candidates = 0
for node in graph.node:
if not graph.node[node]['fixed']:
candidates+=1
for neighbor in graph.neighbors(node):
if (graph.node[neighbor]['fixed']):
graph.node[node]['label'].add(
graph.node[neighbor]['label'])
return candidates
def _find_highest_saturation(graph):
highest_saturation = -1
for node in graph.node:
if (not graph.node[node]['fixed'] and
type(graph.node[node]['label']) is not int):
if (len(graph.node[node]['label']) > highest_saturation):
highest_saturation = len(graph.node[node]['label'])
highest_node = node
return highest_node
def _find_smallest_color(graph,node):
size = int(sqrt(len(graph.node)))
for i in range(1,size+1):
if not (i in graph.node[node]['label']):
return i
def _update_saturation(graph,node):
for neighbor in graph.neighbors(node):
if ( not graph.node[neighbor]['fixed'] and
type(graph.node[neighbor]['label']) is not int):
graph.node[neighbor]['label'].add(
graph.node[node]['label'])
for node in graph.node:
if (graph.node[node]['label'] is None):
graph.node[node]['label'] = set()
candidates = _saturation(graph)
while(candidates>0):
highest_node = _find_highest_saturation(graph)
color = _find_smallest_color(graph,highest_node)
if(color != None):
graph.node[highest_node]['label'] = color
_update_saturation(graph,highest_node)
candidates-=1
else:
break
return graph
def bfs_heuristic(graph,max_iterations):
"""Runs the Breadth First Search heuristic algorithm to color a graph.
Arguments:
graph (networkx.Graph): a graph.
"""
def _clear(graph):
for node in graph.node:
if not graph.node[node]['fixed']:
graph.node[node]['label'] = None
def _is_possible_color(graph,nodes,color):
for node in nodes:
if graph.node[node]['label'] == color:
return False
return True
def _coloring(graph,node):
size = int(sqrt(len(graph.node)))
colors = []
for i in range(size):
if _is_possible_color(graph,graph.neighbors(node),i+1):
colors.append(i+1)
if len(colors) > 0:
r = random.SystemRandom()
graph.node[node]['label'] = r.choice(colors)
return True
return False
def _bfs(graph,node):
q = Queue()
if not graph.node[node]['fixed']:
if not _coloring(graph,node):
return
q.put(node)
while not q.empty():
v = q.get()
neighbors = graph.neighbors(v)
for neighbor in neighbors:
if graph.node[neighbor]['label'] == None:
if not _coloring(graph,neighbor):
return
q.put(neighbor)
def _is_valid_solution(graph):
for node in graph.node:
if graph.node[node]['label'] == None:
return False
return True
for i in range(max_iterations):
r = random.SystemRandom()
_bfs(graph,r.choice(graph.nodes()))
if _is_valid_solution(graph):
break
else:
_clear(graph)
for i in range(9):
for j in range(9):
print (graph.node['{}{}'.format(i,j)]['label'],end=' ')
print()
return graph
def dfs_heuristic(graph,max_iterations):
"""Runs the Depth First Search heuristic algorithm to color a graph.
Arguments:
graph (networkx.Graph): a graph.
"""
def _clear(graph):
for node in graph.node:
if not graph.node[node]['fixed']:
graph.node[node]['label'] = None
def _is_possible_color(graph,nodes,color):
for node in nodes:
if graph.node[node]['label'] == color:
return False
return True
def _coloring(graph,node):
size = int(sqrt(len(graph.node)))
colors = []
for i in range(size):
if _is_possible_color(graph,graph.neighbors(node),i+1):
colors.append(i+1)
if len(colors) > 0:
r = random.SystemRandom()
graph.node[node]['label'] = r.choice(colors)
return True
return False
def _dfs(graph,node):
stack = []
if not graph.node[node]['fixed']:
if not _coloring(graph,node):
return
stack.append(node)
while not len(stack) == 0:
v = stack.pop()
neighbors = graph.neighbors(v)
for neighbor in neighbors:
if graph.node[neighbor]['label'] == None:
if not _coloring(graph,neighbor):
return
stack.append(neighbor)
def _is_valid_solution(graph):
for node in graph.node:
if graph.node[node]['label'] == None:
return False
return True
for i in range(max_iterations):
r = random.SystemRandom()
_dfs(graph,r.choice(graph.nodes()))
if _is_valid_solution(graph):
break
else:
_clear(graph)
for i in range(9):
for j in range(9):
print (graph.node['{}{}'.format(i,j)]['label'],end=' ')
print()
return graph
```
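All of the routines above assume the same graph encoding: node names of the form `'{row}{col}'`, a boolean `'fixed'` attribute, and a `'label'` attribute that is `None` for empty cells and an integer for clues, with the board size inferred as the square root of the node count. The sketch below builds a toy 2x2 board (rows and columns only, no boxes) and colours it with `welsh_powell`; the import path mirrors the tests, and like the module itself it assumes an older networkx release where `Graph.node` is still available.
```python
import networkx as nx
from sudoku.coloring import welsh_powell  # assumed import path, as in the tests

graph = nx.Graph()
for i in range(2):
    for j in range(2):
        graph.add_node('{}{}'.format(i, j), fixed=False, label=None)
# One clue: the top-left cell already holds the value 1
graph.node['00']['fixed'] = True
graph.node['00']['label'] = 1
# Cells in the same row or column must receive different labels
graph.add_edges_from([('00', '01'), ('10', '11'), ('00', '10'), ('01', '11')])

solved = welsh_powell(graph)
for node in sorted(solved.node):
    print('{} -> {}'.format(node, solved.node[node]['label']))
```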
#### File: sudoku/tests/coloring_tests.py
```python
import unittest
import os.path
import sudoku.io
import sudoku.coloring
class ColoringTests(unittest.TestCase):
def test_welsh_powell(self):
filepath = os.path.join(
os.path.dirname(__file__),
'../rsc/9/sample-2.sdk')
graph = sudoku.io.read(filepath)
graph_solved = sudoku.coloring.welsh_powell(graph)
def test_sequencial_coloring(self):
filepath = os.path.join(
os.path.dirname(__file__),
'../rsc/25/sample-0.sdk')
graph = sudoku.io.read(filepath)
graph_solved = sudoku.coloring.sequencial_coloring(graph)
def test_class_coloring(self):
filepath = os.path.join(
os.path.dirname(__file__),
'../rsc/9/sample-2.sdk')
graph = sudoku.io.read(filepath)
graph_solved = sudoku.coloring.class_coloring(graph)
def test_class_coloring_backtracking(self):
filepath = os.path.join(
os.path.dirname(__file__),
'../rsc/9/sample-2.sdk')
graph = sudoku.io.read(filepath)
graph_solved = sudoku.coloring.class_coloring_backtracking(graph)
def test_dsatur(self):
filepath = os.path.join(
os.path.dirname(__file__),
'../rsc/9/solved-0.sdk')
graph = sudoku.io.read(filepath)
graph_solved = sudoku.coloring.dsatur(graph)
def test_bfs_heuristic(self):
filepath = os.path.join(
os.path.dirname(__file__),
'../rsc/9/sample-2.sdk')
graph = sudoku.io.read(filepath)
max_iterations = 100000
graph_solved = sudoku.coloring.bfs_heuristic(graph,max_iterations)
def test_dfs_heuristic(self):
filepath = os.path.join(
os.path.dirname(__file__),
'../rsc/9/sample-2.sdk')
graph = sudoku.io.read(filepath)
max_iterations = 100000
graph_solved = sudoku.coloring.dfs_heuristic(graph,max_iterations)
``` |
{
"source": "jlramirez/index-herbariorum-python-client",
"score": 3
} |
#### File: index-herbariorum-python-client/tests/client_test.py
```python
import os
import unittest
from indexherbariorum.client import IndexHerbariorumApi
class IndexHerbariorumApiTest(unittest.TestCase):
"""Tests for Index Herbariorum API"""
def test_countries(self):
api = IndexHerbariorumApi()
countries = api.countries()
self.assertEqual(countries['meta']['code'], 200)
def test_staff(self):
api = IndexHerbariorumApi()
staff = api.staff(rq={'code': 'ny', 'sort': 'lastName'})
self.assertEqual(staff['meta']['code'], 200)
with self.assertRaises(Exception):
api.staff(rq={'download': 'yes'})
def test_institutions(self):
api = IndexHerbariorumApi()
institutions = api.institutions(rq={'country': 'italy', 'city': 'rome', 'sort': 'code'})
self.assertEqual(institutions['meta']['code'], 200)
with self.assertRaises(Exception):
api.institutions(rq={'download': 'yes'})
def test_institution(self):
api = IndexHerbariorumApi()
institution = api.institution('ala')
self.assertEqual(institution['code'], 'ALA')
def test_count_countries(self):
api = IndexHerbariorumApi()
count = api.count_countries()
self.assertIsInstance(count, int)
self.assertGreaterEqual(count, 0)
def test_count_staff(self):
api = IndexHerbariorumApi()
count = api.count_staff(rq={'country': 'spain', 'correspondent': 'yes'})
self.assertIsInstance(count, int)
self.assertGreaterEqual(count, 0)
def test_count_institutions(self):
api = IndexHerbariorumApi()
count = api.count_institutions(rq={'country': 'bolivia'})
self.assertIsInstance(count, int)
self.assertGreaterEqual(count, 0)
def test_download(self):
api = IndexHerbariorumApi()
api.download('institutions', rq={'country': 'italy', 'city': 'rome'})
api.download('staff', rq={'state': 'new york', 'correspondent': 'yes'}, filename='staff.csv')
self.assertTrue(os.path.exists('index_herbariorum.csv'))
self.assertTrue(os.path.exists('staff.csv'))
with self.assertRaises(Exception):
api.download('staff')
api.download('countries')
if __name__ == '__main__':
unittest.main()
``` |
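For reference, a minimal usage sketch of the client exercised by these tests; the query values are illustrative and every call mirrors one asserted above.
```python
from indexherbariorum.client import IndexHerbariorumApi

api = IndexHerbariorumApi()
ala = api.institution('ala')                 # single institution by its code
rome = api.institutions(rq={'country': 'italy', 'city': 'rome', 'sort': 'code'})
n_staff = api.count_staff(rq={'country': 'spain', 'correspondent': 'yes'})
print(ala['code'], rome['meta']['code'], n_staff)
api.download('staff', rq={'state': 'new york'}, filename='staff.csv')
```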
{
"source": "jlrandulfe/drone-swarm",
"score": 2
} |
#### File: src/controller/p_controller.py
```python
import numpy as np
import rospy
import geometry_msgs.msg
import std_msgs.msg
# Local libraries
from controller import array_operations
from controller import error_functions
from pycopter.srv import DroneSwarmMultiArray
def dist(vector):
length = np.sqrt(pow(vector[0],2)+pow(vector[1],2))
return length
class PControlNode():
def __init__(self, kp=0.02):
self.start = False
self.new_it = False
self.n_drones = 0
self.timestamp = 0
self.movement = "static"
self.velocity = np.array([0,0]) # [m/s, m/s]
self.sin_amplitude = np.array([0, 0]) # [m, m]
self.sin_frequency = 0.0 # [Hz]
self.timestamp = 0 # [ms]
self.kp = kp
# Variables for getting the settling time
self.t1 = 0.0
self.t2 = 0.0
self.init_error = 0.0
self.set_time = 0.0
# Errors matrix
self.errors = np.array([[0, 1, 2],[1, 0, 3],[2, 3, 0]])
self.abs_errors = self.errors.copy()
self.system_error = 0.0
self.predicted_rel_positions = np.array([])
self.predicted_distances = np.array([])
self.desired_distances = np.array([])
# Control variables matrix
self.control_u = np.array([])
# Kalman filter topic susbscribers
rospy.Subscriber("kalman/pos_estimation",
std_msgs.msg.Float64MultiArray,
self.kalman_callback, queue_size=1)
# Drone controller topic publisher
self.control_var_pub = rospy.Publisher(
"controller/control_value",
std_msgs.msg.Float64MultiArray,
queue_size=1)
self.errors_pub = rospy.Publisher(
"controller/errors",
std_msgs.msg.Float64MultiArray,
queue_size=1)
return
def kalman_callback(self, data):
"""
Calculate and send the control variable of the drones
The control is done with a P controller fed with the actual
distances between the drones, which are obtained in the Kalman
node.
It will not work until the desired distances are obtained from
the pattern generator.
"""
# Cannot go through the routine without the desired distances
# having been given beforehand.
if not self.start:
return
self.timestamp = data.layout.data_offset
# Get the relative positions from the Kalman node.
self.predicted_rel_positions = array_operations.multiarray2np(data)
self.new_it = True
return
def pat_gen_start(self):
"""
Store the desired distances, and allow the controller to start
"""
x_value, y_value = 0, 0
try:
setup_pycopter = rospy.ServiceProxy("supervisor/kalman",
DroneSwarmMultiArray)
resp = setup_pycopter(True)
self.n_drones = resp.n_rows
x_value = resp.param1
y_value = resp.param2
freq = resp.param3
movement = resp.param4
if resp.param6 > 0.0:
self.kp = resp.param6
except rospy.ServiceException as e:
print("Service call failed: {}".format(e))
return -1
self.desired_distances = array_operations.multiarray2np_sqr(resp)
if movement == 1:
self.movement = "static"
self.velocity = np.array([0, 0])
self.sin_amplitude = np.array([0, 0])
elif movement == 2:
self.movement = "linear"
self.velocity = np.array([x_value, y_value])
self.sin_amplitude = np.array([0, 0])
elif movement == 3:
self.movement = "sinusoidal"
self.velocity = np.array([0, 0])
self.sin_amplitude = np.array([x_value, y_value])
self.sin_frequency = freq
else:
raise ValueError("Non recognized movement int: {}".format(movement))
rospy.loginfo("\n{}".format(self.desired_distances))
# self.desired_distances = array_operations.multiarray2np_sqr(data.data)
self.start = True
rospy.loginfo("Controller: Received formation from supervisor.")
return 0
def gradient_descent_control(self):
"""
Apply gradient descent for finding the control action
For a given N number of agents, calculate the control action so
it minimizes the accumulated error of the drones positions.
"""
# Calculate the predicted distances between the drones. Then, get the
# errors to the desired distances.
predicted_distances = np.linalg.norm(self.predicted_rel_positions,
axis=2)
self.predicted_distances = predicted_distances
self.errors = error_functions.simple_differences(
predicted_distances, self.desired_distances)
# Unit vectors calculation. Create a virtual axis so the division is
# dimension meaningful.
unit_vectors = np.zeros_like(self.predicted_rel_positions)
np.divide(self.predicted_rel_positions, predicted_distances[:,:,None],
out=unit_vectors, where=predicted_distances[:,:,None]!=[0,0])
self.abs_errors = self.errors.copy()
# Calculate and send the final control variable
vectorial_errors = self.errors[:, :, None] * unit_vectors
self.system_error = np.linalg.norm(vectorial_errors, axis=2).sum()
self.errors = (vectorial_errors).sum(axis=1)
self.control_u = np.clip(self.errors * self.kp, -1, 1)
# Settling time calculations
if self.init_error == 0.0:
self.init_error = self.system_error
if self.t1 == 0.0 and self.system_error < 0.9*self.init_error:
self.t1 = self.timestamp
if self.t2 == 0.0 and self.system_error < 0.1*self.init_error:
self.t2 = self.timestamp
self.set_time = self.t2 - self.t1
# Print system error and settling time
rospy.loginfo("System error: {}".format(self.system_error))
rospy.loginfo("System error percentage: {}".format((self.system_error/self.init_error)*100))
rospy.loginfo("Settling time: {}".format(self.set_time))
return
def set_leader_velocity(self):
if self.movement == "static":
pass
elif self.movement == "linear":
for i in range(self.n_drones):
self.control_u[i] += self.velocity
elif self.movement == "sinusoidal":
self.control_u[0] += self.sin_amplitude * np.sin(
0.01*self.sin_frequency * (self.timestamp/1000.0))
else:
raise ValueError("Unrecognized movement type")
return
def run(self):
"""
Main routine. Wait for initialization and then do the main loop
It waits until the supervisor node service is active. Then, it
requests the desired formation from it.
Afterwards, it enters the main loop, where it continuously
waits for position predictions. After receiving a prediction, it
calculates the control action by using a gradient-descent based
controller. Finally, it applies to the leader of the formation
the desired movement of the swarm on top of the control action.
"""
rospy.loginfo("Controller started. Waiting for a desired formation")
rospy.wait_for_service("/supervisor/kalman")
rospy.loginfo("Online")
# Connect to the supervisor service and get the desired formation.
self.pat_gen_start()
# Main loop. Wait for predictions and calculate the control action
rate = rospy.Rate(200)
while not rospy.is_shutdown():
if self.start and self.new_it:
self.gradient_descent_control()
self.set_leader_velocity()
self.control_var_pub.publish(array_operations.np2multiarray(
self.control_u))
self.errors_pub.publish(array_operations.np2multiarray(
self.abs_errors, extra_val=self.system_error))
rospy.logdebug("Kalman: published Z ")
rospy.logdebug("Controller: published U ")
for i in range(self.n_drones):
rospy.logdebug("{}".format(self.control_u[i]))
self.new_it = False
rate.sleep()
rospy.spin()
return
def main():
# Instantiate the error_estimator node class and run it
controller = PControlNode()
controller.run()
return
```
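The core of `gradient_descent_control` is plain array algebra, so it can be reproduced without ROS. The sketch below performs one control step for three drones with made-up positions and desired distances; it assumes that `error_functions.simple_differences` is an element-wise subtraction of the desired distances from the predicted ones, which is how it is used above.
```python
import numpy as np

kp = 0.02
# Relative position vectors p_ij for 3 drones, shape (3, 3, 2)
positions = np.array([[0.0, 0.0], [2.0, 0.0], [1.0, 2.0]])
rel_positions = positions[None, :, :] - positions[:, None, :]
desired = np.array([[0.0, 1.5, 1.5],
                    [1.5, 0.0, 1.5],
                    [1.5, 1.5, 0.0]])

distances = np.linalg.norm(rel_positions, axis=2)
errors = distances - desired                       # assumed simple_differences
unit_vectors = np.zeros_like(rel_positions)
np.divide(rel_positions, distances[:, :, None], out=unit_vectors,
          where=distances[:, :, None] != 0)
vectorial_errors = errors[:, :, None] * unit_vectors
system_error = np.linalg.norm(vectorial_errors, axis=2).sum()
control_u = np.clip(vectorial_errors.sum(axis=1) * kp, -1, 1)
print(system_error)
print(control_u)                                   # one (vx, vy) command per drone
```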
#### File: src/kalman_filter/array_operations.py
```python
import numpy as np
import rospy
import geometry_msgs.msg
import std_msgs.msg
# Local libraries
def np2multiarray(data):
"""
Convert a 2D square numpy.array into a Float64MultiArray msg
"""
n_drones = data.shape[0]
# Define the 2 dimensions of the array.
dim_1 = std_msgs.msg.MultiArrayDimension(label="drone_n", size=n_drones,
stride=n_drones**2)
dim_2 = std_msgs.msg.MultiArrayDimension(label="drone_n", size=n_drones,
stride=n_drones)
# Create the layout of the message, necessary for deserializing it.
layout = std_msgs.msg.MultiArrayLayout()
layout.dim.append(dim_1)
layout.dim.append(dim_2)
# Create the output message with the data and the created layout.
message = std_msgs.msg.Float64MultiArray()
message.layout = layout
message.data = data.reshape(data.size).tolist()
return message
def multiarray2np(data):
"""
Convert a Float64MultiArray msg into a 2D square numpy.array
"""
np_data = np.array(data.data)
# Assuming that the output is a square matrix simplifies the problem.
dim_size = int(np.sqrt(np_data.size))
data_array = np_data.reshape([dim_size, dim_size])
return data_array
```
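A round-trip sketch for the two helpers above. It only instantiates `std_msgs` messages, so it does not need a running ROS master, but it does assume a ROS Python environment where `std_msgs` is installed; the import path for this module is assumed.
```python
import numpy as np
from kalman_filter import array_operations  # assumed import path for this file

distances = np.array([[0.0, 1.2, 2.5],
                      [1.2, 0.0, 1.8],
                      [2.5, 1.8, 0.0]])
msg = array_operations.np2multiarray(distances)
recovered = array_operations.multiarray2np(msg)
assert np.allclose(distances, recovered)
print(recovered)
```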
#### File: src/kalman_filter/kalman.py
```python
import numpy as np
from scipy import linalg as la
from matplotlib import pyplot as pl
from matplotlib import mlab as mlab
class Kalman:
def __init__(self):
# Kalman matrices init!
# Observation matrix H 1/d * [ px, py ] JACOBIAN because we've non linear measure
self.H = np.array([[1.0, 1.0]])
# time step
self.dt = 5e-2
# distance sensor noise
self.radar_sigma = 0.1
# measurement covariance matrix
self.R = np.array([[0.01],
[0.01]])
# States p01_x and p01_y
self.state_X = np.array([[0.0, 0.0]]).T
# P Covariance matrix for the position meas
self.cov_P = np.array([[0.5, 0.0],
[0.0, 0.5]])
# predicted state
self.predict_state_X = np.array([[0.0, 0.0]]).T
self.predict_cov_P = np.array([[0.0, 0.0],
[0.0, 0.0]])
self.F = np.array([[1.0, 0.0],
[0.0, 1.0]])
# Matrix G maps the velocity measurements into the state dynamics, i.e.
# velocity is integrated over dt to estimate position.
self.G = np.array([[self.dt, 0.0],
[0.0, self.dt]])
# variance vx and vy
# vel_sigma = np.array([[0.01, 0],
# [0, 0.01]])
self.vel_sigma = 0.1
# self.Q = np.outer(self.G*vel_sigma, vel_sigma*self.G.transpose())
self.Q = (self.G * self.vel_sigma).dot(self.vel_sigma * self.G.transpose())
self.K = np.array([[0.1, 0.1]]).T
# self.i = 0
self.err_class = 0.0
self.start_prediction_flag = False
# pl.ion()
# self.fig, self.axis = pl.subplots(3, 1)
def predict(self, state_sim, relative_velocities, timestamp):
print('STATE BEFORE FLAG', self.state_X)
if not self.start_prediction_flag:
self.state_X = state_sim + np.random.rand(2, 1) * 0.5
self.start_prediction_flag = True
print('STATE AFTER FLAG: ', self.state_X)
self.dt = timestamp
# self.G = np.array([[self.dt, 0],
# [0, self.dt]])
# self.Q = (self.G * self.vel_sigma).dot(self.vel_sigma * self.G.transpose())
# print("*****DT****:: ", self.dt)
# self.G = np.array([[self.dt, 0],
# [0, self.dt]])
# print(" PREDICTION ")
# print("\n")
# predict the position in the plane of the drone
# How the position evolvers -> dynamics
# self.next_pos_x = self.state_X[0] + relative_velocities[0] * self.dt
# self.next_pos_y = self.state_X[1] + relative_velocities[1] * self.dt
# Gaussian distributions for position in x and y: they are stochastic variables. That is why we estimate position
# and calculate the variance of that estimation (mean + variance: normal distribution).
# print(" F size: ", self.F.size)
# print("\n")
# print(" state_X size: ", self.state_X.size)
# print("\n")
# print(" G: ", self.G)
# print("\n")
# print(" rel_velocities size: ", relative_velocities.shape)
# print("\n")
# print(" first mult size: ", self.F.dot(self.state_X).shape)
# print("\n")
# print(" second mult : ", self.G.dot(relative_velocities).reshape(2, 1))
# print("\n")
# print(" Q size: ", self.Q.size)
# print("\n")
# self.state_X = state_sim
self.predict_state_X = self.F.dot(self.state_X) + self.G.dot(relative_velocities.reshape(2, 1))
self.predict_cov_P = self.F.dot(self.cov_P).dot(self.F.transpose()) + self.Q
self.state_X = self.predict_state_X
self.cov_P = self.predict_cov_P
# print(" predict_State : ", self.state_X)
# print("\n")
# print(" predict_cov_P : ", self.cov_P)
# print("\n")
def distance(self, x, y):
# norm distance calculation because we have a radio frequency sensor that measures the distance between drones
sum_sq = (x * x) + (y * y)
d = np.sqrt(sum_sq)
return d
def update(self, state_sim, distance_sensor):
# print(" UPDATE ") # STEP K
# print("\n")
# correct the KF
# print(" predict_State size: ", self.predict_state_X.size)
# print("\n")
# previous_state = self.predict_state_X
# previous_cov = self.predict_cov_P
# due to the fact that the distance has a non-linear relation with the position, we calculate the Jacobian
# scalar * 2D array
# print("STATE SIMULATOR: ", state_sim)
dist = (self.distance(self.state_X[0][0], self.state_X[1][0]))
# print(" statex: ", self.state_X[0][0])
# print("\n")
if dist.any() == 0:
pass
else:
self.H = (1 / dist) * self.state_X.transpose()
# print(" H ", self.H)
# print("\n")
# print(" Previous state: ", previous_state)
# print("\n")
# print(" H shape: ", self.H.shape)
# print("\n")
# print(" H : ", self.H)
# print("\n")
# print(" previous_State size: ", previous_state.shape)
# print("\n")
# print(" first mult size: ", (self.H * self.radar_sigma).shape)
# print("\n")
# print(" second mult size: ", (self.radar_sigma * self.H.reshape(2,1)).shape)
# print("\n")
# print(" H_trans: ", self.H.transpose().shape)
# print("\n")
self.R = (self.H * self.radar_sigma).dot((self.radar_sigma * self.H.reshape(2, 1)))
# print(" R size: ", self.R.shape)
# print("\n")
# print(" R: ", self.R)
# print("\n")
S = self.H.dot(self.cov_P).dot(self.H.reshape(2, 1)) + self.R
# print(" S size: ", S.shape)
# print("\n")
# print(" P : ", self.cov_P)
# print("\n")
# print(" S : ", S)
# print("\n")
# print(" first mult size: ", (self.H * self.radar_sigma))
# print("\n")
if np.size(S) == 1: ## WHATS WRONG HERE?
self.K = self.cov_P.dot(self.H.reshape(2, 1)) / S
# print(" P * Ht size: ", (self.cov_P.dot(self.H.reshape(2, 1))).shape)
# print("\n")
# print(" P * Ht : ", (self.cov_P.dot(self.H.reshape(2, 1))))
# print("\n")
# print(" K size: ", self.K.shape)
# print("\n")
# print(" K : ", self.K)
# print("\n")
# print(" prueba2: ", self.H.dot(self.cov_P))
# print("\n")
# print(" prueba1: ", np.outer(self.K, self.H.dot(self.cov_P)))
# print("\n")
self.cov_P = self.cov_P - np.outer(self.K, self.H.dot(self.cov_P))
else:
self.K = self.cov_P.dot(self.H.reshape(2, 1)).dot(la.inv(S))
self.cov_P = self.cov_P - np.outer(self.K, self.H.dot(self.cov_P))
self.state_X = self.state_X + self.K * (self.error(distance_sensor))
# print("DISTANCE SENSOR: ", distance_sensor)
# print(" H : ", self.H)
# print("\n")
# print(" cov_P: ", self.cov_P)
# print("\n")
# print(" state_X: ", self.state_X)
# print("\n")
# print(" cov_P: ", self.cov_P)
# print("\n")
def error(self, distance_sensor):
# for tracking the error: here we compare the distance that we obtain with the sensor vs the distance calculated
# by the position estimation.
dist_estimation = self.distance(self.state_X[0][0], self.state_X[1][0])
err = distance_sensor - dist_estimation
# print('err:', err)
self.err_class = err
# self.err_plot[self.i] = distance_sensor - dist_estimation
# rmse = np.sqrt(dist_estimation*dist_estimation - distance_sensor*distance_sensor)
# self.i = self.i +1
# print(" Error: ", err)
# print("\n")
# print(" RMSE CALC: ", rmse)
# print("\n")
return err
def variance_calculation(self, P):
norm_cov = la.norm(P)
# print('NORM COVARIANCE: ', norm_cov)
return norm_cov
def animation(self, it, tf, time, err_plt):
err_plt[it] = self.err_class
self.fig.tight_layout()
xpl = 10
ypl = 1
y_y_lim = 10
xlimits0 = np.linspace(-2.5, xpl, 300) # Limits for pos in axis 'x' and 'y'
xlimits1 = np.linspace(-2.5, y_y_lim, 300)
# Plot of X position relative
self.axis[0].clear()
self.axis[0].grid("on")
pxgauss = mlab.normpdf(xlimits0, self.state_X[0][0], np.sqrt(self.cov_P[0, 0]))
self.axis[0].plot(xlimits0, pxgauss)
self.axis[0].fill_between(xlimits0, pxgauss, color='cyan')
self.axis[0].set_xlim([-xpl, xpl])
self.axis[0].set_ylim([0, ypl])
self.axis[0].set_yticks([0, 0.5 * ypl, ypl])
self.axis[0].set_title("Estimated relative X position")
self.axis[0].set_xlabel("[m]")
self.axis[0].arrow(0, 0, 0, ypl, \
head_width=0.05, head_length=0.1, fc='k', ec='k')
# Plot of Y position relative
self.axis[1].clear()
self.axis[1].grid("on")
pygauss = mlab.normpdf(xlimits1, self.state_X[1], np.sqrt(self.cov_P[1, 1]))
self.axis[1].plot(xlimits1, pygauss)
self.axis[1].fill_between(xlimits1, pygauss, color='cyan')
self.axis[1].set_xlim([-xpl, xpl])
self.axis[1].set_ylim([0, ypl])
self.axis[1].set_yticks([0, 0.5 * ypl, ypl])
self.axis[1].set_title("Estimated relative Y position")
self.axis[1].set_xlabel("[m]")
self.axis[1].arrow(0, 0, 0, ypl, \
head_width=0.05, head_length=0.1, fc='k', ec='k')
# Plot of distance error
self.axis[2].clear()
self.axis[2].grid("on")
self.axis[2].plot(time[0:it], err_plt[0:it], 'r')
self.axis[2].set_xlim([0, tf])
self.axis[2].set_ylim([0, 1])
self.axis[2].set_title("Error in distance estimation")
self.axis[2].set_xlabel("[s]")
```
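The extended Kalman filter above linearizes the range measurement d = sqrt(x^2 + y^2) around the current estimate, which is why H is built as the state vector divided by the predicted distance. The following is a minimal, self-contained sketch of that update step with illustrative numbers; the covariance values, the fake sensor reading, and the measurement noise model (a plain radar_sigma**2, slightly simpler than the R built in the class) are assumptions, not values from the simulator.
```python
import numpy as np

# Illustrative relative-position estimate and covariance (assumed values).
state = np.array([[3.0], [4.0]])    # column vector, like state_X above
P = np.diag([0.5, 0.5])             # 2x2 position covariance
radar_sigma = 0.1                   # range-sensor noise std (assumed)

d = np.sqrt(state[0, 0] ** 2 + state[1, 0] ** 2)  # predicted range (5.0 here)
H = (state / d).T                                 # Jacobian of d wrt (x, y): [[x/d, y/d]]

S = H.dot(P).dot(H.T) + radar_sigma ** 2          # innovation covariance (1x1)
K = P.dot(H.T) / S                                # Kalman gain, shape (2, 1)

measured_d = 5.2                                  # pretend sensor reading (assumed)
state = state + K * (measured_d - d)              # corrected estimate
P = P - K.dot(H).dot(P)                           # updated covariance
print(state.ravel(), np.diag(P))
```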
#### File: offboardnode/src/offboard_node.py
```python
import rospy
import mavros
from mavros.utils import *
from mavros import setpoint as SP
import mavros.setpoint
import mavros.command
import mavros_msgs.msg
import mavros_msgs.srv
import sys
import signal
from geometry_msgs.msg import Vector3
import math
def signal_handler(signal, frame):
print('You pressed Ctrl+C!')
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
current_pose = Vector3()
UAV_state = mavros_msgs.msg.State()
def _state_callback(topic):
UAV_state.armed = topic.armed
UAV_state.connected = topic.connected
UAV_state.mode = topic.mode
UAV_state.guided = topic.guided
def _setpoint_position_callback(topic):
pass
def _local_position_callback(topic):
print(topic.pose.position.z)
pass
def _set_pose(pose, x, y, z):
pose.pose.position.x = x
pose.pose.position.y = y
pose.pose.position.z = z
pose.header = mavros.setpoint.Header(
frame_id="att_pose",
stamp=rospy.Time.now())
def update_setpoint():
pass
def main():
rospy.init_node('default_offboard', anonymous=True)
rate = rospy.Rate(20)
mavros.set_namespace('/mavros')
# setup subscriber
# /mavros/state
state_sub = rospy.Subscriber(mavros.get_topic('state'),
mavros_msgs.msg.State, _state_callback)
# /mavros/local_position/pose
local_position_sub = rospy.Subscriber(mavros.get_topic('local_position', 'pose'),
SP.PoseStamped, _local_position_callback)
# /mavros/setpoint_raw/target_local
setpoint_local_sub = rospy.Subscriber(mavros.get_topic('setpoint_raw', 'target_local'),
mavros_msgs.msg.PositionTarget, _setpoint_position_callback)
# setup publisher
# /mavros/setpoint/position/local
setpoint_local_pub = mavros.setpoint.get_pub_position_local(queue_size=10)
velocity_pub = mavros.setpoint.get_pub_velocity_cmd_vel(queue_size=10)
# setup service
# /mavros/cmd/arming
set_arming = rospy.ServiceProxy('/mavros/cmd/arming', mavros_msgs.srv.CommandBool)
# /mavros/set_mode
set_mode = rospy.ServiceProxy('/mavros/set_mode', mavros_msgs.srv.SetMode)
setpoint_msg = mavros.setpoint.PoseStamped(
header=mavros.setpoint.Header(
frame_id="att_pose",
stamp=rospy.Time.now()),
)
velocity_msg = mavros.setpoint.TwistStamped(
header=mavros.setpoint.Header(frame_id="vel_pose",
stamp=rospy.Time.now()),
)
# wait for FCU connection
while (not UAV_state.connected):
rate.sleep()
# initialize the setpoint
setpoint_msg.pose.position.x = 0
setpoint_msg.pose.position.y = 0
setpoint_msg.pose.position.z = 3
mavros.command.arming(True)
# send 50 setpoints before starting
for i in range(0, 50):
setpoint_local_pub.publish(setpoint_msg)
rate.sleep()
set_mode(0, 'OFFBOARD')
last_request = rospy.Time.now()
# enter the main loop
while (True):
# print "Entered whiled loop"
if (UAV_state.mode != "OFFBOARD" and
(rospy.Time.now() - last_request > rospy.Duration(5.0))):
set_mode(0, 'OFFBOARD')
print("enabling offboard mode")
last_request = rospy.Time.now()
else:
if (not UAV_state.armed and
(rospy.Time.now() - last_request > rospy.Duration(5.0))):
if (mavros.command.arming(True)):
print("Vehicle armed")
last_request = rospy.Time.now()
setpoint_msg.pose.position.z = 3
if (rospy.Time.now()-last_request > rospy.Duration(5.0)):
velocity_msg.twist.linear.x = 10
velocity_pub.publish(velocity_msg)
print("Setting velocity set point")
else:
setpoint_local_pub.publish(setpoint_msg)
print("Height: %f" % setpoint_msg.pose.position.z)
rate.sleep()
return 0
if __name__ == '__main__':
main()
```
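The node above follows the usual PX4 offboard pattern: stream position setpoints before requesting OFFBOARD, arm, then keep publishing at a steady rate. Below is a condensed, hedged sketch of that same pattern using only the MAVROS helpers already imported in the file; it assumes a running MAVROS/SITL stack and omits the mode and arming retry logic of main().
```python
import rospy
import mavros
import mavros.command
import mavros.setpoint
import mavros_msgs.srv

rospy.init_node('offboard_sketch', anonymous=True)
mavros.set_namespace('/mavros')
rate = rospy.Rate(20)

setpoint_pub = mavros.setpoint.get_pub_position_local(queue_size=10)
set_mode = rospy.ServiceProxy('/mavros/set_mode', mavros_msgs.srv.SetMode)

setpoint = mavros.setpoint.PoseStamped()
setpoint.pose.position.z = 3

# PX4 only accepts OFFBOARD if it is already receiving setpoints, so stream some first.
for _ in range(50):
    setpoint_pub.publish(setpoint)
    rate.sleep()

mavros.command.arming(True)
set_mode(0, 'OFFBOARD')

while not rospy.is_shutdown():
    setpoint_pub.publish(setpoint)
    rate.sleep()
```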
#### File: pycopter/test/test_positions_start_services.py
```python
import rospy
import std_msgs.msg
# Local libraries
from pycopter.srv import DroneSwarmMultiArray
from pycopter.srv import DroneSwarmMultiArrayResponse
from pycopter.srv import PycopterStartStop
start = False
ndrones = 3
def handle_start_positions(req):
resp = DroneSwarmMultiArrayResponse()
if ndrones == 2:
resp.n_rows = 2
resp.data = [1, 2, 0, 1]
elif ndrones == 3:
resp.n_rows = 3
resp.data = [1, 2, 1, 3, 2, 3]
resp.param1 = 40 # seconds
resp.param2 = 50 # milliseconds
rospy.loginfo("Response sent back")
global start
start = True
return resp
def main():
# Instantiate the error_estimator node class and run it
rospy.Service('supervisor/pycopter', DroneSwarmMultiArray,
handle_start_positions)
rospy.loginfo("The service is ready")
while not start:
pass
rospy.wait_for_service("pycopter/start_stop")
try:
start_pycopter = rospy.ServiceProxy("pycopter/start_stop",
PycopterStartStop)
resp = start_pycopter(start=True, stop=False)
ack = resp.ack
except rospy.ServiceException as e:
print("Service call failed: {}".format(e))
return -1
if ack:
rospy.loginfo("Start command acknowledged")
else:
rospy.logwarn("Start command rejected")
return
if __name__ == "__main__":
rospy.init_node("test_positions_services")
main()
```
#### File: drone-swarm/kalman/animation.py
```python
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as pl
def draw3d(ax, xyz, R, quadcolor):
# We draw in ENU coordinates, R and xyz are in NED
ax.scatter(xyz[1], xyz[0], -xyz[2], color=quadcolor)
ax.quiver(xyz[1], xyz[0], -xyz[2], R[0, 1], R[0, 0], R[0, 2], pivot='tail', \
color='red')
ax.quiver(xyz[1], xyz[0], -xyz[2], R[1, 1], R[1, 0], R[1, 2], pivot='tail', \
color='green')
ax.quiver(xyz[1], xyz[0], -xyz[2], -R[2, 1], -R[2, 0], -R[2, 2], pivot='tail', \
color='blue')
def draw2d(fig, X, fc, quadcolor):
agents = fc.agents
m = fc.m
pl.figure(fig)
for i in range(0, agents):
if m == 2:
pl.plot(X[m*i], X[m*i+1], 'o'+quadcolor[i])
def draw_edges(fig, X, fc, n):
agents = fc.agents
edges = fc.edges
m = fc.m
B = fc.B
pl.figure(fig)
a, b = 0, 0
for i in range(0, edges):
for j in range(0, agents):
if B[j,i] == 1:
a = j
elif B[j,i] == -1:
b = j
if m == 2:
if i == n:
pl.plot([X[m*a], X[m*b]], [X[m*a+1], X[m*b+1]], 'r--', lw=2)
else:
pl.plot([X[m*a], X[m*b]], [X[m*a+1], X[m*b+1]], 'k--', lw=2)
``` |
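For reference, a hedged usage sketch of draw3d: it expects a NED position and an attitude matrix R and plots them in ENU. The import path 'animation' and the example inputs are assumptions made for illustration.
```python
import numpy as np
import matplotlib.pyplot as pl
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers the 3d projection)
from animation import draw3d             # assumes the module above is importable as 'animation'

xyz = np.array([1.0, 2.0, -3.0])  # NED position: north, east, down (example values)
R = np.eye(3)                     # identity attitude matrix (example)

fig = pl.figure()
ax = fig.add_subplot(111, projection='3d')
draw3d(ax, xyz, R, quadcolor='b')
pl.show()
```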
{
"source": "jlrandulfe/PyGCMSE",
"score": 3
} |
#### File: PyGCMSE/gcmse/gcmse.py
```python
import numpy as np
from scipy import ndimage
def GCMSE(ref_image, work_image, kappa=0.5, option=1):
"""GCMSE --- Gradient Conduction Mean Square Error.
Computation of the GCMSE. An image quality assessment measurement
for image filtering, focused on edge preservation evaluation.
Both input images are compared, returning a float number. The smaller
the GCMSE is, the more similar the images are. This metric is edge
preservation oriented, so differences in border regions contribute
more to the final result.
The borders are obtained from the reference image, and it only works
with images of the same scale, size and geometry. This metric is only
intended for assessing filtering, not other image processing
applications; i.e. it will NOT work for assessing the quality of
compression, contrast stretching, etc.
Parameters
---------
ref_image[]: Array of pixels. Pixel values 0 to 255.
Reference image. The border regions will be obtained from it.
This image is the ideal objective, and the filtered images must
be as much similar to it as possible.
work_image[]: Array of pixels. Pixel values 0 to 255.
Image that is compared to the reference one.
kappa: decimal number. Values 0 to 1
Conductance parameter. It increases the amount of the image
that is analyzed, as it defines how permissive the classification of
pixels into border regions is, and how large their contribution is.
option: integer. Values: 1 or 2
Select which of the Perona-Malik equations will be used.
Returns
-------
gcmse: float
Value of the GCMSE metric between the 2 provided images. It gets
smaller as the images are more similar.
weight: float
Amount of the image that has been taken into account.
"""
# Normalization of the images to [0,1] values.
ref_image_float = ref_image.astype('float32')
work_image_float = work_image.astype('float32')
normed_ref_image = ref_image_float / 255
normed_work_image = work_image_float / 255
# Initialization and calculation of south and east gradients arrays.
gradient_S = np.zeros_like(normed_ref_image)
gradient_E = gradient_S.copy()
gradient_S[:-1,: ] = np.diff(normed_ref_image, axis=0)
gradient_E[: ,:-1] = np.diff(normed_ref_image, axis=1)
# Image conduction is calculated using the Perona-Malik equations.
if option == 1:
cond_S = np.exp(-(gradient_S/kappa) ** 2)
cond_E = np.exp(-(gradient_E/kappa) ** 2)
elif option == 2:
cond_S = 1.0 / (1 + (gradient_S/kappa)**2)
cond_E = 1.0 / (1 + (gradient_E/kappa)**2)
# New conduction components are initialized to 1 in order to treat
# image corners as homogeneous regions
cond_N = np.ones_like(normed_ref_image)
cond_W = cond_N.copy()
# South and East arrays values are moved one position in order to
# obtain North and West values, respectively.
cond_N[1:, :] = cond_S[:-1, :]
cond_W[:, 1:] = cond_E[:, :-1]
# Conduction module is the mean of the 4 directional values.
conduction = (cond_N + cond_S + cond_W + cond_E) / 4
conduction = np.clip(conduction, 0., 1.)
G = 1 - conduction
# Calculation of the GCMSE value
num = ((G*(normed_ref_image - normed_work_image)) ** 2).sum()
gcmse = num * normed_ref_image.size / G.sum()
weight = G.sum() / G.size
return [gcmse, weight]
``` |
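A hedged usage sketch of the metric above on two synthetic 8-bit images; the module path 'gcmse' and the noise level are assumptions made for illustration.
```python
import numpy as np
from gcmse import GCMSE  # assumes the module above is importable as 'gcmse'

rng = np.random.RandomState(0)
reference = rng.randint(0, 256, size=(128, 128)).astype('uint8')
noise = rng.normal(0, 10, reference.shape)
noisy = np.clip(reference.astype('float32') + noise, 0, 255).astype('uint8')

score, weight = GCMSE(reference, noisy, kappa=0.5, option=1)
print("GCMSE: %.6f (edge-region weight: %.3f)" % (score, weight))
```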
{
"source": "JLRepo/CREAM",
"score": 2
} |
#### File: CREAM/utils/IoU.py
```python
import numpy as np
import xml.etree.ElementTree as ET
import torch
def get_gt_boxes(xmlfile):
'''get ground-truth bbox from VOC xml file'''
tree = ET.parse(xmlfile)
objs = tree.findall('object')
num_objs = len(objs)
gt_boxes = []
for obj in objs:
bbox = obj.find('bndbox')
x1 = float(bbox.find('xmin').text)-1
y1 = float(bbox.find('ymin').text)-1
x2 = float(bbox.find('xmax').text)-1
y2 = float(bbox.find('ymax').text)-1
gt_boxes.append((x1, y1, x2, y2))
return gt_boxes
def get_cls_gt_boxes(xmlfile, cls):
'''get ground-truth bbox from VOC xml file'''
tree = ET.parse(xmlfile)
objs = tree.findall('object')
num_objs = len(objs)
gt_boxes = []
for obj in objs:
bbox = obj.find('bndbox')
cls_name = obj.find('name').text
#print(cls_name, cls)
if cls_name != cls:
continue
x1 = float(bbox.find('xmin').text)-1
y1 = float(bbox.find('ymin').text)-1
x2 = float(bbox.find('xmax').text)-1
y2 = float(bbox.find('ymax').text)-1
gt_boxes.append((x1, y1, x2, y2))
if len(gt_boxes)==0:
pass
#print('%s bbox = 0'%cls)
return gt_boxes
def get_cls_and_gt_boxes(xmlfile, cls,class_to_idx):
'''get ground-truth bbox from VOC xml file'''
tree = ET.parse(xmlfile)
objs = tree.findall('object')
num_objs = len(objs)
gt_boxes = []
for obj in objs:
bbox = obj.find('bndbox')
cls_name = obj.find('name').text
#print(cls_name, cls)
if cls_name != cls:
continue
x1 = float(bbox.find('xmin').text)-1
y1 = float(bbox.find('ymin').text)-1
x2 = float(bbox.find('xmax').text)-1
y2 = float(bbox.find('ymax').text)-1
gt_boxes.append((class_to_idx[cls_name],[x1, y1, x2-x1, y2-y1]))
if len(gt_boxes)==0:
pass
#print('%s bbox = 0'%cls)
return gt_boxes
def convert_boxes(boxes):
''' convert the bbox to the format (x1, y1, x2, y2) where x1,y1<x2,y2'''
converted_boxes = []
for bbox in boxes:
(x1, y1, x2, y2) = bbox
converted_boxes.append((min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2)))
return converted_boxes
def IoU(a, b):
#print(a, b)
x1 = max(a[0], b[0])
y1 = max(a[1], b[1])
x2 = min(a[2], b[2])
y2 = min(a[3], b[3])
def compute_area(box):
dx = max(0, box[2]-box[0])
dy = max(0, box[3]-box[1])
dx = float(dx)
dy = float(dy)
return dx*dy
#print(x1, y1, x2, y2)
w = max(0, x2-x1+1)
h = max(0, y2-y1+1)
#inter = w*h
#aarea = (a[2]-a[0]+1)*(a[3]-a[1]+1)
#barea = (b[2]-b[0]+1)*(b[3]-b[1]+1)
inter = compute_area([x1, y1, x2, y2])
aarea = compute_area(a)
barea = compute_area(b)
#assert aarea+barea-inter>0
if aarea + barea - inter <=0:
print(a)
print(b)
o = inter / (aarea+barea-inter)
#if w<=0 or h<=0:
# o = 0
return o
def to_2d_tensor(inp):
inp = torch.Tensor(inp)
if len(inp.size()) < 2:
inp = inp.unsqueeze(0)
return inp
def xywh_to_x1y1x2y2(boxes):
boxes = to_2d_tensor(boxes)
boxes[:, 2] += boxes[:, 0] - 1
boxes[:, 3] += boxes[:, 1] - 1
return boxes
def x1y1x2y2_to_xywh(boxes):
boxes = to_2d_tensor(boxes)
boxes[:, 2] -= boxes[:, 0] - 1
boxes[:, 3] -= boxes[:, 1] - 1
return boxes
def compute_IoU(pred_box, gt_box):
boxes1 = to_2d_tensor(pred_box)
# boxes1 = xywh_to_x1y1x2y2(boxes1)
boxes1[:, 2] = torch.clamp(boxes1[:, 0] + boxes1[:, 2], 0, 1)
boxes1[:, 3] = torch.clamp(boxes1[:, 1] + boxes1[:, 3], 0, 1)
boxes2 = to_2d_tensor(gt_box)
boxes2[:, 2] = torch.clamp(boxes2[:, 0] + boxes2[:, 2], 0, 1)
boxes2[:, 3] = torch.clamp(boxes2[:, 1] + boxes2[:, 3], 0, 1)
# boxes2 = xywh_to_x1y1x2y2(boxes2)
intersec = boxes1.clone()
intersec[:, 0] = torch.max(boxes1[:, 0], boxes2[:, 0])
intersec[:, 1] = torch.max(boxes1[:, 1], boxes2[:, 1])
intersec[:, 2] = torch.min(boxes1[:, 2], boxes2[:, 2])
intersec[:, 3] = torch.min(boxes1[:, 3], boxes2[:, 3])
def compute_area(boxes):
# in (x1, y1, x2, y2) format
dx = boxes[:, 2] - boxes[:, 0]
dx[dx < 0] = 0
dy = boxes[:, 3] - boxes[:, 1]
dy[dy < 0] = 0
return dx * dy
a1 = compute_area(boxes1)
a2 = compute_area(boxes2)
ia = compute_area(intersec)
assert ((a1 + a2 - ia < 0).sum() == 0)
return ia / (a1 + a2 - ia)
``` |
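A hedged usage sketch of the two IoU helpers above; the import path 'utils.IoU' mirrors the file header and the box values are illustrative.
```python
from utils.IoU import IoU, compute_IoU  # assumes the file above is importable as utils.IoU

# IoU() takes pixel-coordinate boxes in (x1, y1, x2, y2) form.
box_a = (10, 10, 50, 50)
box_b = (30, 30, 70, 70)
print(IoU(box_a, box_b))          # roughly 0.14 for these boxes

# compute_IoU() takes normalised (x, y, w, h) boxes in [0, 1] and returns a torch tensor.
pred = [0.1, 0.1, 0.4, 0.4]
gt = [0.2, 0.2, 0.4, 0.4]
print(compute_IoU(pred, gt))
```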
{
"source": "jlrgraham23/okta-sdk-python",
"score": 2
} |
#### File: tests/unit/test_applications_ut.py
```python
import aiohttp
import asyncio
import json
import pytest
import okta.models as models
from okta.client import Client as OktaClient
@pytest.mark.asyncio
async def test_set_provisioning_connection(monkeypatch, mocker):
org_url = "https://test.okta.com"
token = "TOKEN"
config = {'orgUrl': org_url, 'token': token}
client = OktaClient(config)
# mock http requests so the outgoing request can be inspected
class MockHTTPRequest():
def __call__(self, **params):
self.request_info = params
self.headers = params['headers']
self.url = params['url']
self.content_type = 'application/json'
self.links = ''
self.text = MockHTTPRequest.mock_response_text
self.status = 200
return self
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
pass
@staticmethod
async def mock_response_text():
return '[{"text": "mock response text"}]'
mock_http_request = MockHTTPRequest()
monkeypatch.setattr(aiohttp.ClientSession, 'request', mock_http_request)
profile = models.ProvisioningConnectionProfile(
{
'authScheme': models.ProvisioningConnectionAuthScheme('TOKEN'),
'token': 'TEST'
}
)
provisioning_conn_req = models.ProvisioningConnectionRequest({'profile': profile})
_ = await client.set_default_provisioning_connection_for_application('test_app_id', provisioning_conn_req, query_params={'activate': True})
assert mock_http_request.request_info['url'].endswith('/apps/test_app_id/connections/default/?activate=True')
data = mock_http_request.request_info['data']
assert json.loads(data) == {"profile": {"authScheme": "TOKEN", "token": "TEST"}}
@pytest.mark.asyncio
async def test_list_features_for_application(monkeypatch, mocker):
mocked_response = """[
{
"name": "USER_PROVISIONING",
"status": "ENABLED",
"description": "User provisioning settings from Okta to a downstream application",
"capabilities": {
"create": {
"lifecycleCreate": {
"status": "DISABLED"
}
},
"update": {
"profile": {
"status": "DISABLED"
},
"lifecycleDeactivate": {
"status": "DISABLED"
},
"password": {
"status": "DISABLED",
"seed": "RANDOM",
"change": "KEEP_EXISTING"
}
}
},
"_links": {
"self": {
"href": "https://${yourOktaDomain}/api/v1/apps/${applicationId}/features/USER_PROVISIONING",
"hints": {
"allow": [
"GET",
"PUT"
]
}
}
}
}
]"""
org_url = "https://test.okta.com"
token = "TOKEN"
config = {'orgUrl': org_url, 'token': token}
client = OktaClient(config)
# mock http requests to return the canned response above
class MockHTTPRequest():
def __call__(self, **params):
self.request_info = params
self.headers = params['headers']
self.url = params['url']
self.content_type = 'application/json'
self.links = ''
self.text = MockHTTPRequest.mock_response_text
self.status = 200
return self
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
pass
@staticmethod
async def mock_response_text():
return mocked_response
mock_http_request = MockHTTPRequest()
monkeypatch.setattr(aiohttp.ClientSession, 'request', mock_http_request)
features, _, err = await client.list_features_for_application('test_app_id')
assert isinstance(features[0], models.ApplicationFeature)
assert features[0].name == 'USER_PROVISIONING'
assert isinstance(features[0].capabilities, models.CapabilitiesObject)
assert isinstance(features[0].capabilities.create, models.CapabilitiesCreateObject)
assert isinstance(features[0].capabilities.update, models.CapabilitiesUpdateObject)
assert isinstance(features[0].capabilities.update, models.CapabilitiesUpdateObject)
assert isinstance(features[0].capabilities.update.password, models.PasswordSettingObject)
assert isinstance(features[0].capabilities.update.password.change, models.ChangeEnum)
assert isinstance(features[0].status, models.EnabledStatus)
@pytest.mark.asyncio
async def test_get_feature_for_application(monkeypatch, mocker):
mocked_response = """{
"name": "USER_PROVISIONING",
"status": "ENABLED",
"description": "User provisioning settings from Okta to a downstream application",
"capabilities": {
"create": {
"lifecycleCreate": {
"status": "DISABLED"
}
},
"update": {
"profile": {
"status": "DISABLED"
},
"lifecycleDeactivate": {
"status": "DISABLED"
},
"password": {
"status": "DISABLED",
"seed": "RANDOM",
"change": "KEEP_EXISTING"
}
}
},
"_links": {
"self": {
"href": "https://${yourOktaDomain}/api/v1/apps/${applicationId}/features/USER_PROVISIONING",
"hints": {
"allow": [
"GET",
"PUT"
]
}
}
}
}"""
org_url = "https://test.okta.com"
token = "TOKEN"
config = {'orgUrl': org_url, 'token': token}
client = OktaClient(config)
# mock http requests, verify if custom header is present in request
class MockHTTPRequest():
def __call__(self, **params):
self.request_info = params
self.headers = params['headers']
self.url = params['url']
self.content_type = 'application/json'
self.links = ''
self.text = MockHTTPRequest.mock_response_text
self.status = 200
return self
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
pass
@staticmethod
async def mock_response_text():
return mocked_response
mock_http_request = MockHTTPRequest()
monkeypatch.setattr(aiohttp.ClientSession, 'request', mock_http_request)
feature, _, err = await client.get_feature_for_application('test_app_id', 'USER_PROVISIONING')
assert isinstance(feature, models.ApplicationFeature)
assert feature.name == 'USER_PROVISIONING'
assert isinstance(feature.capabilities, models.CapabilitiesObject)
assert isinstance(feature.capabilities.create, models.CapabilitiesCreateObject)
assert isinstance(feature.capabilities.update, models.CapabilitiesUpdateObject)
assert isinstance(feature.capabilities.update, models.CapabilitiesUpdateObject)
assert isinstance(feature.capabilities.update.password, models.PasswordSettingObject)
assert isinstance(feature.capabilities.update.password.change, models.ChangeEnum)
assert isinstance(feature.status, models.EnabledStatus)
@pytest.mark.asyncio
async def test_update_feature_for_application(monkeypatch, mocker):
org_url = "https://test.okta.com"
token = "TOKEN"
config = {'orgUrl': org_url, 'token': token}
client = OktaClient(config)
# mock http requests so the outgoing request can be inspected
class MockHTTPRequest():
def __call__(self, **params):
self.request_info = params
self.headers = params['headers']
self.url = params['url']
self.content_type = 'application/json'
self.links = ''
self.text = MockHTTPRequest.mock_response_text
self.status = 200
return self
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
pass
@staticmethod
async def mock_response_text():
return '[{"text": "mock response text"}]'
mock_http_request = MockHTTPRequest()
monkeypatch.setattr(aiohttp.ClientSession, 'request', mock_http_request)
capabilities_object_dict = {
"create": {
"lifecycleCreate": {
"status": "ENABLED"
}
},
"update": {
"lifecycleDeactivate": {
"status": "ENABLED"
},
"profile":{
"status": "ENABLED"
},
"password":{
"status": "ENABLED",
"seed": "RANDOM",
"change": "CHANGE"
}
}
}
capabilities_object = models.CapabilitiesObject(capabilities_object_dict)
feature, _, err = await client.update_feature_for_application('test_app_id', 'test_name', capabilities_object)
assert mock_http_request.request_info['url'].endswith('/apps/test_app_id/features/test_name')
data = mock_http_request.request_info['data']
assert json.loads(data) == capabilities_object_dict
@pytest.mark.asyncio
async def test_upload_application_logo(monkeypatch, mocker):
org_url = "https://test.okta.com"
token = "TOKEN"
config = {'orgUrl': org_url, 'token': token}
client = OktaClient(config)
# mock http requests so the outgoing request can be inspected
class MockHTTPRequest():
def __call__(self, **params):
self.request_info = params
self.headers = params['headers']
self.url = params['url']
self.content_type = 'application/json'
self.links = ''
self.text = MockHTTPRequest.mock_response_text
self.status = 200
return self
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
pass
@staticmethod
async def mock_response_text():
return '[{"text": "mock response text"}]'
mock_http_request = MockHTTPRequest()
monkeypatch.setattr(aiohttp.ClientSession, 'request', mock_http_request)
logo = mocker.Mock()
_, err = await client.upload_application_logo('test_app_id', logo)
assert mock_http_request.request_info['url'].endswith('/apps/test_app_id/logo')
data = mock_http_request.request_info['data']
assert data == {'file': logo}
```
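Outside a test, the provisioning-connection call exercised above would look roughly like the sketch below; the org URL, tokens and app ID are placeholders, not working values.
```python
import asyncio
import okta.models as models
from okta.client import Client as OktaClient

async def enable_provisioning(app_id):
    # Placeholder credentials; mirrors the request built in test_set_provisioning_connection.
    client = OktaClient({'orgUrl': 'https://example.okta.com', 'token': 'API_TOKEN'})
    profile = models.ProvisioningConnectionProfile({
        'authScheme': models.ProvisioningConnectionAuthScheme('TOKEN'),
        'token': 'PROVISIONING_TOKEN'
    })
    request = models.ProvisioningConnectionRequest({'profile': profile})
    return await client.set_default_provisioning_connection_for_application(
        app_id, request, query_params={'activate': True})

# asyncio.run(enable_provisioning('your_app_id'))
```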
#### File: tests/unit/test_client.py
```python
import aiohttp
import asyncio
import logging
from aiohttp.client_reqrep import ConnectionKey
from ssl import SSLCertVerificationError
from okta.client import Client as OktaClient
import pytest
from okta.constants import FINDING_OKTA_DOMAIN
import yaml
import os
from okta.error_messages import ERROR_MESSAGE_API_TOKEN_DEFAULT, \
ERROR_MESSAGE_API_TOKEN_MISSING, ERROR_MESSAGE_AUTH_MODE_INVALID, \
ERROR_MESSAGE_CLIENT_ID_DEFAULT, ERROR_MESSAGE_CLIENT_ID_MISSING,\
ERROR_MESSAGE_ORG_URL_ADMIN, ERROR_MESSAGE_ORG_URL_MISSING, \
ERROR_MESSAGE_ORG_URL_NOT_HTTPS, ERROR_MESSAGE_ORG_URL_TYPO, \
ERROR_MESSAGE_ORG_URL_YOUROKTADOMAIN, ERROR_MESSAGE_SCOPES_PK_MISSING, \
ERROR_MESSAGE_PROXY_MISSING_HOST, ERROR_MESSAGE_PROXY_MISSING_AUTH, \
ERROR_MESSAGE_PROXY_INVALID_PORT
from okta.constants import _GLOBAL_YAML_PATH, _LOCAL_YAML_PATH
from okta.exceptions import HTTPException
from okta.http_client import HTTPClient
"""
Testing Okta Client Instantiation in different scenarios
"""
def test_constructor_user_config_empty(fs):
config = {}
with pytest.raises(ValueError) as exception_info:
OktaClient(user_config=config)
assert ERROR_MESSAGE_ORG_URL_MISSING in str(exception_info.value)
assert ERROR_MESSAGE_API_TOKEN_MISSING in str(exception_info.value)
def test_constructor_user_config_url_empty():
config = {'orgUrl': '', 'token': 'TOKEN'}
with pytest.raises(ValueError) as exception_info:
OktaClient(user_config=config)
assert ERROR_MESSAGE_ORG_URL_MISSING in str(exception_info.value)
def test_constructor_user_config_url_not_https():
config = {'orgUrl': 'http://test.okta.com', 'token': 'TOKEN'}
with pytest.raises(ValueError) as exception_info:
OktaClient(user_config=config)
assert ERROR_MESSAGE_ORG_URL_NOT_HTTPS in str(exception_info.value)
assert FINDING_OKTA_DOMAIN in str(exception_info.value)
def test_constructor_user_config_url_has_yourOktaDomain():
config = {
'orgUrl': 'https://{yourOktaDomain}.okta.com', 'token': 'TOKEN'
}
with pytest.raises(ValueError) as exception_info:
OktaClient(user_config=config)
assert ERROR_MESSAGE_ORG_URL_YOUROKTADOMAIN in str(exception_info.value)
@ pytest.mark.parametrize("url", ["https://dev-admin.okta.com",
"https://dev-admin.oktapreview.com",
"https://dev-admin.okta-emea.com",
"https://test-admin.okta.com"])
def test_constructor_user_config_url_has_admin(url):
config = {
'orgUrl': url, 'token': 'TOKEN'
}
with pytest.raises(ValueError) as exception_info:
OktaClient(user_config=config)
assert all(string in str(exception_info.value) for string in [
ERROR_MESSAGE_ORG_URL_ADMIN, f"Current value: {url}"])
def test_constructor_user_config_url_dot_com_twice():
url = 'https://test.okta.com.com'
config = {
'orgUrl': url, 'token': 'TOKEN'
}
with pytest.raises(ValueError) as exception_info:
OktaClient(user_config=config)
assert all(string in str(exception_info.value) for string in [
ERROR_MESSAGE_ORG_URL_TYPO, f"Current value: {url}"])
def test_constructor_user_config_url_punctuation():
# test for urls with '://' multiple times
url = 'https://://test.okta.com'
config = {
'orgUrl': url, 'token': 'TOKEN'
}
with pytest.raises(ValueError) as exception_info:
OktaClient(user_config=config)
assert all(string in str(exception_info.value) for string in [
ERROR_MESSAGE_ORG_URL_TYPO, f"Current value: {url}"])
def test_constructor_user_config_token_empty(fs):
config = {'orgUrl': 'https://test.okta.com', 'token': ''}
with pytest.raises(ValueError) as exception_info:
OktaClient(user_config=config)
assert ERROR_MESSAGE_API_TOKEN_MISSING in str(exception_info.value)
def test_constructor_user_config_url_has_apiToken(fs):
config = {
'orgUrl': 'https://test.okta.com', 'token': '{apiToken}'
}
with pytest.raises(ValueError) as exception_info:
OktaClient(user_config=config)
assert ERROR_MESSAGE_API_TOKEN_DEFAULT in str(exception_info.value)
def test_constructor_user_config_auth_mode_invalid():
authorizationMode = "blah"
config = {'orgUrl': "https://test.okta.com",
'token': "TOKEN",
'authorizationMode': authorizationMode}
with pytest.raises(ValueError) as exception_info:
OktaClient(user_config=config)
assert all(string in str(exception_info.value) for string in [
ERROR_MESSAGE_AUTH_MODE_INVALID, f"with {authorizationMode}"])
def test_constructor_user_config_SSWS():
org_url = "https://test.okta.com"
token = "TOKEN"
config = {'orgUrl': org_url, 'token': token}
client = OktaClient(user_config=config)
loaded_config = client.get_config()
assert org_url == loaded_config['client']['orgUrl']
assert token == loaded_config['client']['token']
assert 'SSWS' == loaded_config['client']['authorizationMode']
def test_constructor_user_config_Bearer():
authorizationMode = "Bearer"
org_url = "https://test.okta.com"
token = "TOKEN"
config = {'orgUrl': org_url,
'token': token,
'authorizationMode': authorizationMode}
client = OktaClient(user_config=config)
loaded_config = client.get_config()
assert org_url == loaded_config['client']['orgUrl']
assert token == loaded_config['client']['token']
assert authorizationMode == loaded_config['client']['authorizationMode']
@ pytest.mark.parametrize("private_key", ["private key hash",
"pem_file.pem",
"{'Jwks'}"])
def test_constructor_user_config_PK(private_key):
org_url = "https://test.okta.com"
authorizationMode = "PrivateKey"
client_id = "clientID"
scopes = ["scope1"]
config = {
'orgUrl': org_url,
'authorizationMode': authorizationMode,
'clientId': client_id,
'scopes': scopes,
'privateKey': private_key
}
client = OktaClient(user_config=config)
loaded_config = client.get_config()
assert org_url == loaded_config['client']['orgUrl']
assert authorizationMode == loaded_config['client']['authorizationMode']
assert client_id == loaded_config['client']['clientId']
assert scopes == loaded_config['client']['scopes']
assert private_key == loaded_config['client']['privateKey']
def test_constructor_user_config_PK_empty(fs):
org_url = "https://test.okta.com"
authorizationMode = "PrivateKey"
config = {
'orgUrl': org_url,
'authorizationMode': authorizationMode,
}
with pytest.raises(ValueError) as exception_info:
OktaClient(user_config=config)
assert all(string in str(exception_info.value) for string in [
ERROR_MESSAGE_CLIENT_ID_MISSING, ERROR_MESSAGE_SCOPES_PK_MISSING
])
def test_constructor_user_config_PK_client_id_empty():
org_url = "https://test.okta.com"
authorizationMode = "PrivateKey"
scopes = ["scope1"]
private_key_hash = "private key hash"
config = {
'orgUrl': org_url,
'authorizationMode': authorizationMode,
'clientId': "",
'scopes': scopes,
'privateKey': private_key_hash
}
with pytest.raises(ValueError) as exception_info:
OktaClient(user_config=config)
assert all(string in str(exception_info.value) for string in [
ERROR_MESSAGE_CLIENT_ID_MISSING
])
def test_constructor_user_config_PK_client_id_default():
org_url = "https://test.okta.com"
authorizationMode = "PrivateKey"
scopes = ["scope1"]
private_key_hash = "private key hash"
config = {
'orgUrl': org_url,
'authorizationMode': authorizationMode,
'clientId': "{clientId}",
'scopes': scopes,
'privateKey': private_key_hash
}
with pytest.raises(ValueError) as exception_info:
OktaClient(user_config=config)
assert all(string in str(exception_info.value) for string in [
ERROR_MESSAGE_CLIENT_ID_DEFAULT
])
@ pytest.mark.parametrize("scopes,private_key", [([], "private key hash"),
(["scope1"], ""),
([], "")])
def test_constructor_user_config_PK_scopes_and_or_private_key_empty(
scopes,
private_key):
org_url = "https://test.okta.com"
authorizationMode = "PrivateKey"
client_id = "clientID"
config = {
'orgUrl': org_url,
'authorizationMode': authorizationMode,
'clientId': client_id,
'scopes': scopes,
'privateKey': private_key
}
with pytest.raises(ValueError) as exception_info:
OktaClient(user_config=config)
assert all(string in str(exception_info.value) for string in [
ERROR_MESSAGE_SCOPES_PK_MISSING
])
"""
Testing constructor with YAML configurations
"""
def test_constructor_global_config_SSWS(fs):
fs.pause()
global_sample = os.path.join(os.path.dirname(
__file__), "files", "SSWS-sample-global.yaml")
with open(global_sample) as file:
global_config = yaml.load(file, Loader=yaml.SafeLoader)
org_url = global_config["okta"]["client"]["orgUrl"]
token = global_config["okta"]["client"]["token"]
fs.resume()
fs.create_file(_GLOBAL_YAML_PATH, contents=yaml.dump(global_config))
client = OktaClient()
loaded_config = client.get_config()
assert org_url == loaded_config['client']['orgUrl']
assert token == loaded_config['client']['token']
def test_constructor_local_config_SSWS(fs):
fs.pause()
local_sample = os.path.join(os.path.dirname(
__file__), "files", "SSWS-sample-local.yaml")
with open(local_sample) as file:
local_config = yaml.load(file, Loader=yaml.SafeLoader)
org_url = local_config["okta"]["client"]["orgUrl"]
token = local_config["okta"]["client"]["token"]
fs.resume()
fs.create_file(_LOCAL_YAML_PATH, contents=yaml.dump(local_config))
client = OktaClient()
loaded_config = client.get_config()
assert org_url == loaded_config['client']['orgUrl']
assert token == loaded_config['client']['token']
def test_constructor_global_config_PK(fs):
fs.pause()
global_sample = os.path.join(os.path.dirname(
__file__), "files", "PK-sample-global.yaml")
with open(global_sample) as file:
global_config = yaml.load(file, Loader=yaml.SafeLoader)
org_url = global_config["okta"]["client"]["orgUrl"]
client_id = global_config["okta"]["client"]["clientId"]
private_key = global_config["okta"]["client"]["privateKey"]
fs.resume()
fs.create_file(_GLOBAL_YAML_PATH, contents=yaml.dump(global_config))
client = OktaClient()
loaded_config = client.get_config()
assert org_url == loaded_config['client']['orgUrl']
assert client_id == loaded_config['client']['clientId']
assert private_key == loaded_config['client']['privateKey']
def test_constructor_local_config_PK(fs):
fs.pause()
local_sample = os.path.join(os.path.dirname(
__file__), "files", "PK-sample-local.yaml")
with open(local_sample) as file:
local_config = yaml.load(file, Loader=yaml.SafeLoader)
org_url = local_config["okta"]["client"]["orgUrl"]
client_id = local_config["okta"]["client"]["clientId"]
private_key = local_config["okta"]["client"]["privateKey"]
fs.resume()
fs.create_file(_LOCAL_YAML_PATH, contents=yaml.dump(local_config))
client = OktaClient()
loaded_config = client.get_config()
assert org_url == loaded_config['client']['orgUrl']
assert client_id == loaded_config['client']['clientId']
assert private_key == loaded_config['client']['privateKey']
def test_constructor_env_vars_SSWS():
org_url = "https://test.okta.com"
token = "TOKEN"
os.environ["OKTA_CLIENT_ORGURL"] = org_url
os.environ["OKTA_CLIENT_TOKEN"] = token
client = OktaClient()
loaded_config = client.get_config()
os.environ.pop("OKTA_CLIENT_ORGURL")
os.environ.pop("OKTA_CLIENT_TOKEN")
assert org_url == loaded_config['client']['orgUrl']
assert token == loaded_config['client']['token']
def test_constructor_env_vars_PK():
authorizationMode = "PrivateKey"
org_url = "https://test.okta.com"
client_id = "clientID"
scopes = "scope1,scope2,scope3"
private_key = "private key"
os.environ["OKTA_CLIENT_AUTHORIZATIONMODE"] = authorizationMode
os.environ["OKTA_CLIENT_ORGURL"] = org_url
os.environ["OKTA_CLIENT_CLIENTID"] = client_id
os.environ["OKTA_CLIENT_SCOPES"] = scopes
os.environ["OKTA_CLIENT_PRIVATEKEY"] = private_key
client = OktaClient()
loaded_config = client.get_config()
os.environ.pop("OKTA_CLIENT_ORGURL")
os.environ.pop("OKTA_CLIENT_AUTHORIZATIONMODE")
os.environ.pop("OKTA_CLIENT_CLIENTID")
os.environ.pop("OKTA_CLIENT_SCOPES")
os.environ.pop("OKTA_CLIENT_PRIVATEKEY")
assert authorizationMode == loaded_config['client']['authorizationMode']
assert org_url == loaded_config['client']['orgUrl']
assert client_id == loaded_config['client']['clientId']
assert scopes.split(',') == loaded_config['client']['scopes']
assert private_key == loaded_config['client']['privateKey']
def test_constructor_precedence_highest_rank_local_yaml(fs):
# Setup Global config
fs.pause()
global_sample = os.path.join(os.path.dirname(
__file__), "files", "SSWS-sample-global.yaml")
with open(global_sample) as file:
global_config = yaml.load(file, Loader=yaml.SafeLoader)
global_org_url = global_config["okta"]["client"]["orgUrl"]
global_token = global_config["okta"]["client"]["token"]
fs.resume()
fs.create_file(_GLOBAL_YAML_PATH, contents=yaml.dump(global_config))
# Setup Local config
fs.pause()
local_sample = os.path.join(os.path.dirname(
__file__), "files", "SSWS-sample-local.yaml")
with open(local_sample) as file:
local_config = yaml.load(file, Loader=yaml.SafeLoader)
local_org_url = local_config["okta"]["client"]["orgUrl"]
local_token = local_config["okta"]["client"]["token"]
fs.resume()
fs.create_file(_LOCAL_YAML_PATH, contents=yaml.dump(local_config))
# Create client and validate values
client = OktaClient()
loaded_config = client.get_config()
assert local_org_url == loaded_config['client']['orgUrl']
assert local_token == loaded_config['client']['token']
assert local_org_url != global_org_url
assert local_token != global_token
assert global_org_url != loaded_config['client']['orgUrl']
assert global_token != loaded_config['client']['token']
def test_constructor_precedence_highest_rank_env_vars(fs):
# Setup Local config
fs.pause()
local_sample = os.path.join(os.path.dirname(
__file__), "files", "SSWS-sample-local.yaml")
with open(local_sample) as file:
local_config = yaml.load(file, Loader=yaml.SafeLoader)
local_org_url = local_config["okta"]["client"]["orgUrl"]
local_token = local_config["okta"]["client"]["token"]
fs.resume()
fs.create_file(_LOCAL_YAML_PATH, contents=yaml.dump(local_config))
# Setup env. vars
env_org_url = "https://test.env.okta.com"
env_token = "envTOKEN"
os.environ["OKTA_CLIENT_ORGURL"] = env_org_url
os.environ["OKTA_CLIENT_TOKEN"] = env_token
client = OktaClient()
loaded_config = client.get_config()
os.environ.pop("OKTA_CLIENT_ORGURL")
os.environ.pop("OKTA_CLIENT_TOKEN")
assert local_org_url != loaded_config['client']['orgUrl']
assert local_token != loaded_config['client']['token']
assert local_org_url != env_org_url
assert local_token != env_token
assert env_org_url == loaded_config['client']['orgUrl']
assert env_token == loaded_config['client']['token']
def test_constructor_precedence_highest_rank_user_config():
# Setup env. vars
env_org_url = "https://test.env.okta.com"
env_token = "envTOKEN"
os.environ["OKTA_CLIENT_ORGURL"] = env_org_url
os.environ["OKTA_CLIENT_TOKEN"] = env_token
# Setup user config
user_org_url = "https://test.user.okta.com"
user_token = "userTOKEN"
config = {'orgUrl': user_org_url, 'token': user_token}
client = OktaClient(config)
loaded_config = client.get_config()
os.environ.pop("OKTA_CLIENT_ORGURL")
os.environ.pop("OKTA_CLIENT_TOKEN")
assert user_org_url == loaded_config['client']['orgUrl']
assert user_token == loaded_config['client']['token']
assert user_org_url != env_org_url
assert user_token != env_token
assert env_org_url != loaded_config['client']['orgUrl']
assert env_token != loaded_config['client']['token']
def test_constructor_valid_proxy():
org_url = "https://test.okta.com"
token = "TOKEN"
port = 8080
host = "test.okta.com"
username = "username"
password = "password"
config = {
'orgUrl': org_url,
'token': token,
'proxy': {
'port': port,
'host': host,
'username': username,
'password': password
}
}
# Ensure no error is raised and correct proxy is determined
client = OktaClient(user_config=config)
assert client.get_request_executor(
)._http_client._proxy == f"http://{username}:{password}@{host}:{port}/"
def test_constructor_valid_no_proxy():
org_url = "https://test.okta.com"
token = "TOKEN"
config = {
'orgUrl': org_url,
'token': token
}
# Ensure no error is raised and proxy is None
client = OktaClient(user_config=config)
assert client.get_request_executor(
)._http_client._proxy is None
def test_constructor_valid_env_vars():
org_url = "https://test.okta.com"
token = "TOKEN"
config = {
'orgUrl': org_url,
'token': token
}
# Setting up env vars
os.environ["HTTP_PROXY"] = "http://user:[email protected]:8080"
os.environ["HTTPS_PROXY"] = "https://user:[email protected]:8080"
expected = os.environ["HTTPS_PROXY"]
client = OktaClient(user_config=config)
# Deleting env vars
del os.environ['HTTP_PROXY']
del os.environ['HTTPS_PROXY']
# Ensure no error is raised and proxy is None
assert client.get_request_executor(
)._http_client._proxy == expected
def test_constructor_invalid_missing_host():
org_url = "https://test.okta.com"
token = "TOKEN"
port = 8080
username = "username"
password = "password"
config = {
'orgUrl': org_url,
'token': token,
'proxy': {
'port': port,
'username': username,
'password': password
}
}
# Expect error with config
with pytest.raises(ValueError) as exception_info:
OktaClient(user_config=config)
assert ERROR_MESSAGE_PROXY_MISSING_HOST in str(exception_info.value)
@pytest.mark.parametrize("username,password", [("", "password"),
("username", "")])
def test_constructor_invalid_missing_username_or_password(username, password):
org_url = "https://test.okta.com"
token = "TOKEN"
port = 8080
host = "test.okta.com"
config = {
'orgUrl': org_url,
'token': token,
'proxy': {
'port': port,
'host': host,
'username': username,
'password': password
}
}
# Expect error with config
with pytest.raises(ValueError) as exception_info:
OktaClient(user_config=config)
assert ERROR_MESSAGE_PROXY_MISSING_AUTH in str(exception_info.value)
@pytest.mark.parametrize("port", [-1, 0, 65536, "port"])
def test_constructor_invalid_port_number(port):
org_url = "https://test.okta.com"
token = "TOKEN"
host = "test.okta.com"
username = "username"
password = "password"
config = {
'orgUrl': org_url,
'token': token,
'proxy': {
'port': port,
'host': host,
'username': username,
'password': password
}
}
# Expect error with config
with pytest.raises(ValueError) as exception_info:
OktaClient(user_config=config)
assert ERROR_MESSAGE_PROXY_INVALID_PORT in str(exception_info.value)
def test_constructor_custom_http_client_impl():
class CustomHTTPClient(HTTPClient):
pass
org_url = "https://test.okta.com"
token = "TOKEN"
config = {'orgUrl': org_url,
'token': token,
'httpClient': CustomHTTPClient}
client = OktaClient(config)
assert isinstance(client._request_executor._http_client, CustomHTTPClient)
def test_constructor_client_logging():
logger = logging.getLogger('okta-sdk-python')
assert logger.disabled
org_url = "https://test.okta.com"
token = "TOKEN"
config = {'orgUrl': org_url,
'token': token,
'logging': {"enabled": True, "logLevel": logging.DEBUG}}
client = OktaClient(config)
assert not logger.disabled
assert logger.level == logging.DEBUG
def test_client_raise_exception():
org_url = "https://test.okta.com"
token = "TOKEN"
config = {'orgUrl': org_url, 'token': token, 'raiseException': True}
client = OktaClient(config)
with pytest.raises(HTTPException):
asyncio.run(client.list_users())
def test_client_custom_headers(monkeypatch, mocker):
org_url = "https://test.okta.com"
token = "TOKEN"
config = {'orgUrl': org_url, 'token': token}
custom_headers = {'Header-Test-1': 'test value 1',
'Header-Test-2': 'test value 2'}
client = OktaClient(config)
# verify custom headers are set
client.set_custom_headers(custom_headers)
assert client.get_custom_headers() == custom_headers
# mock http requests, verify if custom header is present in request
class MockHTTPRequest():
def __call__(self, **params):
self.request_info = params
self.headers = params['headers']
self.url = params['url']
self.content_type = 'application/json'
self.links = ''
self.text = MockHTTPRequest.mock_response_text
self.status = 200
return self
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
pass
@staticmethod
async def mock_response_text():
return '[{"text": "mock response text"}]'
mock_http_request = MockHTTPRequest()
monkeypatch.setattr(aiohttp.ClientSession, 'request', mock_http_request)
asyncio.run(client.list_users())
assert 'Header-Test-1' in mock_http_request.headers
assert 'Header-Test-2' in mock_http_request.headers
# verify custom headers are cleared
client.clear_custom_headers()
assert client.get_custom_headers() == {}
def test_client_handle_aiohttp_error(monkeypatch, mocker):
org_url = "https://test.okta.com"
token = "TOKEN"
config = {'orgUrl': org_url, 'token': token}
client = OktaClient(config)
class MockHTTPRequest():
def __call__(self, **params):
raise aiohttp.ClientConnectorCertificateError(
ConnectionKey(host=org_url,
port=443,
is_ssl=True,
ssl=None,
proxy=None,
proxy_auth=None,
proxy_headers_hash=None),
SSLCertVerificationError(1,
'[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: '
'unable to get local issuer certificate (_ssl.c:1123)'))
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
pass
@staticmethod
async def mock_response_text():
return '[{"text": "mock response text"}]'
mock_http_request = MockHTTPRequest()
monkeypatch.setattr(aiohttp.ClientSession, 'request', mock_http_request)
res, resp_body, error = asyncio.run(client.list_users())
assert res is None
assert resp_body is None
assert isinstance(error, aiohttp.ClientError)
def test_client_log_debug(monkeypatch, caplog):
org_url = "https://test.okta.com"
token = "TOKEN"
config = {'orgUrl': org_url, 'token': token,
'logging': {'enabled': True, 'logLevel': logging.DEBUG}}
client = OktaClient(config)
class MockHTTPRequest():
def __call__(self, **params):
self.request_info = params
self.headers = params['headers']
self.url = params['url']
self.content_type = 'application/json'
self.links = ''
self.text = MockHTTPRequest.mock_response_text
self.status = 200
return self
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
pass
@staticmethod
async def mock_response_text():
return '[{"embedded": null,' \
'"links": {"self": {"href": "https://test.okta.com/v1/users/test_id"}},' \
'"activated": "2021-01-01T00:00:00.000Z",' \
'"created": "2021-01-01T00:00:00.000Z",' \
'"credentials": null,' \
'"id": "test_id",' \
'"last_login": null,' \
'"profile": {"name": "test_name"},' \
'"status": null,' \
'"status_changed": null,' \
'"transitioning_to_status": null,' \
'"type": null}]'
mock_http_request = MockHTTPRequest()
monkeypatch.setattr(aiohttp.ClientSession, 'request', mock_http_request)
with caplog.at_level(logging.DEBUG):
res, resp_body, error = asyncio.run(client.list_users())
assert 'okta-sdk-python' in caplog.text
assert 'DEBUG' in caplog.text
assert "'method': 'GET'" in caplog.text
assert "'url': 'https://test.okta.com/api/v1/users'" in caplog.text
def test_client_log_info(monkeypatch, caplog):
org_url = "https://test.okta.com"
token = "TOKEN"
config = {'orgUrl': org_url, 'token': token,
'logging': {'enabled': True, 'logLevel': logging.INFO}}
client = OktaClient(config)
class MockHTTPRequest():
def __call__(self, **params):
self.request_info = params
self.headers = params['headers']
self.url = params['url']
self.content_type = 'application/json'
self.links = ''
self.text = MockHTTPRequest.mock_response_text
self.status = 200
return self
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
pass
@staticmethod
async def mock_response_text():
return '[{"embedded": null,' \
'"links": {"self": {"href": "https://test.okta.com/v1/users/test_id"}},' \
'"activated": "2021-01-01T00:00:00.000Z",' \
'"created": "2021-01-01T00:00:00.000Z",' \
'"credentials": null,' \
'"id": "test_id",' \
'"last_login": null,' \
'"profile": {"name": "test_name"},' \
'"status": null,' \
'"status_changed": null,' \
'"transitioning_to_status": null,' \
'"type": null}]'
mock_http_request = MockHTTPRequest()
monkeypatch.setattr(aiohttp.ClientSession, 'request', mock_http_request)
with caplog.at_level(logging.INFO):
res, resp_body, error = asyncio.run(client.list_users())
assert caplog.text == ''
def test_client_log_exception(monkeypatch, caplog):
org_url = "https://test.okta.com"
token = "TOKEN"
config = {'orgUrl': org_url, 'token': token,
'logging': {'enabled': True, 'logLevel': logging.DEBUG}}
client = OktaClient(config)
class MockHTTPRequest():
def __call__(self, **params):
raise aiohttp.ClientConnectorCertificateError(
ConnectionKey(host=org_url,
port=443,
is_ssl=True,
ssl=None,
proxy=None,
proxy_auth=None,
proxy_headers_hash=None),
SSLCertVerificationError(1,
'[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: '
'unable to get local issuer certificate (_ssl.c:1123)'))
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
pass
@staticmethod
async def mock_response_text():
return '[{"text": "mock response text"}]'
mock_http_request = MockHTTPRequest()
monkeypatch.setattr(aiohttp.ClientSession, 'request', mock_http_request)
with caplog.at_level(logging.DEBUG):
res, resp_body, error = asyncio.run(client.list_users())
assert 'Cannot connect to host https://test.okta.com' in caplog.text
def test_client_ssl_context(monkeypatch, mocker):
org_url = "https://test.okta.com"
token = "TOKEN"
mock_ssl_context = mocker.MagicMock()
config = {'orgUrl': org_url, 'token': token, 'sslContext': mock_ssl_context}
client = OktaClient(config)
# mock http requests to verify the SSL context is passed through
class MockHTTPRequest():
def __call__(self, **params):
self.request_info = params
self.headers = params['headers']
self.url = params['url']
self.content_type = 'application/json'
self.links = ''
self.text = MockHTTPRequest.mock_response_text
self.status = 200
return self
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
pass
@staticmethod
async def mock_response_text():
return '[{"text": "mock response text"}]'
mock_http_request = MockHTTPRequest()
monkeypatch.setattr(aiohttp.ClientSession, 'request', mock_http_request)
asyncio.run(client.list_users())
assert mock_http_request.request_info['ssl_context'] == mock_ssl_context
@pytest.mark.asyncio
async def test_client_session(mocker):
org_url = "https://test.okta.com"
token = "TOKEN"
# no session
config = {'orgUrl': org_url, 'token': token}
client = OktaClient(config)
assert client._request_executor._http_client._session is None
# with session
config = {'orgUrl': org_url, 'token': token}
async with OktaClient(config) as client:
assert isinstance(client._request_executor._http_client._session, aiohttp.ClientSession)
``` |
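The tests above also document the basic calling convention: list_users() returns a (users, response, error) tuple, exactly as the tests unpack it. A hedged minimal usage sketch with placeholder credentials:
```python
import asyncio
from okta.client import Client as OktaClient

async def main():
    # Placeholder org URL and token; SSWS is the default authorizationMode, as asserted above.
    client = OktaClient({'orgUrl': 'https://example.okta.com', 'token': 'API_TOKEN'})
    users, resp, err = await client.list_users()
    if err:
        print("Error:", err)
    else:
        for user in users:
            print(user.id)

asyncio.run(main())
```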
{
"source": "jlrgraham/hjuutilainen-recipes",
"score": 3
} |
#### File: hjuutilainen-recipes/AlfredApp/Alfred2URLProvider.py
```python
import urllib2
import plistlib
from autopkglib import Processor, ProcessorError
__all__ = ["Alfred2URLProvider"]
UPDATE_INFO_PLIST_URL = "http://media.alfredapp.com/v2update/info.plist"
class Alfred2URLProvider(Processor):
"""Provides a download URL for the latest Alfred"""
input_variables = {
"base_url": {
"required": False,
"description": "The Alfred update info property list URL",
},
}
output_variables = {
"url": {
"description": "URL to the latest Alfred release.",
},
}
description = __doc__
def download_info_plist(self, base_url):
"""Downloads the info.plist file and returns a plist object"""
try:
f = urllib2.urlopen(base_url)
plist_data = f.read()
f.close()
except BaseException as e:
raise ProcessorError("Can't download %s: %s" % (base_url, e))
info_plist = plistlib.readPlistFromString(plist_data)
return info_plist
def get_alfred_dmg_url(self, base_url):
"""Find and return a download URL"""
# Alfred 2 update check uses a standard plist file.
# Grab it and parse...
info_plist = self.download_info_plist(base_url)
version = info_plist.get('version', None)
self.output("Found version %s" % version)
location = info_plist.get('location', None)
return location
def main(self):
base_url = self.env.get("base_url", UPDATE_INFO_PLIST_URL)
self.env["url"] = self.get_alfred_dmg_url(base_url)
self.output("Found URL %s" % self.env["url"])
if __name__ == "__main__":
processor = Alfred2URLProvider()
processor.execute_shell()
``` |
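The processor reads just two keys, 'version' and 'location', from Alfred's update property list. Below is a standalone Python 3 sketch of that parsing step (the processor above is Python 2); the plist content is a made-up example, not real update data.
```python
import plistlib

# Made-up example of the update plist structure the processor expects.
example_plist = b"""<?xml version="1.0" encoding="UTF-8"?>
<plist version="1.0">
<dict>
    <key>version</key>
    <string>2.0</string>
    <key>location</key>
    <string>https://example.com/Alfred.dmg</string>
</dict>
</plist>"""

info = plistlib.loads(example_plist)
print(info.get('version'), info.get('location'))
```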
{
"source": "jlrgraham/munki",
"score": 2
} |
#### File: client/munkilib/adobeutils.py
```python
import os
import re
import subprocess
import time
import tempfile
import sqlite3
from xml.dom import minidom
from glob import glob
import FoundationPlist
import munkicommon
import munkistatus
import utils
# we use lots of camelCase-style names. Deal with it.
# pylint: disable=C0103
class AdobeInstallProgressMonitor(object):
"""A class to monitor installs/removals of Adobe products.
Finds the currently active installation log and scrapes data out of it.
Installations that install a product and updates may actually create
multiple logs."""
def __init__(self, kind='CS5', operation='install'):
'''Provide some hints as to what type of installer is running and
whether we are installing or removing'''
self.kind = kind
self.operation = operation
self.payload_count = {}
def get_current_log(self):
'''Returns the current Adobe install log'''
logpath = '/Library/Logs/Adobe/Installers'
# find the most recently-modified log file
proc = subprocess.Popen(['/bin/ls', '-t1', logpath],
bufsize=-1, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(output, dummy_err) = proc.communicate()
if output:
firstitem = str(output).splitlines()[0]
if firstitem.endswith(".log"):
# return path of most recently modified log file
return os.path.join(logpath, firstitem)
return None
def info(self):
'''Returns the number of completed Adobe payloads,
and the AdobeCode of the most recently completed payload.'''
last_adobecode = ""
logfile = self.get_current_log()
if logfile:
if self.kind in ['CS6', 'CS5']:
regex = r'END TIMER :: \[Payload Operation :\{'
elif self.kind in ['CS3', 'CS4']:
if self.operation == 'install':
regex = r'Closed PCD cache session payload with ID'
else:
regex = r'Closed CAPS session for removal of payload'
else:
if self.operation == 'install':
regex = r'Completing installation for payload at '
else:
regex = r'Physical payload uninstall result '
cmd = ['/usr/bin/grep', '-E', regex, logfile]
proc = subprocess.Popen(cmd, bufsize=-1,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(output, dummy_err) = proc.communicate()
if output:
lines = str(output).splitlines()
completed_payloads = len(lines)
if (logfile not in self.payload_count
or completed_payloads > self.payload_count[logfile]):
# record number of completed payloads
self.payload_count[logfile] = completed_payloads
# now try to get the AdobeCode of the most recently
# completed payload.
# this isn't 100% accurate, but it's mostly for show
# anyway...
regex = re.compile(r'[^{]*(\{[A-Fa-f0-9-]+\})')
lines.reverse()
for line in lines:
m = regex.match(line)
try:
last_adobecode = m.group(1)
break
except (IndexError, AttributeError):
pass
total_completed_payloads = 0
for key in self.payload_count.keys():
total_completed_payloads += self.payload_count[key]
return (total_completed_payloads, last_adobecode)
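# Illustrative usage (not part of the original module): a hedged sketch of how
# this monitor is typically polled while an Adobe installer runs. It is left as
# comments so nothing executes on import; installer_is_running() is hypothetical.
#
#   monitor = AdobeInstallProgressMonitor(kind='CS5', operation='install')
#   while installer_is_running():
#       payloads_done, last_code = monitor.info()
#       print('%s payloads completed (last AdobeCode: %s)' % (payloads_done, last_code))
#       time.sleep(10)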
# dmg helper
# we need this instead of the one in munkicommon because the Adobe stuff
# needs the dmgs mounted under /Volumes. We can merge this later (or not).
def mountAdobeDmg(dmgpath):
"""
Attempts to mount the dmg at dmgpath
and returns a list of mountpoints
"""
mountpoints = []
dmgname = os.path.basename(dmgpath)
proc = subprocess.Popen(['/usr/bin/hdiutil', 'attach', dmgpath,
'-nobrowse', '-noverify', '-plist'],
bufsize=-1,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(pliststr, err) = proc.communicate()
if err:
munkicommon.display_error('Error %s mounting %s.' % (err, dmgname))
if pliststr:
plist = FoundationPlist.readPlistFromString(pliststr)
for entity in plist['system-entities']:
if 'mount-point' in entity:
mountpoints.append(entity['mount-point'])
return mountpoints
def getCS5uninstallXML(optionXMLfile):
'''Gets the uninstall deployment data from a CS5 installer'''
xml = ''
dom = minidom.parse(optionXMLfile)
DeploymentInfo = dom.getElementsByTagName('DeploymentInfo')
if DeploymentInfo:
for info_item in DeploymentInfo:
DeploymentUninstall = info_item.getElementsByTagName(
'DeploymentUninstall')
if DeploymentUninstall:
deploymentData = DeploymentUninstall[0].getElementsByTagName(
'Deployment')
if deploymentData:
Deployment = deploymentData[0]
xml += Deployment.toxml('UTF-8')
return xml
def getCS5mediaSignature(dirpath):
'''Returns the CS5 mediaSignature for an AAMEE CS5 install.
dirpath is typically the root of a mounted dmg'''
payloads_dir = ""
# look for a payloads folder
for (path, dummy_dirs, dummy_files) in os.walk(dirpath):
if path.endswith('/payloads'):
payloads_dir = path
# return empty-handed if we didn't find a payloads folder
if not payloads_dir:
return ''
# now look for setup.xml
setupxml = os.path.join(payloads_dir, 'Setup.xml')
if os.path.exists(setupxml) and os.path.isfile(setupxml):
# parse the XML
dom = minidom.parse(setupxml)
setupElements = dom.getElementsByTagName('Setup')
if setupElements:
mediaSignatureElements = \
setupElements[0].getElementsByTagName('mediaSignature')
if mediaSignatureElements:
element = mediaSignatureElements[0]
elementvalue = ''
for node in element.childNodes:
elementvalue += node.nodeValue
return elementvalue
return ""
def getPayloadInfo(dirpath):
'''Parses Adobe payloads, pulling out info useful to munki.
.proxy.xml files are used if available, or for CC-era updates
which do not contain one, the Media_db.db file, which contains
identical XML, is instead used.
CS3/CS4: contain only .proxy.xml
CS5/CS5.5/CS6: contain both
CC: contain only Media_db.db'''
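# The returned dict may include 'AdobeCode', 'display_name', 'version', and
# 'installed_size' (the payload's TotalSize divided by 1024), depending on
# which metadata elements are present.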
payloadinfo = {}
# look for .proxy.xml file dir
if os.path.isdir(dirpath):
proxy_paths = glob(os.path.join(dirpath, '*.proxy.xml'))
if proxy_paths:
xmlpath = proxy_paths[0]
dom = minidom.parse(xmlpath)
# if there's no .proxy.xml we should hope there's a Media_db.db
else:
db_path = os.path.join(dirpath, 'Media_db.db')
if os.path.exists(db_path):
conn = sqlite3.connect(db_path)
cur = conn.cursor()
cur.execute("SELECT value FROM PayloadData WHERE "
"PayloadData.key = 'PayloadInfo'")
result = cur.fetchone()
cur.close()
if result:
info_xml = result[0].encode('UTF-8')
dom = minidom.parseString(info_xml)
else:
# no xml, no db, no payload info!
return payloadinfo
payload_info = dom.getElementsByTagName('PayloadInfo')
if payload_info:
installer_properties = payload_info[0].getElementsByTagName(
'InstallerProperties')
if installer_properties:
properties = installer_properties[0].getElementsByTagName(
'Property')
for prop in properties:
if 'name' in prop.attributes.keys():
propname = prop.attributes['name'].value.encode('UTF-8')
propvalue = ''
for node in prop.childNodes:
propvalue += node.nodeValue
if propname == 'AdobeCode':
payloadinfo['AdobeCode'] = propvalue
if propname == 'ProductName':
payloadinfo['display_name'] = propvalue
if propname == 'ProductVersion':
payloadinfo['version'] = propvalue
installmetadata = payload_info[0].getElementsByTagName(
'InstallDestinationMetadata')
if installmetadata:
totalsizes = installmetadata[0].getElementsByTagName(
'TotalSize')
if totalsizes:
installsize = ''
for node in totalsizes[0].childNodes:
installsize += node.nodeValue
payloadinfo['installed_size'] = int(installsize)/1024
return payloadinfo
def getAdobeSetupInfo(installroot):
'''Given the root of mounted Adobe DMG,
look for info about the installer or updater'''
info = {}
payloads = []
# look for all the payloads folders
for (path, dummy_dirs, dummy_files) in os.walk(installroot):
if path.endswith('/payloads'):
driverfolder = ''
mediaSignature = ''
setupxml = os.path.join(path, 'setup.xml')
if os.path.exists(setupxml):
dom = minidom.parse(setupxml)
drivers = dom.getElementsByTagName('Driver')
if drivers:
driver = drivers[0]
if 'folder' in driver.attributes.keys():
driverfolder = driver.attributes[
'folder'].value.encode('UTF-8')
if driverfolder == '':
# look for mediaSignature (CS5 AAMEE install)
setupElements = dom.getElementsByTagName('Setup')
if setupElements:
mediaSignatureElements = setupElements[
0].getElementsByTagName('mediaSignature')
if mediaSignatureElements:
element = mediaSignatureElements[0]
for node in element.childNodes:
mediaSignature += node.nodeValue
for item in munkicommon.listdir(path):
payloadpath = os.path.join(path, item)
payloadinfo = getPayloadInfo(payloadpath)
if payloadinfo:
payloads.append(payloadinfo)
if ((driverfolder and item == driverfolder) or
(mediaSignature and
payloadinfo['AdobeCode'] == mediaSignature)):
info['display_name'] = payloadinfo['display_name']
info['version'] = payloadinfo['version']
info['AdobeSetupType'] = 'ProductInstall'
if not payloads:
# look for an extensions folder; almost certainly this is an Updater
for (path, dummy_dirs, dummy_files) in os.walk(installroot):
if path.endswith("/extensions"):
for item in munkicommon.listdir(path):
#skip LanguagePacks
if item.find("LanguagePack") == -1:
itempath = os.path.join(path, item)
payloadinfo = getPayloadInfo(itempath)
if payloadinfo:
payloads.append(payloadinfo)
# we found an extensions dir,
# so no need to keep walking the install root
break
if payloads:
if len(payloads) == 1:
info['display_name'] = payloads[0]['display_name']
info['version'] = payloads[0]['version']
else:
if not 'display_name' in info:
info['display_name'] = "ADMIN: choose from payloads"
if not 'version' in info:
info['version'] = "ADMIN please set me"
info['payloads'] = payloads
installed_size = 0
for payload in payloads:
installed_size = installed_size + payload.get('installed_size', 0)
info['installed_size'] = installed_size
return info
def getAdobePackageInfo(installroot):
'''Gets the package name from the AdobeUberInstaller.xml file;
other info from the payloads folder'''
info = getAdobeSetupInfo(installroot)
info['description'] = ""
installerxml = os.path.join(installroot, "AdobeUberInstaller.xml")
if os.path.exists(installerxml):
description = ''
dom = minidom.parse(installerxml)
installinfo = dom.getElementsByTagName("InstallInfo")
if installinfo:
packagedescriptions = \
installinfo[0].getElementsByTagName("PackageDescription")
if packagedescriptions:
prop = packagedescriptions[0]
for node in prop.childNodes:
description += node.nodeValue
if description:
description_parts = description.split(' : ', 1)
info['display_name'] = description_parts[0]
if len(description_parts) > 1:
info['description'] = description_parts[1]
else:
info['description'] = ""
return info
else:
installerxml = os.path.join(installroot, "optionXML.xml")
if os.path.exists(installerxml):
dom = minidom.parse(installerxml)
installinfo = dom.getElementsByTagName("InstallInfo")
if installinfo:
pkgname_elems = installinfo[0].getElementsByTagName(
"PackageName")
if pkgname_elems:
prop = pkgname_elems[0]
pkgname = ""
for node in prop.childNodes:
pkgname += node.nodeValue
info['display_name'] = pkgname
if not info.get('display_name'):
info['display_name'] = os.path.basename(installroot)
return info
def getXMLtextElement(dom_node, name):
'''Returns the text value of the first item found with the given
tagname'''
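# e.g. getXMLtextElement(node, 'prodVersion') returns the text content of the
# first <prodVersion> element under node, or None if no such element exists.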
value = None
subelements = dom_node.getElementsByTagName(name)
if subelements:
value = ''
for node in subelements[0].childNodes:
value += node.nodeValue
return value
def parseOptionXML(option_xml_file):
'''Parses an optionXML.xml file and pulls out items of interest, returning
them in a dictionary'''
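# Keys that may appear in the result: 'packager_id', 'packager_version',
# 'package_name', 'package_id', and 'products' (a list of dicts with
# 'prodName', 'prodVersion' and, when present, 'mediaSignature').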
info = {}
dom = minidom.parse(option_xml_file)
installinfo = dom.getElementsByTagName('InstallInfo')
if installinfo:
if 'id' in installinfo[0].attributes.keys():
info['packager_id'] = installinfo[0].attributes['id'].value
if 'version' in installinfo[0].attributes.keys():
info['packager_version'] = installinfo[
0].attributes['version'].value
info['package_name'] = getXMLtextElement(installinfo[0], 'PackageName')
info['package_id'] = getXMLtextElement(installinfo[0], 'PackageID')
info['products'] = []
medias_elements = installinfo[0].getElementsByTagName('Medias')
if medias_elements:
media_elements = medias_elements[0].getElementsByTagName('Media')
if media_elements:
for media in media_elements:
product = {}
product['prodName'] = getXMLtextElement(media, 'prodName')
product['prodVersion'] = getXMLtextElement(
media, 'prodVersion')
setup_elements = media.getElementsByTagName('Setup')
if setup_elements:
mediaSignatureElements = setup_elements[
0].getElementsByTagName('mediaSignature')
if mediaSignatureElements:
product['mediaSignature'] = ''
element = mediaSignatureElements[0]
for node in element.childNodes:
product['mediaSignature'] += node.nodeValue
info['products'].append(product)
return info
def countPayloads(dirpath):
'''Attempts to count the payloads in the Adobe installation item'''
count = 0
for (path, dummy_dirs, dummy_files) in os.walk(dirpath):
if path.endswith("/payloads"):
for subitem in munkicommon.listdir(path):
subitempath = os.path.join(path, subitem)
if os.path.isdir(subitempath):
count = count + 1
return count
def getPercent(current, maximum):
'''Returns a percent-done value suitable for use with MunkiStatus progress display.'''
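# e.g. getPercent(5, 20) returns 25; a maximum of 0, a negative current, or
# current > maximum returns -1, which MunkiStatus shows as an indeterminate bar.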
if maximum == 0:
percentdone = -1
elif current < 0:
percentdone = -1
elif current > maximum:
percentdone = -1
elif current == maximum:
percentdone = 100
else:
percentdone = int(float(current)/float(maximum)*100)
return percentdone
def findSetupApp(dirpath):
'''Search dirpath and enclosed directories for Setup.app.
Returns the path to the actual executable.'''
for (path, dummy_dirs, dummy_files) in os.walk(dirpath):
if path.endswith("Setup.app"):
setup_path = os.path.join(path, "Contents", "MacOS", "Setup")
if os.path.exists(setup_path):
return setup_path
return ''
def findInstallApp(dirpath):
'''Searches dirpath and enclosed directories for Install.app.
Returns the path to the actual executable.'''
for (path, dummy_dirs, dummy_files) in os.walk(dirpath):
if path.endswith("Install.app"):
setup_path = os.path.join(path, "Contents", "MacOS", "Install")
if os.path.exists(setup_path):
return setup_path
return ''
def findAdobePatchInstallerApp(dirpath):
'''Searches dirpath and enclosed directories for AdobePatchInstaller.app.
Returns the path to the actual executable.'''
for (path, dummy_dirs, dummy_files) in os.walk(dirpath):
if path.endswith("AdobePatchInstaller.app"):
setup_path = os.path.join(
path, "Contents", "MacOS", "AdobePatchInstaller")
if os.path.exists(setup_path):
return setup_path
return ''
def findAdobeDeploymentManager(dirpath):
'''Searches dirpath and enclosed directories for AdobeDeploymentManager.
Returns path to the executable.'''
for (path, dummy_dirs, dummy_files) in os.walk(dirpath):
if path.endswith("pkg/Contents/Resources"):
dm_path = os.path.join(path, "AdobeDeploymentManager")
if os.path.exists(dm_path):
return dm_path
return ''
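# Maps PIDs of troublesome Adobe helper processes to a countdown;
# killStupidProcesses() decrements the count on each call and force-kills the
# process once it reaches zero (roughly 30 seconds when polled once per second
# by runAdobeInstallTool).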
secondsToLive = {}
def killStupidProcesses():
'''A nasty bit of hackery to get Adobe CS5 AAMEE packages to install
when at the loginwindow.'''
stupid_processes = ["Adobe AIR Installer",
"Adobe AIR Application Installer",
"InstallAdobeHelp",
"open -a /Library/Application Support/Adobe/"
"SwitchBoard/SwitchBoard.app",
"/bin/bash /Library/Application Support/Adobe/"
"SwitchBoard/SwitchBoard.app/Contents/MacOS/"
"switchboard.sh"]
for procname in stupid_processes:
pid = utils.getPIDforProcessName(procname)
if pid:
if not pid in secondsToLive:
secondsToLive[pid] = 30
else:
secondsToLive[pid] = secondsToLive[pid] - 1
if secondsToLive[pid] == 0:
# it's been running too long; kill it
munkicommon.log("Killing PID %s: %s" % (pid, procname))
try:
os.kill(int(pid), 9)
except OSError:
pass
# remove this PID from our list
del secondsToLive[pid]
# only kill one process per invocation
return
def runAdobeInstallTool(
cmd, number_of_payloads=0, killAdobeAIR=False, payloads=None,
kind="CS5", operation="install"):
'''An abstraction of the tasks for running Adobe Setup,
AdobeUberInstaller, AdobeUberUninstaller, AdobeDeploymentManager, etc'''
# initialize an AdobeInstallProgressMonitor object.
progress_monitor = AdobeInstallProgressMonitor(
kind=kind, operation=operation)
if munkicommon.munkistatusoutput and not number_of_payloads:
# indeterminate progress bar
munkistatus.percent(-1)
proc = subprocess.Popen(cmd, shell=False, bufsize=1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
old_payload_completed_count = 0
payloadname = ""
while proc.poll() is None:
time.sleep(1)
(payload_completed_count, adobe_code) = progress_monitor.info()
if payload_completed_count > old_payload_completed_count:
old_payload_completed_count = payload_completed_count
if adobe_code and payloads:
matched_payloads = [payload for payload in payloads
if payload.get('AdobeCode') == adobe_code]
if matched_payloads:
payloadname = matched_payloads[0].get('display_name')
else:
payloadname = adobe_code
payloadinfo = " - " + payloadname
else:
payloadinfo = ""
if number_of_payloads:
munkicommon.display_status_minor(
'Completed payload %s of %s%s' %
(payload_completed_count, number_of_payloads,
payloadinfo))
else:
munkicommon.display_status_minor(
'Completed payload %s%s',
payload_completed_count, payloadinfo)
if munkicommon.munkistatusoutput:
munkistatus.percent(
getPercent(payload_completed_count, number_of_payloads))
# Adobe AIR Installer workaround/hack
# CSx installs at the loginwindow hang when Adobe AIR is installed.
# So we check for this and kill the process. Ugly.
# Hopefully we can disable this in the future.
if killAdobeAIR:
if (not munkicommon.getconsoleuser() or
munkicommon.getconsoleuser() == u"loginwindow"):
# we're at the loginwindow.
killStupidProcesses()
# run of tool completed
retcode = proc.poll()
#check output for errors
output = proc.stdout.readlines()
for line in output:
line = line.rstrip("\n")
if line.startswith("Error"):
munkicommon.display_error(line)
if line.startswith("Exit Code:"):
if retcode == 0:
try:
retcode = int(line[11:])
except (ValueError, TypeError):
retcode = -1
if retcode != 0 and retcode != 8:
munkicommon.display_error(
'Adobe Setup error: %s: %s', retcode, adobeSetupError(retcode))
else:
if munkicommon.munkistatusoutput:
munkistatus.percent(100)
munkicommon.display_status_minor('Done.')
return retcode
def runAdobeSetup(dmgpath, uninstalling=False, payloads=None):
'''Runs the Adobe setup tool in silent mode from
an Adobe update DMG or an Adobe CS3 install DMG'''
munkicommon.display_status_minor(
'Mounting disk image %s' % os.path.basename(dmgpath))
mountpoints = mountAdobeDmg(dmgpath)
if mountpoints:
setup_path = findSetupApp(mountpoints[0])
if setup_path:
# look for install.xml or uninstall.xml at root
deploymentfile = None
installxml = os.path.join(mountpoints[0], "install.xml")
uninstallxml = os.path.join(mountpoints[0], "uninstall.xml")
if uninstalling:
operation = 'uninstall'
if os.path.exists(uninstallxml):
deploymentfile = uninstallxml
else:
# we've been asked to uninstall,
# but found no uninstall.xml
# so we need to bail
munkicommon.unmountdmg(mountpoints[0])
munkicommon.display_error(
'%s doesn\'t appear to contain uninstall info.',
os.path.basename(dmgpath))
return -1
else:
operation = 'install'
if os.path.exists(installxml):
deploymentfile = installxml
# try to find and count the number of payloads
# so we can give a rough progress indicator
number_of_payloads = countPayloads(mountpoints[0])
munkicommon.display_status_minor('Running Adobe Setup')
adobe_setup = [setup_path, '--mode=silent', '--skipProcessCheck=1']
if deploymentfile:
adobe_setup.append('--deploymentFile=%s' % deploymentfile)
retcode = runAdobeInstallTool(
adobe_setup, number_of_payloads, payloads=payloads,
kind='CS3', operation=operation)
else:
munkicommon.display_error(
'%s doesn\'t appear to contain Adobe Setup.' %
os.path.basename(dmgpath))
retcode = -1
munkicommon.unmountdmg(mountpoints[0])
return retcode
else:
munkicommon.display_error('No mountable filesystems on %s' % dmgpath)
return -1
def writefile(stringdata, path):
'''Writes string data to path.
Returns the path on success, empty string on failure.'''
try:
fileobject = open(path, mode='w', buffering=1)
print >> fileobject, stringdata.encode('UTF-8')
fileobject.close()
return path
except (OSError, IOError):
munkicommon.display_error("Couldn't write %s" % stringdata)
return ""
def doAdobeCS5Uninstall(adobeInstallInfo, payloads=None):
'''Runs the locally-installed Adobe CS5 tools to remove CS5 products.
We need the uninstallxml and the CS5 Setup.app.'''
uninstallxml = adobeInstallInfo.get('uninstallxml')
if not uninstallxml:
munkicommon.display_error("No uninstall.xml in adobe_install_info")
return -1
payloadcount = adobeInstallInfo.get('payload_count', 0)
path = os.path.join(munkicommon.tmpdir(), "uninstall.xml")
deploymentFile = writefile(uninstallxml, path)
if not deploymentFile:
return -1
setupapp = "/Library/Application Support/Adobe/OOBE/PDApp/DWA/Setup.app"
setup = os.path.join(setupapp, "Contents/MacOS/Setup")
if not os.path.exists(setup):
munkicommon.display_error("%s is not installed." % setupapp)
return -1
uninstall_cmd = [setup,
'--mode=silent',
'--action=uninstall',
'--skipProcessCheck=1',
'--deploymentFile=%s' % deploymentFile]
munkicommon.display_status_minor('Running Adobe Uninstall')
return runAdobeInstallTool(uninstall_cmd, payloadcount, payloads=payloads,
kind='CS5', operation='uninstall')
def runAdobeCCPpkgScript(dmgpath, payloads=None, operation='install'):
'''Installs or removes an Adobe product packaged via
Creative Cloud Packager'''
munkicommon.display_status_minor(
'Mounting disk image %s' % os.path.basename(dmgpath))
mountpoints = mountAdobeDmg(dmgpath)
if not mountpoints:
munkicommon.display_error("No mountable filesystems on %s" % dmgpath)
return -1
deploymentmanager = findAdobeDeploymentManager(mountpoints[0])
if not deploymentmanager:
munkicommon.display_error(
'%s doesn\'t appear to contain AdobeDeploymentManager',
os.path.basename(dmgpath))
munkicommon.unmountdmg(mountpoints[0])
return -1
# big hack to convince the Adobe tools to install off a mounted
# disk image.
#
# For some reason, some versions of the Adobe install tools refuse to
# install when the payloads are on a "removable" disk,
# which includes mounted disk images.
#
# we create a temporary directory on the local disk and then symlink
# some resources from the mounted disk image to the temporary
# directory. When we pass this temporary directory to the Adobe
# installation tools, they are now happy.
basepath = os.path.dirname(deploymentmanager)
preinstall_script = os.path.join(basepath, "preinstall")
if not os.path.exists(preinstall_script):
if operation == 'install':
munkicommon.display_error(
"No Adobe install script found on %s" % dmgpath)
else:
munkicommon.display_error(
"No Adobe uninstall script found on %s" % dmgpath)
munkicommon.unmountdmg(mountpoints[0])
return -1
number_of_payloads = countPayloads(basepath)
tmpdir = tempfile.mkdtemp(prefix='munki-', dir='/tmp')
# make our symlinks
for dir_name in ['ASU', 'ASU2', 'ProvisioningTool', 'uninstallinfo']:
if os.path.isdir(os.path.join(basepath, dir_name)):
os.symlink(os.path.join(basepath, dir_name),
os.path.join(tmpdir, dir_name))
for dir_name in ['Patches', 'Setup']:
realdir = os.path.join(basepath, dir_name)
if os.path.isdir(realdir):
tmpsubdir = os.path.join(tmpdir, dir_name)
os.mkdir(tmpsubdir)
for item in munkicommon.listdir(realdir):
os.symlink(os.path.join(realdir, item),
os.path.join(tmpsubdir, item))
os_version_tuple = munkicommon.getOsVersion(as_tuple=True)
if (os_version_tuple < (10, 11) and
(not munkicommon.getconsoleuser() or
munkicommon.getconsoleuser() == u"loginwindow")):
# we're at the loginwindow, so we need to run the deployment
# manager in the loginwindow context using launchctl bsexec
# launchctl bsexec doesn't work for this in El Cap, so do it
# only if we're running Yosemite or earlier
loginwindowPID = utils.getPIDforProcessName("loginwindow")
cmd = ['/bin/launchctl', 'bsexec', loginwindowPID]
else:
cmd = []
# preinstall script is in pkg/Contents/Resources, so calculate
# path to pkg
pkg_dir = os.path.dirname(os.path.dirname(basepath))
cmd.extend([preinstall_script, pkg_dir, '/', '/'])
if operation == 'install':
munkicommon.display_status_minor('Starting Adobe installer...')
retcode = runAdobeInstallTool(
cmd, number_of_payloads, killAdobeAIR=True, payloads=payloads,
kind='CS6', operation=operation)
# now clean up and return
dummy_result = subprocess.call(["/bin/rm", "-rf", tmpdir])
munkicommon.unmountdmg(mountpoints[0])
return retcode
def runAdobeCS5AAMEEInstall(dmgpath, payloads=None):
'''Installs a CS5 product using an AAMEE-generated package on a
disk image.'''
munkicommon.display_status_minor(
'Mounting disk image %s' % os.path.basename(dmgpath))
mountpoints = mountAdobeDmg(dmgpath)
if not mountpoints:
munkicommon.display_error("No mountable filesystems on %s" % dmgpath)
return -1
deploymentmanager = findAdobeDeploymentManager(mountpoints[0])
if deploymentmanager:
# big hack to convince the Adobe tools to install off a mounted
# disk image.
#
# For some reason, some versions of the Adobe install tools refuse to
# install when the payloads are on a "removable" disk,
# which includes mounted disk images.
#
# we create a temporary directory on the local disk and then symlink
# some resources from the mounted disk image to the temporary
# directory. When we pass this temporary directory to the Adobe
# installation tools, they are now happy.
basepath = os.path.dirname(deploymentmanager)
number_of_payloads = countPayloads(basepath)
tmpdir = tempfile.mkdtemp(prefix='munki-', dir='/tmp')
# make our symlinks
os.symlink(os.path.join(basepath, "ASU"), os.path.join(tmpdir, "ASU"))
os.symlink(os.path.join(basepath, "ProvisioningTool"),
os.path.join(tmpdir, "ProvisioningTool"))
for dir_name in ['Patches', 'Setup']:
realdir = os.path.join(basepath, dir_name)
if os.path.isdir(realdir):
tmpsubdir = os.path.join(tmpdir, dir_name)
os.mkdir(tmpsubdir)
for item in munkicommon.listdir(realdir):
os.symlink(
os.path.join(realdir, item),
os.path.join(tmpsubdir, item))
optionXMLfile = os.path.join(basepath, "optionXML.xml")
os_version_tuple = munkicommon.getOsVersion(as_tuple=True)
if (os_version_tuple < (10, 11) and
(not munkicommon.getconsoleuser() or
munkicommon.getconsoleuser() == u"loginwindow")):
# we're at the loginwindow, so we need to run the deployment
# manager in the loginwindow context using launchctl bsexec
# launchctl bsexec doesn't work for this in El Cap, so do it
# only if we're running Yosemite or earlier
loginwindowPID = utils.getPIDforProcessName("loginwindow")
cmd = ['/bin/launchctl', 'bsexec', loginwindowPID]
else:
cmd = []
cmd.extend([deploymentmanager, '--optXMLPath=%s' % optionXMLfile,
'--setupBasePath=%s' % basepath, '--installDirPath=/',
'--mode=install'])
munkicommon.display_status_minor('Starting Adobe installer...')
retcode = runAdobeInstallTool(
cmd, number_of_payloads, killAdobeAIR=True, payloads=payloads,
kind='CS5', operation='install')
# now clean up our symlink hackfest
dummy_result = subprocess.call(["/bin/rm", "-rf", tmpdir])
else:
munkicommon.display_error(
'%s doesn\'t appear to contain AdobeDeploymentManager',
os.path.basename(dmgpath))
retcode = -1
munkicommon.unmountdmg(mountpoints[0])
return retcode
def runAdobeCS5PatchInstaller(dmgpath, copylocal=False, payloads=None):
'''Runs the AdobePatchInstaller for CS5.
Optionally can copy the DMG contents to the local disk
to work around issues with the patcher.'''
munkicommon.display_status_minor(
'Mounting disk image %s' % os.path.basename(dmgpath))
mountpoints = mountAdobeDmg(dmgpath)
if mountpoints:
if copylocal:
# copy the update to the local disk before installing
updatedir = tempfile.mkdtemp(prefix='munki-', dir='/tmp')
retcode = subprocess.call(
["/bin/cp", "-r", mountpoints[0], updatedir])
# unmount diskimage
munkicommon.unmountdmg(mountpoints[0])
if retcode:
munkicommon.display_error(
'Error copying items from %s' % dmgpath)
return -1
# remove the dmg file to free up space, since we don't need it
# any longer
dummy_result = subprocess.call(["/bin/rm", dmgpath])
else:
updatedir = mountpoints[0]
patchinstaller = findAdobePatchInstallerApp(updatedir)
if patchinstaller:
# try to find and count the number of payloads
# so we can give a rough progress indicator
number_of_payloads = countPayloads(updatedir)
munkicommon.display_status_minor('Running Adobe Patch Installer')
install_cmd = [patchinstaller,
'--mode=silent',
'--skipProcessCheck=1']
retcode = runAdobeInstallTool(install_cmd,
number_of_payloads, payloads=payloads,
kind='CS5', operation='install')
else:
munkicommon.display_error(
"%s doesn't appear to contain AdobePatchInstaller.app.",
os.path.basename(dmgpath))
retcode = -1
if copylocal:
# clean up our mess
dummy_result = subprocess.call(["/bin/rm", "-rf", updatedir])
else:
munkicommon.unmountdmg(mountpoints[0])
return retcode
else:
munkicommon.display_error('No mountable filesystems on %s' % dmgpath)
return -1
def runAdobeUberTool(dmgpath, pkgname='', uninstalling=False, payloads=None):
'''Runs either AdobeUberInstaller or AdobeUberUninstaller
from a disk image and provides progress feedback.
pkgname is the name of a directory at the top level of the dmg
containing the AdobeUber tools and their XML files.'''
munkicommon.display_status_minor(
'Mounting disk image %s' % os.path.basename(dmgpath))
mountpoints = mountAdobeDmg(dmgpath)
if mountpoints:
installroot = mountpoints[0]
if uninstalling:
ubertool = os.path.join(installroot, pkgname,
"AdobeUberUninstaller")
else:
ubertool = os.path.join(installroot, pkgname,
"AdobeUberInstaller")
if os.path.exists(ubertool):
info = getAdobePackageInfo(installroot)
packagename = info['display_name']
action = "Installing"
operation = "install"
if uninstalling:
action = "Uninstalling"
operation = "uninstall"
munkicommon.display_status_major('%s %s' % (action, packagename))
if munkicommon.munkistatusoutput:
munkistatus.detail('Starting %s' % os.path.basename(ubertool))
# try to find and count the number of payloads
# so we can give a rough progress indicator
number_of_payloads = countPayloads(installroot)
retcode = runAdobeInstallTool(
[ubertool], number_of_payloads, killAdobeAIR=True,
payloads=payloads, kind='CS4', operation=operation)
else:
munkicommon.display_error("No %s found" % ubertool)
retcode = -1
munkicommon.unmountdmg(installroot)
return retcode
else:
munkicommon.display_error("No mountable filesystems on %s" % dmgpath)
return -1
def findAcrobatPatchApp(dirpath):
'''Attempts to find an AcrobatPro patching application
in dirpath. If found, returns the path to the bundled
patching script.'''
for (path, dummy_dirs, dummy_files) in os.walk(dirpath):
if path.endswith(".app"):
# look for Adobe's patching script
patch_script_path = os.path.join(
path, 'Contents', 'Resources', 'ApplyOperation.py')
if os.path.exists(patch_script_path):
return path
return ''
def updateAcrobatPro(dmgpath):
"""Uses the scripts and Resources inside the Acrobat Patch application
bundle to silently update Acrobat Pro and related apps
Why oh why does this use a different mechanism than the other Adobe
apps?"""
if munkicommon.munkistatusoutput:
munkistatus.percent(-1)
#first mount the dmg
munkicommon.display_status_minor(
'Mounting disk image %s' % os.path.basename(dmgpath))
mountpoints = mountAdobeDmg(dmgpath)
if mountpoints:
installroot = mountpoints[0]
pathToAcrobatPatchApp = findAcrobatPatchApp(installroot)
else:
munkicommon.display_error("No mountable filesystems on %s" % dmgpath)
return -1
if not pathToAcrobatPatchApp:
munkicommon.display_error(
'No Acrobat Patch app at %s', pathToAcrobatPatchApp)
munkicommon.unmountdmg(installroot)
return -1
# some values needed by the patching script
resourcesDir = os.path.join(
pathToAcrobatPatchApp, 'Contents', 'Resources')
ApplyOperation = os.path.join(resourcesDir, 'ApplyOperation.py')
callingScriptPath = os.path.join(resourcesDir, 'InstallUpdates.sh')
appList = []
appListFile = os.path.join(resourcesDir, 'app_list.txt')
if os.path.exists(appListFile):
fileobj = open(appListFile, mode='r', buffering=-1)
if fileobj:
for line in fileobj.readlines():
appList.append(line)
fileobj.close()
if not appList:
munkicommon.display_error('Did not find a list of apps to update.')
munkicommon.unmountdmg(installroot)
return -1
payloadNum = -1
for line in appList:
payloadNum = payloadNum + 1
if munkicommon.munkistatusoutput:
munkistatus.percent(getPercent(payloadNum + 1, len(appList) + 1))
(appname, status) = line.rstrip("\n").split("\t")  # strip the newline so the "optional" status check below can match
munkicommon.display_status_minor('Searching for %s' % appname)
# first look in the obvious place
pathname = os.path.join("/Applications/Adobe Acrobat 9 Pro", appname)
if os.path.exists(pathname):
item = {}
item['path'] = pathname
candidates = [item]
else:
# use system_profiler to search for the app
candidates = [item for item in munkicommon.getAppData()
if item['path'].endswith('/' + appname)]
# hope there's only one!
if len(candidates) == 0:
if status == "optional":
continue
else:
munkicommon.display_error("Cannot patch %s because it "
"was not found on the startup "
"disk." % appname)
munkicommon.unmountdmg(installroot)
return -1
if len(candidates) > 1:
munkicommon.display_error("Cannot patch %s because we found "
"more than one copy on the "
"startup disk." % appname)
munkicommon.unmountdmg(installroot)
return -1
munkicommon.display_status_minor('Updating %s' % appname)
apppath = os.path.dirname(candidates[0]["path"])
cmd = [ApplyOperation, apppath, appname, resourcesDir,
callingScriptPath, str(payloadNum)]
proc = subprocess.Popen(cmd, shell=False, bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
while proc.poll() is None:
time.sleep(1)
# run of patch tool completed
retcode = proc.poll()
if retcode != 0:
munkicommon.display_error(
'Error patching %s: %s', appname, retcode)
break
else:
munkicommon.display_status_minor('Patching %s complete.', appname)
munkicommon.display_status_minor('Done.')
if munkicommon.munkistatusoutput:
munkistatus.percent(100)
munkicommon.unmountdmg(installroot)
return retcode
def getBundleInfo(path):
"""
Returns Info.plist data if available
for bundle at path
"""
infopath = os.path.join(path, "Contents", "Info.plist")
if not os.path.exists(infopath):
infopath = os.path.join(path, "Resources", "Info.plist")
if os.path.exists(infopath):
try:
plist = FoundationPlist.readPlist(infopath)
return plist
except FoundationPlist.NSPropertyListSerializationException:
pass
return None
def getAdobeInstallInfo(installdir):
'''Encapsulates info used by the Adobe Setup/Install app.'''
adobeInstallInfo = {}
if installdir:
adobeInstallInfo['media_signature'] = getCS5mediaSignature(installdir)
adobeInstallInfo['payload_count'] = countPayloads(installdir)
optionXMLfile = os.path.join(installdir, "optionXML.xml")
if os.path.exists(optionXMLfile):
adobeInstallInfo['uninstallxml'] = \
getCS5uninstallXML(optionXMLfile)
return adobeInstallInfo
def getAdobeCatalogInfo(mountpoint, pkgname=""):
'''Used by makepkginfo to build pkginfo data for Adobe
installers/updaters'''
# look for AdobeDeploymentManager (AAMEE installer)
deploymentmanager = findAdobeDeploymentManager(mountpoint)
if deploymentmanager:
dirpath = os.path.dirname(deploymentmanager)
option_xml_file = os.path.join(dirpath, 'optionXML.xml')
option_xml_info = {}
if os.path.exists(option_xml_file):
option_xml_info = parseOptionXML(option_xml_file)
cataloginfo = getAdobePackageInfo(dirpath)
if cataloginfo:
# add some more data
if option_xml_info.get('packager_id') == u'CloudPackager':
# CCP package
cataloginfo['display_name'] = option_xml_info.get(
'package_name', 'unknown')
cataloginfo['name'] = cataloginfo['display_name'].replace(
' ', '')
cataloginfo['uninstallable'] = True
cataloginfo['uninstall_method'] = "AdobeCCPUninstaller"
cataloginfo['installer_type'] = "AdobeCCPInstaller"
cataloginfo['minimum_os_version'] = "10.6.8"
mediasignatures = [
item['mediaSignature']
for item in option_xml_info.get('products', [])
if 'mediaSignature' in item]
else:
# AAMEE package
cataloginfo['name'] = cataloginfo['display_name'].replace(
' ', '')
cataloginfo['uninstallable'] = True
cataloginfo['uninstall_method'] = "AdobeCS5AAMEEPackage"
cataloginfo['installer_type'] = "AdobeCS5AAMEEPackage"
cataloginfo['minimum_os_version'] = "10.5.0"
cataloginfo['adobe_install_info'] = getAdobeInstallInfo(
installdir=dirpath)
mediasignature = cataloginfo['adobe_install_info'].get(
"media_signature")
mediasignatures = [mediasignature]
if mediasignatures:
# make a default <key>installs</key> array
uninstalldir = "/Library/Application Support/Adobe/Uninstall"
installs = []
for mediasignature in mediasignatures:
signaturefile = mediasignature + ".db"
filepath = os.path.join(uninstalldir, signaturefile)
installitem = {}
installitem['path'] = filepath
installitem['type'] = 'file'
installs.append(installitem)
cataloginfo['installs'] = installs
return cataloginfo
# Look for Install.app (Bare metal CS5 install)
# we don't handle this type, but we'll report it
# back so makepkginfo can provide an error message
installapp = findInstallApp(mountpoint)
if installapp:
cataloginfo = {}
cataloginfo['installer_type'] = "AdobeCS5Installer"
return cataloginfo
# Look for AdobePatchInstaller.app (CS5 updater)
installapp = findAdobePatchInstallerApp(mountpoint)
if os.path.exists(installapp):
# this is a CS5 updater disk image
cataloginfo = getAdobePackageInfo(mountpoint)
if cataloginfo:
# add some more data
cataloginfo['name'] = cataloginfo['display_name'].replace(' ', '')
cataloginfo['uninstallable'] = False
cataloginfo['installer_type'] = "AdobeCS5PatchInstaller"
if pkgname:
cataloginfo['package_path'] = pkgname
# make some (hopefully functional) installs items from the payloads
installs = []
uninstalldir = "/Library/Application Support/Adobe/Uninstall"
# first look for a payload with a display_name matching the
# overall display_name
for payload in cataloginfo.get('payloads', []):
if (payload.get('display_name', '') ==
cataloginfo['display_name']):
if 'AdobeCode' in payload:
dbfile = payload['AdobeCode'] + ".db"
filepath = os.path.join(uninstalldir, dbfile)
installitem = {}
installitem['path'] = filepath
installitem['type'] = 'file'
installs.append(installitem)
break
if installs == []:
# didn't find a payload with matching name
# just add all of the non-LangPack payloads
# to the installs list.
for payload in cataloginfo.get('payloads', []):
if 'AdobeCode' in payload:
if ("LangPack" in payload.get("display_name") or
"Language Files" in payload.get(
"display_name")):
# skip Language Packs
continue
dbfile = payload['AdobeCode'] + ".db"
filepath = os.path.join(uninstalldir, dbfile)
installitem = {}
installitem['path'] = filepath
installitem['type'] = 'file'
installs.append(installitem)
cataloginfo['installs'] = installs
return cataloginfo
# Look for AdobeUberInstaller items (CS4 install)
pkgroot = os.path.join(mountpoint, pkgname)
adobeinstallxml = os.path.join(pkgroot, "AdobeUberInstaller.xml")
if os.path.exists(adobeinstallxml):
# this is a CS4 Enterprise Deployment package
cataloginfo = getAdobePackageInfo(pkgroot)
if cataloginfo:
# add some more data
cataloginfo['name'] = cataloginfo['display_name'].replace(' ', '')
cataloginfo['uninstallable'] = True
cataloginfo['uninstall_method'] = "AdobeUberUninstaller"
cataloginfo['installer_type'] = "AdobeUberInstaller"
if pkgname:
cataloginfo['package_path'] = pkgname
return cataloginfo
# maybe this is an Adobe update DMG or CS3 installer
# look for Adobe Setup.app
setuppath = findSetupApp(mountpoint)
if setuppath:
cataloginfo = getAdobeSetupInfo(mountpoint)
if cataloginfo:
# add some more data
cataloginfo['name'] = cataloginfo['display_name'].replace(' ', '')
cataloginfo['installer_type'] = "AdobeSetup"
if cataloginfo.get('AdobeSetupType') == "ProductInstall":
cataloginfo['uninstallable'] = True
cataloginfo['uninstall_method'] = "AdobeSetup"
else:
cataloginfo['description'] = "Adobe updater"
cataloginfo['uninstallable'] = False
cataloginfo['update_for'] = ["PleaseEditMe-1.0.0.0.0"]
return cataloginfo
# maybe this is an Adobe Acrobat 9 Pro patcher?
acrobatpatcherapp = findAcrobatPatchApp(mountpoint)
if acrobatpatcherapp:
cataloginfo = {}
cataloginfo['installer_type'] = "AdobeAcrobatUpdater"
cataloginfo['uninstallable'] = False
plist = getBundleInfo(acrobatpatcherapp)
cataloginfo['version'] = munkicommon.getVersionString(plist)
cataloginfo['name'] = "AcrobatPro9Update"
cataloginfo['display_name'] = "Adobe Acrobat Pro Update"
cataloginfo['update_for'] = ["AcrobatPro9"]
cataloginfo['RestartAction'] = 'RequireLogout'
cataloginfo['requires'] = []
cataloginfo['installs'] = [
{'CFBundleIdentifier': 'com.adobe.Acrobat.Pro',
'CFBundleName': 'Acrobat',
'CFBundleShortVersionString': cataloginfo['version'],
'path': '/Applications/Adobe Acrobat 9 Pro/Adobe Acrobat Pro.app',
'type': 'application'}
]
return cataloginfo
# didn't find any Adobe installers/updaters we understand
return None
def adobeSetupError(errorcode):
'''Returns text description for numeric error code
Reference:
http://www.adobe.com/devnet/creativesuite/pdfs/DeployGuide.pdf'''
errormessage = {
0: "Application installed successfully",
1: "Unable to parse command line",
2: "Unknown user interface mode specified",
3: "Unable to initialize ExtendScript",
4: "User interface workflow failed",
5: "Unable to initialize user interface workflow",
6: "Silent workflow completed with errors",
7: "Unable to complete the silent workflow",
8: "Exit and restart",
9: "Unsupported operating system version",
10: "Unsupported file system",
11: "Another instance of Adobe Setup is running",
12: "CAPS integrity error",
13: "Media optimization failed",
14: "Failed due to insufficient privileges",
15: "Media DB Sync Failed",
16: "Failed to laod the Deployment file",
17: "EULA Acceptance Failed",
18: "C3PO Bootstrap Failed",
19: "Conflicting processes running",
20: "Install source path not specified or does not exist",
21: "Version of payloads is not supported by this version of RIB",
22: "Install Directory check failed",
23: "System Requirements Check failed",
24: "Exit User Canceled Workflow",
25: "A binary path Name exceeded Operating System's MAX PATH limit",
26: "Media Swap Required in Silent Mode",
27: "Keyed files detected in target",
28: "Base product is not installed",
29: "Base product has been moved",
30: "Insufficient disk space to install the payload + Done with errors",
31: "Insufficient disk space to install the payload + Failed",
32: "The patch is already applied",
9999: "Catastrophic error",
-1: "AdobeUberInstaller failed before launching Setup"}
return errormessage.get(errorcode, "Unknown error")
def doAdobeRemoval(item):
'''Wrapper for all the Adobe removal methods'''
uninstallmethod = item['uninstall_method']
payloads = item.get("payloads")
itempath = ""
if "uninstaller_item" in item:
managedinstallbase = munkicommon.pref('ManagedInstallDir')
itempath = os.path.join(managedinstallbase, 'Cache',
item["uninstaller_item"])
if not os.path.exists(itempath):
munkicommon.display_error("%s package for %s was "
"missing from the cache."
% (uninstallmethod, item['name']))
return -1
if uninstallmethod == "AdobeSetup":
# CS3 uninstall
retcode = runAdobeSetup(itempath, uninstalling=True, payloads=payloads)
elif uninstallmethod == "AdobeUberUninstaller":
# CS4 uninstall
pkgname = item.get("adobe_package_name") or item.get("package_path", "")
retcode = runAdobeUberTool(
itempath, pkgname, uninstalling=True, payloads=payloads)
elif uninstallmethod == "AdobeCS5AAMEEPackage":
# CS5 uninstall. Sheesh. Three releases, three methods.
adobeInstallInfo = item.get('adobe_install_info')
retcode = doAdobeCS5Uninstall(adobeInstallInfo, payloads=payloads)
elif uninstallmethod == "AdobeCCPUninstaller":
# Adobe Creative Cloud Packager packages
retcode = runAdobeCCPpkgScript(
itempath, payloads=payloads, operation="uninstall")
if retcode:
munkicommon.display_error("Uninstall of %s failed.", item['name'])
return retcode
def doAdobeInstall(item):
'''Wrapper to handle all the Adobe installer methods.
First get the path to the installer dmg. We know
it exists because installer.py already checked.'''
managedinstallbase = munkicommon.pref('ManagedInstallDir')
itempath = os.path.join(
managedinstallbase, 'Cache', item['installer_item'])
installer_type = item.get("installer_type", "")
payloads = item.get("payloads")
if installer_type == "AdobeSetup":
# Adobe CS3/CS4 updater or Adobe CS3 installer
retcode = runAdobeSetup(itempath, payloads=payloads)
elif installer_type == "AdobeUberInstaller":
# Adobe CS4 installer
pkgname = item.get("adobe_package_name") or item.get("package_path", "")
retcode = runAdobeUberTool(itempath, pkgname, payloads=payloads)
elif installer_type == "AdobeAcrobatUpdater":
# Acrobat Pro 9 updater
retcode = updateAcrobatPro(itempath)
elif installer_type == "AdobeCS5AAMEEPackage":
# Adobe CS5 AAMEE package
retcode = runAdobeCS5AAMEEInstall(itempath, payloads=payloads)
elif installer_type == "AdobeCS5PatchInstaller":
# Adobe CS5 updater
retcode = runAdobeCS5PatchInstaller(
itempath, copylocal=item.get("copy_local"), payloads=payloads)
elif installer_type == "AdobeCCPInstaller":
# Adobe Creative Cloud Packager packages
retcode = runAdobeCCPpkgScript(itempath, payloads=payloads)
return retcode
def main():
'''Placeholder'''
pass
if __name__ == '__main__':
main()
``` |
{
"source": "jlrickert/clipvault",
"score": 3
} |
#### File: clipvault/cmd/vault.py
```python
import pyperclip
from termcolor import colored, cprint
from rfc3987 import parse
from .cli import AbstractCli
class VaultCli(AbstractCli):
"""Vault description
"""
name = 'vault'
def command(self, args):
key, fromCb = self.__process_key(args.key)
if key and not fromCb:
value = pyperclip.paste()
self.vault[key] = value
else:
value = self.vault.set_password_by_input(key)
pyperclip.copy(value)
text = colored(
'Value for {} has been set and is now in your clipboard!'.format(
key),
'green')
cprint(text)
def _setup_parser(self, parser):
parser.add_argument(
'key', nargs='?', type=str,
help='Key to store contents of clipboard into')
def __process_key(self, key):
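# If no key was passed on the command line, the key is taken from the
# clipboard. A key that parses as an IRI is reduced to its authority,
# e.g. 'https://example.com/login' becomes the key 'example.com'.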
fromCb = False
if key is None:
key = pyperclip.paste()
fromCb = True
try:
tmp_key = key
uri = parse(tmp_key)
if uri['authority'] is not None:
key = uri['authority']
except ValueError:
pass
return key, fromCb
def __process_value(self):
return pyperclip.paste(), True
```
#### File: clipvault/clipvault/__main__.py
```python
import argparse
import sys
import pyperclip
from termcolor import colored, cprint
from .cli import Cli
from .vault import vault
def usage():
print('Missing key')
def copy_key(key, timeout=5):
try:
value = vault[key]
pyperclip.copy(value)
text = colored(
'"{} copied to clipboard for {} minutes!"'.format(key, timeout),
'green')
except vault.KeyError:
text = colored(
'"{} has no associated value"'.format(key, timeout),
'red')
cprint(text)
def set_key(key, value):
vault[key] = value
text = colored(
'Value for {} has been set!'.format(key),
'green')
cprint(text)
def main() -> None:
cli = Cli(sys.argv, vault)
return cli.run()
# args = sys.argv
# parser = argparse.ArgumentParser(description='')
# subparsers = parser.add_subparser()
# subparsers.require = True
# subparsers.dest = 'command'
# get_parser = subparsers.add_parser('get')
# set_parser = subparsers.add_parser('set')
# # parser.add_argument('command')
# if len(args) == 2:
# copy_key(args[1])
# elif len(args) >= 3:
# set_key(args[1], args[2])
# else:
# usage()
if __name__ == '__main__':
sys.exit(main())
``` |
{
"source": "jlrickert/code_repo",
"score": 4
} |
#### File: code_repo/python-snippets/graph.py
```python
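# Simple undirected graph: each vertex maps to the set of its adjacent
# vertices, and add_edge() records both directions.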
class Graph(object):
def __init__(self, graph_dic=None):
self.__graph_dic = dict()
for v, edges in (graph_dic or {}).items():  # tolerate Graph() with no initial dict
self.add_vertex(v)
for e in edges:
self.add_edge(v, e)
def vertices(self):
return set(self.__graph_dic.keys())
def edges(self):
return self.__generate_edges()
def add_vertex(self, vertex):
if vertex not in self.__graph_dic:
self.__graph_dic[vertex] = set()
def add_edge(self, vertex1, vertex2):
self.add_vertex(vertex1)
self.add_vertex(vertex2)
self.__graph_dic[vertex1].add(vertex2)
self.__graph_dic[vertex2].add(vertex1)
def adj(self, src):
return self.__graph_dic[src]
def __generate_edges(self):
edges = set()
for vertex in self.__graph_dic:
for neighbour in self.__graph_dic[vertex]:
edges.add((vertex, neighbour))
return edges
def __str__(self):
res = "vertices: "
for k in self.__graph_dic:
res += str(k)+" "
res += "\nedges: "
for edge in self.__generate_edges():
res += str(edge)+" "
return res
def test_graph():
g = {
"a": ["d"],
"b": ["a", "c"],
"c": ["a", "f"],
"d": ["b", "e", "f"],
"e": ["b", "f"],
"f": ["d", "e"],
}
graph = Graph(g)
assert graph.adj("a") == {"b", "c", "d"}
assert graph.adj("b") == {"a", "d", "e", "c"}
assert graph.adj("c") == {"a", "b", "f"}
assert graph.adj("d") == {"a", "b", "e", "f"}
assert graph.adj("e") == {"b", "d", "f"}
assert graph.adj("f") == {"c", "d", "e"}
assert set(graph.edges()) == {
("a", "b"), ("a", "c"), ("a", "d"),
("b", "a"), ("b", "c"), ("b", "d"), ("b", "e"),
("c", "a"), ("c", "b"), ("c", "f"),
("d", "a"), ("d", "b"), ("d", "e"), ("d", "f"),
("e", "b"), ("e", "d"), ("e", "f"),
("f", "c"), ("f", "d"), ("f", "e")
}
``` |
{
"source": "jlrickert/form-monster",
"score": 2
} |
#### File: form-monster/examples/sample.py
```python
import logging
from datetime import datetime, date
from form_monster import Form, Web, WxView
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger()
class StrField():
def __init__(self, name):
pass
class ComputedField(object):
def __init__(self, name, field):
pass
form = Form([
(StrField("first_text"), { "optional": True }),
(ComputedField("full_name", StrField), { "optional": False })
])
```
#### File: form-monster/form_monster/test_form.py
```python
from collections import OrderedDict
from datetime import date, datetime, timedelta
import pytest
from .form import Form
from .fields import StrField, DateField, BoolField
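# Number of days in 18 years (18 * 365.25 ~= 6574); compute_over_18 compares a
# birth date against this threshold.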
days_in_year = int(18.0 * 365.25)
def compute_over_18(birth_date):
if birth_date:
return (datetime.now().date() - birth_date).days >= days_in_year
@pytest.fixture
def first_name():
return StrField("First Name")
@pytest.fixture
def last_name():
return StrField("Last Name", optional=True)
@pytest.fixture
def signature():
return StrField("Signature", validate=lambda name: name == "Jack")
@pytest.fixture
def full_name(first_name, last_name):
print("computing", first_name, last_name)
return StrField(
"Full Name",
dependencies=[first_name, last_name],
compute=lambda first, last: (first or "") + " " + (last or ""))
@pytest.fixture
def date_of_birth():
return DateField("Date of Birth")
@pytest.fixture
def over_18(date_of_birth):
return BoolField(
"Over 18", dependencies=[date_of_birth], compute=compute_over_18)
@pytest.fixture
def example_form(first_name, last_name, signature, full_name, date_of_birth,
over_18):
form = Form(
fields={
"first_name": first_name,
"last_name": last_name,
"signature": signature,
"full_name": full_name,
"date_of_birth": date_of_birth,
"over_18": over_18
})
return form
def compute_full_name(first_name, last_name):
first = first_name or ""
last = ""
if last_name:
last = (" " + last_name)
return first + last
@pytest.fixture
def self_contained_form():
form = Form(
fields={
"first_name": StrField("First Name"),
"last_name": StrField("Last Name", optional=True),
"signature": StrField("signature", validate=lambda name: name == "Jack"),
"full_name": StrField(
"Full Name",
dependencies=["first_name", "last_name"],
compute=compute_full_name),
"date_of_birth": DateField("Date of Birth"),
"over_18": BoolField(
"Over 18",
dependencies=["date_of_birth"],
compute=compute_over_18)
})
return form
@pytest.fixture
def forms(example_form, self_contained_form):
return [example_form, self_contained_form]
class TestValues():
def test_set_value_for_simple_form(self, forms):
for form in forms:
form.set_value("first_name", "jack")
assert form.get_value("first_name") == "jack"
def test_computed(self, full_name, forms):
for form in forms:
assert id(full_name) != id(form.get_field("full_name"))
form.set_value("first_name", "Jack")
form.set_value("last_name", "Rabbit")
assert form.get_value("full_name") == "<NAME>"
class TestValidation():
def test_invalid_from_optional_fields(self, forms):
for form in forms:
assert form.is_valid() is False
def test_invalid(self, forms):
for form in forms:
form.set_value("first_name", "Jack")
form.set_value("signature", "Rawr")
assert form.is_valid() is False
def test_valid(self, forms):
for form in forms:
form.set_value("first_name", "Jack")
form.set_value("signature", "Jack")
dob = (datetime.now() - timedelta(days=(5 + 22 * 365))).date()
form.set_value("date_of_birth", dob)
for k, field in form.get_fields():
if not field.is_valid():
print(k, repr(field), field.get_value())
assert field.is_valid() is True
assert form.is_valid() is True
```
#### File: form-monster/form_monster/test_utils.py
```python
from .utils import UNIX_SHELL_RE, WINDOWS_SHELL_RE
def test_unix_shell_regex():
result = UNIX_SHELL_RE.match("/usr/bin/zsh")
assert result, "should return a value"
result = UNIX_SHELL_RE.match("/usr/bin/powershell")
assert result, "should return a value"
def test_windows_shell_regex():
result = WINDOWS_SHELL_RE.match("/usr/bin/zsh")
assert result is None
``` |
{
"source": "jlricon/pandas.table",
"score": 3
} |
#### File: jlricon/pandas.table/pandastable.py
```python
from pandas import DataFrame
import types
class PandasTable(DataFrame):
"""
Call the table data.table-style: a query string filters rows (e.g. dt('a==1')),
a list subsets columns, and a function (or 'N' for a row count) together with
by= groups and aggregates, e.g. dt('a==1', sum, by='species').
"""
def __call__(self, *args, **kwargs):
return (self._querycheck(args)._groupcheck(args,
kwargs)._colcheck(args))
def _groupcheck(self, args, kwargs):
"""If there is a groupby operation in the arguments, apply it"""
print(kwargs)
if "by" in kwargs:
if self._getfunc(args):
return PandasTable(
self.groupby(kwargs["by"]).apply(self._getfunc(args)))
else:
raise Exception("No function was defined")
return self
def _querycheck(self, args):
"""
If there is a query in the arguments, use it. In any case, return the
dataframe
"""
for arg in args:
if type(arg) == str and arg != 'N':
return PandasTable(self.query(arg))
return self
def _colcheck(self, args):
"""
If there is a column subsetting operation, do it
"""
for arg in args:
if type(arg) == list:
return PandasTable(self.loc[:, arg])
return self
def _getfunc(self, args):
"""
Returns a function, if present in the arguments
"""
for arg in args:
if isinstance(arg, types.FunctionType) or isinstance(
arg, types.BuiltinMethodType) or isinstance(
arg, types.BuiltinFunctionType):
return arg
if arg == 'N':
return len
return None
_N = "N"
``` |
{
"source": "jlries61/SPM_vs_XGBOOST",
"score": 3
} |
#### File: SPM_vs_XGBOOST/Scripts/funcs.py
```python
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
import os
import shutil
import matplotlib.pyplot as plt
from scipy import stats
def df_anyobj(df):
anyobj = False
for varname in df.columns:
if df[varname].dtype == "object":
anyobj = True
break
return anyobj
def expandClass(inframe, varlist):
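# One-hot encode categorical columns: every object-dtype column (plus any column
# already named in varlist, which is updated in place) is replaced by 0/1
# indicator columns, one per distinct value, with missing values mapped to a
# "<column>_miss" indicator.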
outframe = inframe.copy()
for varname in inframe.columns:
if inframe[varname].dtype == "object":
varlist.add(varname)
for varname in varlist:
values = inframe[varname]
cats = values.unique()
for cat in cats:
dummy = list()
for value in values:
if pd.isnull(cat) and pd.isnull(value):
dummy.append(1)
elif value == cat:
dummy.append(1)
else:
dummy.append(0)
if pd.isnull(cat):
name = varname + "_miss"
else:
name = varname + "_" + str(cat)
outframe[name] = pd.Series(dummy)
outframe.drop(varname, axis=1, inplace=True)
return outframe
def fileparts(path):
folder_ = os.path.dirname(path)
file_ = os.path.basename(path).split('.')[0]
ext_ = os.path.splitext(path)[1]
return folder_, file_, ext_
def fbb(s, n=12, t='l'): # fill by blanks
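# Pads s with spaces to length n, on the left by default or on the right when
# t == 'r', e.g. fbb("abc", 6) -> '   abc' and fbb("abc", 6, 'r') -> 'abc   '.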
if n <= len(s):
return s
bl = ''
for i in range(n - len(s)):
bl += ' '
if t == "l":
s = bl+s
if t == 'r':
s = s+bl
return s
def get_dict_row(dict_input, row):
dict_output = {}
for key in dict_input.keys():
dict_output[key] = dict_input[key][row]
return dict_output
def copy_and_delete(source_filename, destination_filename, rem_source=True):
if os.path.isfile(destination_filename):
os.remove(destination_filename)
if os.path.isfile(source_filename):
shutil.copyfile(source_filename, destination_filename)
if rem_source:
os.remove(source_filename)
def create_report(report_folder, models, dataset, note=""):
report = list()
report.append("========= Dataset name: " + dataset['Name'] + " =========\n")
report.append(" - train part size: {}".format(dataset['N_obs_train_sam']) + "\n")
report.append(" - test part size: {}".format(dataset['N_obs_test_sam']) + "\n")
report.append(" - number of features: {}".format(dataset['N_features']) + "\n")
report.append(" - number of classes: {}".format(dataset['N_classes']) + "\n")
report.append("\n")
report.append("\n")
report.append(" --- Time elapsed during training ---\n")
for model in models:
report.append(" - " + model.perf['Name'] + ": {:.3f}".format(model.perf['TrainingTime']) + " seconds\n")
report.append("\n")
report.append("\n")
report.append(" --- Individual AUCs ---\n")
for model in models:
report.append(" - " + model.perf['Name'] + ": {:.3f}".format(model.perf['AUC']) + "\n")
report.append("\n")
report.append("\n")
report.append(" --- Performance for test set ---\n")
for model in models:
report.append("\n")
report.append(" --- " + model.perf['Name'] + " ---\n")
report.append("\n")
report.append(model.perf['StatsTxt'])
if not os.path.isdir(report_folder):
os.mkdir(report_folder)
f_report = open(report_folder + "/report" + note + ".txt", "w")
for line in report:
f_report.writelines(line)
f_report.close()
plt.clf()
for model in models:
plt.plot(1-model.perf['ROC_Specificity'], model.perf['ROC_Sensitivity'], model.perf['Color'], linewidth=2, label="AUC={:6.4f}, ".format(model.perf['AUC']) + model.perf['Name'])
plt.xlabel('False Positive Rate (1-Specificity)')
plt.ylabel('True Positive Rate (Sensitivity)')
plt.title('ROC (Dataset: ' + dataset['Name'] + ')')
plt.legend(loc=4)
plt.grid(True)
plt.savefig(report_folder + "/roc_curves" + note + ".png")
def get_common_stats(report_folder, models_lists, dataset, note):
n_model = len(models_lists)
n_sam = dataset['N_data_samples']
model_names = list()
for model_list in models_lists:
model_names.append(model_list[0].perf['Name'])
auc = np.zeros((n_sam, n_model))
fp = np.arange(0, 1.01, 0.01)
tp = np.zeros((len(fp), n_sam))
plt.clf()
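# For each model, interpolate every sample's ROC curve onto a common
# false-positive-rate grid and average them (vertical averaging) so a single
# mean curve per model can be plotted alongside the per-sample curves.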
for i in range(n_model):
for j in range(n_sam):
perf = models_lists[i][j].perf
if np.isnan(perf['AUC']):
continue
auc[j, i] = perf['AUC']
sensitivity = perf['ROC_Sensitivity']
specificity = perf['ROC_Specificity']
ispec = 1-specificity
plt.plot(ispec, sensitivity, perf['Color'], linewidth=0.5, alpha=0.4)
tp[:, j] = np.interp(fp, ispec, sensitivity)
tp_mean = np.mean(tp, axis=1)
plt.plot(fp, tp_mean, perf['Color'], linewidth=2,
label="AUC={:6.4f}, ".format(np.mean(auc[:, i])) + model_names[i])
plt.xlabel('False Positive Rate (1-Specificity)')
plt.ylabel('True Positive Rate (Sensitivity)')
plt.title('ROC (Dataset: ' + dataset['Name'] + ')')
plt.legend(loc=4)
plt.grid(True)
plt.savefig(report_folder + dataset['Name'] + "_roc_curves" + note + ".png")
auc_stats = {'Mean ': np.mean(auc, axis=0), 'Min ': np.min(auc, axis=0), 'Max ': np.max(auc, axis=0),
'Range': np.max(auc, axis=0)-np.min(auc, axis=0), 'Std ': np.std(auc, axis=0)}
time_sam = np.zeros((n_sam, n_model))
for i in range(n_model):
for j in range(n_sam):
time_sam[j, i] = models_lists[i][j].perf['TrainingTime']
time_mean = np.mean(time_sam, 0)
report = list()
report.append("========= Dataset name: " + dataset['Name'] + " =========\n")
report.append(" - train part size: {}".format(dataset['N_obs_train_sam']) + "\n")
report.append(" - test part size: {}".format(dataset['N_obs_test_sam']) + "\n")
report.append(" - number of features: {}".format(dataset['N_features']) + "\n")
report.append(" - number of classes: {}".format(dataset['N_classes']) + "\n")
report.append(" - number of data samples: {}".format(dataset['N_data_samples']) + "\n")
report.append("\n")
report.append("\n")
report.append("========= Average time elapsed during training (sec) =========\n")
line1 = ' '
line2 = 'Time'
for i in range(n_model):
line1 += fbb(model_names[i], 12)
line2 += '{:12.3f}'.format(time_mean[i])
report.append(line1 + "\n")
report.append(line2 + "\n")
report.append("\n")
report.append("\n")
report.append("========= Individual AUCs =========\n")
line1 = ' '
for i in range(n_model):
line1 += fbb(model_names[i], 12)
report.append(line1 + "\n")
for i in range(n_sam):
line2 = fbb("Sample#" + str(i+1), 9, 'r')
for j in range(n_model):
line2 += '{:12.3f}'.format(auc[i, j])
report.append(line2 + "\n")
report.append("\n")
report.append("\n")
report.append("========= Summary =========\n")
line1 = ' '
for i in range(n_model):
line1 += fbb(model_names[i], 12)
report.append(line1 + "\n")
for key in auc_stats.keys():
line2 = key
for i in range(n_model):
line2 += '{:12.3f}'.format(auc_stats[key][i])
report.append(line2 + "\n")
report.append("\n")
report.append("\n")
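# Paired t-tests on the per-sample AUCs for every pair of models; a p-value at
# or below 0.05 is reported as a significant mean difference.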
for i in range(0, n_model-1):
for j in range(i+1, n_model):
t, p = stats.ttest_rel(auc[:, i], auc[:, j])
report.append("========= Paired t-Test (" + model_names[i] + " <-> " + model_names[j] + ") =========\n")
report.append("t = {:.4f}".format(t) + "\n")
report.append("p = {:.4e}".format(p) + "\n")
if p <= 0.05:
report.append("Mean difference is significant.\n")
else:
report.append("Mean difference is NOT significant.\n")
report.append('\n')
f_report = open(report_folder + dataset['Name'] + "_summary_report" + note + ".txt", "w")
for line in report:
f_report.writelines(line)
f_report.close()
```
#### File: SPM_vs_XGBOOST/Scripts/sumrept4m.py
```python
import os
import pandas as pd
import re
from sklearn.metrics import roc_auc_score
def rng(series):
return series.max() - series.min()
def gtct(series1, series2):
cmp = series1.gt(series2)
count = 0
for bigger in cmp:
if bigger:
count = count + 1
return count
# Define constants
SPREAD = "../Datasets4.xlsx" # Input dataset information spreadsheet
OUTFILE = "../Reports/tn_vs_xgb_sumrept4m.xlsx" # Summary report workbook to create
fields1 = ["Name", "N Replications", "N Features", "N Learn", "N Holdout",
"Avg ROC (TN; MART)", "Avg ROC (TN; RGBOOST, MHESS=0)", "Avg ROC (TN; RGBOOST, MHESS=1)",
"Avg ROC (XGB)", "StdDev ROC (TN; MART)", "StdDev ROC (TN; RGB MHESS=0)",
"StdDev ROC (TN; RGB MHESS=1)", "StdDev ROC (XGB)", "Avg Delta_ROC (Best TN vs XGB)",
"Min Delta ROC (Best TN vs XGB)", "Max Delta ROC (Best TN vs XGB)",
"StdDev Delta ROC (Best TN vs XGB)"]
fields2 = ["Name", "Stat", "ROC(TN; MART)", "ROC(TN; RGBOOST; MHESS=0)",
"ROC(TN; RGBOOST; MHESS=1)", "ROC(XGBoost)"]
fields3 = ["Name", "N Replications", "N Features", "N Learn", "N Holdout", "Avg ROC (TN)",
"Avg ROC (XGB)", "Min ROC (TN)", "Min ROC (XGB)", "Max ROC (TN)", "Max ROC (XGB)",
"StdDev ROC (TN)", "StdDev ROC (XGB)", "Avg Delta ROC", "Min Delta ROC",
"Max Delta ROC", "StdDev Delta ROC", "# times TN beats XGB"]
REPTDIR = "../Reports/RGBOOST4M" # Directory from which to pull the model results
DATADIR = "../Data/Classification" # Repository of classification model datasets
SLASH = "/"
MODSET = "_RGLs-0_INF-0.0_SUBS-1.0_LR-0.1_DEPTH-7_PREDS-500_NTREES-400" # Model set to use
scorenames = ["Class", "Prob"] # Names of fields in score datasets
filename_root = ["mart", "treenet", "treenet2", "xgb"] # Input Filename prefixes
fltfmt = "%.5f"
# Define list of fields for the roc data frame (created once per input dataset)
roccols = filename_root.copy()
roccols.append("Delta")
# Read input dataset information spreadsheet
dsl = pd.read_excel(SPREAD)
# Initialize output data frames
summary = pd.DataFrame(columns = fields1)
detail = pd.DataFrame(columns = fields2)
MARTvXGB = pd.DataFrame(columns = fields3)
TNRGBM0 = pd.DataFrame(columns = fields3)
TNRGBM1 = pd.DataFrame(columns = fields3)
BestTN = pd.DataFrame(columns = fields3)
for i in dsl.index: # For each input dataset
dataname = dsl.loc[i, "Name"]
reptdir = REPTDIR + SLASH + dataname + MODSET # Directory containing model results
if not os.path.isdir(reptdir): # If it does not exist, then skip it
continue
nrepl = dsl.loc[i, "N_data_samples"]
datadir = DATADIR + SLASH + dataname + SLASH + "SAMPLES4" # Directory containing partitioned data
trainfile1 = datadir + SLASH + "data_train_1.csv" # Training dataset (file)
holdfile1 = datadir + SLASH + "data_hold_1.csv" # Holdout dataset (file)
rept = REPTDIR + SLASH + dataname + "_summary_report" + MODSET + ".txt"
# Initialize summary report row
row = dict()
row["Name"] = dataname
row["N Replications"] = nrepl
row["N Features"] = dsl.loc[i, "N_features"]
# Extract record counts from the input datasets
traindata = pd.read_csv(trainfile1, low_memory=False) # Training data frame
holddata = pd.read_csv(holdfile1, low_memory=False) # Holdout data frame
row["N Learn"] = len(traindata.index)
row["N Holdout"] = len(holddata.index)
del traindata, holddata # These can be quite large, so free up the memory now
row_mart = row.copy()
# Determine best performing TN model
besttn = -1
with open(rept) as fh:
for line in fh:
if re.match("^Mean ", line):
values = line.split()
values.pop(0)
nval = len(values)
maxroc_tn = 0
for k in range(nval):  # use k so the dataset-loop index i is not shadowed
if k == 3:  # skip column 3 (XGBoost) so only the TreeNet variants compete
continue
value = float(values[k])
if value > maxroc_tn:
besttn = k
maxroc_tn = value
break
# Define ROC data frame
roc = pd.DataFrame(columns=roccols)
for irepl in range(1, nrepl + 1):
roc_row = dict()
for rootname in filename_root:
score_file = reptdir + SLASH + rootname + "_score_test_" + str(irepl) + ".csv"
modscores = pd.read_csv(score_file, names=scorenames)
roc_row[rootname] = roc_auc_score(modscores["Class"], modscores["Prob"])
roc_row["Delta"] = roc_row[filename_root[besttn]] - roc_row["xgb"]
roc = roc.append(roc_row, ignore_index=True)
# Add ROC statistics to summary report
row["Avg ROC (TN; MART)"] = roc["mart"].mean()
row["Avg ROC (TN; RGBOOST, MHESS=0)"] = roc["treenet"].mean()
row["Avg ROC (TN; RGBOOST, MHESS=1)"] = roc["treenet2"].mean()
row["Avg ROC (XGB)"] = roc["xgb"].mean()
row["StdDev ROC (TN; MART)"] = roc["mart"].std()
row["StdDev ROC (TN; RGB MHESS=0)"] = roc["treenet"].std()
row["StdDev ROC (TN; RGB MHESS=1)"] = roc["treenet2"].std()
row["StdDev ROC (XGB)"] = roc["xgb"].std()
row["Avg Delta_ROC (Best TN vs XGB)"] = roc["Delta"].mean()
row["Min Delta ROC (Best TN vs XGB)"] = roc["Delta"].min()
row["Max Delta ROC (Best TN vs XGB)"] = roc["Delta"].max()
row["StdDev Delta ROC (Best TN vs XGB)"] = roc["Delta"].std()
summary = summary.append(row, ignore_index=True)
# Add by model type descriptive stats to the detail report
mean_row = dict({"Name":dataname, "Stat":"Mean", fields2[2]:row["Avg ROC (TN; MART)"],
fields2[3]:row["Avg ROC (TN; RGBOOST, MHESS=0)"],
fields2[4]:row["Avg ROC (TN; RGBOOST, MHESS=1)"],
fields2[5]:row["Avg ROC (XGB)"]})
min_row = dict({"Name":dataname, "Stat":"Min", fields2[2]:roc["mart"].min(),
fields2[3]:roc["treenet"].min(), fields2[4]:roc["treenet2"].min(),
fields2[5]:roc["xgb"].min()})
max_row = dict({"Name":dataname, "Stat":"Max", fields2[2]:roc["mart"].max(),
fields2[3]:roc["treenet"].max(), fields2[4]:roc["treenet2"].max(),
fields2[5]:roc["xgb"].max()})
range_row = dict({"Name":dataname, "Stat":"Range", fields2[2]:rng(roc["mart"]),
fields2[3]:rng(roc["treenet"]), fields2[4]:rng(roc["treenet2"]),
fields2[5]:rng(roc["xgb"])})
std_row = dict({"Name":dataname, "Stat":"Std", fields2[2]:row["StdDev ROC (TN; MART)"],
fields2[3]:row["StdDev ROC (TN; RGB MHESS=0)"],
fields2[4]:row["StdDev ROC (TN; RGB MHESS=1)"],
fields2[5]:row["StdDev ROC (XGB)"]})
detail = detail.append(pd.DataFrame([mean_row, min_row, max_row, range_row, std_row]),
ignore_index=True)
row_mart["Avg ROC (XGB)"] = mean_row[fields2[5]]
row_mart["Min ROC (XGB)"] = min_row[fields2[5]]
row_mart["Max ROC (XGB)"] = max_row[fields2[5]]
row_mart["StdDev ROC (XGB)"] = std_row[fields2[5]]
row_rgb0 = row_mart.copy()
row_rgb1 = row_mart.copy()
row_best = row_mart.copy()
row_mart["Avg ROC (TN)"] = mean_row[fields2[2]]
row_mart["Min ROC (TN)"] = min_row[fields2[2]]
row_mart["Max ROC (TN)"] = max_row[fields2[2]]
row_mart["StdDev ROC (TN)"] = std_row[fields2[2]]
delta_mart = roc["mart"].subtract(roc["xgb"])
row_mart["Avg Delta ROC"] = delta_mart.mean()
row_mart["Min Delta ROC"] = delta_mart.min()
row_mart["Max Delta ROC"] = delta_mart.max()
row_mart["StdDev Delta ROC"] = delta_mart.std()
row_mart["# times TN beats XGB"] = gtct(roc["mart"], roc["xgb"])
MARTvXGB = MARTvXGB.append(row_mart, ignore_index=True)
row_rgb0["Avg ROC (TN)"] = mean_row[fields2[3]]
row_rgb0["Min ROC (TN)"] = min_row[fields2[3]]
row_rgb0["Max ROC (TN)"] = max_row[fields2[3]]
row_rgb0["StdDev ROC (TN)"] = std_row[fields2[3]]
delta_rgb0 = roc["treenet"].subtract(roc["xgb"])
row_rgb0["Avg Delta ROC"] = delta_rgb0.mean()
row_rgb0["Min Delta ROC"] = delta_rgb0.min()
row_rgb0["Max Delta ROC"] = delta_rgb0.max()
row_rgb0["StdDev Delta ROC"] = delta_rgb0.std()
row_rgb0["# times TN beats XGB"] = gtct(roc["treenet"], roc["xgb"])
TNRGBM0 = TNRGBM0.append(row_rgb0, ignore_index=True)
row_rgb1["Avg ROC (TN)"] = mean_row[fields2[4]]
row_rgb1["Min ROC (TN)"] = min_row[fields2[4]]
row_rgb1["Max ROC (TN)"] = max_row[fields2[4]]
row_rgb1["StdDev ROC (TN)"] = std_row[fields2[4]]
delta_rgb1 = roc["treenet2"].subtract(roc["xgb"])
row_rgb1["Avg Delta ROC"] = delta_rgb1.mean()
row_rgb1["Min Delta ROC"] = delta_rgb1.min()
row_rgb1["Max Delta ROC"] = delta_rgb1.max()
row_rgb1["StdDev Delta ROC"] = delta_rgb1.std()
row_rgb1["# times TN beats XGB"] = gtct(roc["treenet2"], roc["xgb"])
TNRGBM1 = TNRGBM1.append(row_rgb1, ignore_index=True)
index = besttn + 2
row_best["Avg ROC (TN)"] = mean_row[fields2[index]]
row_best["Min ROC (TN)"] = min_row[fields2[index]]
row_best["Max ROC (TN)"] = max_row[fields2[index]]
row_best["StdDev ROC (TN)"] = std_row[fields2[index]]
delta_best = roc[filename_root[besttn]].subtract(roc["xgb"])
row_best["Avg Delta ROC"] = delta_best.mean()
row_best["Min Delta ROC"] = delta_best.min()
row_best["Max Delta ROC"] = delta_best.max()
row_best["StdDev Delta ROC"] = delta_best.std()
row_best["# times TN beats XGB"] = gtct(roc[filename_root[besttn]], roc["xgb"])
BestTN = BestTN.append(row_best, ignore_index=True)
# Write frames to the output spreadsheet
with pd.ExcelWriter(path = OUTFILE) as outwrite:
summary.to_excel(outwrite, sheet_name = "Summary", index = False, float_format = fltfmt)
detail.to_excel(outwrite, sheet_name = "ByDataset", index = False, float_format = fltfmt)
MARTvXGB.to_excel(outwrite, sheet_name = "TreeNet-MART vs XGB", index = False,
float_format = fltfmt)
TNRGBM0.to_excel(outwrite, sheet_name = "TreeNet-RGBOOST (MHESS=0) vs XGB", index = False,
float_format = fltfmt)
TNRGBM1.to_excel(outwrite, sheet_name = "TreeNet-RGBOOST (MHESS=1) vs XGB", index = False,
float_format = fltfmt)
BestTN.to_excel(outwrite, sheet_name = "Best TreeNet vs XGB", index = False, float_format = fltfmt)
``` |
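For reference, the per-replication metric above is simply `sklearn.metrics.roc_auc_score` applied to the two columns of each score file (true class, predicted probability). A toy sketch with an in-memory frame standing in for a real `*_score_test_*.csv`:
```python
import pandas as pd
from sklearn.metrics import roc_auc_score

# Stand-in for one model's score file; column names follow scorenames = ["Class", "Prob"]
modscores = pd.DataFrame({
    "Class": [0, 0, 1, 1, 1, 0],
    "Prob": [0.10, 0.40, 0.35, 0.80, 0.70, 0.20],
})

auc = roc_auc_score(modscores["Class"], modscores["Prob"])
print("AUC = {:.5f}".format(auc))
```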
{
"source": "Jlrine2/aws-cdk-python-constructs",
"score": 2
} |
#### File: aws-cdk-python-constructs/python_constructs/python_lambda_api.py
```python
from dataclasses import dataclass
from typing import Mapping, Optional, Union
from aws_cdk import (
Duration,
aws_lambda_python_alpha as lambda_python,
aws_logs as logs,
aws_apigateway as apigateway
)
from constructs import Construct
@dataclass
class LambdaApiPythonParams:
function_code_location: str
function_handler: str
function_index: str
function_environment: Optional[Mapping[str, str]] = None
function_log_retention: Optional[logs.RetentionDays] = None
function_memory_size: Union[int, float, None] = None
function_timeout: Optional[Duration] = Duration.seconds(30)
api_deploy: Optional[bool] = None
api_deploy_options: Optional[apigateway.StageOptions] = None
api_domain_name: Optional[apigateway.DomainNameOptions] = None
api_headers_parameters: Optional[Mapping[str, str]] = None
class LambdaApiPython(Construct):
def __init__(self, scope: Construct, _id: str, params: LambdaApiPythonParams = None):
super().__init__(scope, _id)
if params is None:
raise ValueError('Argument params is required')
function = lambda_python.PythonFunction(
self, f'{_id}_function',
entry=params.function_code_location,
handler=params.function_handler,
index=params.function_index,
environment=params.function_environment,
log_retention=params.function_log_retention,
memory_size=params.function_memory_size,
timeout=params.function_timeout
)
api = apigateway.LambdaRestApi(
self, f'{_id}-api',
handler=function,
proxy=True,
deploy=params.api_deploy,
deploy_options=params.api_deploy_options,
domain_name=params.api_domain_name,
parameters=params.api_headers_parameters,
)
``` |
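A hedged usage sketch for the construct above. The stack name and the `lambda_src` layout are illustrative, and the snippet assumes the same `aws-cdk-lib`, `constructs`, and `aws-lambda-python-alpha` dependencies the construct itself imports:
```python
from aws_cdk import App, Stack
from constructs import Construct

from python_constructs.python_lambda_api import LambdaApiPython, LambdaApiPythonParams


class ApiStack(Stack):
    def __init__(self, scope: Construct, _id: str, **kwargs) -> None:
        super().__init__(scope, _id, **kwargs)
        # Illustrative handler package: lambda_src/main.py defining handler(event, context)
        params = LambdaApiPythonParams(
            function_code_location="lambda_src",
            function_handler="handler",
            function_index="main.py",
        )
        LambdaApiPython(self, "python-lambda-api", params=params)


app = App()
ApiStack(app, "example-api-stack")
app.synth()
```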
{
"source": "Jlrine2/GameManagement",
"score": 3
} |
#### File: status/src/main.py
```python
import json
from os import environ
from flask import make_response, Request
import google.cloud.compute_v1 as compute_v1
SERVER = environ['SERVER']
PROJECT = environ['PROJECT_ID']
ZONE = f"{environ['REGION']}-a"
def get_server_status(request: Request):
instance_client = compute_v1.InstancesClient()
print(f"Stopping {SERVER}")
instance = instance_client.get(
project=PROJECT,
zone=ZONE,
instance=SERVER
)
response = make_response(json.dumps({"status": instance.status}))
response.headers = {
'Access-Control-Allow-Origin': '*'
}
return response
``` |
{
"source": "Jlrine2/hyp3",
"score": 2
} |
#### File: tests/test_api/test_api_spec.py
```python
from http import HTTPStatus
from test_api.conftest import AUTH_COOKIE, JOBS_URI, SUBSCRIPTIONS_URI, USER_URI, login
from hyp3_api import auth, routes
ENDPOINTS = {
JOBS_URI: {'GET', 'HEAD', 'OPTIONS', 'POST'},
JOBS_URI + '/foo': {'GET', 'HEAD', 'OPTIONS'},
USER_URI: {'GET', 'HEAD', 'OPTIONS'},
SUBSCRIPTIONS_URI: {'GET', 'HEAD', 'OPTIONS', 'POST'},
SUBSCRIPTIONS_URI + '/foo': {'GET', 'HEAD', 'OPTIONS', 'PATCH'},
}
def test_options(client):
all_methods = {'GET', 'HEAD', 'OPTIONS', 'POST', 'PUT', 'DELETE', 'PATCH'}
login(client)
for uri, good_methods in ENDPOINTS.items():
response = client.options(uri)
assert response.status_code == HTTPStatus.OK
allowed_methods = response.headers['allow'].split(', ')
assert set(allowed_methods) == good_methods
for bad_method in all_methods - good_methods:
response = client.open(uri, method=bad_method)
assert response.status_code == HTTPStatus.METHOD_NOT_ALLOWED
def test_not_logged_in(client):
for uri, methods in ENDPOINTS.items():
for method in methods:
response = client.open(uri, method=method)
if method == 'OPTIONS':
assert response.status_code == HTTPStatus.OK
else:
assert response.status_code == HTTPStatus.UNAUTHORIZED
def test_invalid_cookie(client):
for uri in ENDPOINTS:
client.set_cookie('localhost', AUTH_COOKIE, 'garbage I say!!! GARGBAGE!!!')
response = client.get(uri)
assert response.status_code == HTTPStatus.UNAUTHORIZED
def test_expired_cookie(client):
for uri in ENDPOINTS:
client.set_cookie('localhost', AUTH_COOKIE, auth.get_mock_jwt_cookie('user', -1))
response = client.get(uri)
assert response.status_code == HTTPStatus.UNAUTHORIZED
def test_no_route(client):
response = client.get('/no/such/path')
assert response.status_code == HTTPStatus.NOT_FOUND
def test_cors_no_origin(client):
for uri in ENDPOINTS:
response = client.get(uri)
assert 'Access-Control-Allow-Origin' not in response.headers
assert 'Access-Control-Allow-Credentials' not in response.headers
def test_cors_bad_origins(client):
bad_origins = [
'https://www.google.com',
'https://www.alaska.edu',
]
for uri in ENDPOINTS:
for origin in bad_origins:
response = client.get(uri, headers={'Origin': origin})
assert 'Access-Control-Allow-Origin' not in response.headers
assert 'Access-Control-Allow-Credentials' not in response.headers
def test_cors_good_origins(client):
good_origins = [
'https://search.asf.alaska.edu',
'https://search-test.asf.alaska.edu',
'http://local.asf.alaska.edu',
]
for uri in ENDPOINTS:
for origin in good_origins:
response = client.get(uri, headers={'Origin': origin})
assert response.headers['Access-Control-Allow-Origin'] == origin
assert response.headers['Access-Control-Allow-Credentials'] == 'true'
def test_hyp3_unavailable(client, monkeypatch):
monkeypatch.setenv('SYSTEM_AVAILABLE', 'false')
for uri, methods in ENDPOINTS.items():
for method in methods:
response = client.open(uri, method=method)
assert response.status_code == HTTPStatus.SERVICE_UNAVAILABLE
def test_redirect_root(client):
response = client.get('/')
assert response.location.endswith('/ui/')
assert response.status_code == HTTPStatus.FOUND
def test_ui_location(client):
response = client.get('/ui')
assert response.status_code == HTTPStatus.PERMANENT_REDIRECT
assert response.location.endswith('/ui/')
response = client.get('/ui/')
assert response.status_code == HTTPStatus.OK
def test_wkt_validator(client):
validator = routes.WKTValidator()
assert not validator.validate('foo')
assert validator.validate('POLYGON((-5 2, -3 2, -3 5, -5 5, -5 2))')
def test_banned_ip_address(client, monkeypatch):
monkeypatch.setenv('BANNED_CIDR_BLOCKS', '1.1.1.0/24,2.2.2.2/32')
good_addresses = ['2.2.2.1', '2.2.2.3', '1.1.2.1', ' 192.168.3.11']
for good_address in good_addresses:
client.environ_base = {'REMOTE_ADDR': good_address}
response = client.get('/ui/')
assert response.status_code == HTTPStatus.OK
bad_addresses = ['1.1.1.1', '1.1.1.255', '2.2.2.2']
for bad_address in bad_addresses:
client.environ_base = {'REMOTE_ADDR': bad_address}
response = client.get('/ui/')
assert response.status_code == HTTPStatus.FORBIDDEN
def test_error_format(client):
response = client.post(JOBS_URI)
assert response.status_code == HTTPStatus.UNAUTHORIZED
assert response.headers['Content-Type'] == 'application/problem+json'
assert response.json['status'] == HTTPStatus.UNAUTHORIZED
assert response.json['title'] == 'Unauthorized'
assert response.json['type'] == 'about:blank'
assert 'detail' in response.json
login(client)
response = client.post(JOBS_URI)
assert response.status_code == HTTPStatus.BAD_REQUEST
assert response.headers['Content-Type'] == 'application/problem+json'
assert response.json['status'] == HTTPStatus.BAD_REQUEST
assert response.json['title'] == 'Bad Request'
assert response.json['type'] == 'about:blank'
assert 'detail' in response.json
``` |
{
"source": "JLRipley314/cheb-fftw-py",
"score": 3
} |
#### File: cheb-fftw-py/cheb/cheb.py
```python
import os, ctypes, pathlib, glob
import numpy as np
# path of the shared library
_path_cheb = glob.glob(str(pathlib.Path(__file__).parent.parent)+'/build/*/*/cheb.so')[0]
_lib_cheb = ctypes.CDLL(str(_path_cheb))
_cheb_initialized = False
#=============================================================================
_cheb_init = _lib_cheb.init
_cheb_init.restype = None
_cheb_init.argtypes = [
ctypes.c_size_t,
ctypes.c_double,
ctypes.c_double
]
def init(n:int, lower:float, upper:float) -> None:
"""
Initialize internal FFTW pointers, etc.
"""
global _cheb_initialized
_cheb_initialized = True
_cheb_init(
ctypes.c_size_t(n),
ctypes.c_double(lower),
ctypes.c_double(upper)
)
#=============================================================================
_cheb_cleanup = _lib_cheb.cleanup
_cheb_cleanup.restype = None
_cheb_cleanup.argtypes = []
def cleanup() -> None:
"""
Free internal FFTW pointers, etc.
"""
global _cheb_initialized
assert(_cheb_initialized)
_cheb_initialized = False
_cheb_cleanup()
#=============================================================================
_cheb_n = _lib_cheb.n
_cheb_n.restype = ctypes.c_size_t
_cheb_n.argtypes = []
def n() -> int:
"""
Number of Chebyshev collocation points.
"""
assert(_cheb_initialized)
return _cheb_n()
#=============================================================================
_cheb_lower = _lib_cheb.lower
_cheb_lower.restype = ctypes.c_double
_cheb_lower.argtypes = []
def lower() -> float:
"""
Lower boundary of domain in real space.
"""
assert(_cheb_initialized)
return _cheb_lower()
#=============================================================================
_cheb_upper = _lib_cheb.upper
_cheb_upper.restype = ctypes.c_double
_cheb_upper.argtypes = []
def upper() -> float:
"""
Upper boundary of domain in real space.
"""
assert(_cheb_initialized)
return _cheb_upper()
#=============================================================================
_cheb_pt = _lib_cheb.pt
_cheb_pt.restype = ctypes.c_double
_cheb_pt.argtypes = [
ctypes.c_size_t
]
def pt(i:int) -> float:
"""
Location of ith Chebyshev point in real space.
"""
assert(_cheb_initialized)
assert(i>=0 and i<n())
return _cheb_pt(ctypes.c_size_t(i))
#=============================================================================
_cheb_der = _lib_cheb.der
_cheb_der.restype = None
_cheb_der.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_double),
np.ctypeslib.ndpointer(ctypes.c_double)
]
def der(v:np.ndarray, dv:np.ndarray) -> None:
"""
Derivative over interval [lower, upper].
"""
assert(_cheb_initialized)
assert(v.size==n() and dv.size==n())
_cheb_der(v,dv)
#=============================================================================
_cheb_filter = _lib_cheb.filter
_cheb_filter.restype = None
_cheb_filter.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_double)
]
def filter(v:np.ndarray) -> None:
"""
Low pass filter in Chebyshev space.
"""
assert(_cheb_initialized)
assert(v.size==n())
_cheb_filter(v)
``` |
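A small usage sketch for the wrapper, assuming `cheb.so` has already been built where the loader above expects it. It differentiates x**3 on [-1, 1] at the collocation points and compares against the analytic derivative:
```python
import numpy as np
from cheb import cheb

cheb.init(16, -1.0, 1.0)                    # FFTW plans for 16 points on [-1, 1]
x = np.array([cheb.pt(i) for i in range(cheb.n())])

v = x**3
dv = np.zeros_like(v)
cheb.der(v, dv)                             # spectral derivative written into dv

print(np.max(np.abs(dv - 3.0 * x**2)))      # should be at round-off level
cheb.cleanup()
```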
{
"source": "JLRipley314/one-dim-amr",
"score": 3
} |
#### File: examples/massless_scalar_collapse/write_run_data.py
```python
import shutil
#############################################################################
def write_initial_data(run_data: dict) -> None:
with open("{}/initial_data.txt".format(run_data["home_dir"]), "w") as f:
f.write("amp={}\n".format(run_data["amp"]))
f.write("width={}\n".format(run_data["width"]))
f.write("center={}\n".format(run_data["center"]))
f.write("direction={}\n".format(run_data["direction"]))
f.write("initial_data={}\n".format(run_data["initial_data"]))
f.write("initial_black_hole_mass={}\n".format(run_data["initial_black_hole_mass"]))
shutil.copyfile(
"{}/initial_data.txt".format(run_data["home_dir"]),
"{}/initial_data.txt".format(run_data["output_dir"])
)
return
#############################################################################
def write_run_data(run_data: dict) -> None:
with open("{}/run_data.txt".format(run_data["home_dir"]), "w") as f:
f.write("output_dir={}\n".format(run_data["output_dir"]))
f.write("param_search={}\n".format(run_data["param_search"]))
f.write("theory={}\n".format(run_data["theory"]))
f.write("solver_Ze={}\n".format(run_data["solver_Ze"]))
f.write("use_excision={}\n".format(run_data["use_excision"]))
f.write("Nx={}\n".format(run_data["Nx"]))
f.write("Nt={}\n".format(run_data["Nt"]))
f.write("t_step_save={}\n".format(run_data["t_step_save"]))
f.write("stereographic_L={}\n".format(run_data["stereographic_L"]))
f.write("coupling_gbs={}\n".format(run_data["coupling_gbs"]))
f.write("cfl_num={}\n".format(run_data["cfl_num"]))
f.write("err_tolerance={}\n".format(run_data["err_tolerance"]))
shutil.copyfile(
"{}/run_data.txt".format(run_data["home_dir"]),
"{}/run_data.txt".format(run_data["output_dir"])
)
return
#############################################################################
def write_slurm_script(run_data: dict) -> None:
run_data["var_name"] = "output"
outputName = "{}/{}".format(run_data["output_dir"],"output.out")
with open("{}/run_TEdGB_collapse.slurm".format(run_data["home_dir"]), "w") as f:
f.write("#!/bin/sh\n")
f.write("#SBATCH -N 1\t\t# nodes=1\n")
f.write("#SBATCH --ntasks-per-node=1\t\t# ppn=1\n")
f.write("#SBATCH -J g{:.2f}\t\t# job name\n".format(float(run_data["coupling_gbs"])))
f.write("#SBATCH -t {}\t\t# walltime (dd:hh:mm:ss)\n".format(run_data["walltime"]))
f.write("#SBATCH -p dept\t\t# partition/queue name\n")
f.write("#SBATCH --mem={}MB\t\t# memory in MB\n".format(run_data["memory_usage_MB"]))
f.write("#SBATCH --output={}\t\t# file for STDOUT\n".format(outputName))
f.write("#SBATCH --mail-user=<EMAIL>\t\t# Mail id of the user\n")
# f.write("#SBATCH --mail-type=begin\t\t# Slurm will send mail at the beginning of the job\n")
# f.write("#SBATCH --mail-type=end\t\t# Slurm will send at the completion of your job\n")
f.write("\n./collapse\n\n")
shutil.copyfile(
"{}/run_TEdGB_collapse.slurm".format(run_data["home_dir"]),
"{}/run_TEdGB_collapse.slurm".format(run_data["output_dir"])
)
return
``` |
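For context, each writer above just serializes a handful of keys from a `run_data` dictionary and copies the resulting file into the output directory. A minimal illustrative call for `write_initial_data`, with made-up parameter values:
```python
import os
from write_run_data import write_initial_data

run_data = {
    "home_dir": ".",           # where initial_data.txt is first written
    "output_dir": "./output",  # where it is copied afterwards
    "amp": 0.01,
    "width": 4.0,
    "center": 10.0,
    "direction": "ingoing",
    "initial_data": "gaussian",
    "initial_black_hole_mass": 0.0,
}
os.makedirs(run_data["output_dir"], exist_ok=True)
write_initial_data(run_data)
```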
{
"source": "JLRipley314/scalar-field-kerr",
"score": 2
} |
#### File: JLRipley314/scalar-field-kerr/sim_class.py
```python
import subprocess, os, sys, time, shutil
from typing import List
from math import log
#=============================================================================
class Sim:
#=============================================================================
def __init__(self)->None:
self.home_dir= str(os.getcwd())
#=============================================================================
def make_output_dir(self)->None:
if self.output_dir is None:
time_of_day= time.asctime().split()
self.output_stem= '/'+str(
time_of_day[1]
+ '_'+time_of_day[2]
+ '_'+time_of_day[3].replace(':','_')
+ '_m'+str(self.black_hole_mass)
+ '_s'+str(self.black_hole_spin)
+ '_nx'+str(self.nx)
+ '_nl'+str(self.nl)
+ '_nm'+str(self.nm)
)
else:
self.output_stem = self.output_dir
if (self.computer=='home'):
self.output_dir= self.home_dir+'/Output'+self.output_stem
else:
self.output_dir=self.out_stem+self.output_stem
os.makedirs(self.output_dir)
#=============================================================================
## stereographic projection
def compactification(self, r:float)->float:
return r/(1.0 + (r/self.compactification_length))
#=============================================================================
def set_derived_params(self)->None:
#-----------------------------------------------------------------------------
sqrt_term= pow(
(self.black_hole_mass - self.black_hole_spin)
* (self.black_hole_mass + self.black_hole_spin)
,0.5)
self.horizon= self.black_hole_mass+sqrt_term
self.rl= self.horizon*self.rl_0
self.ru= self.horizon*self.ru_0
#-----------------------------------------------------------------------------
self.Rmin= self.compactification(self.horizon)
self.Rmax= self.compactification_length
#-----------------------------------------------------------------------------
if (self.use_cheb==True):
self.dt= float(
6.*pow(max([self.nx,self.nlat,self.nphi]),-2)
)
else:
self.dt= min(
float(
6.*pow(max([self.nlat,self.nphi]),-2)
)
,
self.cfl/(self.nx-1.0)
)
#-----------------------------------------------------------------------------
self.nt= int(
self.evolve_time*self.black_hole_mass/self.dt
)
#-----------------------------------------------------------------------------
self.t_step_save= int(
self.nt/float(self.num_saved_times)
)
if (self.t_step_save==0):
self.t_step_save= 1
#=============================================================================
def write_sim_params(self)->None:
self.parameter_file = 'params.txt'
with open(self.output_dir+'/'+self.parameter_file,'w') as f:
attrs= vars(self)
for param in attrs:
if type(attrs[param])==list:
vals = ''
for val in attrs[param]:
vals+= ' '+str(val)
f.write('{}{}\n'.format(param,vals))
else:
f.write('{} {}\n'.format(param,attrs[param]))
#=============================================================================
def write_slurm_script(self):
with open('{}/run.slurm'.format(self.output_dir), 'w') as f:
f.write('#!/bin/sh\n')
f.write('#SBATCH -J scalar\t\t# job name\n')
f.write('#SBATCH -t {}\t\t# walltime (dd:hh:mm:ss)\n'.format(self.walltime))
f.write('#SBATCH -p physics\t\t# partition/queue name\n')
f.write('#SBATCH --mem={}MB\n'.format(self.memory))
f.write('#SBATCH --output={}\t\t# file for STDOUT\n'.format(self.output_file))
f.write('#SBATCH --mail-user={}\t\t# Mail id of the user\n'.format(self.email))
#------------
## for openmp
#------------
f.write('#SBATCH --nodes=1\n')
f.write('#SBATCH --ntasks-per-node=1\n')
f.write('#SBATCH --cpus-per-task={}\n'.format(self.num_threads))
f.write('\nexport OMP_MAX_ACTIVE_LEVELS={}\n'.format(self.num_omp_levels))
f.write('\nexport OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK\n')
#------------
## executable
#------------
f.write('chmod 755 '+self.bin_name+'\n') ## make sure binary is executable
f.write('\n'+self.run_str+'\n')
#=============================================================================
def launch_run(self)->None:
self.set_derived_params()
self.make_output_dir()
if self.recompile==True:
self.then_recompile()
self.write_sim_params()
shutil.copyfile(
'Bin/{}'.format(self.bin_name),
'{}/{}'.format(self.output_dir, self.bin_name)
)
self.output_file= 'out.txt'
self.run_str= (
'./'+self.bin_name
+' '+self.parameter_file
+' .'
+' 2>&1 | tee '+self.output_file
)
if (self.time_it):
self.run_str = 'time '+self.run_str
if (self.computer=='home'):
os.environ['OMP_MAX_ACTIVE_LEVELS']= str(self.num_omp_levels)
os.environ['OMP_NUM_THREADS']= str(self.num_threads)
os.chdir(self.output_dir)
subprocess.call('chmod 755 '+self.bin_name, shell=True) ## make sure binary is executable
subprocess.call(self.run_str, shell=True)
os.chdir(self.home_dir)
else:
self.write_slurm_script()
os.chdir(self.output_dir)
subprocess.call('sbatch {}/run.slurm'.format(self.output_dir), shell=True)
os.chdir(self.home_dir)
#=============================================================================
def then_recompile(self)->None:
subprocess.call('make clean_obj'+self.bin_name,shell=True)
subprocess.call('make '+self.bin_name,shell=True)
``` |
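The class above expects its configuration to be attached as plain attributes before anything is launched; the real driver script owns that parameter set. A hedged sketch showing just the derived-parameter step with illustrative values (a full `launch_run` additionally needs the compiled binary under `Bin/` plus cluster settings such as `walltime`, `memory`, and `email`):
```python
from sim_class import Sim

sim = Sim()
# Illustrative values only
sim.black_hole_mass = 0.5
sim.black_hole_spin = 0.0
sim.rl_0, sim.ru_0 = 1.0, 3.0
sim.compactification_length = 20.0
sim.use_cheb = False
sim.nx, sim.nlat, sim.nphi = 48, 32, 8
sim.cfl = 0.5
sim.evolve_time = 10.0
sim.num_saved_times = 50

sim.set_derived_params()
print(sim.horizon, sim.dt, sim.nt, sim.t_step_save)
```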
{
"source": "JLRipley314/teuk-fortran",
"score": 3
} |
#### File: src/tables/tables_cheb.py
```python
import mpmath as mp
from typing import List, Tuple
mp.mp.dps = 50  # mpmath has no infinite precision; 50 significant digits comfortably covers the 32 printed below
#=============================================================================
def cheb_pts(n:int)->List[mp.mpf]:
return [mp.cos(mp.pi*i/(n-1)) for i in range(n)]
#=============================================================================
def diff_cheb(n:int,x:float)->mp.mpf:
return mp.diff(lambda x: mp.chebyt(n,x), x)
#=============================================================================
def pm(m:int) -> mp.mpf:
if m==0:
return mp.mpf(1)
else:
return mp.mpf(2)
#=============================================================================
def compute_to_cheb_to_real(n:int) -> Tuple[List[List[mp.mpf]], List[List[mp.mpf]]]:
to_cheb= [[0 for i in range(n)] for j in range(n)]
to_real= [[0 for i in range(n)] for j in range(n)]
N = n-1
for i in range(0,N+1):
to_cheb[i][0]= pm(i)*mp.fdiv(0.5,N)*mp.power(-1,i)
to_cheb[i][N]= mp.fdiv(pm(i),(2.0*N))
for i in range(1,N):
for j in range(1,N):
to_cheb[i][j]= pm(i)*mp.cos(mp.fdiv(i*j*mp.pi,N))
for i in range(0,N+1):
for j in range(0,N+1):
to_real[i][j]= mp.cos(mp.fdiv(i*j*mp.pi,N))
return (to_cheb,to_real)
#=============================================================================
def cheb_D(n:int) -> List[List[mp.mpf]]:
pts= cheb_pts(n)
Dmat = [[0 for i in range(n)] for j in range(n)]
N = n-1
Dmat[0][0] = mp.fdiv(mp.mpf(2)*mp.power(N,2)+mp.mpf(1),mp.mpf(6))
Dmat[N][N] = - mp.fdiv(mp.mpf(2)*mp.power(N,2)+mp.mpf(1),mp.mpf(6))
Dmat[0][N] = mp.mpf(0.5) * mp.power(-1,N)
Dmat[N][0] = - mp.mpf(0.5) * mp.power(-1,N)
for i in range(1,N):
Dmat[0][i] = mp.mpf(2.0) * mp.power(-1,i ) * mp.fdiv(1,mp.mpf(1)-pts[i])
Dmat[N][i] = - mp.mpf(2.0) * mp.power(-1,i+N) * mp.fdiv(1,mp.mpf(1)+pts[i])
Dmat[i][0] = - mp.mpf(0.5) * mp.power(-1,i ) * mp.fdiv(1,mp.mpf(1)-pts[i])
Dmat[i][N] = mp.mpf(0.5) * mp.power(-1,i+N) * mp.fdiv(1,mp.mpf(1)+pts[i])
Dmat[i][i] = - 0.5 * pts[i] * mp.fdiv(1,mp.mpf(1)-mp.power(pts[i],2))
for j in range(1,N):
if i!=j:
Dmat[i][j] = mp.power(-1,i+j) * mp.fdiv(1,pts[i]-pts[j])
return Dmat
#=============================================================================
def save_cheb(dir_name:str,n:int)->None:
pts= cheb_pts(n)
cheb_D_matrix= cheb_D(n)
to_cheb, to_real = compute_to_cheb_to_real(n)
with open(dir_name+"/cheb_to_real.txt","w") as f:
for line in to_real:
for val in line:
f.write(mp.nstr(val,32)+' ')
f.write('\n')
with open(dir_name+"/real_to_cheb.txt","w") as f:
for line in to_cheb:
for val in line:
f.write(mp.nstr(val,32)+' ')
f.write('\n')
with open(dir_name+"/cheb_pts.txt","w") as f:
for val in pts:
f.write(mp.nstr(val,32)+'\n')
with open(dir_name+"/cheb_D.txt","w") as f:
for line in cheb_D_matrix:
for val in line:
f.write(mp.nstr(val,32)+' ')
f.write('\n')
``` |
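A quick sanity check of `cheb_D` (illustrative, not part of the original script, and assuming it is run from the script's own directory): applying the differentiation matrix to x**2 sampled at the collocation points should reproduce 2x at those points.
```python
import mpmath as mp
from tables_cheb import cheb_pts, cheb_D

n = 8
pts = cheb_pts(n)
D = cheb_D(n)

f = [p**2 for p in pts]  # sample f(x) = x^2 at the collocation points
df = [mp.fsum(D[i][j] * f[j] for j in range(n)) for i in range(n)]

# Compare with the exact derivative 2x; the error should sit at working precision
err = max(abs(df[i] - 2 * pts[i]) for i in range(n))
print(mp.nstr(err, 10))
```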
{
"source": "JLRitch/wigeon",
"score": 3
} |
#### File: test/unit/test_packages.py
```python
import unittest
import pathlib as pl
import time
import json
import shutil
# external imports
# project imports
from wigeon import packages
class TestPackage(unittest.TestCase):
cur_dir = pl.Path().cwd()
def test_create_dir(self):
"""
Checks that directory was created
"""
# inputs
package = packages.Package("example")
# cleanup folders before test
shutil.rmtree(package.pack_path, ignore_errors=True)
package.create(
env_list=["dev", "qa"],
db_engine="sqlite"
)
self.assertTrue(
self.cur_dir.joinpath("packages", "example").exists()
)
self.assertTrue(
self.cur_dir.joinpath("packages", "example", "manifest.json").exists()
)
expect_init_mani = {
"db_engine": "sqlite",
"environments": {
"dev": {
"connectionstring": None,
"server": None,
"database": None,
"username": None,
"password": <PASSWORD>
},
"qa": {
"connectionstring": None,
"server": None,
"database": None,
"username": None,
"password": <PASSWORD>
}
},
"migrations": []
}
with open(self.cur_dir.joinpath("packages", "example", "manifest.json"), "r") as f:
out_man_js = json.load(f)
self.assertEqual(out_man_js, expect_init_mani)
# cleanup folders after test
shutil.rmtree(package.pack_path, ignore_errors=True)
def test_exists(self):
"""
tests package existence methods
"""
# inputs
package_exist = packages.Package("exist")
package_no_exist = packages.Package("noexist")
# cleanup folders before test
shutil.rmtree(package_exist.pack_folder, ignore_errors=True)
shutil.rmtree(package_no_exist.pack_folder, ignore_errors=True)
package_exist.pack_path.mkdir(parents=True)
# assertions
self.assertRaises(
FileExistsError,
package_exist.exists,
raise_error_on_exists=True,
raise_error_on_not_exist=False
)
self.assertRaises(
FileExistsError,
package_no_exist.exists,
raise_error_on_exists=False,
raise_error_on_not_exist=True
)
self.assertTrue(package_exist.exists(
raise_error_on_exists=False,
raise_error_on_not_exist=False
)
)
self.assertFalse(package_no_exist.exists(
raise_error_on_exists=False,
raise_error_on_not_exist=False
)
)
# cleanup folders after test
shutil.rmtree(package_exist.pack_folder, ignore_errors=True)
shutil.rmtree(package_no_exist.pack_folder, ignore_errors=True)
def test_find_current_migration(self):
"""
test that the next migration number is "0001" for an empty list, otherwise the previous prefix incremented by 1
"""
# inputs
package = packages.Package("example")
self.assertEqual(
package.find_current_migration(migration_list=[]),
"0001"
)
self.assertEqual(
package.find_current_migration(migration_list=[pl.Path("0001-migration.sql")]),
"0002"
)
self.assertRaises(
ValueError,
package.find_current_migration,
migration_list=[pl.Path("9999-migration.sql")]
)
def test_list_migrations(self):
"""
test migration file reads
"""
self.maxDiff = None
no_migrations = packages.Package("no_migrations")
with_migrations = packages.Package("with_migrations")
# cleanup folders before test
shutil.rmtree(no_migrations.pack_folder, ignore_errors=True)
shutil.rmtree(with_migrations.pack_folder, ignore_errors=True)
# create migrations
with_migrations.pack_path.mkdir(parents=True, exist_ok=True)
no_migrations.pack_path.mkdir(parents=True, exist_ok=True)
mig_path = with_migrations.pack_path.joinpath("0001-migration1.sql")
with open(mig_path, 'w') as f:
f.write('emptyfile')
# assertions
self.assertEqual(
no_migrations.list_local_migrations(),
[]
)
self.assertEqual(
with_migrations.list_local_migrations(),
[mig_path]
)
# cleanup folders after test
shutil.rmtree(no_migrations.pack_folder, ignore_errors=True)
shutil.rmtree(with_migrations.pack_folder, ignore_errors=True)
def test_delete_dir(self):
"""
Checks that the package directory is deleted
"""
# inputs
package = packages.Package("example")
# cleanup and recreate the folder before the test
shutil.rmtree(package.pack_path, ignore_errors=True)
package.pack_path.mkdir(parents=True, exist_ok=True)
# sleep to ensure files are given time to create
time.sleep(1)
package.delete()
# sleep to ensure files are given time to delete
time.sleep(1)
self.assertFalse(
self.cur_dir.joinpath("packages", "example").exists()
)
if __name__ == '__main__':
unittest.main()
```
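The tests above double as documentation for the `Package` API. A hedged end-to-end sketch of that workflow outside the test harness (run from a project root, since packages are created under `./packages`):
```python
from wigeon import packages

pkg = packages.Package("example")
pkg.create(env_list=["dev", "qa"], db_engine="sqlite")

migrations = pkg.list_local_migrations()
print("next migration number:", pkg.find_current_migration(migration_list=migrations))

pkg.delete()
```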
#### File: wigeon/wigeon/levels.py
```python
import random
names = [
"Reveille",
"AI Constructs and Cyborgs First!",
"Flawless Cowboy",
"Reunion Tour",
"The Truth and Reconciliation",
"Into the Belly of the Beast",
"Shut Up and Get Behind me... Sir",
"The Silent Cartographer",
"It's Quiet...",
"Shafted",
"I Would Have Been Your Daddy...",
"Rolling Thunder",
"If I had a Super Weapon...",
"Well Enough Alone",
"The Flood",
"343 Guilty Spark",
"The Library",
"Wait, It Gets Worse!",
"But I Don't Want to Ride the Elevator!",
"Fourth Floor: Tools, Guns, Keys to Super Weapons",
"The Gun Point at the Head of the Universe",
"Breaking Stuff to Look Tough",
"The Tunnels Below",
"Final Run",
"Under New Management",
"Upstairs, Downstairs",
"The Captain",
"...And the Horse You Rode in on",
"Light Fuse, Run Away",
"Warning: Hitchhikers May be Escaping Convicts"
]
def get_level():
return random.choice(names)
``` |
{
"source": "jlrpnbbngtn/brewtils",
"score": 2
} |
#### File: brewtils/brewtils/decorators.py
```python
import functools
import inspect
import os
import sys
from types import MethodType
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, Union
import six
from brewtils.choices import process_choices
from brewtils.display import resolve_form, resolve_schema, resolve_template
from brewtils.errors import PluginParamError, _deprecate
from brewtils.models import Command, Parameter, Resolvable
if sys.version_info.major == 2:
from funcsigs import signature, Parameter as InspectParameter # noqa
else:
from inspect import signature, Parameter as InspectParameter # noqa
__all__ = [
"client",
"command",
"parameter",
"parameters",
"system",
]
def client(
_wrapped=None, # type: Type
bg_name=None, # type: Optional[str]
bg_version=None, # type: Optional[str]
):
# type: (...) -> Type
"""Class decorator that marks a class as a beer-garden Client
Using this decorator is no longer strictly necessary. It was previously required in
order to mark a class as being a Beer-garden Client, and contained most of the logic
that currently resides in the ``parse_client`` function. However, that's no longer
the case and this currently exists mainly for back-compatibility reasons.
Applying this decorator to a client class does have the nice effect of preventing
linters from complaining if any special attributes are used. So that's something.
Those special attributes are below. Note that these are just placeholders until the
actual values are populated when the client instance is assigned to a Plugin:
* ``_bg_name``: an optional system name
* ``_bg_version``: an optional system version
* ``_bg_commands``: holds all registered commands
* ``_current_request``: Reference to the currently executing request
Args:
_wrapped: The class to decorate. This is handled as a positional argument and
shouldn't be explicitly set.
bg_name: Optional plugin name
bg_version: Optional plugin version
Returns:
The decorated class
"""
if _wrapped is None:
return functools.partial(client, bg_name=bg_name, bg_version=bg_version) # noqa
# Assign these here so linters don't complain
_wrapped._bg_name = bg_name
_wrapped._bg_version = bg_version
_wrapped._bg_commands = []
_wrapped._current_request = None
return _wrapped
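# Illustrative only: a minimal client class built with the decorators in this module.
# The class, command, and parameter names below are hypothetical examples, not part
# of brewtils itself.
#
#     @client
#     class HelloClient(object):
#
#         @parameter(key="name", type="String", default="world", description="Who to greet")
#         @command(description="Say hello")
#         def hello(self, name):
#             return "Hello, %s!" % name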
def command(
_wrapped=None, # type: Union[Callable, MethodType]
description=None, # type: Optional[str]
parameters=None, # type: Optional[List[Parameter]]
command_type="ACTION", # type: str
output_type="STRING", # type: str
schema=None, # type: Optional[Union[dict, str]]
form=None, # type: Optional[Union[dict, list, str]]
template=None, # type: Optional[str]
icon_name=None, # type: Optional[str]
hidden=False, # type: Optional[bool]
metadata=None, # type: Optional[Dict]
):
"""Decorator for specifying Command details
For example:
.. code-block:: python
@command(output_type='JSON')
def echo_json(self, message):
return message
Args:
_wrapped: The function to decorate. This is handled as a positional argument and
shouldn't be explicitly set.
description: The command description. If not given the first line of the method
docstring will be used.
parameters: A list of Command parameters. It's recommended to use @parameter
decorators to declare Parameters instead of declaring them here, but it is
allowed. Any Parameters given here will be merged with Parameters sourced
from decorators and inferred from the method signature.
command_type: The command type. Valid options are Command.COMMAND_TYPES.
output_type: The output type. Valid options are Command.OUTPUT_TYPES.
schema: A custom schema definition.
form: A custom form definition.
template: A custom template definition.
icon_name: The icon name. Should be either a FontAwesome or a Glyphicon name.
hidden: Flag controlling whether the command is visible on the user interface.
metadata: Free-form dictionary
Returns:
The decorated function
"""
if _wrapped is None:
return functools.partial(
command,
description=description,
parameters=parameters,
command_type=command_type,
output_type=output_type,
schema=schema,
form=form,
template=template,
icon_name=icon_name,
hidden=hidden,
metadata=metadata,
)
new_command = Command(
description=description,
parameters=parameters,
command_type=command_type,
output_type=output_type,
schema=schema,
form=form,
template=template,
icon_name=icon_name,
hidden=hidden,
metadata=metadata,
)
# Python 2 compatibility
if hasattr(_wrapped, "__func__"):
_wrapped.__func__._command = new_command
else:
_wrapped._command = new_command
return _wrapped
def parameter(
_wrapped=None, # type: Union[Callable, MethodType, Type]
key=None, # type: str
type=None, # type: Optional[Union[str, Type]]
multi=None, # type: Optional[bool]
display_name=None, # type: Optional[str]
optional=None, # type: Optional[bool]
default=None, # type: Optional[Any]
description=None, # type: Optional[str]
choices=None, # type: Optional[Union[Callable, Dict, Iterable, str]]
parameters=None, # type: Optional[List[Parameter]]
nullable=None, # type: Optional[bool]
maximum=None, # type: Optional[int]
minimum=None, # type: Optional[int]
regex=None, # type: Optional[str]
form_input_type=None, # type: Optional[str]
type_info=None, # type: Optional[dict]
is_kwarg=None, # type: Optional[bool]
model=None, # type: Optional[Type]
):
"""Decorator for specifying Parameter details
For example::
@parameter(
key="message",
description="Message to echo",
optional=True,
type="String",
default="Hello, World!",
)
def echo(self, message):
return message
Args:
_wrapped: The function to decorate. This is handled as a positional argument and
shouldn't be explicitly set.
key: String specifying the parameter identifier. If the decorated object is a
method the key must match an argument name.
type: String indicating the type to use for this parameter.
multi: Boolean indicating if this parameter is a multi. See documentation for
discussion of what this means.
display_name: String that will be displayed as a label in the user interface.
optional: Boolean indicating if this parameter must be specified.
default: The value this parameter will be assigned if not overridden when
creating a request.
description: An additional string that will be displayed in the user interface.
choices: List or dictionary specifying allowed values. See documentation for
more information.
parameters: Any nested parameters. See also: the 'model' argument.
nullable: Boolean indicating if this parameter is allowed to be null.
maximum: Integer indicating the maximum value of the parameter.
minimum: Integer indicating the minimum value of the parameter.
regex: String describing a regular expression constraint on the parameter.
form_input_type: Specify the form input field type (e.g. textarea). Only used
for string fields.
type_info: Type-specific information. Mostly reserved for future use.
is_kwarg: Boolean indicating if this parameter is meant to be part of the
decorated function's kwargs. Only applies when the decorated object is a
method.
model: Class to be used as a model for this parameter. Must be a Python type
object, not an instance.
Returns:
The decorated function
"""
if _wrapped is None:
return functools.partial(
parameter,
key=key,
type=type,
multi=multi,
display_name=display_name,
optional=optional,
default=default,
description=description,
choices=choices,
parameters=parameters,
nullable=nullable,
maximum=maximum,
minimum=minimum,
regex=regex,
form_input_type=form_input_type,
type_info=type_info,
is_kwarg=is_kwarg,
model=model,
)
new_parameter = Parameter(
key=key,
type=type,
multi=multi,
display_name=display_name,
optional=optional,
default=default,
description=description,
choices=choices,
parameters=parameters,
nullable=nullable,
maximum=maximum,
minimum=minimum,
regex=regex,
form_input_type=form_input_type,
type_info=type_info,
is_kwarg=is_kwarg,
model=model,
)
# Python 2 compatibility
if hasattr(_wrapped, "__func__"):
_wrapped.__func__.parameters = getattr(_wrapped, "parameters", [])
_wrapped.__func__.parameters.insert(0, new_parameter)
else:
_wrapped.parameters = getattr(_wrapped, "parameters", [])
_wrapped.parameters.insert(0, new_parameter)
return _wrapped
def parameters(*args, **kwargs):
"""
.. deprecated:: 3.0
Will be removed in version 4.0. Please use ``@command`` instead.
Decorator for specifying multiple Parameter definitions at once
This can be useful for commands which have a large number of complicated
parameters but aren't good candidates for a Model.
.. code-block:: python
@parameter(**params[cmd1][param1])
@parameter(**params[cmd1][param2])
@parameter(**params[cmd1][param3])
def cmd1(self, **kwargs):
pass
Can become:
.. code-block:: python
@parameters(params[cmd1])
def cmd1(self, **kwargs):
pass
Args:
*args (iterable): Positional arguments
The first (and only) positional argument must be a list containing
dictionaries that describe parameters.
**kwargs: Used for bookkeeping. Don't set any of these yourself!
Returns:
func: The decorated function
"""
# This is the first invocation
if not kwargs.get("_partial"):
# Need the callable check to prevent applying the decorator with no parenthesis
if len(args) == 1 and not callable(args[0]):
return functools.partial(parameters, args[0], _partial=True)
raise PluginParamError("@parameters takes a single argument")
# This is the second invocation
else:
if len(args) != 2:
raise PluginParamError(
"Incorrect number of arguments for parameters partial call. Did you "
"set _partial=True? If so, please don't do that. If not, please let "
"the Beergarden team know how you got here!"
)
_deprecate(
"Looks like you're using the '@parameters' decorator. This is now deprecated - "
"for passing bulk parameter definitions it's recommended to use the @command "
"decorator parameters kwarg, like this: @command(parameters=[...])"
)
params = args[0]
_wrapped = args[1]
if not callable(_wrapped):
raise PluginParamError("@parameters must be applied to a callable")
try:
for param in params:
parameter(_wrapped, **param)
except TypeError:
raise PluginParamError("@parameters arg must be an iterable of dictionaries")
return _wrapped
def _parse_client(client):
# type: (object) -> List[Command]
"""Get a list of Beergarden Commands from a client object
This will iterate over everything returned from dir, looking for metadata added
by the decorators.
"""
bg_commands = []
for attr in dir(client):
method = getattr(client, attr)
method_command = _parse_method(method)
if method_command:
bg_commands.append(method_command)
return bg_commands
def _parse_method(method):
# type: (MethodType) -> Optional[Command]
"""Parse a method object as a Beer-garden command target
If the method looks like a valid command target (based on the presence of certain
attributes) then this method will initialize things:
- The command will be initialized.
- Every parameter will be initialized. Initializing a parameter is recursive - each
nested parameter will also be initialized.
- Top-level parameters are validated to ensure they match the method signature.
Args:
method: Method to parse
Returns:
Beergarden Command targeting the given method
"""
if (inspect.ismethod(method) or inspect.isfunction(method)) and (
hasattr(method, "_command") or hasattr(method, "parameters")
):
# Create a command object if there isn't one already
method_command = _initialize_command(method)
try:
# Need to initialize existing parameters before attempting to add parameters
# pulled from the method signature.
method_command.parameters = _initialize_parameters(
method_command.parameters + getattr(method, "parameters", [])
)
# Add and update parameters based on the method signature
_signature_parameters(method_command, method)
# Verify that all parameters conform to the method signature
_signature_validate(method_command, method)
except PluginParamError as ex:
six.raise_from(
PluginParamError(
"Error initializing parameters for command '%s': %s"
% (method_command.name, ex)
),
ex,
)
return method_command
def _initialize_command(method):
# type: (MethodType) -> Command
"""Initialize a Command
This takes care of ensuring a Command object is in the correct form. Things like:
- Assigning the name from the method name
- Pulling the description from the method docstring, if necessary
- Resolving display modifiers (schema, form, template)
Args:
method: The method with the Command to initialize
Returns:
The initialized Command
"""
cmd = getattr(method, "_command", Command())
cmd.name = _method_name(method)
cmd.description = cmd.description or _method_docstring(method)
try:
base_dir = os.path.dirname(inspect.getfile(method))
cmd.schema = resolve_schema(cmd.schema, base_dir=base_dir)
cmd.form = resolve_form(cmd.form, base_dir=base_dir)
cmd.template = resolve_template(cmd.template, base_dir=base_dir)
except PluginParamError as ex:
six.raise_from(
PluginParamError("Error initializing command '%s': %s" % (cmd.name, ex)),
ex,
)
return cmd
def _method_name(method):
# type: (MethodType) -> str
"""Get the name of a method
This is needed for Python 2 / 3 compatibility
Args:
method: Method to inspect
Returns:
Method name
"""
if hasattr(method, "func_name"):
command_name = method.func_name
else:
command_name = method.__name__
return command_name
def _method_docstring(method):
# type: (MethodType) -> str
"""Parse out the first line of the docstring from a method
This is needed for Python 2 / 3 compatibility
Args:
method: Method to inspect
Returns:
First line of docstring
"""
if hasattr(method, "func_doc"):
docstring = method.func_doc
else:
docstring = method.__doc__
return docstring.split("\n")[0] if docstring else None
def _sig_info(arg):
# type: (InspectParameter) -> Tuple[Any, bool]
"""Get the default and optionality of a method argument
This will return the "default" according to the method signature. For example, the
following would return "foo" as the default for Parameter param:
.. code-block:: python
def my_command(self, param="foo"):
...
The "optional" value returned will be a boolean indicating the presence of a default
argument. In the example above the "optional" value will be True. However, in the
following example the value would be False (and the "default" value will be None):
.. code-block:: python
def my_command(self, param):
...
A separate optional return is needed to indicate when a default is provided in the
signature, but the default is None. In the following, the default will still be
None, but the optional value will be True:
.. code-block:: python
def my_command(self, param=None):
...
Args:
arg: The method argument
Returns:
Tuple of (signature default, optionality)
"""
if arg.default != InspectParameter.empty:
return arg.default, True
return None, False
def _initialize_parameter(
param=None,
key=None,
type=None,
multi=None,
display_name=None,
optional=None,
default=None,
description=None,
choices=None,
parameters=None,
nullable=None,
maximum=None,
minimum=None,
regex=None,
form_input_type=None,
type_info=None,
is_kwarg=None,
model=None,
):
# type: (...) -> Parameter
"""Initialize a Parameter
This exists to move logic out of the @parameter decorator. Previously there was a
fair amount of logic in the decorator, which meant that it wasn't feasible to create
a Parameter without using it. This made things like nested models difficult to do
correctly.
There are also some checks and translation that need to happen for every Parameter,
most notably the "choices" attribute.
This method also ensures that these checks and translations occur for child
Parameters.
Args:
param: An already-created Parameter. If this is given all the other
Parameter-creation kwargs will be ignored
Keyword Args:
Will be used to construct a new Parameter
"""
param = param or Parameter(
key=key,
type=type,
multi=multi,
display_name=display_name,
optional=optional,
default=default,
description=description,
choices=choices,
parameters=parameters,
nullable=nullable,
maximum=maximum,
minimum=minimum,
regex=regex,
form_input_type=form_input_type,
type_info=type_info,
is_kwarg=is_kwarg,
model=model,
)
# Every parameter needs a key, so stop that right here
if param.key is None:
raise PluginParamError("Attempted to create a parameter without a key")
# Type and type info
# Type info is where type specific information goes. For now, this is specific
# to file types. See #289 for more details.
param.type = _format_type(param.type)
param.type_info = param.type_info or {}
if param.type in Resolvable.TYPES:
param.type_info["storage"] = "gridfs"
# Also nullify default parameters for safety
param.default = None
# Process the raw choices into a Choices object
param.choices = process_choices(param.choices)
# Now deal with nested parameters
if param.parameters or param.model:
if param.model:
# Can't specify a model and parameters - which should win?
if param.parameters:
raise PluginParamError(
"Error initializing parameter '%s': A parameter with both a model "
"and nested parameters is not allowed" % param.key
)
param.parameters = param.model.parameters
param.model = None
param.type = "Dictionary"
param.parameters = _initialize_parameters(param.parameters)
return param
def _format_type(param_type):
# type: (Any) -> str
"""Parse Parameter type
Args:
param_type: Raw Parameter type, usually from a decorator
Returns:
Properly formatted string describing the parameter type
"""
if param_type == str:
return "String"
elif param_type == int:
return "Integer"
elif param_type == float:
return "Float"
elif param_type == bool:
return "Boolean"
elif param_type == dict:
return "Dictionary"
elif str(param_type).lower() == "datetime":
return "DateTime"
elif not param_type:
return "Any"
else:
return str(param_type).title()
def _initialize_parameters(parameter_list):
# type: (Iterable[Union[Parameter, object, dict]]) -> List[Parameter]
"""Initialize Parameters from a list of parameter definitions
This exists for backwards compatibility with the old way of specifying Models.
Previously, models were defined by creating a class with a ``parameters`` class
attribute. This required constructing each parameter manually, without using the
``@parameter`` decorator.
This function takes a list where members can be any of the following:
- A Parameter object
- A class object with a ``parameters`` attribute
- A dict containing kwargs for constructing a Parameter
The Parameters in the returned list will be initialized. See the function
``_initialize_parameter`` for information on what that entails.
Args:
parameter_list: List of parameter precursors
Returns:
List of initialized parameters
"""
initialized_params = []
for param in parameter_list:
# This is already a Parameter. Only really need to interpret the choices
# definition and recurse down into nested Parameters
if isinstance(param, Parameter):
initialized_params.append(_initialize_parameter(param=param))
# This is a model class object. Needed for backwards compatibility
# See https://github.com/beer-garden/beer-garden/issues/354
elif hasattr(param, "parameters"):
_deprecate(
"Constructing a nested Parameters list using model class objects "
"is deprecated. Please pass the model's parameter list directly."
)
initialized_params += _initialize_parameters(param.parameters)
# This is a dict of Parameter kwargs
elif isinstance(param, dict):
initialized_params.append(_initialize_parameter(**param))
# No clue!
else:
raise PluginParamError("Unable to generate parameter from '%s'" % param)
return initialized_params
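# Illustrative (hypothetical) input covering the three accepted member forms
# described in the docstring above:
#
#     _initialize_parameters([
#         Parameter(key="already_built", type="String"),        # a Parameter object
#         LegacyModel,                                           # class with a `parameters` attribute
#         {"key": "from_kwargs", "type": "Integer"},             # kwargs dict for a Parameter
#     ])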
def _signature_parameters(cmd, method):
# type: (Command, MethodType) -> Command
"""Add and/or modify a Command's parameters based on the method signature
This will add / modify the Command's parameter list:
- Any arguments in the method signature that were not already known Parameters will
be added
- Any arguments that WERE already known (most likely from a @parameter decorator)
will potentially have their default and optional values updated:
- If either attribute is already defined (specified in the decorator) then that
value will be used. Explicit values will NOT be overridden.
- If the default attribute is not already defined then it will be set to the value
of the default parameter from the method signature, if any.
- If the optional attribute is not already defined then it will be set to True if
a default value exists in the method signature, otherwise it will be set to
False.
The parameter modification is confusing - see the _sig_info docstring for examples.
A final note - I'm not super happy about this. It makes sense - positional arguments
are "required", so mark them as non-optional. It's not *wrong*, but it's unexpected.
A @parameter that doesn't specify "optional=" will have a different optionality
based on the function signature.
Regardless, we went with this originally. If we want to change it we need to go
though a deprecation cycle and *loudly* publicize it since things wouldn't break
loudly for plugin developers, their plugins would just be subtly (but importantly)
different.
Args:
cmd: The Command to modify
method: Method to parse
Returns:
Command with modified parameter list
"""
# Now we need to reconcile the parameters with the method signature
for index, arg in enumerate(signature(method).parameters.values()):
# Don't want to include special parameters
if (index == 0 and arg.name in ("self", "cls")) or arg.kind in (
InspectParameter.VAR_KEYWORD,
InspectParameter.VAR_POSITIONAL,
):
continue
# Grab default and optionality according to the signature. We'll need it later.
sig_default, sig_optional = _sig_info(arg)
# Here the parameter was not previously defined so just add it to the list
if arg.name not in cmd.parameter_keys():
cmd.parameters.append(
_initialize_parameter(
key=arg.name, default=sig_default, optional=sig_optional
)
)
# Here the parameter WAS previously defined. So we potentially need to update
# the default and optional values (if they weren't explicitly set).
else:
param = cmd.get_parameter_by_key(arg.name)
if param.default is None:
param.default = sig_default
if param.optional is None:
param.optional = sig_optional
return cmd
def _signature_validate(cmd, method):
# type: (Command, MethodType) -> None
"""Ensure that a Command conforms to the method signature
This will do some validation and will raise a PluginParamError if there are any
issues.
It's expected that this will only be called for Parameters where this makes sense
(aka top-level Parameters). It doesn't make sense to call this for model Parameters,
so you shouldn't do that.
Args:
cmd: Command to validate
method: Target method object
Returns:
None
Raises:
PluginParamError: There was a validation problem
"""
for param in cmd.parameters:
sig_param = None
has_kwargs = False
for p in signature(method).parameters.values():
if p.name == param.key:
sig_param = p
if p.kind == InspectParameter.VAR_KEYWORD:
has_kwargs = True
# Couldn't find the parameter. That's OK if this parameter is meant to be part
# of the **kwargs AND the function has a **kwargs parameter.
if sig_param is None:
if not param.is_kwarg:
raise PluginParamError(
"Parameter was not not marked as part of kwargs and wasn't found "
"in the method signature (should is_kwarg be True?)"
)
elif not has_kwargs:
raise PluginParamError(
"Parameter was declared as a kwarg (is_kwarg=True) but the method "
"signature does not declare a **kwargs parameter"
)
# Cool, found the parameter. Just verify that it's not pure positional and that
# it's not marked as part of kwargs.
else:
if param.is_kwarg:
raise PluginParamError(
"Parameter was marked as part of kwargs but was found in the "
"method signature (should is_kwarg be False?)"
)
# I don't think this is even possible in Python < 3.8
if sig_param.kind == InspectParameter.POSITIONAL_ONLY:
raise PluginParamError(
"Sorry, positional-only type parameters are not supported"
)
# Alias the old names for compatibility
# This isn't deprecated, see https://github.com/beer-garden/beer-garden/issues/927
system = client
def command_registrar(*args, **kwargs):
"""
.. deprecated:: 3.0
Will be removed in 4.0. Use ``@system`` instead.
"""
_deprecate(
"Looks like you're using the '@command_registrar' decorator. Heads up - this "
"name will be removed in version 4.0, please use '@system' instead. Thanks!"
)
return system(*args, **kwargs)
def register(*args, **kwargs):
"""
.. deprecated:: 3.0
Will be removed in 4.0. Use ``@command`` instead.
"""
_deprecate(
"Looks like you're using the '@register' decorator. Heads up - this name will "
"be removed in version 4.0, please use '@command' instead. Thanks!"
)
return command(*args, **kwargs)
def plugin_param(*args, **kwargs):
"""
.. deprecated:: 3.0
Will be removed in 4.0. Use ``@parameter`` instead.
"""
_deprecate(
"Looks like you're using the '@plugin_param' decorator. Heads up - this name "
"will be removed in version 4.0, please use '@parameter' instead. Thanks!"
)
return parameter(*args, **kwargs)
```
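For reference, a minimal sketch of moving off the deprecated decorator names above (editor's example; it assumes ``command``, ``parameter``, and ``system`` are importable from the top-level ``brewtils`` package, which is what the aliases here ultimately point at):
```python
from brewtils import command, parameter, system


@system  # formerly @command_registrar
class EchoClient(object):
    """Example client kept deliberately small"""

    @command  # formerly @register
    @parameter(key="message", type="String", default="hello")  # formerly @plugin_param
    def echo(self, message):
        return message
```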
#### File: brewtils/brewtils/plugin.py
```python
import json
import logging
import logging.config
import os
import signal
import sys
import threading
import appdirs
from box import Box
from packaging.version import Version
from pathlib import Path
from requests import ConnectionError as RequestsConnectionError
import brewtils
from brewtils.config import load_config
from brewtils.decorators import _parse_client
from brewtils.display import resolve_template
from brewtils.errors import (
ConflictError,
DiscardMessageException,
PluginValidationError,
RequestProcessingError,
RestConnectionError,
ValidationError,
_deprecate,
)
from brewtils.log import configure_logging, default_config, find_log_file, read_log_file
from brewtils.models import Instance, System
from brewtils.request_handling import (
AdminProcessor,
HTTPRequestUpdater,
RequestConsumer,
RequestProcessor,
)
from brewtils.resolvers.manager import ResolutionManager
from brewtils.rest.easy_client import EasyClient
from brewtils.specification import _CONNECTION_SPEC
# This is what enables request nesting to work easily
request_context = threading.local()
request_context.current_request = None
# Global config, used to simplify BG client creation and sanity checks.
CONFIG = Box(default_box=True)
class Plugin(object):
"""A Beer-garden Plugin
This class represents a Beer-garden Plugin - a continuously-running process
that can receive and process Requests.
To work, a Plugin needs a Client instance - an instance of a class defining
which Requests this plugin can accept and process. The easiest way to define
a ``Client`` is by annotating a class with the ``@system`` decorator.
A Plugin needs certain pieces of information in order to function correctly. These
can be grouped into two high-level categories: identifying information and
connection information.
Identifying information is how Beer-garden differentiates this Plugin from all
other Plugins. If you already have a fully-defined System model you can pass that
directly to the Plugin (``system=my_system``). However, normally it's simpler to
pass the pieces directly:
- ``name`` (required)
- ``version`` (required)
- ``instance_name`` (required, but defaults to "default")
- ``namespace``
- ``description``
- ``icon_name``
- ``metadata``
- ``display_name``
Connection information tells the Plugin how to communicate with Beer-garden. The
most important of these is the ``bg_host`` (to tell the plugin where to find the
Beer-garden you want to connect to):
- ``bg_host``
- ``bg_port``
- ``bg_url_prefix``
- ``ssl_enabled``
- ``ca_cert``
- ``ca_verify``
- ``client_cert``
An example plugin might look like this:
.. code-block:: python
Plugin(
name="Test",
version="1.0.0",
instance_name="default",
namespace="test plugins",
description="A Test",
bg_host="localhost",
)
Plugins use `Yapconf <https://github.com/loganasherjones/yapconf>`_ for
configuration loading, which means that values can be discovered from sources other
than direct argument passing. Config can be passed as command line arguments::
python my_plugin.py --bg-host localhost
Values can also be specified as environment variables with a "BG_" prefix::
BG_HOST=localhost python my_plugin.py
Plugins service requests using a
:py:class:`concurrent.futures.ThreadPoolExecutor`. The maximum number of
threads available is controlled by the ``max_concurrent`` argument.
.. warning::
Normally the processing of each Request occurs in a distinct thread context. If
you need to access shared state please be careful to use appropriate
concurrency mechanisms.
.. warning::
The default value for ``max_concurrent`` is 5, but setting it to 1 is allowed.
This means that a Plugin will essentially be single-threaded, but realize this
means that if the Plugin invokes a Command on itself in the course of processing
a Request then the Plugin **will** deadlock!
Args:
client: Instance of a class annotated with ``@system``.
bg_host (str): Beer-garden hostname
bg_port (int): Beer-garden port
bg_url_prefix (str): URL path that will be used as a prefix when communicating
with Beer-garden. Useful if Beer-garden is running on a URL other than '/'.
ssl_enabled (bool): Whether to use SSL for Beer-garden communication
ca_cert (str): Path to certificate file containing the certificate of the
authority that issued the Beer-garden server certificate
ca_verify (bool): Whether to verify Beer-garden server certificate
client_cert (str): Path to client certificate to use when communicating with
Beer-garden
api_version (int): Beer-garden API version to use
client_timeout (int): Max time to wait for Beer-garden server response
username (str): Username for Beer-garden authentication
password (str): Password for Beer-garden authentication
access_token (str): Access token for Beer-garden authentication
refresh_token (str): Refresh token for Beer-garden authentication
system (:py:class:`brewtils.models.System`): A Beer-garden System definition.
Incompatible with name, version, description, display_name, icon_name,
max_instances, and metadata parameters.
name (str): System name
version (str): System version
description (str): System description
display_name (str): System display name
icon_name (str): System icon name
max_instances (int): System maximum instances
metadata (dict): System metadata
instance_name (str): Instance name
namespace (str): Namespace name
logger (:py:class:`logging.Logger`): Logger that will be used by the Plugin.
Passing a logger will prevent the Plugin from performing any additional
logging configuration.
worker_shutdown_timeout (int): Time to wait during shutdown to finish processing
max_concurrent (int): Maximum number of requests to process concurrently
max_attempts (int): Number of times to attempt updating of a Request
before giving up. Negative numbers are interpreted as no maximum.
max_timeout (int): Maximum amount of time to wait between Request update
attempts. Negative numbers are interpreted as no maximum.
starting_timeout (int): Initial time to wait between Request update attempts.
Will double on subsequent attempts until reaching max_timeout.
mq_max_attempts (int): Number of times to attempt reconnection to message queue
before giving up. Negative numbers are interpreted as no maximum.
mq_max_timeout (int): Maximum amount of time to wait between message queue
reconnect attempts. Negative numbers are interpreted as no maximum.
mq_starting_timeout (int): Initial time to wait between message queue reconnect
attempts. Will double on subsequent attempts until reaching mq_max_timeout.
working_directory (str): Path to a preferred working directory. Only used
when working with bytes parameters.
"""
def __init__(self, client=None, system=None, logger=None, **kwargs):
self._client = None
self._instance = None
self._admin_processor = None
self._request_processor = None
self._shutdown_event = threading.Event()
# Need to set up logging before loading config
self._custom_logger = False
self._logger = self._setup_logging(logger=logger, **kwargs)
# Now that logging is configured we can load the real config
self._config = load_config(**kwargs)
# If global config has already been set that's a warning
global CONFIG
if len(CONFIG):
self._logger.warning(
"Global CONFIG object is not empty! If multiple plugins are running in "
"this process please ensure any [System|Easy|Rest]Clients are passed "
"connection information as kwargs as auto-discovery may be incorrect."
)
CONFIG = Box(self._config.to_dict(), default_box=True)
# Now set up the system
self._system = self._setup_system(system, kwargs)
# Make sure this is set after self._system
if client:
self.client = client
# Now that the config is loaded we can create the EasyClient
self._ez_client = EasyClient(logger=self._logger, **self._config)
# With the EasyClient we can determine if this is an old garden
self._legacy = self._legacy_garden()
if not self._legacy:
# Namespace setup depends on self._system and self._ez_client
self._setup_namespace()
# And with _system and _ez_client we can ask for the real logging config
self._initialize_logging()
def run(self):
if not self._client:
raise AttributeError(
"Unable to start a Plugin without a Client. Please set the 'client' "
"attribute to an instance of a class decorated with @brewtils.system"
)
self._startup()
self._logger.info("Plugin %s has started", self.unique_name)
try:
# Need the timeout param so this works correctly in Python 2
while not self._shutdown_event.wait(timeout=0.1):
pass
except KeyboardInterrupt:
self._logger.debug("Received KeyboardInterrupt - shutting down")
except Exception as ex:
self._logger.exception("Exception during wait, shutting down: %s", ex)
self._shutdown()
self._logger.info("Plugin %s has terminated", self.unique_name)
@property
def client(self):
return self._client
@client.setter
def client(self, new_client):
if self._client:
raise AttributeError("Sorry, you can't change a plugin's client once set")
if new_client is None:
return
# Several _system properties can come from the client, so update if needed
if not self._system.name:
self._system.name = getattr(new_client, "_bg_name")
if not self._system.version:
self._system.version = getattr(new_client, "_bg_version")
if not self._system.description and new_client.__doc__:
self._system.description = new_client.__doc__.split("\n")[0]
# Now roll up / interpret all metadata to get the Commands
self._system.commands = _parse_client(new_client)
try:
# Put some attributes on the Client class
client_clazz = type(new_client)
client_clazz.current_request = property(
lambda _: request_context.current_request
)
# Add for back-compatibility
client_clazz._bg_name = self._system.name
client_clazz._bg_version = self._system.version
client_clazz._bg_commands = self._system.commands
client_clazz._current_request = client_clazz.current_request
except TypeError:
if sys.version_info.major != 2:
raise
self._logger.warning(
"Unable to assign attributes to Client class - current_request will "
"not be available. If you're using an old-style class declaration "
"it's recommended to switch to new-style if possible."
)
self._client = new_client
@property
def system(self):
return self._system
@property
def instance(self):
return self._instance
@property
def unique_name(self):
return "%s:%s[%s]-%s" % (
self._system.namespace,
self._system.name,
self._config.instance_name,
self._system.version,
)
@staticmethod
def _set_signal_handlers():
"""Ensure that SIGINT and SIGTERM will gracefully stop the Plugin"""
def _handler(_signal, _frame):
raise KeyboardInterrupt
signal.signal(signal.SIGINT, _handler)
signal.signal(signal.SIGTERM, _handler)
@staticmethod
def _set_exception_hook(logger):
"""Ensure uncaught exceptions are logged instead of being written to stderr"""
def _hook(exc_type, exc_value, traceback):
logger.error(
"An uncaught exception was raised in the plugin process:",
exc_info=(exc_type, exc_value, traceback),
)
sys.excepthook = _hook
def _startup(self):
"""Plugin startup procedure
This method actually starts the plugin. When it completes the plugin should be
considered in a "running" state - listening to the appropriate message queues,
connected to the Beer-garden server, and ready to process Requests.
This method should be the first time that a connection to the Beer-garden
server is *required*.
"""
self._logger.debug("About to start up plugin %s", self.unique_name)
if not self._ez_client.can_connect():
raise RestConnectionError("Cannot connect to the Beer-garden server")
# If namespace couldn't be determined at init try one more time
if not self._legacy and not self._config.namespace:
self._setup_namespace()
self._system = self._initialize_system()
self._instance = self._initialize_instance()
if self._config.working_directory is None:
app_parts = [self._system.name, self._instance.name]
if self._system.namespace:
app_parts.insert(0, self._system.namespace)
self._config.working_directory = appdirs.user_data_dir(
appname=os.path.join(*app_parts), version=self._system.version
)
workdir = Path(self._config.working_directory)
if not workdir.exists():
workdir.mkdir(parents=True)
self._logger.debug("Initializing and starting processors")
self._admin_processor, self._request_processor = self._initialize_processors()
self._admin_processor.startup()
self._request_processor.startup()
self._logger.debug("Setting signal handlers")
self._set_signal_handlers()
def _shutdown(self):
"""Plugin shutdown procedure
This method gracefully stops the plugin. When it completes the plugin should be
considered in a "stopped" state - the message processors shut down and all
connections closed.
"""
self._logger.debug("About to shut down plugin %s", self.unique_name)
self._shutdown_event.set()
self._logger.debug("Shutting down processors")
self._request_processor.shutdown()
self._admin_processor.shutdown()
try:
self._ez_client.update_instance(self._instance.id, new_status="STOPPED")
except Exception:
self._logger.warning(
"Unable to notify Beer-garden that this plugin is STOPPED, so this "
"plugin's status may be incorrect in Beer-garden"
)
self._logger.debug("Successfully shutdown plugin {0}".format(self.unique_name))
def _initialize_logging(self):
"""Configure logging with Beer-garden's configuration for this plugin.
This method will ask Beer-garden for a logging configuration specific to this
plugin and will apply that configuration to the logging module.
Note that this method will do nothing if the logging module's configuration was
already set or a logger kwarg was given during Plugin construction.
Returns:
None
"""
if self._custom_logger:
self._logger.debug("Skipping logging init: custom logger detected")
return
try:
log_config = self._ez_client.get_logging_config(
local=bool(self._config.runner_id)
)
except Exception as ex:
self._logger.warning(
"Unable to retrieve logging configuration from Beergarden, the default "
"configuration will be used instead. Caused by: {0}".format(ex)
)
return
try:
configure_logging(
log_config,
namespace=self._system.namespace,
system_name=self._system.name,
system_version=self._system.version,
instance_name=self._config.instance_name,
)
except Exception as ex:
# Reset to default config as logging can be seriously wrong now
logging.config.dictConfig(default_config(level=self._config.log_level))
self._logger.exception(
"Error encountered during logging configuration. This most likely "
"indicates an issue with the Beergarden server plugin logging "
"configuration. The default configuration will be used instead. Caused "
"by: {0}".format(ex)
)
return
# Finally, log uncaught exceptions using the configuration instead of stderr
self._set_exception_hook(self._logger)
def _initialize_system(self):
"""Let Beergarden know about System-level info
This will attempt to find a system with a name and version matching this plugin.
If one is found this will attempt to update it (with commands, metadata, etc.
from this plugin).
If a System is not found this will attempt to create one.
Returns:
Definition of a Beergarden System this plugin belongs to.
Raises:
PluginValidationError: Unable to find or create a System for this Plugin
"""
# Make sure that the system is actually valid before trying anything
self._validate_system()
# Do any necessary template resolution
self._system.template = resolve_template(self._system.template)
existing_system = self._ez_client.find_unique_system(
name=self._system.name,
version=self._system.version,
namespace=self._system.namespace,
)
if not existing_system:
try:
# If this succeeds can just finish here
return self._ez_client.create_system(self._system)
except ConflictError:
# If multiple instances are starting up at once and this is a new system
# the create can return a conflict. In that case just try the get again
existing_system = self._ez_client.find_unique_system(
name=self._system.name,
version=self._system.version,
namespace=self._system.namespace,
)
# If we STILL can't find a system something is really wrong
if not existing_system:
raise PluginValidationError(
"Unable to find or create system {0}".format(self._system)
)
# We always update with these fields
update_kwargs = {
"new_commands": self._system.commands,
"metadata": self._system.metadata,
"description": self._system.description,
"display_name": self._system.display_name,
"icon_name": self._system.icon_name,
"template": self._system.template,
}
# And if this particular instance doesn't exist we want to add it
if not existing_system.has_instance(self._config.instance_name):
update_kwargs["add_instance"] = Instance(name=self._config.instance_name)
return self._ez_client.update_system(existing_system.id, **update_kwargs)
def _initialize_instance(self):
"""Let Beer-garden know this instance is ready to process Requests"""
# Sanity check to make sure an instance with this name was registered
if not self._system.has_instance(self._config.instance_name):
raise PluginValidationError(
"Unable to find registered instance with name '%s'"
% self._config.instance_name
)
return self._ez_client.initialize_instance(
self._system.get_instance_by_name(self._config.instance_name).id,
runner_id=self._config.runner_id,
)
def _initialize_processors(self):
"""Create RequestProcessors for the admin and request queues"""
# If the queue connection is TLS we need to update connection params with
# values specified at plugin creation
connection_info = self._instance.queue_info["connection"]
if "ssl" in connection_info:
connection_info["ssl"].update(
{
"ca_cert": self._config.ca_cert,
"ca_verify": self._config.ca_verify,
"client_cert": self._config.client_cert,
}
)
# Each RequestProcessor needs a RequestConsumer, so start with those
common_args = {
"connection_type": self._instance.queue_type,
"connection_info": connection_info,
"panic_event": self._shutdown_event,
"max_reconnect_attempts": self._config.mq.max_attempts,
"max_reconnect_timeout": self._config.mq.max_timeout,
"starting_reconnect_timeout": self._config.mq.starting_timeout,
}
admin_consumer = RequestConsumer.create(
thread_name="Admin Consumer",
queue_name=self._instance.queue_info["admin"]["name"],
max_concurrent=1,
**common_args
)
request_consumer = RequestConsumer.create(
thread_name="Request Consumer",
queue_name=self._instance.queue_info["request"]["name"],
max_concurrent=self._config.max_concurrent,
**common_args
)
# Both RequestProcessors need an updater
updater = HTTPRequestUpdater(
self._ez_client,
self._shutdown_event,
max_attempts=self._config.max_attempts,
max_timeout=self._config.max_timeout,
starting_timeout=self._config.starting_timeout,
)
# Finally, create the actual RequestProcessors
admin_processor = AdminProcessor(
target=self,
updater=updater,
consumer=admin_consumer,
plugin_name=self.unique_name,
max_workers=1,
)
request_processor = RequestProcessor(
target=self._client,
updater=updater,
consumer=request_consumer,
validation_funcs=[self._correct_system, self._is_running],
plugin_name=self.unique_name,
max_workers=self._config.max_concurrent,
resolver=ResolutionManager(easy_client=self._ez_client),
system=self._system,
)
return admin_processor, request_processor
def _start(self):
"""Handle start Request"""
self._instance = self._ez_client.update_instance(
self._instance.id, new_status="RUNNING"
)
def _stop(self):
"""Handle stop Request"""
# Because the run() method is on a 0.1s sleep there's a race regarding if the
# admin consumer will start processing the next message on the queue before the
# main thread can stop it. So stop it here to prevent that.
self._request_processor.consumer.stop_consuming()
self._admin_processor.consumer.stop_consuming()
self._shutdown_event.set()
def _status(self):
"""Handle status Request"""
try:
self._ez_client.instance_heartbeat(self._instance.id)
except (RequestsConnectionError, RestConnectionError):
pass
def _read_log(self, **kwargs):
"""Handle read log Request"""
log_file = find_log_file()
if not log_file:
raise RequestProcessingError(
"Error attempting to retrieve logs - unable to determine log filename. "
"Please verify that the plugin is writing to a log file."
)
try:
return read_log_file(log_file=log_file, **kwargs)
except IOError as e:
raise RequestProcessingError(
"Error attempting to retrieve logs - unable to read log file at {0}. "
"Root cause I/O error {1}: {2}".format(log_file, e.errno, e.strerror)
)
def _correct_system(self, request):
"""Validate that a request is intended for this Plugin"""
request_system = getattr(request, "system") or ""
if request_system.upper() != self._system.name.upper():
raise DiscardMessageException(
"Received message for system {0}".format(request.system)
)
def _is_running(self, _):
"""Validate that this plugin is still running"""
if self._shutdown_event.is_set():
raise RequestProcessingError(
"Unable to process message - currently shutting down"
)
def _legacy_garden(self):
"""Determine if this plugin is connected to a legacy garden"""
legacy = False
try:
# Need to be careful since v2 doesn't have "beer_garden_version"
raw_version = self._ez_client.get_version()
if "beer_garden_version" in raw_version:
bg_version = Version(raw_version["beer_garden_version"])
else:
bg_version = Version(raw_version["brew_view_version"])
if bg_version < Version("3"):
legacy = True
_deprecate(
"Looks like your plugin is using version 3 brewtils but connecting "
"to a version 2 Beer Garden. Please be aware that this "
"functionality will stop being officially supported in the next "
"brewtils minor release."
)
self._logger.warning(
"This plugin is using brewtils version {0} but is connected to a "
"legacy Beer Garden (version {1}). Please be aware that certain "
"features such as namespaces and logging configuration will not "
"work correctly until the Beer Garden is upgraded.".format(
brewtils.__version__, bg_version
)
)
except Exception as ex:
self._logger.warning(
"An exception was raised while attempting to determine Beer Garden "
"version, assuming non-legacy."
)
self._logger.debug("Underlying exception: %s" % ex, exc_info=True)
return legacy
def _setup_logging(self, logger=None, **kwargs):
"""Set up logging configuration and get a logger for the Plugin
This method will configure Python-wide logging for the process if it has not
already been configured. Whether or not logging has been configured is
determined by the root handler count - if there aren't any then it's assumed
logging has not already been configured.
The configuration applied (again, if no configuration has already happened) is
a stream handler with elevated log levels for libraries that are verbose. The
overall level will be loaded as a configuration option, so it can be set as a
keyword argument, command line option, or environment variable.
A logger to be used by the Plugin will be returned. If the ``logger`` keyword
parameter is given then that logger will be used, otherwise a logger will be
generated from the standard ``logging`` module.
Finally, if the ``logger`` keyword parameter is supplied it's assumed that
logging is already configured and no further configuration will be applied.
Args:
logger: A custom logger
**kwargs: Will be used to load the bootstrap config
Returns:
A logger for the Plugin
"""
if logger or len(logging.root.handlers) != 0:
self._custom_logger = True
else:
# log_level is the only bootstrap config item
boot_config = load_config(bootstrap=True, **kwargs)
logging.config.dictConfig(default_config(level=boot_config.log_level))
self._custom_logger = False
return logger or logging.getLogger(__name__)
def _setup_namespace(self):
"""Determine the namespace the Plugin is operating in
This function attempts to determine the correct namespace and ensures that
the value is set in the places it needs to be set.
First, look in the resolved system (self._system) to see if that has a
namespace. If it does, either:
- A complete system definition with a namespace was provided
- The namespace was resolved from the config
In the latter case nothing further needs to be done. In the former case we
need to set the global config namespace value to the system's namespace value
so that any SystemClients created after the plugin will have the correct value.
Because we have no way to know which case is correct we assume the former and
always set the config value.
If the system does not have a namespace then we attempt to use the EasyClient to
determine the "default" namespace. If successful we set both the global config
and the system namespaces to the default value.
If the attempt to determine the default namespace is not successful we log a
warning. We don't really want to *require* the connection to Beer-garden until
Plugin.run() is called. Raising an exception here would do that, so instead we
just log the warning. Another attempt will be made to determine the namespace
in Plugin.run() which will raise on failure (but again, SystemClients created
before the namespace is determined will have potentially incorrect namespaces).
"""
try:
ns = self._system.namespace or self._ez_client.get_config()["garden_name"]
self._system.namespace = ns
self._config.namespace = ns
CONFIG.namespace = ns
except Exception as ex:
self._logger.warning(
"Namespace value was not resolved from config sources and an exception "
"was raised while attempting to determine default namespace value. "
"Created SystemClients may have unexpected namespace values. "
"Underlying exception was:\n%s" % ex
)
def _setup_system(self, system, plugin_kwargs):
helper_keywords = {
"name",
"version",
"description",
"icon_name",
"display_name",
"max_instances",
"metadata",
"namespace",
"template",
}
if system:
if helper_keywords.intersection(plugin_kwargs.keys()):
raise ValidationError(
"Sorry, you can't provide a complete system definition as well as "
"system creation helper kwargs %s" % helper_keywords
)
if not system.instances:
raise ValidationError(
"Explicit system definition requires explicit instance "
"definition (use instances=[Instance(name='default')] for "
"default behavior)"
)
if not system.max_instances:
system.max_instances = len(system.instances)
else:
# Commands are not defined here - they're set in the client property setter
system = System(
name=self._config.name,
version=self._config.version,
description=self._config.description,
namespace=self._config.namespace,
metadata=json.loads(self._config.metadata),
instances=[Instance(name=self._config.instance_name)],
max_instances=self._config.max_instances,
icon_name=self._config.icon_name,
display_name=self._config.display_name,
template=self._config.template,
)
return system
def _validate_system(self):
"""Make sure the System definition makes sense"""
if not self._system.name:
raise ValidationError("Plugin system must have a name")
if not self._system.version:
raise ValidationError("Plugin system must have a version")
client_name = getattr(self._client, "_bg_name", None)
if client_name and client_name != self._system.name:
raise ValidationError(
"System name '%s' doesn't match name from client decorator: "
"@system(bg_name=%s)" % (self._system.name, client_name)
)
client_version = getattr(self._client, "_bg_version", None)
if client_version and client_version != self._system.version:
raise ValidationError(
"System version '%s' doesn't match version from client decorator: "
"@system(bg_version=%s)" % (self._system.version, client_version)
)
# These are provided for backward-compatibility
@property
def bg_host(self):
"""
.. deprecated:: 3.0
bg_host is now in ``_config`` (``plugin._config.bg_host``)
Provided for backward-compatibility
"""
_deprecate("bg_host is now in _config (plugin._config.bg_host)")
return self._config.bg_host
@property
def bg_port(self):
"""
.. deprecated:: 3.0
bg_port is now in _config (``plugin._config.bg_port``)
Provided for backward-compatibility
"""
_deprecate("bg_port is now in _config (plugin._config.bg_port)")
return self._config.bg_port
@property
def ssl_enabled(self):
"""
.. deprecated:: 3.0
ssl_enabled is now in ``_config`` (``plugin._config.ssl_enabled``)
Provided for backward-compatibility
"""
_deprecate("ssl_enabled is now in _config (plugin._config.ssl_enabled)")
return self._config.ssl_enabled
@property
def ca_cert(self):
"""
.. deprecated:: 3.0
ca_cert is now in ``_config`` (``plugin._config.ca_cert``)
Provided for backward-compatibility
"""
_deprecate("ca_cert is now in _config (plugin._config.ca_cert)")
return self._config.ca_cert
@property
def client_cert(self):
"""
.. deprecated:: 3.0
client_cert is now in ``_config`` (``plugin._config.client_cert``)
Provided for backward-compatibility
"""
_deprecate("client_cert is now in _config (plugin._config.client_cert)")
return self._config.client_cert
@property
def bg_url_prefix(self):
"""
.. deprecated:: 3.0
bg_url_prefix is now in ``_config`` (``plugin._config.bg_url_prefix``)
Provided for backward-compatibility
"""
_deprecate("bg_url_prefix is now in _config (plugin._config.bg_url_prefix)")
return self._config.bg_url_prefix
@property
def ca_verify(self):
"""
.. deprecated:: 3.0
ca_verify is now in ``_config`` (``plugin._config.ca_verify``)
Provided for backward-compatibility
"""
_deprecate("ca_verify is now in _config (plugin._config.ca_verify)")
return self._config.ca_verify
@property
def max_attempts(self):
"""
.. deprecated:: 3.0
max_attempts is now in ``_config`` (``plugin._config.max_attempts``)
Provided for backward-compatibility
"""
_deprecate("max_attempts is now in _config (plugin._config.max_attempts)")
return self._config.max_attempts
@property
def max_timeout(self):
"""
.. deprecated:: 3.0
max_timeout is now in ``_config`` (``plugin._config.max_timeout``)
Provided for backward-compatibility
"""
_deprecate("max_timeout has moved into _config (plugin._config.max_timeout)")
return self._config.max_timeout
@property
def starting_timeout(self):
"""
.. deprecated:: 3.0
starting_timeout is now in ``_config`` (``plugin._config.starting_timeout``)
Provided for backward-compatibility
"""
_deprecate(
"starting_timeout is now in _config (plugin._config.starting_timeout)"
)
return self._config.starting_timeout
@property
def max_concurrent(self):
"""
.. deprecated:: 3.0
max_concurrent is now in ``_config`` (``plugin._config.max_concurrent``)
Provided for backward-compatibility
"""
_deprecate("max_concurrent is now in _config (plugin._config.max_concurrent)")
return self._config.max_concurrent
@property
def instance_name(self):
"""
.. deprecated:: 3.0
instance_name is now in ``_config`` (``plugin._config.instance_name``)
Provided for backward-compatibility
"""
_deprecate("instance_name is now in _config (plugin._config.instance_name)")
return self._config.instance_name
@property
def connection_parameters(self):
"""
.. deprecated:: 3.0
connection_parameters has been removed. Please use ``_config``
Provided for backward-compatibility
"""
_deprecate("connection_parameters attribute was removed, please use '_config'")
return {key: self._config[key] for key in _CONNECTION_SPEC}
@property
def metadata(self):
"""
.. deprecated:: 3.0
metadata is now part of the ``system`` attribute (``plugin.system.metadata``)
Provided for backward-compatibility
"""
_deprecate("metadata is a part of the system attribute (plugin.system.metadata")
return self._system.metadata
@property
def bm_client(self):
"""
.. deprecated:: 3.0
bm_client attribute has been renamed to ``_ez_client``.
Provided for backward-compatibility
"""
_deprecate("bm_client attribute has been renamed to _ez_client")
return self._ez_client
@property
def shutdown_event(self):
"""
.. deprecated:: 3.0
shutdown_event attribute has been renamed to ``_shutdown_event``.
Provided for backward-compatibility
"""
_deprecate("shutdown_event attribute has been renamed to _shutdown_event")
return self._shutdown_event
@property
def logger(self):
"""
.. deprecated:: 3.0
logger attribute has been renamed to ``_logger``.
Provided for backward-compatibility
"""
_deprecate("logger attribute has been renamed to _logger")
return self._logger
# Alias old names
class PluginBase(Plugin):
"""
.. deprecated:: 3.0
Will be removed in version 4.0. Please use ``Plugin`` instead.
``Plugin`` alias provided for backward-compatibility
"""
def __init__(self, *args, **kwargs):
_deprecate(
"Looks like you're creating a 'PluginBase'. Heads up - this name will be "
"removed in version 4.0, please use 'Plugin' instead. Thanks!"
)
super(PluginBase, self).__init__(*args, **kwargs)
class RemotePlugin(Plugin):
"""
.. deprecated:: 3.0
Will be removed in version 4.0. Please use ``Plugin`` instead.
``Plugin`` alias provided for backward-compatibility
"""
def __init__(self, *args, **kwargs):
_deprecate(
"Looks like you're creating a 'RemotePlugin'. Heads up - this name will be "
"removed in version 4.0, please use 'Plugin' instead. Thanks!"
)
super(RemotePlugin, self).__init__(*args, **kwargs)
```
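Tying the pieces together, a minimal end-to-end use of this class might look like the sketch below (editor's example; the host/port values and ``ExampleClient`` are placeholders, and the decorators are the ones from the decorators module above):
```python
from brewtils import Plugin, command, system


@system
class ExampleClient(object):
    """A tiny client exposing a single command"""

    @command
    def say_hello(self):
        return "hello"


if __name__ == "__main__":
    plugin = Plugin(
        client=ExampleClient(),
        name="example",
        version="1.0.0",
        bg_host="localhost",  # placeholder Beer-garden host
        bg_port=2337,         # placeholder port
    )
    plugin.run()
```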
#### File: brewtils/test/schema_test.py
```python
import pytest
from mock import Mock
from pytest_lazyfixture import lazy_fixture
from brewtils.models import System
from brewtils.schemas import (
BaseSchema,
DateTime,
SystemSchema,
_deserialize_model,
_serialize_model,
model_schema_map,
)
from brewtils.schema_parser import SchemaParser
class TestSchemas(object):
def test_make_object(self):
base_schema = BaseSchema()
assert "input" == base_schema.make_object("input")
def test_make_object_with_model(self):
schema = SystemSchema(context={"models": {"SystemSchema": System}})
value = schema.make_object({"name": "name"})
assert isinstance(value, System)
def test_get_attributes(self):
attributes = SystemSchema.get_attribute_names()
assert "id" in attributes
assert "name" in attributes
assert "__model__" not in attributes
class TestFields(object):
@pytest.mark.parametrize(
"dt,localtime,expected",
[
(lazy_fixture("ts_dt"), False, lazy_fixture("ts_epoch")),
(lazy_fixture("ts_dt"), True, lazy_fixture("ts_epoch")),
(lazy_fixture("ts_dt_eastern"), False, lazy_fixture("ts_epoch_eastern")),
(lazy_fixture("ts_dt_eastern"), True, lazy_fixture("ts_epoch")),
(lazy_fixture("ts_epoch"), False, lazy_fixture("ts_epoch")),
(lazy_fixture("ts_epoch"), True, lazy_fixture("ts_epoch")),
],
)
def test_to_epoch(self, dt, localtime, expected):
assert DateTime.to_epoch(dt, localtime) == expected
@pytest.mark.parametrize(
"epoch,expected",
[
(lazy_fixture("ts_epoch"), lazy_fixture("ts_dt")),
(lazy_fixture("ts_dt"), lazy_fixture("ts_dt")),
],
)
def test_from_epoch(self, epoch, expected):
assert DateTime.from_epoch(epoch) == expected
def test_modelfield_serialize_invalid_type(self):
with pytest.raises(TypeError):
_serialize_model(
"ignored", Mock(payload_type="INVALID"), type_field="payload_type"
)
def test_modelfield_serialize_unallowed_type(self):
with pytest.raises(TypeError):
_serialize_model(
"ignored",
Mock(payload_type="foo"),
type_field="payload_type",
allowed_types=["bar"],
)
def test_modelfield_deserialize_invalid_type(self):
with pytest.raises(TypeError):
_deserialize_model(
"ignored", {"payload_type": "INVALID"}, type_field="payload_type"
)
def test_modelfield_deserialize_unallowed_type(self):
with pytest.raises(TypeError):
_deserialize_model(
"ignored",
{"payload_type": "foo"},
type_field="payload_type",
allowed_types=["bar"],
)
def test_deserialize_mapping(self):
models = list(set(model_schema_map[dic] for dic in model_schema_map))
assert len(models) == len(
SchemaParser._models
), "Missing mapped schema for deserialization"
``` |
{
"source": "jlrussin/RL_project",
"score": 3
} |
#### File: RL_project/models/CNN.py
```python
import torch.nn as nn
class CNN(nn.Module):
def __init__(self,in_channels,embedding_size,in_height,in_width):
super(CNN, self).__init__()
self.in_height = in_height
self.in_width = in_width
self.conv1 = nn.Conv2d(in_channels,32,kernel_size=4,stride=2)
self.conv2 = nn.Conv2d(32,64,kernel_size=4,stride=2)
self.conv3 = nn.Conv2d(64,64,kernel_size=3,stride=1)
fc1_in_channels = self.calculate_FC_in(in_height,in_width)
self.fc = nn.Linear(fc1_in_channels,512)
self.out = nn.Linear(512,embedding_size)
self.relu = nn.ReLU()
def calculate_FC_in(self,H,W):
def conv2d_out_shape(H_in,W_in,kernel_size,stride):
H_out = int((H_in + 2*0 - 1*(kernel_size - 1) - 1)/stride) + 1
W_out = int((W_in + 2*0 - 1*(kernel_size - 1) - 1)/stride) + 1
return (H_out,W_out)
H,W = conv2d_out_shape(H,W,4,2)
H,W = conv2d_out_shape(H,W,4,2)
H,W = conv2d_out_shape(H,W,3,1)
fc1_in_channels = H*W*64
return fc1_in_channels
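# Editor's note (illustrative): with the kernel sizes and strides above, an
# 84x84 input shrinks to 41x41 -> 19x19 -> 17x17, so fc1_in_channels would be
# 17*17*64 = 18496.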
def forward(self,observation):
N = observation.size(0) # batch size
embedding = self.relu(self.conv1(observation))
embedding = self.relu(self.conv2(embedding))
embedding = self.relu(self.conv3(embedding))
embedding = self.relu(self.fc(embedding.view(N,-1)))
embedding = self.relu(self.out(embedding))
return embedding
``` |
{
"source": "jlrussin/syntactic_attention",
"score": 2
} |
#### File: jlrussin/syntactic_attention/evaluate_bleu.py
```python
import os
import argparse
import json
import numpy as np
from nltk.translate import bleu_score
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from data import ScanDataset,MTDataset,SCAN_collate
from SyntacticAttention import *
from utils import *
parser = argparse.ArgumentParser()
# Data
parser.add_argument('--dataset', choices=['MT'],
default='MT',
help='MT only')
parser.add_argument('--flip', type=str2bool, default=False,
help='Flip source and target for MT dataset')
parser.add_argument('--train_data_file',
default='data/MT/train',
help='Path to training set')
parser.add_argument('--val_data_file',
default='data/MT/test_no_unk.txt',
help='Path to validation set')
parser.add_argument('--test_data_file',
default='data/MT/test',
help='Path to test set')
parser.add_argument('--load_vocab_json',default='vocab_eng_MT_all.json',
help='Path to vocab json file')
# Model hyperparameters
parser.add_argument('--rnn_type', choices=['GRU', 'LSTM'],
default='LSTM', help='Type of rnn to use.')
parser.add_argument('--m_hidden_dim', type=int, default=120,
help='Number of hidden units in semantic embeddings')
parser.add_argument('--x_hidden_dim', type=int, default=200,
help='Number of hidden units in syntax rnn')
parser.add_argument('--n_layers', type=int, default=1,
help='Number of layers in RNNs')
parser.add_argument('--dropout_p', type=float, default=0.5,
help='Dropout rate')
parser.add_argument('--seq_sem', type=str2bool, default=False,
help='Semantic embeddings also processed with RNN.')
parser.add_argument('--syn_act', type=str2bool, default=False,
help='Syntactic information also used for action')
parser.add_argument('--sem_mlp', type=str2bool, default=False,
help='Nonlinear semantic layer with ReLU')
parser.add_argument('--load_weights_from', default=None,
help='Path to saved weights')
# Output options
#parser.add_argument('--results_dir', default='results',
# help='Results subdirectory to save results')
#parser.add_argument('--out_data_file', default='results.json',
# help='Name of output data file')
def get_reference_dict(dataset):
reference_dict = {}
for sample in dataset:
src = sample[2]
tar = sample[3]
assert src[0] == '<SOS>'
assert src[-1] == '<EOS>'
assert tar[0] == '<SOS>'
assert tar[-1] == '<EOS>'
src = src[1:-1]
tar = tar[1:-1]
key = '_'.join(src)
if key not in reference_dict:
reference_dict[key] = [tar]
else:
reference_dict[key].append(tar)
return reference_dict
def evaluate_bleu(dataloader,vocab,reference_dict,model,max_len,device):
# Setup
out_idx_to_token = vocab['out_idx_to_token']
model.max_len = max_len
print("Getting predictions...")
hypotheses = []
references_list = []
with torch.no_grad():
for sample_count,sample in enumerate(dataloader):
# Forward pass
instructions, true_actions, ins_tokens, act_tokens = sample
instructions = [ins.to(device) for ins in instructions]
true_actions = [ta.to(device) for ta in true_actions]
if len(true_actions[0]) < 6:
continue # Don't include if less than 4 words (without SOS, EOS)
actions,padded_true_actions = model(instructions,true_actions)
# Get hypothesis
max_actions = torch.argmax(actions,dim=1)
max_actions = max_actions.squeeze(0).cpu().numpy()
out_tokens = [out_idx_to_token[str(a)] for a in max_actions]
if '<EOS>' in out_tokens:
eos_index = out_tokens.index('<EOS>')
else:
eos_index = len(out_tokens)
hypothesis = out_tokens[:eos_index]
hypotheses.append(hypothesis)
# Get references
ins_words = ins_tokens[0][1:-1] # Remove <EOS> and <SOS>
key = '_'.join(ins_words)
references = reference_dict[key]
references_list.append(references)
# Compute BLEU
print("Computing BLEU score...")
bleu = bleu_score.corpus_bleu(references_list,hypotheses)
bleu = bleu*100
# Return model max_len to None
model.max_len = None
return bleu
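# Editor's note (illustrative): corpus_bleu expects, for each hypothesis, a list
# of token-list references, e.g.
#   references_list = [[['the', 'house', 'is', 'small']]]
#   hypotheses      = [['the', 'house', 'is', 'small']]
# which matches the shapes accumulated in the loop above.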
def main(args):
# CUDA
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
# Vocab
with open(args.load_vocab_json,'r') as f:
vocab = json.load(f)
in_vocab_size = len(vocab['in_token_to_idx'])
out_vocab_size = len(vocab['out_idx_to_token'])
# Dataset
batch_size = 1
train_data = MTDataset(args.train_data_file,vocab,args.flip)
val_data = MTDataset(args.val_data_file,vocab,args.flip)
test_data = MTDataset(args.test_data_file,vocab,args.flip)
train_loader = DataLoader(train_data,batch_size,
shuffle=True,collate_fn=SCAN_collate)
val_loader = DataLoader(val_data,batch_size,
shuffle=True,collate_fn=SCAN_collate)
test_loader = DataLoader(test_data,batch_size,
shuffle=True,collate_fn=SCAN_collate)
# Max lengths
train_max_len = max([len(b[3]) for b in train_data])
val_max_len = max([len(b[3]) for b in val_data])
test_max_len = max([len(b[3]) for b in test_data])
# Reference dicts
train_ref_dict = get_reference_dict(train_data)
val_ref_dict = get_reference_dict(val_data)
test_ref_dict = get_reference_dict(test_data)
# Model
model = Seq2SeqSynAttn(in_vocab_size, args.m_hidden_dim, args.x_hidden_dim,
out_vocab_size, args.rnn_type, args.n_layers,
args.dropout_p, args.seq_sem, args.syn_act,
args.sem_mlp, None, device)
if args.load_weights_from is not None:
model.load_state_dict(torch.load(args.load_weights_from))
model.to(device)
model.eval()
# Get BLEU scores
train_bleu = evaluate_bleu(train_loader,vocab,train_ref_dict,model,
train_max_len, device)
print("Training set: %s" % args.train_data_file)
print("Train BLEU: %f" % train_bleu)
val_bleu = evaluate_bleu(val_loader,vocab,val_ref_dict,model,
val_max_len, device)
print("Validation set: %s" % args.val_data_file)
print("Validation BLEU: %f" % val_bleu)
test_bleu = evaluate_bleu(test_loader,vocab,test_ref_dict,model,
test_max_len, device)
print("Test set: %s" % args.test_data_file)
print("Test BLEU: %f" % test_bleu)
if __name__ == '__main__':
args = parser.parse_args()
print(args)
main(args)
```
#### File: jlrussin/syntactic_attention/utils.py
```python
from data import ScanDataset,MTDataset
import argparse
import json
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def generate_vocab_json(dataset,data_file,flip,vocab_out_json):
if dataset == 'SCAN':
data = ScanDataset(data_file)
elif dataset == 'MT':
data = MTDataset(data_file,vocab=None,flip=flip)
vocab = data.vocab
with open(vocab_out_json,'w') as f:
json.dump(vocab,f)
``` |
{
"source": "jlrzarcor/ITAM-dpa2021",
"score": 2
} |
#### File: src/api/flask_cfi.py
```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_restplus import Api, Resource, fields
# gral libraries
import os
#import psycopg2
# Special routing for importing custom libraries
# Modify path
old_path = os.getcwd()
path = os.path.realpath('../..')
# Get imports
from src.utils.general import get_pg_service
pg = get_pg_service(path + '/conf/local/credentials.yaml')
#import src.utils.constants as ks
#from src.utils.general import get_db_conn_sql_alchemy
# Reset path
os.path.realpath(old_path)
# ================================= API ================================= #
# Connection to RDS-Postgres chicagofoodinsp db
#db_conn_str = 'postgresql://jl:alan_turing.13@rds-dpa-project.cudydvgqgf80.us-west-2.rds.amazonaws.com:5432/chicagofoodinsp'
# Connection to RDS-Postgres chicagofoodinsp db
#pg = get_pg_service('../../conf/local/credential.yaml')
#db_conn_str = 'postgresql://' + pg['user'] + ':' + pg['password'] + '@' + pg['host'] + ':' + str(pg['port']) + '/' + pg['dbname']
db_conn_str = 'postgresql://{}:{}@{}:{}/{}'.format(pg['user'], pg['password'], pg['host'], str(pg['port']), pg['dbname'])
# create flask app
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = db_conn_str
api = Api(app)
db = SQLAlchemy(app)
# api.scores table model
class Match(db.Model):
__table_args__ = {'schema':'api'}
__tablename__ = 'scores'
ingest_date = db.Column(db.DateTime)
index = db.Column(db.Integer, primary_key = True)
aka_name = db.Column(db.String)
license = db.Column(db.String)
score = db.Column(db.Float)
prediction = db.Column(db.Integer)
def __repr__(self):
return u'<{self.__class__.__name__}:{self.index}>'.format(self=self)
# ======= ======= ======= Models ======= ======= =======
# swagger model, marshall outputs
model = api.model("scores_table", {
'ingest_date' : fields.DateTime,
'index' : fields.Integer,
'aka_name' : fields.String,
'license' : fields.String,
'score' : fields.Float,
'prediction' : fields.Integer
})
# outputs
model_customer = api.model("search_by_customer", {
'predictions' : fields.Nested(model)
})
model_insp_dates = api.model("search_by_insp_date", {
'ingest_date' : fields.DateTime,
'predictions' : fields.Nested(model)
})
# ======= ======= ======= Endpoints ======= ======= =======
@api.route('/cfi')
class Chicago(Resource):
def get(self):
return{'Hello':'Hello World'}
@api.route('/cfi_license/<string:license>')
class Chicago(Resource):
@api.marshal_with(model_customer, as_list = True)
def get(self, license):
match = Match.query.filter_by(license=license).order_by(Match.score.desc()).limit(1).all()
predictions = []
for element in match:
predictions.append({'ingest_date' : element.ingest_date,
'index' : element.index,
'aka_name' : element.aka_name,
'license' : element.license,
'score' : element.score,
'prediction' : element.prediction})
return {'license': license, 'predictions': predictions}
@api.route('/cfi_prediction_date/<string:ingest_date>')
class Chicago(Resource):
@api.marshal_with(model_insp_dates, as_list = True)
def get(self, ingest_date):
match = Match.query.filter_by(ingest_date=ingest_date).order_by(Match.score.desc())
predictions = []
for element in match:
predictions.append({'ingest_date' : element.ingest_date,
'index' : element.index,
'aka_name' : element.aka_name,
'license' : element.license,
'score' : element.score,
'prediction' : element.prediction})
return {'ingest_date': ingest_date, 'predictions': predictions}
if __name__ == '__main__':
app.run(debug = True)
```
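A quick way to exercise these endpoints once the service is running (editor's sketch; the host/port assume Flask's default development server and the license value is made up):
```python
import requests

BASE = "http://localhost:5000"  # Flask development-server default

# Latest prediction for a given establishment license (placeholder value)
print(requests.get(BASE + "/cfi_license/1234567").json())

# All predictions produced on a given ingestion date
print(requests.get(BASE + "/cfi_prediction_date/2021-05-01").json())
```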
#### File: src/etl/task_almacenamiento.py
```python
import luigi
import luigi.contrib.s3
import json
import os
import pickle as pkl
import pandas as pd
from datetime import datetime
# importing custom libraries and modules
from src.etl.task_ingesta import TaskIngest
from src.etl.task_ingestion_metadata import TaskIngestMeta
import src.pipeline.ingesta_almacenamiento as ial
# ================================= LUIGI TASK ================================= #
class TaskStore(luigi.Task):
bucket = luigi.Parameter(default = "data-product-architecture-equipo-5")
# bucket = luigi.Parameter(default = "temp-dev-dpa")
prc_path = luigi.Parameter(default = "ingestion")
todate = datetime.date(datetime.today())
year = luigi.IntParameter(default = todate.year)
month = luigi.IntParameter(default = todate.month)
day = luigi.IntParameter(default = todate.day)
flg_i0_c1 = luigi.IntParameter(default = 1)
def requires(self):
return {'a' : TaskIngest(self.year, self.month, self.day, self.flg_i0_c1),
'b' : TaskIngestMeta(self.year, self.month, self.day, self.flg_i0_c1)}
def run(self):
data = json.load(self.input()['a'].open('r'))
with self.output().open('w') as f:
pkl.dump(data, f)
# Lineage. Creating Metadata @ .csv file
str_date = str(datetime.date(datetime(self.year, self.month, self.day)))
if self.flg_i0_c1 == 0:
flag = "initial"
str_file = "historic-inspections-" + str_date + ".pkl"
path_S3 = "s3://{}/{}/{}/YEAR={}/MONTH={}/DAY={}/{}".\
format(self.bucket, self.prc_path, flag, self.year, self.month, self.day, str_file)
else:
flag = "consecutive"
str_file = "consecutive-inspections-" + str_date + ".pkl"
path_S3 = "s3://{}/{}/{}/YEAR={}/MONTH={}/DAY={}/{}".\
format(self.bucket, self.prc_path, flag, self.year, self.month, self.day, str_file)
str_file_csv = str_date + ".csv"
output_path = "src/temp/metadata/almacenamiento/type={}/".format(self.flg_i0_c1)
os.makedirs(output_path, exist_ok = True)
dic_par = {'year':str(self.year),'month':str(self.month),'day':str(self.day),'flg_i0_c1':str(self.flg_i0_c1)}
df = pd.DataFrame({'fecha': [self.todate], 'param_exec': [json.dumps(dic_par)],'usuario': ['luigi'],
'num_regs_almac': [len(data)], 'ruta_S3': [path_S3]})
df.to_csv(output_path + str_file_csv, index=False, header=False)
def output(self):
# Formatting date parameters into date-string
str_date = str(datetime.date(datetime(self.year, self.month, self.day)))
# Set path to S3
if self.flg_i0_c1 == 0:
flag = "initial"
str_file = "historic-inspections-" + str_date + ".pkl"
output_path = "s3://{}/{}/{}/YEAR={}/MONTH={}/DAY={}/{}".\
format(self.bucket, self.prc_path, flag, self.year, self.month, self.day, str_file)
else:
flag = "consecutive"
str_file = "consecutive-inspections-" + str_date + ".pkl"
output_path = "s3://{}/{}/{}/YEAR={}/MONTH={}/DAY={}/{}".\
format(self.bucket, self.prc_path, flag, self.year, self.month, self.day, str_file)
s3 = ial.get_luigi_s3client()
return luigi.contrib.s3.S3Target(path = output_path, client = s3, format = luigi.format.Nop)
```
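For local testing, the task above can also be triggered programmatically along these lines (editor's sketch; the module path follows the repository layout and valid AWS/Socrata credentials are assumed):
```python
import luigi

from src.etl.task_almacenamiento import TaskStore

# Run a consecutive (flg_i0_c1=1) ingestion for a specific date using the
# in-process scheduler
luigi.build(
    [TaskStore(year=2021, month=4, day=15, flg_i0_c1=1)],
    local_scheduler=True,
)
```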
#### File: src/pipeline/ingesta_almacenamiento.py
```python
import boto3
import pickle as pkl
from sodapy import Socrata
from luigi.contrib.s3 import S3Client
from datetime import datetime
import src.utils.constants as ks
from src.utils.general import get_api_token, get_s3_credentials
# Environment variables that are loaded by default when the
# ingesta_almacenamiento.py library is imported
# ================================= FUNCTION ================================= #
def get_client():
'''
Function that reads the Chicago food inspections API token (stored in the
credentials.yaml file), connects to the API and returns the client that will
be used to run the queries.
outputs:
Client used to connect to the Chicago food inspections API
'''
token = get_api_token(ks.path)
client = Socrata(ks.socrata_domain, token['api_token']) #timeout=10)
return client
# ================================= FUNCTION 2 ================================= #
def ingesta_inicial(client, delta_date = '2021-02-15T00:00:00.000', limit = 300000):
'''
Function that uses the Chicago food inspections API client to run a historical
query of up to "limit" records and returns a list with the query results. If
there are not that many records, it returns as many as exist in the database.
inputs:
client: client object used to connect to the Chicago food inspections API
limit: integer with the maximum number of records to query
outputs:
List with the result of the initial (historical) query to the database
'''
return client.get(ks.socrata_ds_id, where = "inspection_date <= " + "'" + delta_date + "'", limit = limit)
# ================================= FUNCTION 3 ================================= #
def get_s3_resource():
'''
Function that creates an "s3" resource object to connect to the AWS S3 service
using the credentials found in credentials.yaml
outputs:
AWS S3 resource object
'''
s3_credentials = get_s3_credentials(ks.path)
session = boto3.Session(
aws_access_key_id = s3_credentials['aws_access_key_id'],
aws_secret_access_key = s3_credentials['aws_secret_access_key']
)
s3 = session.resource('s3')
return s3
def get_luigi_s3client():
'''
Function that creates a luigi S3 client object to connect to the AWS S3 service
using the credentials found in credentials.yaml
outputs:
S3Client object from luigi.contrib.s3
'''
s3_credentials = get_s3_credentials(ks.path)
client_s3_luigi = S3Client(
aws_access_key_id = s3_credentials['aws_access_key_id'],
aws_secret_access_key = s3_credentials['aws_secret_access_key']
)
return client_s3_luigi
# ================================= FUNCTION 4 ================================= #
# DEPRECATED FUNCTION
def guardar_ingesta(my_bucket, bucket_path, data):
'''
Function that takes as arguments the name of the S3 bucket where the query will
be stored, the path inside the bucket where the data retrieved from the Chicago
food inspections API will be saved, and the query that was run previously
(initial or consecutive). Two cases are possible:
bucket_path = ingestion/initial/:
The query covers everything in the database, up to the date the function is run
and capped at the number of records set in the -limit- variable of the
-ingesta_inicial- function.
bucket_path = ingestion/consecutive/:
The query covers everything in the database starting from the date set in the
-delta_date- variable and capped at the number of records set in the -limit-
variable of the -ingesta_consecutiva- function.
inputs:
my_bucket: string with the bucket name
bucket_path: string with the path where the data will be stored in the bucket
data: list with the query results
outputs:
The query results stored in the S3 bucket
'''
s3 = get_s3_resource()
pickle_buffer = pkl.dumps(data)
fecha = str(datetime.date(datetime.now()))
if bucket_path == "ingestion/initial":
nom_file = "ingestion/initial/" + "historic-inspections-" + fecha + ".pkl"
else:
nom_file = "ingestion/consecutive/" + "consecutive-inspections-" + fecha + ".pkl"
s3.Object(my_bucket, nom_file).put(Body = pickle_buffer)
return
# ================================= FUNCTION 5 ================================= #
def ingesta_consecutiva(client, delta_date = '2021-02-15T00:00:00.000', limit = 1000):
'''
Function that uses the Chicago food inspections API client to run a query of up
to "limit" records starting from the date set in the -delta_date- variable and
returns a list with the query results. If there are not that many records, it
returns as many as exist in the database.
inputs:
client: client object used to connect to the Chicago food inspections API
delta_date: date from which the query should start
limit: integer with the number of records to query
outputs:
List with the result of the consecutive query to the database
'''
return client.get(ks.socrata_ds_id, where = "inspection_date > " + "'" + delta_date + "'", limit = limit)
```
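The module above is normally driven from luigi tasks; the block below is a minimal usage sketch, not part of the repository, showing how its pieces might be wired together. It assumes the API client comes from the sodapy package, that the module is importable as `src.pipeline.ingesta_almacenamiento` (as the test file below imports it), and that the Socrata domain and the bucket name `"my-bucket"` are placeholder values.
```python
# Hedged usage sketch; "data.cityofchicago.org" and "my-bucket" are assumptions.
import pickle as pkl
from datetime import datetime

from sodapy import Socrata

import src.pipeline.ingesta_almacenamiento as ial

# Unauthenticated Socrata client for the Chicago open-data portal.
client = Socrata("data.cityofchicago.org", None)

# Historical pull up to the cut-off date, then an incremental pull after it.
historico = ial.ingesta_inicial(client, delta_date='2021-02-15T00:00:00.000', limit=1000)
delta = ial.ingesta_consecutiva(client, delta_date='2021-02-15T00:00:00.000', limit=100)

# Store the incremental pull in S3, mirroring the layout used by the
# (deprecated) guardar_ingesta helper.
s3 = ial.get_s3_resource()
fecha = str(datetime.date(datetime.now()))
key = "ingestion/consecutive/consecutive-inspections-" + fecha + ".pkl"
s3.Object("my-bucket", key).put(Body=pkl.dumps(delta))
```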
#### File: src/test/test_modelo.py
```python
import pandas as pd
import marbles.core
import pickle
import json
import boto3
from datetime import datetime
import os
# importing specific functions
from luigi.contrib.s3 import S3Target
from sklearn.model_selection import GridSearchCV, TimeSeriesSplit
from sklearn.tree import DecisionTreeClassifier
# importing custom libraries
import src.utils.constants as ks
from src.utils.general import get_pg_service
import src.pipeline.ingesta_almacenamiento as ial
# Requires...
# ================================= FUNCTION: testing model ================================= #
class TestModel(marbles.core.TestCase):
# Variables
todate = datetime.date(datetime.today())
test_meth = ''
status = ''
err_msg = ''
__name__ = 'TestModel'
def test_score(self):
S3_targ = pd.read_csv('src/test/trans_file.csv', header = None).iloc[0,0]
        # Split the S3 target path into bucket and key
S3_targ_splt = S3_targ.split("/")
buck_path = S3_targ_splt[2]
key_path = '/'.join(S3_targ_splt[3:])
        # Load the pickled model stored at that S3 location
s3 = ial.get_s3_resource()
datos = s3.meta.client.get_object(Bucket = buck_path, Key = key_path)
body = datos['Body'].read()
model_cfi = pickle.loads(body)
A = str(type(model_cfi))
comp_val = ""
if pd.read_csv('src/test/trans_file.csv', header = None).iloc[0,1] == 1:
comp_val = "<class 'sklearn.tree._classes.DecisionTreeClassifier'>"
print("\n\n", A, "\n\n",comp_val)
self.assertEqual(A, comp_val, note = "^^^^^^^^ El mejor modelo seleccionado no coincide con <class 'sklearn.tree._classes.DecisionTreeClassifier'>. ^^^^^^^^\n")
self.status = "TestPassed:)"
self.test_meth = "test_score"
```
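Since `marbles.core.TestCase` builds on the standard library's `unittest.TestCase`, the check above can be driven by the usual unittest machinery. The snippet below is a hedged sketch of how to run it, assuming the repository root is on `PYTHONPATH`, the `src/test/trans_file.csv` fixture exists and S3 credentials are configured; equivalently, `python -m unittest src.test.test_modelo` from the repository root.
```python
# Minimal runner sketch for the marbles test case above (assumptions noted in the text).
import unittest

from src.test.test_modelo import TestModel

suite = unittest.TestLoader().loadTestsFromTestCase(TestModel)
unittest.TextTestRunner(verbosity=2).run(suite)
```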
#### File: src/utils/bias_fairness.py
```python
import pickle
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from aequitas.group import Group
from aequitas.bias import Bias
from aequitas.fairness import Fairness
from aequitas.plotting import Plot
def bias_fair(modelo,df_train_test,df_fe):
    # Split the training and test sets
    # Training
X_train = df_train_test[df_train_test.Set == 'entrenamiento']
y_train = X_train.etiqueta
X_train = X_train.iloc[:,0:df_train_test.shape[1]-2]
    # Test
X_test = df_train_test[df_train_test.Set == 'prueba']
y_test = X_test.etiqueta
X_test = X_test.iloc[:,0:df_train_test.shape[1]-2]
predicted_scores = modelo.predict_proba(X_test)
    # Build the DataFrame that Aequitas needs to assess bias and fairness for the facility_type variable (class)
y_test2 = y_test.reset_index(drop=True)
df_dummy = pd.DataFrame()
df_dummy['scores'] = pd.Series(predicted_scores[:,1])
df_dummy['predic'] = np.where(df_dummy['scores'] < 0.7,0,1)
df_aeq = pd.DataFrame()
df_aeq['real'] = y_test2
df_aeq['prediccion'] = df_dummy.predic
df_aeq['faciliy_type'] = df_fe['class'].tail(len(df_dummy.predic)).reset_index(drop=True)
    # Reset the index and set the column names so the Aequitas functions recognise them
df_aeq = df_aeq.reset_index(drop=True)
df_aeq.columns = ['label_value','score','class']
    # Compute the group metrics
g = Group()
xtab, attrbs = g.get_crosstabs(df_aeq)
absolute_metrics = g.list_absolute_metrics(xtab)
metrics1 = xtab[['attribute_name', 'attribute_value']+[col for col in xtab.columns if col in absolute_metrics]].round(2)
    # Build the DataFrame that Aequitas needs to assess bias and fairness for the zip variable (level)
df_aeq2 = pd.DataFrame()
df_aeq2['real'] = y_test2
df_aeq2['prediccion'] = df_dummy.predic
df_aeq2['zip'] = df_fe['level'].tail(len(df_dummy.predic)).reset_index(drop=True)
    # Reset the index and set the column names so the Aequitas functions recognise them
df_aeq2 = df_aeq2.reset_index(drop=True)
df_aeq2.columns = ['label_value','score','level']
    # Compute the group metrics
g2 = Group()
xtab2, attrbs2 = g2.get_crosstabs(df_aeq2)
absolute_metrics2 = g2.list_absolute_metrics(xtab2)
metrics2 = xtab2[['attribute_name', 'attribute_value']+[col for col in xtab2.columns if col in absolute_metrics2]].round(2)
df_labels = pd.DataFrame()
df_labels['scores'] = pd.Series(predicted_scores[:,1])
df_labels['predicted'] = np.where(df_dummy['scores'] < 0.7,0,1)
df_labels['label'] = y_test2
metrics = pd.concat([metrics1,metrics2]).reset_index(drop = True)
metrics = metrics.fillna(0)
# Metadata
n_groups = len(metrics1.attribute_value) + len(metrics2.attribute_value)
n_attribute = metrics.attribute_name.nunique()
prop_pos_pred = df_labels.predicted.sum()/len(df_labels.predicted)
prop_pos_real = df_labels.label.sum()/len(df_labels.label)
return df_labels, metrics, n_groups, n_attribute, prop_pos_pred, prop_pos_real
```
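To make the input layout that `bias_fair` expects concrete, here is a small self-contained sketch with synthetic data. The column names (`Set`, `etiqueta`, `class`, `level`) mirror the function body; the import path assumes the repository layout used elsewhere in this dump, the stand-in model and the synthetic frames are illustrative only, and aequitas must be installed.
```python
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier

from src.utils.bias_fairness import bias_fair

rng = np.random.default_rng(0)
n = 200

# Feature matrix plus the two bookkeeping columns bias_fair expects at the end.
df_train_test = pd.DataFrame({'x1': rng.normal(size=n), 'x2': rng.normal(size=n)})
df_train_test['etiqueta'] = np.arange(n) % 2
df_train_test['Set'] = np.where(np.arange(n) < 150, 'entrenamiento', 'prueba')

# Feature-engineering frame providing the protected attributes.
df_fe = pd.DataFrame({'class': rng.choice(['restaurant', 'school'], size=n),
                      'level': rng.choice(['downtown', 'other'], size=n)})

# Stand-in model trained on the training split.
train = df_train_test[df_train_test.Set == 'entrenamiento']
modelo = DecisionTreeClassifier(max_depth=3).fit(train[['x1', 'x2']], train.etiqueta)

df_labels, metrics, n_groups, n_attr, prop_pred, prop_real = bias_fair(
    modelo, df_train_test, df_fe)
print(metrics.head())
```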
#### File: src/utils/predict.py
```python
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import DecisionTreeClassifier
def predict(df_fe, model):
var = df_fe[['aka_name', 'license']]
df_fe.drop(['ingest_date', 'aka_name', 'license'], axis=1, inplace=True)
data_input_ohe = pd.get_dummies(df_fe)
etiqueta = data_input_ohe.label_results
data_input_ohe= data_input_ohe.drop('label_results', axis = 1)
base = pd.DataFrame({'label_risk':0,'level_downtown':0,'level_high':0,'level_low-mid':0,'level_other':0,
"class_children's services facility":0,'class_daycare':0,'class_grocery store':0,
'class_other':0,'class_restaurant':0,'class_school':0}, index = [0])
b = list(base.columns)
orig = data_input_ohe.columns.tolist()
    # Record the positions (relative to the reference layout in `base`) of any
    # one-hot columns that are absent from df_fe's encoding...
    miss_col = []
    cont = 0
    for item in b:
        if item not in orig:
            miss_col.append(cont)
            cont = cont + 1
        else:
            cont = cont + 1
    # ...and insert them as all-zero columns so the column order matches the
    # layout the model was trained on.
    for index in miss_col:
        data_input_ohe.insert(index,base.columns[index],0)
predicted_scores = pd.DataFrame(model.predict_proba(data_input_ohe))
predicted_scores['predic'] = np.where(predicted_scores[1] < 0.7,0,1)
salida = var.loc[data_input_ohe.index,['aka_name','license']].reset_index()
salida['score'] = predicted_scores.iloc[:,1]
salida['prediction'] = predicted_scores.iloc[:,2]
return salida
``` |
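A hedged, self-contained sketch of the data layout `predict` assumes: `df_fe` must carry `aka_name`, `license`, `ingest_date`, a numeric `label_results` column and the categorical `level`/`class` columns, and the model must have been trained on the eleven one-hot columns listed in `base`. The import path and the stand-in model below are assumptions for illustration, not taken from the repository.
```python
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier

from src.utils.predict import predict

df_fe = pd.DataFrame({
    'aka_name': ['CAFE A', 'CAFE B'],
    'license': ['123', '456'],
    'ingest_date': ['2021-02-15', '2021-02-15'],
    'label_risk': [1, 0],
    'label_results': [1, 0],
    'level': ['downtown', 'other'],
    'class': ['restaurant', 'school'],
})

# Stand-in model fitted on the eleven one-hot columns used in `base`.
cols = ['label_risk', 'level_downtown', 'level_high', 'level_low-mid', 'level_other',
        "class_children's services facility", 'class_daycare', 'class_grocery store',
        'class_other', 'class_restaurant', 'class_school']
rng = np.random.default_rng(0)
X_fit = pd.DataFrame(rng.integers(0, 2, size=(50, len(cols))), columns=cols)
y_fit = np.arange(50) % 2
model = DecisionTreeClassifier(max_depth=3).fit(X_fit, y_fit)

print(predict(df_fe, model))
```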
{
"source": "jls713/jfactors",
"score": 2
} |
#### File: jfactors/flattened/process_data.py
```python
import numpy as np
import pandas as pd
import sys
import os.path
sys.path.append('/home/jls/work/data/jfactors/spherical/')
from J_D_table import posh_latex_names
## ============================================================================
def load_files(name):
''' Load in three sample files for dwarf <name> '''
name='triaxial_results/'+name
if os.path.isfile(name+'_nop') and os.path.isfile(name+'_ma') and os.path.isfile(name+'_sj'):
return np.genfromtxt(name+'_nop'),np.genfromtxt(name+'_ma'),np.genfromtxt(name+'_sj')
else:
return None,None,None
def write(l):
    ''' Output median and \pm 1\sigma errors for correction factors in latex
    form '''
l = l.T[4]
return r'$%0.2f^{+%0.2f}_{-%0.2f}$'%(np.median(l),np.percentile(l,84.1)-np.median(l),np.median(l)-np.percentile(l,15.9))
def write_ascii(l):
    ''' Output median and \pm 1\sigma errors for correction factors in ascii
    form '''
l = l.T[4]
return '%0.2f %0.2f %0.2f '%(np.median(l),np.percentile(l,84.1)-np.median(l),np.median(l)-np.percentile(l,15.9))
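## Added illustration (not in the original script): both helpers expect an
## array of samples whose fifth column (index 4) holds the correction factor,
## e.g.
##   samples = np.random.normal(1.0, 0.1, size=(1000, 5))
##   write(samples)        # -> something like '$1.00^{+0.10}_{-0.10}$'
##   write_ascii(samples)  # -> something like '1.00 0.10 0.10 '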
## ============================================================================
## 1. Read in data file and write headers to tables
data = pd.read_csv('../data/data.dat',sep=' ')
ff = open('corr_triax_table.dat','w')
ffa = open('corr_triax_table_ascii.dat','w')
ff.write('\\begin{tabular}{lcccc}\n')
ff.write('\\hline\n\\hline\n')
ff.write('Name & Ellipticity & $\mathcal{F}_{\mathrm{J},U}$& $\mathcal{F}_{\mathrm{J},R}$& $\mathcal{F}_{\mathrm{J},T}$\\\\ \n')
ff.write('\\hline\n')
## 2. Loop over dwarfs and compute median and \pm 1 \sigma for correction factors
for i in data.ellip.argsort():
d,e,f=load_files(data.Name[i])
ellip_string='&$%0.2f^{+%0.2f}_{-%0.2f}$&'%(data.ellip[i],data.ellip_e1[i],data.ellip_e2[i])
if(data.ellip_e1[i]!=data.ellip_e1[i]):
ellip_string='&$<%0.2f$&'%(data.ellip[i])
    if(d is None):
ff.write(posh_latex_names[data['Name'][i]]+ellip_string+'NaN&NaN&NaN\\\\\n')
ffa.write(data['Name'][i]+' %0.2f %0.2f %0.2f '%(data.ellip[i],data.ellip_e1[i],data.ellip_e2[i])+'NaN '*9+'\n')
else:
ff.write(posh_latex_names[data['Name'][i]]+ellip_string
+write(d)+'&'+write(e)+'&'+write(f)+'\\\\\n')
ffa.write(data['Name'][i]+' %0.2f %0.2f %0.2f '%(data.ellip[i],data.ellip_e1[i],data.ellip_e2[i])+write_ascii(d)+write_ascii(e)+write_ascii(f)+'\n')
ff.write('\\hline\n\\end{tabular}\n')
ff.close()
ffa.close()
## ============================================================================
```
#### File: jfactors/flattened/triax_cusps.py
```python
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import quad
from matplotlib.patches import Ellipse
import flattened as fJ
from scipy.optimize import curve_fit
import seaborn as sns
## ============================================================================
def geometric_factor(q,gamma):
''' Geometric factor for infinite axisymmetric cusp '''
## \int dt (cos^2(t)+sin^2(t)/q^2)^{1/2-gamma}
return quad(lambda t: np.power(np.power(np.cos(t),2.)+np.power(np.sin(t),2.)/q/q,0.5-gamma),0.,2.*np.pi)[0]
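# Quick sanity check (added comment, not in the original): for q = 1 the
# integrand is identically 1, so geometric_factor(1., gamma) equals 2*pi for
# any gamma, e.g. geometric_factor(1., 1.) is approximately 6.2832.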
def f1(p,q,gamma):
''' Virial ratio for infinite triaxial cusp '''
return quad(lambda phi:quad(lambda t: np.cos(phi)**2*np.sin(t)**3*(np.sin(t)**2*np.cos(phi)**2+np.sin(t)**2*np.sin(phi)**2/p/p+np.cos(t)**2./q/q)**(-gamma/2.),0.,np.pi)[0],0.,2.*np.pi)[0]/quad(lambda phi:quad(lambda t: np.sin(t)*np.cos(t)**2*(np.sin(t)**2*np.cos(phi)**2+np.sin(t)**2*np.sin(phi)**2/p/p+np.cos(t)**2./q/q)**(-gamma/2.),0.,np.pi)[0],0.,2.*np.pi)[0]
def f2(p,q,gamma):
''' Virial ratio for infinite triaxial cusp '''
return quad(lambda phi:quad(lambda t: np.sin(phi)**2*np.sin(t)**3*(np.sin(t)**2*np.cos(phi)**2+np.sin(t)**2*np.sin(phi)**2/p/p+np.cos(t)**2./q/q)**(-gamma/2.),0.,np.pi)[0],0.,2.*np.pi)[0]/quad(lambda phi:quad(lambda t: np.sin(t)*np.cos(t)**2*(np.sin(t)**2*np.cos(phi)**2+np.sin(t)**2*np.sin(phi)**2/p/p+np.cos(t)**2./q/q)**(-gamma/2.),0.,np.pi)[0],0.,2.*np.pi)[0]
def jkin(p,q,gamma,th,ph):
''' Kinematic factor for infinite triaxial cusp '''
P = p/fJ.qpot_from_q(p)
Q = q/fJ.qpot_from_q(q)
ff1 = f1(P,Q,gamma)
ff2 = f2(P,Q,gamma)
return ((1.+ff1+ff2)/(np.cos(th)**2+ff1*np.sin(th)**2*np.cos(ph)**2+ff2*np.sin(th)**2*np.sin(ph)**2)/3.)**2
## ============================================================================
def jgeo_x(p,q,gamma):
return p/q/q*geometric_factor(q/p,gamma)
def jgeo_y(p,q,gamma):
return 1./p/q/q*geometric_factor(q,gamma)
def jgeo_z(p,q,gamma):
return 1./p/p/q*geometric_factor(p,gamma)
def jkin_x(p,q,gamma):
return jkin(p,q,gamma,.5*np.pi,0.)
def jkin_y(p,q,gamma):
return jkin(p,q,gamma,.5*np.pi,.5*np.pi)
def jkin_z(p,q,gamma):
return jkin(p,q,gamma,0.,0.)
def jtot_x(p,q,gammaDM,gammaST):
return jgeo_x(p,q,gammaDM)*jkin_x(p,q,gammaST)
def jtot_y(p,q,gammaDM,gammaST):
return jgeo_y(p,q,gammaDM)*jkin_y(p,q,gammaST)
def jtot_z(p,q,gammaDM,gammaST):
return jgeo_z(p,q,gammaDM)*jkin_z(p,q,gammaST)
if __name__ == '__main__':
q = 0.7
p = 0.8
gg = np.linspace(0.,5.,10)
ggst = 3.
qq = np.linspace(0.1,p,10)
# plt.plot(gg,map(lambda g:jgeo_x(p,q,g),gg))
# plt.plot(gg,map(lambda g:jgeo_y(p,q,g),gg))
# plt.plot(gg,map(lambda g:jgeo_z(p,q,g),gg))
    plt.plot(qq,list(map(lambda g:jgeo_x(p,g,1.),qq)))
    plt.plot(qq,list(map(lambda g:jgeo_y(p,g,1.),qq)))
    plt.plot(qq,list(map(lambda g:jgeo_z(p,g,1.),qq)))
# plt.plot(gg,map(lambda g:jkin_x(p,g,ggst),qq))
# plt.plot(gg,map(lambda g:jkin_y(p,g,ggst),qq))
# plt.plot(gg,map(lambda g:jkin_z(p,g,ggst),qq))
# plt.plot(gg,map(lambda g:jtot_x(p,q,g,ggst),gg))
# plt.plot(gg,map(lambda g:jtot_y(p,q,g,ggst),gg))
# plt.plot(gg,map(lambda g:jtot_z(p,q,g,ggst),gg))
plt.savefig('tmp.pdf',bbox_inches='tight')
```
#### File: jfactors/flattened/unit_sphere.py
```python
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib.tri as mtri
import sys
sys.path.append('../../../code/jfactors/')
sys.path.append('../../../code/m2m/')
import jfactors_py as cJ
import seaborn as sns
from matplotlib import _cntr as cntr
from collections import namedtuple
## ============================================================================
## Ret 2 properties
## Need to repeat this here rather than importing from ret_2 because there
## is some problem with the 3D plotting otherwise...
DwarfProperties = namedtuple("DwarfProperties","r_maj_exp r_maj Distance Velocity_dispersion e")
RetII = DwarfProperties(3.37,3.37*1.67,30.,3.22,0.6) ## in arcmin, kpc, km/s
r_maj_exp = RetII.r_maj_exp ## arcmin
r_maj = RetII.r_maj #arcmin
D = RetII.Distance #kpc
slos = RetII.Velocity_dispersion #km/s
rh= (r_maj/60./180.*np.pi)*D ## in units kpc
## ============================================================================
def unit_sphere(outfile,gf=True,ell=False):
    ''' Compute stuff on a unit sphere.
        If ell = True, computes observed ellipticity.
        If ell = False, computes correction factors.
        Here we have specialized to the triaxial case. '''
## =======================================================================
## 1. Create maps of the correction factor and ellipticity over sphere
## Using T = 0.55
ba=0.733
ca=0.4
ang = 0.5
(n, m) = (160,160)
fac=1.
# Meshing a unit sphere according to n, m
phi = np.linspace(0, 2 * np.pi, num=n, endpoint=False)
theta = np.linspace(np.pi * 1./(m+1), np.pi*(1-1./(m+1)), num=m, endpoint=False)
phi, theta = np.meshgrid(phi, theta)
phi, theta = phi.ravel(), theta.ravel()
phi = np.append(phi, [0.]) # Adding the north pole...
theta = np.append(theta, [0.])
phi = np.append(phi, [0.]) # Adding the south pole...
theta = np.append(theta, [np.pi])
mesh_x, mesh_y = (theta*np.cos(phi), theta*np.sin(phi))
triangles = mtri.Triangulation(mesh_x, mesh_y).triangles
x, y, z = fac*np.sin(theta)*np.cos(phi), fac*np.sin(theta)*np.sin(phi), fac*np.cos(theta)
# Defining a custom color scalar field
sphM = cJ.PaperModel(1.,0.999,rh,slos,True)
sph = sphM.J_factor(0.,0.,D,ang,False,False)[0]
pM = cJ.PaperModel(ba,ca,rh,slos,True)
## We have the option of colouring the sphere according to J-factor or
## ellipticity -- if ell = True colour by ellipticity
def fnn(M,Th,Ph):
if(ell):
return M.ellipticity(Th,Ph)
else:
sphl = sph
if(gf):
sm = cJ.PaperModel(1.,1.,rh*np.sqrt(M.ellipticity(Th,Ph)),slos,True)
sphl = sm.J_factor(Th,Ph,D,ang,False,False)[0]
print Th,Ph,sphl
return M.J_factor(Th,Ph,D,ang,False,False)[0]/sphl
## Create a mesh that only covers an octant of the sphere -- as triaxial
phi2 = np.linspace(0, np.pi/2., num=n/4+1, endpoint=True)
theta2 = np.linspace(np.pi * 1./(m+1), np.pi/2., num=m/2+1, endpoint=True)
phi2, theta2 = np.meshgrid(phi2, theta2)
phi2, theta2 = phi2.ravel(), theta2.ravel()
phi2 = np.append(phi2, [0.]) # Adding the north pole...
theta2 = np.append(theta2, [0.])
vals = np.array(map(lambda t,p:fnn(pM,t,p),theta2,phi2))
## Now stack the results to cover the whole sphere
allvals= np.reshape(vals[:-1],(m/2+1,n/4+1))
allvals = np.hstack((allvals,allvals[:,::-1][:,1:]))
allvals = np.hstack((allvals,allvals[:,1:-1]))
allvals = np.vstack((allvals,allvals[::-1,:][1:-1,:]))
allvals = np.append(np.append(allvals,vals[-1]),vals[-1])
allvals = allvals.ravel()
## The colour is the average over the values on the triangle
colors = np.mean(np.log10(allvals[triangles]), axis=1)
if(ell):
colors = np.mean(allvals[triangles], axis=1)
## Create a map of the ellipticity on the sphere
ellip = np.array(map(lambda t,p:pM.ellipticity(t,p),theta2,phi2))
ellip_all= np.reshape(ellip[:-1],(m/2+1,n/4+1))
ellip_all = np.hstack((ellip_all,ellip_all[:,::-1][:,1:]))
ellip_all = np.hstack((ellip_all,ellip_all[:,1:-1]))
ellip_all = np.vstack((ellip_all,ellip_all[::-1,:][1:-1,:]))
ellip_all = np.append(np.append(ellip_all,ellip[-1]),ellip[-1])
ellip_all = ellip_all.ravel()
## =======================================================================
## 2. Plot the colour map
fig = plt.figure(figsize=[3.5,4.])
ax = fig.gca(projection='3d')
ax.set_aspect('equal')
cmap = sns.cubehelix_palette(8,start=.5,rot=-.75,as_cmap=True)
triang = mtri.Triangulation(x, y, triangles)
collec = ax.plot_trisurf(triang, z, cmap=cmap, shade=False, linewidth=0.,zorder=0)
collec.set_array(colors)
collec.autoscale()
lbl = r'$\mathcal{F}_\mathrm{J}$'
if(ell):
lbl = r'$1-e$'
plt.colorbar(collec,orientation='horizontal',label=lbl)
# ax = plt.gca()
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
ax.dist=7.
max_range = np.array([x.max()-x.min(), y.max()-y.min(), z.max()-z.min()]).max() / 2.0
mean_x = x.mean()
mean_y = y.mean()
mean_z = z.mean()
ax.set_xlim(mean_x - max_range, mean_x + max_range)
ax.set_ylim(mean_y - max_range, mean_y + max_range)
ax.set_zlim(mean_z - max_range, mean_z + max_range)
## =======================================================================
## 3. Add axes
#draw a vector
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
a = Arrow3D([-.8,-.8],[-.85,-.85],[.85,1.2], mutation_scale=40, lw=1, arrowstyle="-|>", color="k")
ax.add_artist(a)
ax.text(-.83,-.95,1.125, r"$z$")
a = Arrow3D([-.8,-.45],[-.85,-.85],[.85,.85], mutation_scale=40, lw=1, arrowstyle="-|>", color="k")
ax.text(-.62,-.9,0.77, r"$x$")
ax.add_artist(a)
a = Arrow3D([-.8,-.8],[-.85,-.5],[.85,.85], mutation_scale=40, lw=1, arrowstyle="-|>", color="k")
ax.text(-.83,-.62,0.9, r"$y$")
ax.add_artist(a)
## =======================================================================
## 4. Make a small ellipsoidal inset that shows isodensity
phis = np.linspace(0, 2 * np.pi, num=n/(n/40), endpoint=False)
thetas = np.linspace(np.pi * 1./(m+1), np.pi*(1-1./(m+1)), num=m/(m/40), endpoint=False)
phis, thetas = np.meshgrid(phis, thetas)
phis, thetas = phis.ravel(), thetas.ravel()
phis = np.append(phis, [0.]) # Adding the north pole...
thetas = np.append(thetas, [0.])
phis = np.append(phis, [0.]) # Adding the south pole...
thetas = np.append(thetas, [np.pi])
xs, ys, zs = fac*np.sin(thetas)*np.cos(phis), fac*np.sin(thetas)*np.sin(phis), fac*np.cos(thetas)
mesh_xs, mesh_ys = (thetas*np.cos(phis)-.7, thetas*np.sin(phis)*ba-.7)
triangles = mtri.Triangulation(mesh_xs, mesh_ys).triangles
fac = 0.3
triangs = mtri.Triangulation(fac*xs-.7, fac*ba*ys-.7, triangles)
collec = ax.plot_trisurf(triangs, fac*ca*zs-.9, color=sns.color_palette()[0],shade=True, linewidth=0.1,zorder=1)
ax.view_init(28,-62)
ax.dist=7.
## =======================================================================
## 5. Plot contours of constant observed ellipticity -- note we need to
## mask out those behind the sphere
ae = ax.azim,ax.elev
ae = np.deg2rad(ae)
ae[1] = np.pi/2.-ae[1]
ae[0] = ae[0]-np.pi
def dotpp(Th,Ph):
r = np.array([np.sin(ae[1])*np.cos(ae[0]),np.sin(ae[1])*np.sin(ae[0]),np.cos(ae[1])])
vv = np.array([np.sin(Th)*np.cos(Ph),np.sin(Th)*np.sin(Ph),np.cos(Th)])
return np.dot(r,vv)
phi = np.reshape(phi[:-2],(n,m))
ellip_all = np.reshape(ellip_all[:-2],(n,m))
theta = np.reshape(theta[:-2],(n,m))
phi = phi[:,3*n/40:25*n/40]
theta = theta[:,3*n/40:25*n/40]
ellip_all = ellip_all[:,3*n/40:25*n/40]
## add contours
C=cntr.Cntr(phi,theta,ellip_all)
contour_list = [0.5,0.6,0.7,0.8,0.9]
def plot_contour(th,ph):
doo = np.array(map(lambda t,p:dotpp(t,p),th,ph))
th = th[doo>0.]
ph = ph[doo>0.]
fac = 1.
x,y,z = fac*np.sin(th)*np.cos(ph-np.pi), fac*np.sin(th)*np.sin(ph-np.pi), fac*np.cos(th)
ax.plot(x,y,z,color='k',zorder=1,lw=0.5)
for cc in contour_list:
res = C.trace(cc)
nseg = len(res) // 2
segs, codes = res[:nseg], res[nseg:]
ss = segs[0]
ss = res[0]
for j in ss:
th,ph=ss.T[1],ss.T[0]
plot_contour(th,ph)
plot_contour(np.pi-th,ph)
plot_contour(th,np.pi-ph)
plot_contour(np.pi-th,np.pi-ph)
# plot_contour(th,ph+np.pi)
plot_contour(np.pi-th,ph+np.pi)
plot_contour(th,2.*np.pi-ph)
plot_contour(np.pi-th,2.*np.pi-ph)
th,ph = np.pi/2.-0.03,-np.pi/2.-0.3
ax.text3D(np.sin(th)*np.cos(ph),np.sin(th)*np.cos(ph),np.cos(th),r'$e=0.5$',zdir=np.array([-np.sin(ph),np.cos(ph),0.1]),fontsize=6,color='k')
th,ph = np.pi/2.-0.4,-np.pi/2.-0.3
ax.text3D(np.sin(th)*np.cos(ph),np.sin(th)*np.cos(ph),np.cos(th),r'$e=0.4$',zdir=np.array([-np.sin(ph),np.cos(ph),0.]),fontsize=6,color='k')
th,ph = np.pi/2.-0.68,-np.pi/2.-0.1
ax.text3D(np.sin(th)*np.cos(ph),np.sin(th)*np.cos(ph),np.cos(th),r'$e=0.3$',zdir=np.array([-np.sin(ph),np.cos(ph),-0.65]),fontsize=6,color='k')
th,ph = np.pi/2.-0.6,-np.pi/2.+0.22
ax.text3D(np.sin(th)*np.cos(ph),np.sin(th)*np.cos(ph),np.cos(th),r'$e=0.2$',zdir=np.array([-np.sin(ph),np.cos(ph),-1.85]),fontsize=6,color='k')
th,ph = np.pi/2.-0.5,-np.pi/2.+0.36
ax.text3D(np.sin(th)*np.cos(ph),np.sin(th)*np.cos(ph),np.cos(th),r'$e=0.1$',zdir=np.array([-np.sin(ph),np.cos(ph),-1.8]),fontsize=6,color='k')
plt.savefig(outfile,bbox_inches='tight',dpi=1000)
## ===========================================================================
if __name__ == '__main__':
unit_sphere('triax_figure_gf.png',gf=True,ell=False)
unit_sphere('triax_figure_ell.png',ell=True)
## ===========================================================================
```
#### File: jfactors/spherical/J_D_table.py
```python
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from scipy.special import gamma as Gamma
from spherical_Jfactors import *
### A set of strings to convert pandas table names into nicer display names
posh_names= {'BootesI':u'Boötes I',
'Carina':'Carina',
'Coma':'Coma Berenices',
'CVnI':'Canes Venatici I',
'CVnII':'Canes Venatici II',
'Draco':'Draco',
'Fornax':'Fornax',
'Hercules':'Hercules',
'LeoI':'Leo I',
'LeoII':'Leo II',
'LeoIV':'Leo IV',
'LeoV':'Leo V',
'LeoT':'Leo T',
'Sculptor':'Sculptor',
'Segue1':'Segue 1',
'Segue2':'Segue 2',
'Sextans':'Sextans',
'UrsaMajorI':'Ursa Major I',
'UrsaMajorII':'Ursa Major II',
'UrsaMinor':'Ursa Minor',
'ReticulumII': 'Reticulum II',
'TucanaII':'Tucana II',
'HydraII':'Hydra II',
'HorologiumI':'Horologium I',
'PiscesII':'Pisces II',
'GruI':'Grus I',
'Willman1':'Willman 1'}
posh_latex_names= {'BootesI':u'Bo\\"otes I',
'Carina':'Carina',
'Coma':'Coma Berenices',
'CVnI':'Canes Venatici I',
'CVnII':'Canes Venatici II',
'Draco':'Draco',
'Fornax':'Fornax',
'Hercules':'Hercules',
'LeoI':'Leo I',
'LeoII':'Leo II',
'LeoIV':'Leo IV',
'LeoV':'Leo V',
'LeoT':'Leo T',
'Sculptor':'Sculptor',
'Segue1':'Segue 1',
'Segue2':'Segue 2',
'Sextans':'Sextans',
'UrsaMajorI':'Ursa Major I',
'UrsaMajorII':'Ursa Major II',
'UrsaMinor':'Ursa Minor',
'ReticulumII': 'Reticulum II',
'TucanaII':'Tucana II',
'HydraII':'Hydra II',
'HorologiumI':'Horologium I',
'PiscesII':'Pisces II',
'GruI':'Grus I',
                   'Willman1':'Willman 1'}
bonnivard_names = {'BootesI':'boo1',
'Carina':'car',
'Coma':'coma',
'CVnI':'cvn1',
'CVnII':'cvn2',
'Draco':'dra',
'Fornax':'for',
'Hercules':'her',
'LeoI':'leo1',
'LeoII':'leo2',
'LeoIV':'leo4',
'LeoV':'leo5',
'LeoT':'leot',
'Sculptor':'scl',
'Segue1':'seg1',
'Segue2':'seg2',
'Sextans':'sex',
'UrsaMajorI':'uma1',
'UrsaMajorII':'uma2',
'UrsaMinor':'umi',
'Willman1':'wil1'}
def read_bonnivard_table(Name):
''' Reads annihilation data from Bonnivard (2015) '''
GEV2cm5toMsol2kpc5 = 2.2482330e-07
if Name in bonnivard_names:
data = np.genfromtxt('../data/bonnivard/'+bonnivard_names[Name]+'_Jalphaint_cls.output',
skip_header=5)
data = np.delete(data,[2,5],1)
df = pd.DataFrame(data,columns=['alpha','J','eJm68','eJp68','eJm95','eJp95'])
df['J']=np.log10(df['J']/GEV2cm5toMsol2kpc5)
df['eJm68']=np.log10(df['eJm68']/GEV2cm5toMsol2kpc5)
df['eJp68']=np.log10(df['eJp68']/GEV2cm5toMsol2kpc5)
df['eJm95']=np.log10(df['eJm95']/GEV2cm5toMsol2kpc5)
df['eJp95']=np.log10(df['eJp95']/GEV2cm5toMsol2kpc5)
return df
else:
return pd.DataFrame()
def read_bonnivard_table_decay(Name):
''' Reads decay data from Bonnivard (2015)'''
GEVcm2toMsolkpc2 = 8.5358230e-15
if Name in bonnivard_names:
data = np.genfromtxt('../data/bonnivard/'+bonnivard_names[Name]+'_Dalphaint_cls.output',
skip_header=5)
data = np.delete(data,[2,5],1)
df = pd.DataFrame(data,columns=['alpha','D','eDm68','eDp68','eDm95','eDp95'])
df['D']=np.log10(df['D']/GEVcm2toMsolkpc2 )
df['eDm68']=np.log10(df['eDm68']/GEVcm2toMsolkpc2)
df['eDp68']=np.log10(df['eDp68']/GEVcm2toMsolkpc2)
df['eDm95']=np.log10(df['eDm95']/GEVcm2toMsolkpc2)
df['eDp95']=np.log10(df['eDp95']/GEVcm2toMsolkpc2)
return df
else:
return pd.DataFrame()
def read_ackermann_data():
''' Reads data from the Ackermann Fermi-LAT paper '''
names = np.genfromtxt('../data/ackermann/ackermann_dwarfs.dat',skip_header=2,usecols=0,dtype=str)
data = np.genfromtxt('../data/ackermann/ackermann_dwarfs.dat',skip_header=2)[:,4:6]
df = pd.DataFrame(data,columns=['J','eJ'])
df['name']=names
return df
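# Illustrative usage of the readers above (added comment; assumes the
# ../data/bonnivard and ../data/ackermann files are present):
#   jdf = read_bonnivard_table('Draco')        # annihilation J(alpha) profile
#   ddf = read_bonnivard_table_decay('Draco')  # decay D(alpha) profile
#   ack = read_ackermann_data()                # Fermi-LAT J estimates per dwarf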
def make_table(data,geo_factor=True):
''' Outputs two tables of J- and D-factors for the dwarfs using the NFW
formula with rs = 5 R_half.
geo_factor multiplies the half-light radii by a factor sqrt(1-e) to
ellipticity correct them for use in the spherical formulae '''
geof = np.ones(len(data))
if(geo_factor):
geof = np.sqrt(1.-data['ellip'])
rnfwrs = 5.
N=100000
WEJ2 = wyns_formulaJ_error_sample(data,gamma=1.,angle='Half_05',N=N,
nfw=rnfwrs*data['R_half']*geof/1000.,
geo_factor=geo_factor,
walker_or_wolf="walker")
WED2 = wyns_formulaD_error_sample(data,gamma=1.,angle='Half_05',N=N,
nfw=rnfwrs*data['R_half']*geof/1000.,
geo_factor=geo_factor,
walker_or_wolf="walker")
WEJ3 = wyns_formulaJ_error_sample(data,gamma=1.,angle='Max',N=N,
nfw=rnfwrs*data['R_half']*geof/1000.,
geo_factor=geo_factor,
walker_or_wolf="walker")
WED3 = wyns_formulaD_error_sample(data,gamma=1.,angle='Max',N=N,
nfw=rnfwrs*data['R_half']*geof/1000.,
geo_factor=geo_factor,
walker_or_wolf="walker")
outfile=open('dwarfs_Jfactors.dat','w')
outfile.write('\\begin{tabular}{llccccc}\n')
outfile.write('\\hline\n\\hline\n')
outfile.write('Name & Distance & $\\theta_\mathrm{max}$ & $\log_{10} J(\\theta_\mathrm{max})$ & $\log_{10} J(0.5^\circ)$ & $\log_{10} D(\\theta_\mathrm{max})$ & $\log_{10} D(0.5^\circ)$\\\\ \n')
outfile.write('&[$\mathrm{kpc}$]& [$^\circ$] & [$\mathrm{GeV^2\,cm}^{-5}$] & [$\mathrm{GeV^2\,cm}^{-5}$] & [$\mathrm{GeV\,cm}^{-2}$] & [$\mathrm{GeV\,cm}^{-2}$]\\\\\n')
outfile.write('\\hline\n')
for i in range(len(WEJ2)):
string= posh_latex_names[data['Name'][i]]+\
"&$%0.0f\pm%0.0f$"%(data['D'][i],data['eD'][i])+" & $"+\
str(data['theta_max'][i])+"$&"+\
"$%0.2f_{-%0.2f}^{+%0.2f}$&"%(WEJ3[i][0],WEJ3[i][1],WEJ3[i][2])
if(i>22):
string+="-&"
else:
string+="$%0.2f_{-%0.2f}^{+%0.2f}$&"%(WEJ2[i][0],WEJ2[i][1],WEJ2[i][2])
string+="$%0.2f_{-%0.2f}^{+%0.2f}$&"%(WED3[i][0],WED3[i][1],WED3[i][2])
if(i>22):
string+="-"+"\\\\\n"
else:
            string+="$%0.2f_{-%0.2f}^{+%0.2f}$"%(WED2[i][0],WED2[i][1],WED2[i][2])+"\\\\\n"
if(i==8 or i==23):
outfile.write('\\hline\n')
outfile.write(string)
outfile.write('\\hline\n')
outfile.write('\end{tabular}\n')
outfile.close()
outfile=open('dwarfs_Jfactors_ascii.dat','w')
outfile.write('#Name D eD thetamax Jmax eJmax1 eJmax2 J05 eJ051 eJ052 Dmax eDmax1 eDmax2 D05 eD051 eD052\n')
for i in range(len(WEJ2)):
string= data['Name'][i]+\
" %0.0f %0.0f "%(data['D'][i],data['eD'][i])+\
str(data['theta_max'][i])+" "+\
"%0.2f %0.2f %0.2f "%(WEJ3[i][0],WEJ3[i][1],WEJ3[i][2])
if(i>22):
string+="%0.2f %0.2f %0.2f "%(WEJ3[i][0],WEJ3[i][1],WEJ3[i][2])
else:
string+="%0.2f %0.2f %0.2f "%(WEJ2[i][0],WEJ2[i][1],WEJ2[i][2])
string+="%0.2f %0.2f %0.2f "%(WED3[i][0],WED3[i][1],WED3[i][2])
if(i>22):
string+="%0.2f %0.2f %0.2f\n"%(WED3[i][0],WED3[i][1],WED3[i][2])
else:
            string+="%0.2f %0.2f %0.2f\n"%(WED2[i][0],WED2[i][1],WED2[i][2])
outfile.write(string)
outfile.close()
def add_thetas(ax,xrang,thetalist):
''' Add theta values to a plot '''
ylim=ax.get_ylim()
ax.set_ylim(ylim[0]-0.5,ylim[1])
for x,t in zip(xrang,thetalist):
ax.annotate(str(t)+r'$^\circ$',xy=(x,ylim[0]),horizontalalignment='center',verticalalignment='bottom',rotation=90)
def summary_data_plot():
''' Makes plots of data -- half-light radii, velocity dispersions and distances, along with J-factor estimates from the various methods '''
gs_gammas=np.genfromtxt('geringer_sameth_gamma.dat',skip_header=49)
cd=data[data.Class=='CD']
uf=data[data.Class=='UF']
labelrange=np.linspace(0.,len(data),len(data))
labelscd=labelrange[:len(cd)]
labelsuf=labelrange[len(cd):]
f,a=plt.subplots(2,4,figsize=(16,8))
plt.subplots_adjust(hspace=0.5)
for ai in a:
for aj in ai:
aj.set_xticks(labelrange)
aj.set_xticklabels(data.Name.values,rotation=90)
aj.set_xlim(labelrange[0]-1,labelrange[-1]+1)
for i in a[1]:
ls=i.axvline(labelscd[-1]+.5,c='k',ls='dashed')
ls.set_dashes((2,1))
ls=i.axvline(labelsuf[13]+.5,c='k',ls='dashed')
ls.set_dashes((2,1))
a[0][0].errorbar(labelscd,cd.D,yerr=cd.eD,fmt='.')
a[0][0].errorbar(labelsuf,uf.D.values,yerr=uf.eD.values,fmt='.')
a[0][0].set_ylabel(r'Distance/kpc')
a[0][1].errorbar(labelscd,cd.R_half,yerr=[cd.eR_half2,cd.eR_half1],fmt='.')
a[0][1].errorbar(labelsuf,uf.R_half,yerr=[uf.eR_half2,uf.eR_half1],fmt='.')
a[0][1].set_ylabel(r'$R_{\mathrm{half}}/\mathrm{pc}$')
a[0][2].errorbar(labelscd,cd.sigma_los,yerr=[cd.esigma_los2,cd.esigma_los1],fmt='.')
a[0][2].errorbar(labelsuf,uf.sigma_los,yerr=[uf.esigma_los2,uf.esigma_los1],fmt='.')
a[0][2].arrow(labelsuf[9],uf.sigma_los.values[9],0.,-0.5,fc=sns.color_palette()[1],ec=sns.color_palette()[1],head_length=0.2,head_width=0.3)
a[0][2].arrow(labelsuf[15],uf.sigma_los.values[15],0.,-0.5,fc=sns.color_palette()[1],ec=sns.color_palette()[1],head_length=0.2,head_width=0.3)
a[0][2].arrow(labelsuf[17],uf.sigma_los.values[17],0.,-0.5,fc=sns.color_palette()[1],ec=sns.color_palette()[1],head_length=0.2,head_width=0.3)
a[0][2].set_ylabel(r'$\sigma_{\mathrm{los}}/\mathrm{km\,s}^{-1}$')
a[1][0].errorbar(labelscd,cd.Jmax,yerr=[cd.eJmax2,cd.eJmax1],fmt='.',color='k')
a[1][0].errorbar(labelsuf,uf.Jmax,yerr=[uf.eJmax2,uf.eJmax1],fmt='.',color='k')
WE = wyns_formulaJ_error_sample(data,gamma=1.)
for i in range(len(data)):
a[1][0].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[2])
WE = wyns_formulaJ_error_sample(data,gamma=0.51)
for i in range(len(data)):
a[1][0].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[4])
WE = wyns_formulaJ_error_sample(data,gamma=1.,nfw=5.*data['R_half']/1000.)
for i in range(len(data)):
a[1][0].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[0])
add_thetas(a[1][0],labelrange,data.theta_max)
a[1][0].set_ylabel(r'$\log_{10}(J_\mathrm{max}/\,\mathrm{GeV^2\,cm}^{-5})$')
a[1][1].errorbar(labelscd,cd.Jmax.values-np.log10(2.),yerr=[cd.eJmax2,cd.eJmax1],fmt='.',label="",color='k')
a[1][1].errorbar(labelsuf,uf.Jmax.values-np.log10(2.),yerr=[uf.eJmax2,uf.eJmax1],fmt='.',label="",color='k')
WE = wyns_formulaJ_error_sample(data,gamma=1.,angle='Half')
for i in range(len(data)):
label=None
if(i==0):
label=r'$\gamma=1$'
a[1][1].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[2],label=label)
WE = wyns_formulaJ_error_sample(data,gamma=0.51,angle='Half')
for i in range(len(data)):
label=None
if(i==0):
label=r'$\gamma=0.51$'
a[1][1].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[4],label=label)
WE = wyns_formulaJ_error_sample(data,gamma=1.,angle='Half',nfw=5.*data['R_half']/1000.)
for i in range(len(data)):
label=None
if(i==0):
label=r'NFW'
a[1][1].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[0],label=label)
gammas = gs_gammas.T[23]
while(len(gammas)<len(data)):
gammas = np.append(gammas,0.8)
WE = wyns_formulaJ_error_sample(data,gammaarray=gammas,angle='Half')
for i in range(len(data)):
label=None
if(i==0):
label=r'$\gamma_\mathrm{GS}$'
a[1][1].fill_between([labelrange[i]-0.3,labelrange[i]+0.3], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=1.,facecolor="None",label=label)
add_thetas(a[1][1],labelrange,data.theta_half)
a[1][1].legend(loc="lower center",ncol=2, bbox_to_anchor=(0.5, 1.0))
a[1][1].set_ylabel(r'$\log_{10}(J_\mathrm{half}/\,\mathrm{GeV^2\,cm}^{-5})$')
a[1][2].errorbar(labelscd,cd.dJmax.values-np.log10(2.),yerr=[cd.eJmax2,cd.edJmax1],fmt='.',color='k')
a[1][2].errorbar(labelsuf,uf.dJmax.values-np.log10(2.),yerr=[uf.edJmax2,uf.edJmax1],fmt='.',color='k')
WE = wyns_formulaD_error_sample(data,gamma=1.)
for i in range(len(data)):
label=None
if(i==0):
label=r'$\gamma=1.$'
a[1][2].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[2],label=label)
WE = wyns_formulaD_error_sample(data,gamma=1.49)
for i in range(len(data)):
label=None
if(i==0):
label=r'$\gamma=1.49$'
a[1][2].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[4],label=label)
WE = wyns_formulaD_error_sample(data,gamma=1.,nfw=5.*data['R_half']/1000.)
for i in range(len(data)):
label=None
if(i==0):
label=r'NFW'
a[1][2].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[0],label=label)
WE = wyns_formulaD_error_sample(data,gammaarray=gammas,angle='Half')
for i in range(len(data)):
label=None
if(i==0):
label=r'$\gamma_\mathrm{GS}$'
a[1][2].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=1.,facecolor="None",label=label)
add_thetas(a[1][2],labelrange,data.dtheta_half)
a[1][2].legend(loc="lower center",ncol=2, bbox_to_anchor=(0.5, 1.0))
a[1][2].set_ylabel(r'$\log_{10}(D_\mathrm{half}/\,\mathrm{GeV\,cm}^{-2})$')
a[1][3].errorbar(labelscd,cd.Jhalf.values,yerr=[cd.eJhalf2,cd.eJhalf1],fmt='.',label="",color='k')
a[1][3].errorbar(labelsuf,uf.Jhalf.values,yerr=[uf.eJhalf2,uf.eJhalf1],fmt='.',label="",color='k')
WE = wyns_formulaJ_error_sample(data,gamma=1.,angle='Half' )
for i in range(len(data)):
label=None
if(i==0):
label=r'$\gamma=1$'
a[1][3].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[2],label=label)
WE = wyns_formulaJ_error_sample(data,gamma=0.51,angle='Half_05')
for i in range(len(data)):
label=None
if(i==0):
label=r'$\gamma=0.51$'
a[1][3].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[4],label=label)
WE = wyns_formulaJ_error_sample(data,gamma=1.,angle='Half_05',nfw=5.*data['R_half']/1000.)
for i in range(len(data)):
label=None
if(i==0):
label=r'NFW'
a[1][3].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[0],label=label)
gammas = gs_gammas.T[23]
while(len(gammas)<len(data)):
gammas = np.append(gammas,0.8)
WE = wyns_formulaJ_error_sample(data,gammaarray=gammas,angle='Half_05')
for i in range(len(data)):
label=None
if(i==0):
label=r'$\gamma_\mathrm{GS}$'
a[1][3].fill_between([labelrange[i]-0.3,labelrange[i]+0.3], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=1.,facecolor="None",label=label)
    add_thetas(a[1][3],labelrange,0.5*np.ones(len(data)))
a[1][3].legend(loc="lower center",ncol=2, bbox_to_anchor=(0.5, 1.0))
a[1][3].set_ylabel(r'$\log_{10}(J(0.5^\circ)/\,\mathrm{GeV^2\,cm}^{-5})$')
plt.savefig('dwarfs_data.pdf',bbox_inches='tight')
if __name__ == '__main__':
data = pd.read_csv('../data/data.dat',sep=' ')
make_table(data)
exit()
```
#### File: jfactors/spherical/spherical_Jfactors.py
```python
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from scipy.special import gamma as Gamma
from scipy.integrate import quad
G = 4.300918e-6 ## in units solar mass, km/s kpc
GEV2cm5toMsol2kpc5 = 2.2482330e-07
GEVcm2toMsolkpc2 = 8.5358230e-15
def integrate_J_spherical_alphabetagamma(thetamax,D,rho0,rs,alpha,beta,gamma,rt):
''' J for spherical alpha,beta,gamma model '''
def rho(r):
return np.power(r/rs,-gamma)*np.power(1+np.power(r/rs,alpha),((gamma-beta)/alpha))*np.sqrt(1-np.tanh(r/rt)**2)
def J(ll,th):
z = ll
b = np.tan(th)*D
x = np.sqrt(b*b+z*z)
return np.sin(th)*(rho(x)**2)
return np.log10(rho0*rho0*2.*np.pi*quad(lambda y: quad(lambda z: J(y,z), 0., thetamax)[0],-np.inf,np.inf)[0]/GEV2cm5toMsol2kpc5)
def integrate_J_farfield_spherical_alphabetagamma(thetamax,D,rho0,rs,alpha,beta,gamma,rt):
''' J for spherical alpha,beta,gamma model in far-field limit'''
def rho(r):
return np.power(r/rs,-gamma)*np.power(1+np.power(r/rs,alpha),((gamma-beta)/alpha))*np.sqrt(1-np.tanh(r/rt)**2)
def J(ll,b):
z = ll
x = np.sqrt(b*b+z*z)
return b*(rho(x)**2)
return np.log10(rho0*rho0*2.*np.pi*quad(lambda y: quad(lambda z: J(y,z), 0., thetamax*D)[0],-np.inf,np.inf)[0]/D/D/GEV2cm5toMsol2kpc5)
def integrate_D_spherical_alphabetagamma(thetamax,D,rho0,rs,alpha,beta,gamma,rt):
''' D for spherical alpha,beta,gamma model'''
def rho(r):
return np.power(r/rs,-gamma)*np.power(1+np.power(r/rs,alpha),((gamma-beta)/alpha))*np.sqrt(1-np.tanh(r/rt)**2)
def J(ll,th):
z = ll
b = np.tan(th)*D
x = np.sqrt(b*b+z*z)
return np.sin(th)*rho(x)
return np.log10(rho0*2.*np.pi*quad(lambda y: quad(lambda z: J(y,z), 0., thetamax)[0],-np.inf,np.inf)[0]/GEVcm2toMsolkpc2)
def integrate_D_farfield_spherical_alphabetagamma(thetamax,D,rho0,rs,alpha,beta,gamma,rt):
''' D for spherical alpha,beta,gamma model in far-field limit'''
def rho(r):
return np.power(r/rs,-gamma)*np.power(1+np.power(r/rs,alpha),((gamma-beta)/alpha))*np.sqrt(1-np.tanh(r/rt)**2)
def J(ll,b):
z = ll
x = np.sqrt(b*b+z*z)
return b*rho(x)
return np.log10(rho0*2.*np.pi*quad(lambda y: quad(lambda z: J(y,z), 0., thetamax*D)[0],-np.inf,np.inf)[0]/D/D/GEVcm2toMsolkpc2)
def integrate_rho_spherical_alphabetagamma(R,rho0,rs,alpha,beta,gamma,rt):
def rho(r):
return np.power(r/rs,-gamma)*np.power(1+np.power(r/rs,alpha),((gamma-beta)/alpha))*np.sqrt(1-np.tanh(r/rt)**2)
def J(x):
return x*x*rho(x)
return 4.*np.pi*rho0*quad(J, 0., R)[0]
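# Added note: the function above returns the mass enclosed within radius R,
# i.e. 4 pi rho0 \int_0^R r^2 rho(r) dr, in solar masses when rho0 is in
# Msol kpc^-3 and lengths are in kpc; e.g.
#   integrate_rho_spherical_alphabetagamma(0.5, 1e8, 0.25, 1., 3., 1., 10.)
# gives M(<0.5 kpc) for an NFW-like cusp (alpha,beta,gamma)=(1,3,1) truncated at 10 kpc.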
def asymmetric_gaussian_samples(mean,sigma,N=1):
''' sigmas = [lower error bar, upper error bar] '''
updown=(np.random.uniform(size=N)>0.5)
sigmas=[sigma[i] for i in updown]
return mean+(2*updown-1.)*np.fabs(np.random.normal(loc=0.,scale=sigmas))
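# Added illustration: asymmetric_gaussian_samples(10., [1., 2.], N=5) draws five
# samples from a dimidated Gaussian centred on 10, using the lower error bar
# (1) or the upper error bar (2) for each draw with probability 1/2.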
def barlow_asymmetric_gaussian_samples(mean,sigma,N=1):
## Taken from Barlow (2003)
## This produces very asymmetric looking distributions with sharp cut-offs
## on the smaller error side
## The bifurcated (or dimidated as Barlow corrects us) Gaussian
## (implemented above) is in my mind better.
alpha = .5*(sigma[1]-sigma[0])
sig = .5*(sigma[1]+sigma[0])
u = np.random.normal(loc=0.,scale=1.,size=N)
return sig*u+mean+alpha*u*u
def HernquistX(s):
"""
Computes X function from equations (33) & (34) of Hernquist (1990)
"""
if(s<0.):
raise ValueError("s must be positive in Hernquist X function")
elif(s<1.):
return np.log((1+np.sqrt(1-s*s))/s)/np.sqrt(1-s*s)
elif(s==1.):
return 1.
else:
return np.arccos(1./s)/np.sqrt(s*s-1)
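# Added note: in closed form X(s) = arcsech(s)/sqrt(1-s^2) for s<1 and
# arcsec(s)/sqrt(s^2-1) for s>1; both branches tend to 1 as s -> 1, so e.g.
# HernquistX(0.999) and HernquistX(1.001) are both close to HernquistX(1.) == 1.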
def wyns_formulaJ_NFW(rho0,r_s,distance,angle):
''' Analytic integration of J factor for NFW '''
Delta2 = r_s**2-distance**2*angle**2
X = distance*angle/r_s
J = 2.*distance*angle*(7.*distance*r_s**3*angle-4.*distance**3*r_s*angle**3+3.*np.pi*Delta2**2)+6./r_s*(2*Delta2**3-2*r_s**4*Delta2-distance**4*r_s**2*angle**4)*np.array(map(lambda s:HernquistX(s),X))
J *= np.pi*rho0**2*r_s**2/(3.*distance**2*Delta2**2)
return np.log10(J/GEV2cm5toMsol2kpc5)
def wyns_formulaJ_NFW_data(sigma_los,r_half,distance,angle,r_s,walker_or_wolf="wolf"):
'''
J factor from M_half for NFW profile
sigma_los in km/s, r_half in pc, distance in kpc, angle in deg, r_s in kpc
'''
r_half=0.001*r_half
angle=np.deg2rad(angle)
delta_Omega = 2.*np.pi*(1-np.cos(angle))
if(walker_or_wolf=="wolf"):
Mhalf = 4.*sigma_los**2*r_half/G
r_half=4./3.*r_half
rho0 = Mhalf/4./np.pi/r_s**3/(np.log((r_s+r_half)/r_s)-r_half/(r_s+r_half))
return wyns_formulaJ_NFW(rho0,r_s,distance,angle)
else:
Mhalf = 2.5*sigma_los**2*r_half/G
rho0 = Mhalf/4./np.pi/r_s**3/(np.log((r_s+r_half)/r_s)-r_half/(r_s+r_half))
return wyns_formulaJ_NFW(rho0,r_s,distance,angle)
def wyns_formulaD_NFW(rho0,r_s,distance,angle):
''' Analytic integration of J factor for NFW '''
X = distance*angle/r_s
D = np.log(X/2.)+np.array(map(lambda s:HernquistX(s),X))
D *= 4.*np.pi*rho0*r_s**3/distance**2
return np.log10(D/GEVcm2toMsolkpc2)
def wyns_formulaD_NFW_data(sigma_los,r_half,distance,angle,r_s,walker_or_wolf="wolf"):
'''
D factor from M_half for NFW profile
sigma_los in km/s, r_half in pc, distance in kpc, angle in deg, r_s in kpc
'''
r_half=0.001*r_half
angle=np.deg2rad(angle)
delta_Omega = 2.*np.pi*(1-np.cos(angle))
if(walker_or_wolf=="wolf"):
Mhalf = 4.*sigma_los**2*r_half/G
r_half=4./3.*r_half
rho0 = Mhalf/4./np.pi/r_s**3/(np.log((r_s+r_half)/r_s)-r_half/(r_s+r_half))
return wyns_formulaD_NFW(rho0,r_s,distance,angle)
else:
Mhalf = 2.5*sigma_los**2*r_half/G
rho0 = Mhalf/4./np.pi/r_s**3/(np.log((r_s+r_half)/r_s)-r_half/(r_s+r_half))
return wyns_formulaD_NFW(rho0,r_s,distance,angle)
def wyns_formulaJ(sigma_los,r_half,distance,angle,gamma=1.,walker_or_wolf="wolf"):
'''
J factor from M_half for power-law profile (slope = gamma)
sigma_los in km/s, r_half in pc, distance in kpc, angle in deg, r_s in kpc
'''
r_half=0.001*r_half
angle=np.deg2rad(angle)
delta_Omega = 2.*np.pi*(1-np.cos(angle))
fac = (2.5/4.)**2
if(walker_or_wolf=="wolf"):
fac=(0.25*(27./16.)*(4./3.)**gamma)**2
if(gamma!=1. and gamma>.5 and gamma<1.5):
factor = 2.*(3.-gamma)**2*Gamma(gamma-0.5)/(np.pi**(2-gamma)*(3.-2*gamma)*Gamma(gamma))
return np.log10(factor*sigma_los**4*delta_Omega**(1.5-gamma)/G**2*distance**(1-2.*gamma)*r_half**(2*gamma-4.)/GEV2cm5toMsol2kpc5)+np.log10(fac)
else:
return np.log10(8./np.sqrt(np.pi)*sigma_los**4*np.sqrt(delta_Omega)/G**2/distance/(r_half**2)/GEV2cm5toMsol2kpc5)+np.log10(fac)
def wyns_formulaD(sigma_los,r_half,distance,angle,gamma=1.,walker_or_wolf="wolf"):
'''
D factor from M_half for power-law profile (slope = gamma)
sigma_los in km/s, r_half in pc, distance in kpc, angle in deg, r_s in kpc
'''
r_half=0.001*r_half
angle=np.deg2rad(angle)
delta_Omega = 2.*np.pi*(1-np.cos(angle))
fac = (2.5/4.)
if(walker_or_wolf=="wolf"):
fac=(0.25*(27./16.)*(4./3.)**gamma)
if(gamma>1. and gamma<3.):
factor = 2.*Gamma(gamma*0.5-0.5)/(np.pi**(1-0.5*gamma)*Gamma(gamma*0.5))
return np.log10(factor*sigma_los**2*delta_Omega**(1.5-gamma*0.5)/G*distance**(1.-gamma)*r_half**(gamma-2.)/GEVcm2toMsolkpc2)+np.log10(fac)
def sample_errorsJ(sigma_los,esigma_los,r_half,er_half,distance,edistance,angle,eangle,gamma=1.,N=1000,nfw=-1.,walker_or_wolf="wolf"):
''' Samples from sigma_los (km/s), r_half (pc), distance (kpc) and angle (deg) pdfs (gaussians) and returns median J value and pm 1 sigma '''
if(esigma_los[0]==0.):
## In this case sigma_los is the 95% upper limit. We sample from a uniform distribution from 0.1 km/s to sigma_los/0.95
s=np.random.uniform(0.1,sigma_los/0.95,N)
else:
# s=asymmetric_gaussian_samples(sigma_los,esigma_los,N)
s=np.exp(asymmetric_gaussian_samples(np.log(sigma_los),esigma_los/sigma_los,N))
# r=asymmetric_gaussian_samples(r_half,er_half,N)
r=np.exp(asymmetric_gaussian_samples(np.log(r_half),er_half/r_half,N))
a=np.exp(asymmetric_gaussian_samples(np.log(angle),eangle/angle,N))
d=np.random.normal(loc=distance,scale=edistance,size=N)
if(nfw>0.):
wf = wyns_formulaJ_NFW_data(s,r,d,a,nfw,walker_or_wolf)
else:
wf = wyns_formulaJ(s,r,d,a,gamma,walker_or_wolf)
mean=np.nanmedian(wf)
return np.array([mean,mean-np.nanpercentile(wf,15.87),np.nanpercentile(wf,84.13)-mean])
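# Added illustration (placeholder numbers, not taken from the data table): a
# single-object call might look like
#   sample_errorsJ(3.2, [0.5, 0.5], 50., [5., 5.], 30., 2., 0.5, [1e-15, 1e-15],
#                  gamma=1., N=1000)
# which returns [median log10 J, lower 1-sigma offset, upper 1-sigma offset]
# for sigma_los in km/s, r_half in pc, distance in kpc and angle in degrees.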
def wyns_formulaJ_error_sample(data,gamma=1.,gammaarray=None,angle='Max',nfw=[0.],N=1000,geo_factor=True,walker_or_wolf="wolf"):
''' Performs J sampling for a set of data '''
if(len(nfw)<len(data)):
nfw=-1.*np.ones(len(data))
angles=data.theta_max
angerrs=[[1e-15,1e-15] for i in range(len(data))]
if(angle=='Half'):
angles=data.theta_half
angerrs=[[data.etheta_half2[i],data.etheta_half1[i]] for i in range(len(data))]
if(angle=='Half_05'):
angles=0.5*np.ones(len(data))
angerrs=[[1e-15,1e-15] for i in range(len(data))]
geof=np.ones(len(data))
if geo_factor:
geof = np.sqrt(1.-data.ellip)
if(isinstance(gammaarray,np.ndarray)):
return np.array([
sample_errorsJ(data.sigma_los[i],
[data.esigma_los2[i],data.esigma_los1[i]],
data.R_half[i]*geof[i],
[data.eR_half2[i]*geof[i],
data.eR_half1[i]*geof[i]],
data.D[i],
data.eD[i],
angles[i],
angerrs[i],
gammaarray[i],
N=N,
nfw=nfw[i],
walker_or_wolf=walker_or_wolf) for i in range(len(data))])
return np.array([sample_errorsJ(data.sigma_los[i],
[data.esigma_los2[i],data.esigma_los1[i]],
data.R_half[i]*geof[i],
[data.eR_half2[i]*geof[i],
data.eR_half1[i]*geof[i]],
data.D[i],
data.eD[i],
angles[i],
angerrs[i],
gamma,
N=N,
nfw=nfw[i],
walker_or_wolf=walker_or_wolf) for i in range(len(data))])
def sample_errorsD(sigma_los,esigma_los,r_half,er_half,distance,edistance,angle,eangle,gamma=1.,N=1000,nfw=-1.,walker_or_wolf="wolf"):
''' Samples from sigma_los (km/s), r_half (pc), distance (kpc) and angle (deg) pdfs (gaussians) and returns median D value and pm 1 sigma '''
if(esigma_los[0]==0.):
## In this case sigma_los is the 95% upper limit. We sample from a uniform distribution from 0.1 km/s to sigma_los/0.95
s=np.random.uniform(0.1,sigma_los,N)
else:
# s=asymmetric_gaussian_samples(sigma_los,esigma_los,N)
s=np.exp(asymmetric_gaussian_samples(np.log(sigma_los),esigma_los/sigma_los,N))
# r=asymmetric_gaussian_samples(r_half,er_half,N)
r=np.exp(asymmetric_gaussian_samples(np.log(r_half),er_half/r_half,N))
a=np.exp(asymmetric_gaussian_samples(np.log(angle),eangle/angle,N))
d=np.random.normal(loc=distance,scale=edistance,size=N)
if(nfw>0.):
wf = wyns_formulaD_NFW_data(s,r,d,a,nfw,walker_or_wolf)
else:
wf = wyns_formulaD(s,r,d,a,gamma,walker_or_wolf)
mean=np.nanmedian(wf)
return np.array([mean,mean-np.nanpercentile(wf,15.87),np.nanpercentile(wf,84.13)-mean])
def wyns_formulaD_error_sample(data,gamma=1.,gammaarray=None,angle='Max',nfw=[0.],N=1000,geo_factor=True,walker_or_wolf="wolf"):
''' Performs D sampling for a set of data '''
if(len(nfw)<len(data)):
nfw=-1.*np.ones(len(data))
angles=data.theta_max
angerrs=[[1e-15,1e-15] for i in range(len(data))]
if(angle=='Half'):
angles=data.dtheta_half
angerrs=[[data.edtheta_half2[i],data.edtheta_half1[i]] for i in range(len(data))]
if(angle=='Half_05'):
angles=0.5*np.ones(len(data))
angerrs=[[1e-15,1e-15] for i in range(len(data))]
geof=np.ones(len(data))
if geo_factor:
geof = np.sqrt(1.-data.ellip)
if(isinstance(gammaarray,np.ndarray)):
return np.array([
sample_errorsD(data.sigma_los[i],
[data.esigma_los2[i],data.esigma_los1[i]],
data.R_half[i]*geof[i],
[data.eR_half2[i]*geof[i],
data.eR_half1[i]*geof[i]],
data.D[i],
data.eD[i],
angles[i],
angerrs[i],
gammaarray[i],
N=N,
nfw=nfw[i],
walker_or_wolf=walker_or_wolf) for i in range(len(data))])
return np.array([sample_errorsD(data.sigma_los[i],
[data.esigma_los2[i],data.esigma_los1[i]],
data.R_half[i]*geof[i],
[data.eR_half2[i]*geof[i],
data.eR_half1[i]*geof[i]],
data.D[i],
data.eD[i],
angles[i],
angerrs[i],
gamma,
N=N,
nfw=nfw[i],
walker_or_wolf=walker_or_wolf) for i in range(len(data))])
# def add_thetas(ax,xrang,thetalist):
# ylim=ax.get_ylim()
# ax.set_ylim(ylim[0]-0.5,ylim[1])
# for x,t in zip(xrang,thetalist):
# ax.annotate(str(t)+r'$^\circ$',xy=(x,ylim[0]),horizontalalignment='center',verticalalignment='bottom',rotation=90)
# def make_table(data):
# WEJ2 = wyns_formulaJ_error_sample(data,gamma=1.,angle='Half_05',N=10000,nfw=5.*data['R_half']/1000.)
# WED2 = wyns_formulaD_error_sample(data,gamma=1.,angle='Half_05',N=10000,nfw=5.*data['R_half']/1000.)
# WEJ3 = wyns_formulaJ_error_sample(data,gamma=1.,angle='Max',N=10000,nfw=5.*data['R_half']/1000.)
# WED3 = wyns_formulaD_error_sample(data,gamma=1.,angle='Max',N=10000,nfw=5.*data['R_half']/1000.)
# # outfile=open('dwarfs_Jfactors.dat','w')
# # outfile.write('\\begin{tabular}{lccccccc}\n')
# # outfile.write('\\hline\n\\hline\n')
# # outfile.write('Name & $\\theta_\mathrm{max}$ & $\\theta_{0.5}$ & $\\theta_{0.5, \mathrm{decay}}$ & $\log_{10} J(\\theta_\mathrm{max})$ & $\log_{10} J(0.5^\circ)$ & $\log_{10} D(\\theta_\mathrm{max})$ & $\log_{10} D(0.5^\circ)$\\\\ \n')
# # outfile.write('& [$^\circ$] & [$^\circ$] & [$^\circ$] & [$\mathrm{GeV^2\,cm}^{-5}$] & [$\mathrm{GeV^2\,cm}^{-5}$] & [$\mathrm{GeV\,cm}^{-2}$] & [$\mathrm{GeV\,cm}^{-2}$]\\\\\n')
# # outfile.write('\\hline\n')
# # for i in range(len(WEJ)):
# # string= str(data['Name'][i])+" & $"+\
# # str(data['theta_max'][i])+"$&$"+\
# # str(data['theta_half'][i])+"$&$"+\
# # str(data['dtheta_half'][i])+"$&"+\
# # "$%0.2f_{-%0.2f}^{+%0.2f}$&"%(WEJ3[i][0],WEJ3[i][1],WEJ3[i][2])
# # if(i>21):
# # string+="-&"
# # else:
# # string+="$%0.2f_{-%0.2f}^{+%0.2f}$&"%(WEJ2[i][0],WEJ2[i][1],WEJ2[i][2])
# # string+="$%0.2f_{-%0.2f}^{+%0.2f}$&"%(WED3[i][0],WED3[i][1],WED3[i][2])
# # if(i>21):
# # string+="-&"
# # else:
# # string+="$%0.2f_{-%0.2f}^{+%0.2f}$"%(WED2[i][0],WEJ3[i][1],WEJ3[i][2])+"\\\\\n"
# # if(i==7 or i==22):
# # outfile.write('\\hline\n')
# # outfile.write(string)
# # outfile.write('\\hline\n')
# # outfile.write('\end{tabular}\n')
# # outfile.close()
# outfile=open('dwarfs_Jfactors.dat','w')
# outfile.write('\\begin{tabular}{lccccc}\n')
# outfile.write('\\hline\n\\hline\n')
# outfile.write('Name & $\\theta_\mathrm{max}$ & $\log_{10} J(\\theta_\mathrm{max})$ & $\log_{10} J(0.5^\circ)$ & $\log_{10} D(\\theta_\mathrm{max})$ & $\log_{10} D(0.5^\circ)$\\\\ \n')
# outfile.write('& [$^\circ$] & [$\mathrm{GeV^2\,cm}^{-5}$] & [$\mathrm{GeV^2\,cm}^{-5}$] & [$\mathrm{GeV\,cm}^{-2}$] & [$\mathrm{GeV\,cm}^{-2}$]\\\\\n')
# outfile.write('\\hline\n')
# for i in range(len(WEJ2)):
# string= str(data['Name'][i])+" & $"+\
# str(data['theta_max'][i])+"$&"+\
# "$%0.2f_{-%0.2f}^{+%0.2f}$&"%(WEJ3[i][0],WEJ3[i][1],WEJ3[i][2])
# if(i>21):
# string+="-&"
# else:
# string+="$%0.2f_{-%0.2f}^{+%0.2f}$&"%(WEJ2[i][0],WEJ2[i][1],WEJ2[i][2])
# string+="$%0.2f_{-%0.2f}^{+%0.2f}$&"%(WED3[i][0],WED3[i][1],WED3[i][2])
# if(i>21):
# string+="-"+"\\\\\n"
# else:
# string+="$%0.2f_{-%0.2f}^{+%0.2f}$"%(WED2[i][0],WEJ3[i][1],WEJ3[i][2])+"\\\\\n"
# if(i==7 or i==22):
# outfile.write('\\hline\n')
# outfile.write(string)
# outfile.write('\\hline\n')
# outfile.write('\end{tabular}\n')
# outfile.close()
# if __name__ == '__main__':
# data = pd.read_csv('data.dat',sep=' ')
# make_table(data)
# gs_gammas=np.genfromtxt('geringer_sameth_gamma.dat',skip_header=49)
# # for i in range(len(gs_gammas)):
# # if(gs_gammas[i][23]<0.5):
# # gs_gammas[i][23]=0.50005
# cd=data[data.Class=='CD']
# uf=data[data.Class=='UF']
# labelrange=np.linspace(0.,len(data),len(data))
# labelscd=labelrange[:len(cd)]
# labelsuf=labelrange[len(cd):]
# f,a=plt.subplots(2,4,figsize=(16,8))
# plt.subplots_adjust(hspace=0.5)
# for ai in a:
# for aj in ai:
# aj.set_xticks(labelrange)
# aj.set_xticklabels(data.Name.values,rotation=90)
# aj.set_xlim(labelrange[0]-1,labelrange[-1]+1)
# for i in a[1]:
# ls=i.axvline(labelscd[-1]+.5,c='k',ls='dashed')
# ls.set_dashes((2,1))
# ls=i.axvline(labelsuf[13]+.5,c='k',ls='dashed')
# ls.set_dashes((2,1))
# a[0][0].errorbar(labelscd,cd.D,yerr=cd.eD,fmt='.')
# a[0][0].errorbar(labelsuf,uf.D.values,yerr=uf.eD.values,fmt='.')
# a[0][0].set_ylabel(r'Distance/kpc')
# a[0][1].errorbar(labelscd,cd.R_half,yerr=[cd.eR_half2,cd.eR_half1],fmt='.')
# a[0][1].errorbar(labelsuf,uf.R_half,yerr=[uf.eR_half2,uf.eR_half1],fmt='.')
# a[0][1].set_ylabel(r'$R_{\mathrm{half}}/\mathrm{pc}$')
# a[0][2].errorbar(labelscd,cd.sigma_los,yerr=[cd.esigma_los2,cd.esigma_los1],fmt='.')
# a[0][2].errorbar(labelsuf,uf.sigma_los,yerr=[uf.esigma_los2,uf.esigma_los1],fmt='.')
# a[0][2].arrow(labelsuf[9],uf.sigma_los.values[9],0.,-0.5,fc=sns.color_palette()[1],ec=sns.color_palette()[1],head_length=0.2,head_width=0.3)
# a[0][2].arrow(labelsuf[15],uf.sigma_los.values[15],0.,-0.5,fc=sns.color_palette()[1],ec=sns.color_palette()[1],head_length=0.2,head_width=0.3)
# a[0][2].arrow(labelsuf[17],uf.sigma_los.values[17],0.,-0.5,fc=sns.color_palette()[1],ec=sns.color_palette()[1],head_length=0.2,head_width=0.3)
# a[0][2].set_ylabel(r'$\sigma_{\mathrm{los}}/\mathrm{km\,s}^{-1}$')
# a[1][0].errorbar(labelscd,cd.Jmax,yerr=[cd.eJmax2,cd.eJmax1],fmt='.',color='k')
# a[1][0].errorbar(labelsuf,uf.Jmax,yerr=[uf.eJmax2,uf.eJmax1],fmt='.',color='k')
# WE = wyns_formulaJ_error_sample(data,gamma=1.)
# for i in range(len(data)):
# a[1][0].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[2])
# # WE = wyns_formulaJ_error_sample(data,gamma=0.75)
# # for i in range(len(data)):
# # a[1][0].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[3])
# WE = wyns_formulaJ_error_sample(data,gamma=0.51)
# for i in range(len(data)):
# a[1][0].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[4])
# WE = wyns_formulaJ_error_sample(data,gamma=1.,nfw=5.*data['R_half']/1000.)
# for i in range(len(data)):
# a[1][0].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[0])
# add_thetas(a[1][0],labelrange,data.theta_max)
# a[1][0].set_ylabel(r'$\log_{10}(J_\mathrm{max}/\,\mathrm{GeV^2\,cm}^{-5})$')
# a[1][1].errorbar(labelscd,cd.Jmax.values-np.log10(2.),yerr=[cd.eJmax2,cd.eJmax1],fmt='.',label="",color='k')
# a[1][1].errorbar(labelsuf,uf.Jmax.values-np.log10(2.),yerr=[uf.eJmax2,uf.eJmax1],fmt='.',label="",color='k')
# WE = wyns_formulaJ_error_sample(data,gamma=1.,angle='Half')
# for i in range(len(data)):
# label=None
# if(i==0):
# label=r'$\gamma=1$'
# a[1][1].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[2],label=label)
# # WE = wyns_formulaJ_error_sample(data,gamma=0.75,angle='Half')
# # for i in range(len(data)):
# # label=None
# # if(i==0):
# # label=r'$\gamma=0.75$'
# # a[1][1].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[3],label=label)
# WE = wyns_formulaJ_error_sample(data,gamma=0.51,angle='Half')
# for i in range(len(data)):
# label=None
# if(i==0):
# label=r'$\gamma=0.51$'
# a[1][1].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[4],label=label)
# WE = wyns_formulaJ_error_sample(data,gamma=1.,angle='Half',nfw=5.*data['R_half']/1000.)
# for i in range(len(data)):
# label=None
# if(i==0):
# label=r'NFW'
# a[1][1].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[0],label=label)
# gammas = gs_gammas.T[23]
# while(len(gammas)<len(data)):
# gammas = np.append(gammas,0.8)
# WE = wyns_formulaJ_error_sample(data,gammaarray=gammas,angle='Half')
# for i in range(len(data)):
# label=None
# if(i==0):
# label=r'$\gamma_\mathrm{GS}$'
# a[1][1].fill_between([labelrange[i]-0.3,labelrange[i]+0.3], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=1.,facecolor="None",label=label)
# add_thetas(a[1][1],labelrange,data.theta_half)
# a[1][1].legend(loc="lower center",ncol=2, bbox_to_anchor=(0.5, 1.0))
# a[1][1].set_ylabel(r'$\log_{10}(J_\mathrm{half}/\,\mathrm{GeV^2\,cm}^{-5})$')
# a[1][2].errorbar(labelscd,cd.dJmax.values-np.log10(2.),yerr=[cd.edJmax2,cd.edJmax1],fmt='.',color='k')
# a[1][2].errorbar(labelsuf,uf.dJmax.values-np.log10(2.),yerr=[uf.edJmax2,uf.edJmax1],fmt='.',color='k')
# WE = wyns_formulaD_error_sample(data,gamma=1.)
# for i in range(len(data)):
# label=None
# if(i==0):
# label=r'$\gamma=1.$'
# a[1][2].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[2],label=label)
# # WE = wyns_formulaD_error_sample(data,gamma=1.25)
# # for i in range(len(data)):
# # label=None
# # if(i==0):
# # label=r'$\gamma=1.25$'
# # a[1][2].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[3],label=label)
# WE = wyns_formulaD_error_sample(data,gamma=1.49)
# for i in range(len(data)):
# label=None
# if(i==0):
# label=r'$\gamma=1.49$'
# a[1][2].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[4],label=label)
# WE = wyns_formulaD_error_sample(data,gamma=1.,nfw=5.*data['R_half']/1000.)
# for i in range(len(data)):
# label=None
# if(i==0):
# label=r'NFW'
# a[1][2].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[0],label=label)
# WE = wyns_formulaD_error_sample(data,gammaarray=gammas,angle='Half')
# for i in range(len(data)):
# label=None
# if(i==0):
# label=r'$\gamma_\mathrm{GS}$'
# a[1][2].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=1.,facecolor="None",label=label)
# add_thetas(a[1][2],labelrange,data.dtheta_half)
# a[1][2].legend(loc="lower center",ncol=2, bbox_to_anchor=(0.5, 1.0))
# a[1][2].set_ylabel(r'$\log_{10}(D_\mathrm{half}/\,\mathrm{GeV\,cm}^{-2})$')
# a[1][3].errorbar(labelscd,cd.Jhalf.values,yerr=[cd.eJhalf2,cd.eJhalf1],fmt='.',label="",color='k')
# a[1][3].errorbar(labelsuf,uf.Jhalf.values,yerr=[uf.eJhalf2,uf.eJhalf1],fmt='.',label="",color='k')
# WE = wyns_formulaJ_error_sample(data,gamma=1.,angle='Half' )
# for i in range(len(data)):
# label=None
# if(i==0):
# label=r'$\gamma=1$'
# a[1][3].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[2],label=label)
# # WE = wyns_formulaJ_error_sample(data,gamma=0.75,angle='Half_05')
# # for i in range(len(data)):
# # label=None
# # if(i==0):
# # label=r'$\gamma=0.75$'
# # a[1][3].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[3],label=label)
# WE = wyns_formulaJ_error_sample(data,gamma=0.51,angle='Half_05')
# for i in range(len(data)):
# label=None
# if(i==0):
# label=r'$\gamma=0.51$'
# a[1][3].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[4],label=label)
# WE = wyns_formulaJ_error_sample(data,gamma=1.,angle='Half_05',nfw=5.*data['R_half']/1000.)
# for i in range(len(data)):
# label=None
# if(i==0):
# label=r'NFW'
# a[1][3].fill_between([labelrange[i]-0.2,labelrange[i]+0.2], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=0.5,edgecolor="None",color=sns.color_palette()[0],label=label)
# gammas = gs_gammas.T[23]
# while(len(gammas)<len(data)):
# gammas = np.append(gammas,0.8)
# WE = wyns_formulaJ_error_sample(data,gammaarray=gammas,angle='Half_05')
# for i in range(len(data)):
# label=None
# if(i==0):
# label=r'$\gamma_\mathrm{GS}$'
# a[1][3].fill_between([labelrange[i]-0.3,labelrange[i]+0.3], [WE[i][0]-WE[i][1],WE[i][0]-WE[i][1]], [WE[i][0]+WE[i][2],WE[i][0]+WE[i][2]],alpha=1.,facecolor="None",label=label)
# add_thetas(a[1][3],labelrange,np.ones(len(data))*0.5)
# a[1][3].legend(loc="lower center",ncol=2, bbox_to_anchor=(0.5, 1.0))
# a[1][3].set_ylabel(r'$\log_{10}(J(0.5^\circ)/\,\mathrm{GeV^2\,cm}^{-5})$')
# plt.savefig('dwarfs_data.pdf',bbox_inches='tight')
``` |
{
"source": "jls943/sponge_evol_dynamics",
"score": 4
} |
#### File: jls943/sponge_evol_dynamics/matching_orthos.py
```python
import argparse
import re
import Bio.SeqIO
def matching_orthos():
#! /usr/bin/env python3
#a function to pull the gene tree files that correspond to a list of gene trees of interest
#could be anything of interest in a list of gene trees.
#this script pulls gene trees based on identity number, but pull_short_gene_trees.py will work on order number within the directory
import argparse
import os
import shutil
import re
def psgt():
#creating the set I'll need inside the loop
certain_set = set()
#putting all of the numbers of interest into a set for easy comparison
try:
with open("{0}".format(args.tree_nums), "r") as interesting:
print("opened interesting gene tree num file")
for line in interesting:
tree = line.strip()
certain_set.add(tree)
print("made set of gene tree nums")
try:
os.mkdir("{0}".format(args.new_dir))
except FileExistsError:
print("This directory already exists. Please provide a different name")
#scanning through the directory of gene trees and finding the ones that match up with the numbers
#in the list file provided. Copying these trees to a new directory
for item in os.scandir("{0}".format(args.tree_dir)):
just_name = re.match("(Mus_musculus\|\d+_rename)\.phylip\.treefile", "{0}".format(item.name))
if just_name.group(1) in certain_set:
#copy the ones that match into the directory you made earlier
destination = os.path.join(args.new_dir, item.name)
shutil.copy(item.path, destination)
except IOError:
print("problem reading file")
parser = argparse.ArgumentParser(description = "arguments for filtering OGs to only those with a given number of taxa")
parser.add_argument("-t", "--tree_nums", required = True, help = "list of gene tree numbers of interest")
parser.add_argument("-d", "--tree_dir", required = True, help = "path to a directory with gene trees in it")
parser.add_argument("-n", "--new_dir", required = True, help = "path to a directory for the desired gene trees")
args = parser.parse_args()
psgt()
#/mnt/lustre/macmaneslab/jlh1023/pipeline_dev/pipeline_scripts/pull_certain_gene_trees.py \
#-t /mnt/lustre/macmaneslab/jlh1023/phylo_qual/actual_final/comparisons/common_to_both.txt \
#-d /mnt/lustre/macmaneslab/jlh1023/phylo_qual/actual_final/good/trees/gene_trees/ \
#-n /mnt/lustre/macmaneslab/jlh1023/phylo_qual/actual_final/good/trees/good_common_gene_trees
#! /usr/bin/env python3
import argparse
import Bio.SeqIO
#function to pull members of one-to-one orthogroups from a protein fasta file
def pull():
try:
#creating an empty set and dictionary to hold orthogroups
ogs = set()
ols = {}
prot_set = set()
with open("{}".format(args.cluster), "r") as cluster_file:
with open("{}".format(args.ortho), "r") as ortho_file:
#saving all the orthogroup names in a set
print("Getting orthogroup names from kinfin file")
for line in cluster_file:
line = line.split("\t")
if line[0].startswith("OG"):
ogs.add(line[0])
print("Pulled orthogroup names from kinfin file")
#populating the dictionary with keys = orthogroup names,
#and values = a list of the proteins in that orthogroup
#also making a set that contains all the protein names
print("Getting protein names from orthofinder file")
for line in ortho_file:
if line.startswith("OG"):
#stripping of white space and splitting on tabs
line = line.rstrip()
line = line.lstrip()
line = line.split("\t")
#if the OG name is in the set, put all the proteins into a new set (and dictionary)
if line[0] in ogs:
ols.setdefault(line[0], line[1:])
for protein in line[1:]:
protein = protein.split(", ")
for indv in protein:
if indv != "":
prot_set.add(indv)
print("Pulled {0} protein names from orthofinder file".format(len(prot_set)))
print("Parsing the fasta file")
#running through the catted fasta of all the proteins and pulling those seqs that
#match the ones in the set.
prot_seqs = []
prot_names = set()
for record in Bio.SeqIO.parse("{}".format(args.prots), "fasta"):
if record.id in prot_set:
cur_prot = Bio.SeqRecord.SeqRecord(id = record.id, seq = record.seq, description = "")
cur_prot_name = record.id
prot_seqs.append(cur_prot)
prot_names.add(cur_prot_name)
test_set = prot_set.difference(prot_names)
print(len(test_set))
print(test_set)
Bio.SeqIO.write(prot_seqs, "{}".format(args.out), "fasta")
except IOError:
print("Problem reading files")
parser = argparse.ArgumentParser(description = "arguments for pulling 1-to-1 orthologues from a fasta")
parser.add_argument("-c", "--cluster", required = True, help = "all.all.cluster_1to1s.txt provided by kinfin")
parser.add_argument("-r", "--ortho", required = True, help = "Orthogroups.csv provided by orthofinder")
parser.add_argument("-p", "--prots", required = True, help = "fasta file containing all proteins in the orthofinder analysis")
parser.add_argument("-o", "--out", required = True, help = "name of the output fasta file")
args = parser.parse_args()
pull()
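#Example invocation of this second script (hypothetical file names, mirroring the style of the commented example above):
#python3 matching_orthos.py \
#-c all.all.cluster_1to1s.txt \
#-r Orthogroups.csv \
#-p all_proteins.fasta \
#-o one_to_one_orthologues.fasta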
``` |
{
"source": "jlsajfj/NBT",
"score": 3
} |
#### File: NBT/examples/map.py
```python
import os, sys
import math
from struct import pack
# local module
try:
import nbt
except ImportError:
# nbt not in search path. Let's see if it can be found in the parent folder
extrasearchpath = os.path.realpath(os.path.join(__file__,os.pardir,os.pardir))
if not os.path.exists(os.path.join(extrasearchpath,'nbt')):
raise
sys.path.append(extrasearchpath)
from nbt.region import RegionFile
from nbt.chunk import Chunk
from nbt.world import WorldFolder,McRegionWorldFolder
# PIL module (not build-in)
try:
from PIL import Image
except ImportError:
# PIL not in search path. Let's see if it can be found in the parent folder
sys.stderr.write("Module PIL/Image not found. Pillow (a PIL fork) can be found at http://python-imaging.github.io/\n")
# Note: it may also be possible that PIL is installed, but JPEG support is disabled or broken
sys.exit(70) # EX_SOFTWARE
def get_heightmap_image(chunk, buffer=False, gmin=False, gmax=False):
points = chunk.blocks.generate_heightmap(buffer, True)
# Normalize the points
hmin = min(points) if (gmin == False) else gmin # Allow setting the min/max explicitly, in case this is part of a bigger map
hmax = max(points) if (gmax == False) else gmax
hdelta = hmax-hmin+0.0
pixels = ""
for y in range(16):
for x in range(16):
# pix X => mc -Z
# pix Y => mc X
offset = (15-x)*16+y
height = int((points[offset]-hmin)/hdelta*255)
if (height < 0): height = 0
if (height > 255): height = 255
pixels += pack(">B", height)
    im = Image.frombytes('L', (16,16), pixels)  # Image.fromstring() was removed in Pillow
return im
# List of blocks to ignore
# Uncomment all the lines to show underground structures
# TODO: move this list into a separate config file
block_ignore = [
'air', # At least this one
# 'cave_air', 'water', 'lava', 'snow', 'ice',
# 'grass', 'tall_grass', 'dead_bush',
# 'seagrass', 'tall_seagrass', 'kelp', 'kelp_plant',
# 'dandelion', 'poppy', 'oxeye_daisy', 'white_tulip',
# 'azure_bluet', 'lilac', 'rose_bush', 'peony', 'blue_orchid',
# 'lily_pad', 'sugar_cane', 'vine', 'pumpkin', 'cactus',
# 'wheat', 'potatoes', 'beetroots', 'carrots',
# 'oak_leaves', 'dark_oak_leaves', 'birch_leaves',
# 'acacia_leaves', 'spruce_leaves',
# 'oak_log', 'dark_oak_log', 'birch_log',
# 'acacia_log', 'spruce_log',
# 'brown_mushroom', 'red_mushroom',
# 'brown_mushroom_block', 'red_mushroom_block', 'mushroom_stem',
# 'grass_block', 'grass_path', 'farmland', 'dirt',
# 'stone', 'sand', 'gravel', 'clay',
# 'sandstone', 'diorite', 'andesite', 'granite', 'obsidian',
# 'coal_ore', 'iron_ore', 'gold_ore', 'diamond_ore',
# 'redstone_ore', 'lapis_ore', 'emerald_ore',
# 'cobweb',
]
# Map of block colors from names
# Legacy block numeric identifiers are now hidden by Block class
# and mapped to alpha identifiers in best effort
# TODO: move this map into a separate config file
block_colors = {
'acacia_leaves': {'h':114, 's':64, 'l':22 },
'acacia_log': {'h':35, 's':93, 'l':30 },
'air': {'h':0, 's':0, 'l':0 },
'andesite': {'h':0, 's':0, 'l':32 },
'azure_bluet': {'h':0, 's':0, 'l':100},
'bedrock': {'h':0, 's':0, 'l':10 },
'birch_leaves': {'h':114, 's':64, 'l':22 },
'birch_log': {'h':35, 's':93, 'l':30 },
'blue_orchid': {'h':0, 's':0, 'l':100},
'bookshelf': {'h':0, 's':0, 'l':100},
'brown_mushroom': {'h':0, 's':0, 'l':100},
'brown_mushroom_block': {'h':0, 's':0, 'l':100},
'cactus': {'h':126, 's':61, 'l':20 },
'cave_air': {'h':0, 's':0, 'l':0 },
'chest': {'h':0, 's':100, 'l':50 },
'clay': {'h':7, 's':62, 'l':23 },
'coal_ore': {'h':0, 's':0, 'l':10 },
'cobblestone': {'h':0, 's':0, 'l':25 },
'cobblestone_stairs': {'h':0, 's':0, 'l':25 },
'crafting_table': {'h':0, 's':0, 'l':100},
'dandelion': {'h':60, 's':100, 'l':60 },
'dark_oak_leaves': {'h':114, 's':64, 'l':22 },
'dark_oak_log': {'h':35, 's':93, 'l':30 },
'dark_oak_planks': {'h':35, 's':93, 'l':30 },
'dead_bush': {'h':0, 's':0, 'l':100},
'diorite': {'h':0, 's':0, 'l':32 },
'dirt': {'h':27, 's':51, 'l':15 },
'end_portal_frame': {'h':0, 's':100, 'l':50 },
'farmland': {'h':35, 's':93, 'l':15 },
'fire': {'h':55, 's':100, 'l':50 },
'flowing_lava': {'h':16, 's':100, 'l':48 },
'flowing_water': {'h':228, 's':50, 'l':23 },
'glass_pane': {'h':0, 's':0, 'l':100},
'granite': {'h':0, 's':0, 'l':32 },
'grass': {'h':94, 's':42, 'l':25 },
'grass_block': {'h':94, 's':42, 'l':32 },
'gravel': {'h':21, 's':18, 'l':20 },
'ice': {'h':240, 's':10, 'l':95 },
'infested_stone': {'h':320, 's':100, 'l':50 },
'iron_ore': {'h':22, 's':65, 'l':61 },
'iron_bars': {'h':22, 's':65, 'l':61 },
'ladder': {'h':35, 's':93, 'l':30 },
'lava': {'h':16, 's':100, 'l':48 },
'lilac': {'h':0, 's':0, 'l':100},
'lily_pad': {'h':114, 's':64, 'l':18 },
'lit_pumpkin': {'h':24, 's':100, 'l':45 },
'mossy_cobblestone': {'h':115, 's':30, 'l':50 },
'mushroom_stem': {'h':0, 's':0, 'l':100},
'oak_door': {'h':35, 's':93, 'l':30 },
'oak_fence': {'h':35, 's':93, 'l':30 },
'oak_fence_gate': {'h':35, 's':93, 'l':30 },
'oak_leaves': {'h':114, 's':64, 'l':22 },
'oak_log': {'h':35, 's':93, 'l':30 },
'oak_planks': {'h':35, 's':93, 'l':30 },
'oak_pressure_plate': {'h':35, 's':93, 'l':30 },
'oak_stairs': {'h':114, 's':64, 'l':22 },
'peony': {'h':0, 's':0, 'l':100},
'pink_tulip': {'h':0, 's':0, 'l':0 },
'poppy': {'h':0, 's':100, 'l':50 },
'pumpkin': {'h':24, 's':100, 'l':45 },
'rail': {'h':33, 's':81, 'l':50 },
'red_mushroom': {'h':0, 's':50, 'l':20 },
'red_mushroom_block': {'h':0, 's':50, 'l':20 },
'rose_bush': {'h':0, 's':0, 'l':100},
'sugar_cane': {'h':123, 's':70, 'l':50 },
'sand': {'h':53, 's':22, 'l':58 },
'sandstone': {'h':48, 's':31, 'l':40 },
'seagrass': {'h':94, 's':42, 'l':25 },
'sign': {'h':114, 's':64, 'l':22 },
'spruce_leaves': {'h':114, 's':64, 'l':22 },
'spruce_log': {'h':35, 's':93, 'l':30 },
'stone': {'h':0, 's':0, 'l':32 },
'stone_slab': {'h':0, 's':0, 'l':32 },
'tall_grass': {'h':94, 's':42, 'l':25 },
'tall_seagrass': {'h':94, 's':42, 'l':25 },
'torch': {'h':60, 's':100, 'l':50 },
'snow': {'h':240, 's':10, 'l':85 },
'spawner': {'h':180, 's':100, 'l':50 },
'vine': {'h':114, 's':64, 'l':18 },
'wall_torch': {'h':60, 's':100, 'l':50 },
'water': {'h':228, 's':50, 'l':23 },
'wheat': {'h':123, 's':60, 'l':50 },
'white_wool': {'h':0, 's':0, 'l':100},
}
def get_map(chunk):
# Show an image of the chunk from above
pixels = b""
for z in range(16):
for x in range(16):
# Find the highest block in this column
max_height = chunk.get_max_height()
ground_height = max_height
tints = []
for y in range(max_height,-1,-1):
block_id = chunk.get_block(x, y, z)
if block_id != None:
#block_data = 0 # TODO: use block properties
                    #if (block_id == 'water' or block_id == 'flowing_water'):
#tints.append({'h':228, 's':50, 'l':23}) # Water
#elif (block_id == 'leaves'): # TODO: old id - update
#if (block_data == 1):
#tints.append({'h':114, 's':64, 'l':22}) # Redwood Leaves
#elif (block_data == 2):
#tints.append({'h':93, 's':39, 'l':10}) # Birch Leaves
#else:
#tints.append({'h':114, 's':64, 'l':22}) # Normal Leaves
#elif (block_id == 'ice'):
#tints.append({'h':240, 's':5, 'l':95}) # Ice
#elif (block_id == 'fire'):
#tints.append({'h':55, 's':100, 'l':50}) # Fire
#elif (block_id != 'air' or block_id != 'cave_air' or y == 0):
if (block_id not in block_ignore or y == 0):
# Here is ground level
ground_height = y
break
if block_id != None:
if block_id in block_colors:
color = block_colors[block_id]
else:
color = {'h':0, 's':0, 'l':100}
print("warning: unknown color for block id: %s" % block_id)
print("hint: add that block to the 'block_colors' map")
else:
color = {'h':0, 's':0, 'l':0}
height_shift = 0 #(ground_height-64)*0.25
final_color = {'h':color['h'], 's':color['s'], 'l':color['l'] + height_shift}
if final_color['l'] > 100: final_color['l'] = 100
if final_color['l'] < 0: final_color['l'] = 0
# Apply tints from translucent blocks
for tint in reversed(tints):
final_color = hsl_slide(final_color, tint, 0.4)
rgb = hsl2rgb(final_color['h'], final_color['s'], final_color['l'])
pixels += pack("BBB", rgb[0], rgb[1], rgb[2])
im = Image.frombytes('RGB', (16,16), pixels)
return im
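# Illustrative usage of get_map() (not part of the original example; the world
# folder name below is hypothetical):
# >>> world = WorldFolder('Sample World')
# >>> for chunk in world.iter_chunks():
# ...     get_map(chunk).show()   # 16x16 top-down tile for a single chunk
# ...     break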
## Color functions for map generation ##
# Hue given in degrees,
# saturation and lightness given either in range 0-1 or 0-100 and returned in kind
def hsl_slide(hsl1, hsl2, ratio):
if (abs(hsl2['h'] - hsl1['h']) > 180):
if (hsl1['h'] > hsl2['h']):
hsl1['h'] -= 360
else:
hsl1['h'] += 360
# Find location of two colors on the H/S color circle
p1x = math.cos(math.radians(hsl1['h']))*hsl1['s']
p1y = math.sin(math.radians(hsl1['h']))*hsl1['s']
p2x = math.cos(math.radians(hsl2['h']))*hsl2['s']
p2y = math.sin(math.radians(hsl2['h']))*hsl2['s']
# Slide part of the way from tint to base color
avg_x = p1x + ratio*(p2x-p1x)
avg_y = p1y + ratio*(p2y-p1y)
avg_h = math.atan(avg_y/avg_x)
avg_s = avg_y/math.sin(avg_h)
avg_l = hsl1['l'] + ratio*(hsl2['l']-hsl1['l'])
avg_h = math.degrees(avg_h)
#print('tint: %s base: %s avg: %s %s %s' % (tint,final_color,avg_h,avg_s,avg_l))
return {'h':avg_h, 's':avg_s, 'l':avg_l}
# From http://www.easyrgb.com/index.php?X=MATH&H=19#text19
def hsl2rgb(H,S,L):
H = H/360.0
S = S/100.0 # Turn into a percentage
L = L/100.0
if (S == 0):
return (int(L*255), int(L*255), int(L*255))
var_2 = L * (1+S) if (L < 0.5) else (L+S) - (S*L)
var_1 = 2*L - var_2
def hue2rgb(v1, v2, vH):
if (vH < 0): vH += 1
if (vH > 1): vH -= 1
if ((6*vH)<1): return v1 + (v2-v1)*6*vH
if ((2*vH)<1): return v2
if ((3*vH)<2): return v1 + (v2-v1)*(2/3.0-vH)*6
return v1
R = int(255*hue2rgb(var_1, var_2, H + (1.0/3)))
G = int(255*hue2rgb(var_1, var_2, H))
B = int(255*hue2rgb(var_1, var_2, H - (1.0/3)))
return (R,G,B)
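# Quick sanity checks for hsl2rgb (illustrative, not part of the original script):
# pure red is hue 0, full saturation, 50% lightness; zero saturation gives grey/white.
# >>> hsl2rgb(0, 100, 50)
# (255, 0, 0)
# >>> hsl2rgb(0, 0, 100)
# (255, 255, 255)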
def main(world_folder, show=True):
world = WorldFolder(world_folder)
bb = world.get_boundingbox()
world_map = Image.new('RGB', (16*bb.lenx(),16*bb.lenz()))
t = world.chunk_count()
try:
i =0.0
for chunk in world.iter_chunks():
if i % 50 ==0:
sys.stdout.write("Rendering image")
elif i % 2 == 0:
sys.stdout.write(".")
sys.stdout.flush()
elif i % 50 == 49:
sys.stdout.write("%5.1f%%\n" % (100*i/t))
i +=1
chunkmap = get_map(chunk)
x,z = chunk.get_coords()
world_map.paste(chunkmap, (16*(x-bb.minx),16*(z-bb.minz)))
print(" done\n")
filename = os.path.basename(world_folder)+".png"
world_map.save(filename,"PNG")
print("Saved map as %s" % filename)
except KeyboardInterrupt:
print(" aborted\n")
filename = os.path.basename(world_folder)+".partial.png"
world_map.save(filename,"PNG")
print("Saved map as %s" % filename)
return 75 # EX_TEMPFAIL
if show:
world_map.show()
return 0 # NOERR
if __name__ == '__main__':
if (len(sys.argv) == 1):
print("No world folder specified!")
sys.exit(64) # EX_USAGE
if sys.argv[1] == '--noshow' and len(sys.argv) > 2:
show = False
world_folder = sys.argv[2]
else:
show = True
world_folder = sys.argv[1]
# clean path name, eliminate trailing slashes. required for os.path.basename()
world_folder = os.path.normpath(world_folder)
if (not os.path.exists(world_folder)):
print("No such folder as "+world_folder)
sys.exit(72) # EX_IOERR
sys.exit(main(world_folder, show))
```
#### File: NBT/examples/regionfile_analysis.py
```python
import locale, os, sys
import collections
from optparse import OptionParser
import gzip
import zlib
from struct import unpack
# local module
try:
import nbt
except ImportError:
# nbt not in search path. Let's see if it can be found in the parent folder
extrasearchpath = os.path.realpath(os.path.join(__file__,os.pardir,os.pardir))
if not os.path.exists(os.path.join(extrasearchpath,'nbt')):
raise
sys.path.append(extrasearchpath)
from nbt.region import RegionFile, RegionFileFormatError
class ChunkMetadata(object):
def __init__(self, x, z):
self.x = x
self.z = z
self.sectorstart = None
self.sectorlen = None
self.timestamp = None
self.length = None
self.compression = None
self.status = None
def __repr__(self):
return "chunk %02s,%02s [%d] @%-5d %2d %-5s %-5s %d" % (self.x, self.z, self.status, self.sectorstart, self.sectorlen, self.length, self.compression, self.timestamp)
class Statuses(object):
"""Keep track of the number of statuses for all chunks.
The different types of status are defined in RegionFile"""
def __init__(self):
self.counts = {}
self.names = {}
# Read status names from STATUS_CHUNK_* constants in RegionFile.
for var in dir(RegionFile):
if var.startswith("STATUS_CHUNK_"):
name = var[13:].title().replace("_"," ")
value = getattr(RegionFile, var)
self.counts[value] = 0
self.names[value] = name
def count(self, status, count=1):
if status not in self.counts:
self.counts[status] = 0
self.names = "Status %s" % status
self.counts[status] += count
def get_name(self, status):
if status in self.names:
return self.names[status]
else:
return "Status %s" % status
def results(self):
for value in sorted(self.counts.keys()):
yield value, self.counts[value], self.get_name(value)
def total(self):
return sum(self.counts.values())
class ByteCounter(object):
"""Keep track of types of bytes in a binary stream."""
def __init__(self):
self.counts = {}
def count(self, bytestream):
if isinstance(bytestream, collections.Iterable):
for byte in bytestream:
if byte not in self.counts:
self.counts[byte] = 0
self.counts[byte] += 1
else:
if bytestream not in self.counts:
self.counts[bytestream] = 0
self.counts[bytestream] += 1
def results(self):
for value in sorted(self.counts.keys()):
yield value, self.counts[value]
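# Illustrative example for ByteCounter (not part of the original script); on
# Python 3, iterating over a bytes object yields integer byte values:
# >>> bc = ByteCounter()
# >>> bc.count(b"\x00\x00\xff")
# >>> list(bc.results())
# [(0, 2), (255, 1)]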
def analyse_regionfile(filename, warnings=True):
region = RegionFile(filename)
statuscounts = Statuses()
errors = []
if region.size % 4096 != 0:
errors.append("File size is %d bytes, which is not a multiple of 4096" % region.size)
sectorsize = region._bytes_to_sector(region.size)
sectors = sectorsize*[None]
if region.size == 0:
errors.append("File size is 0 bytes")
sectors = []
elif sectorsize < 2:
errors.append("File size is %d bytes, too small for the 8192 byte header" % region.size)
else:
sectors[0] = "locations"
sectors[1] = "timestamps"
chunks = {}
for x in range(32):
for z in range(32):
c = ChunkMetadata(x,z)
(c.sectorstart, c.sectorlen, c.timestamp, status) = region.header[x,z]
(c.length, c.compression, c.status) = region.chunk_headers[x,z]
c.uncompressedlength = 0
chunks[x,z] = c
statuscounts.count(c.status)
if c.status < 0:
errors.append("chunk %d,%d has status %d: %s" % \
(x, z, c.status, statuscounts.get_name(c.status)))
try:
if c.sectorstart == 0:
if c.sectorlen != 0:
errors.append("chunk %d,%d is not created, but is %d sectors in length" % (x, z, c.sectorlen))
if c.timestamp != 0:
errors.append("chunk %d,%d is not created, but has timestamp %d" % (x, z, c.timestamp))
raise RegionFileFormatError('')
allocatedbytes = 4096 * c.sectorlen
if c.timestamp == 0:
errors.append("chunk %d,%d has no timestamp" % (x, z))
if c.sectorstart < 2:
errors.append("chunk %d,%d starts at sector %d, which is in the header" % (x, z, c.sectorstart))
raise RegionFileFormatError('')
if 4096 * c.sectorstart >= region.size:
errors.append("chunk %d,%d starts at sector %d, while the file is only %d sectors" % (x, z, c.sectorstart, sectorsize))
raise RegionFileFormatError('')
elif 4096 * c.sectorstart + 5 > region.size:
# header of chunk only partially fits
errors.append("chunk %d,%d starts at sector %d, but only %d bytes of sector %d are present in the file" % (x, z, c.sectorstart, sectorsize))
raise RegionFileFormatError('')
elif not c.length:
errors.append("chunk %d,%d length is undefined." % (x, z))
raise RegionFileFormatError('')
elif c.length == 1:
errors.append("chunk %d,%d has length 0 bytes." % (x, z))
elif 4096 * c.sectorstart + 4 + c.length > region.size:
# header of chunk fits, but not the complete chunk
errors.append("chunk %d,%d is %d bytes in length, which is behind the file end" % (x, z, c.length))
requiredsectors = region._bytes_to_sector(c.length + 4)
if c.sectorlen <= 0:
errors.append("chunk %d,%d is %d sectors in length" % (x, z, c.sectorlen))
raise RegionFileFormatError('')
if c.compression == 0:
errors.append("chunk %d,%d is uncompressed. This is deprecated." % (x, z))
elif c.compression == 1:
errors.append("chunk %d,%d uses GZip compression. This is deprecated." % (x, z))
elif c.compression > 2:
errors.append("chunk %d,%d uses an unknown compression type (%d)." % (x, z, c.compression))
if c.length + 4 > allocatedbytes: # TODO 4 or 5?
errors.append("chunk %d,%d is %d bytes (4+1+%d) and requires %d sectors, " \
"but only %d %s allocated" % \
(x, z, c.length+4, c.length-1, requiredsectors, c.sectorlen, \
"sector is" if (c.sectorlen == 1) else "sectors are"))
elif c.length + 4 + 4096 == allocatedbytes:
                    # If the block fits in exactly n sectors, Minecraft seems to allocate n+1 sectors.
                    # Treat this as a warning instead of an error.
if warnings:
errors.append("chunk %d,%d is %d bytes (4+1+%d) and requires %d %s, " \
"but %d sectors are allocated" % \
(x, z, c.length+4, c.length-1, requiredsectors, \
"sector" if (requiredsectors == 1) else "sectors", c.sectorlen))
elif c.sectorlen > requiredsectors:
errors.append("chunk %d,%d is %d bytes (4+1+%d) and requires %d %s, " \
"but %d sectors are allocated" % \
(x, z, c.length+4, c.length-1, requiredsectors, \
"sector" if (requiredsectors == 1) else "sectors", c.sectorlen))
# Decompress chunk, check if that succeeds.
# Check if the header and footer indicate this is a NBT file.
# (without parsing it in detail)
compresseddata = None
data = None
try:
if 0 <= c.compression <= 2:
region.file.seek(4096*c.sectorstart + 5)
compresseddata = region.file.read(c.length - 1)
except Exception as e:
errors.append("Error reading chunk %d,%d: %s" % (x, z, str(e)))
if (c.compression == 0):
data = compresseddata
if (c.compression == 1):
try:
data = gzip.decompress(compresseddata)
except Exception as e:
errors.append("Error decompressing chunk %d,%d using gzip: %s" % (x, z, str(e)))
elif (c.compression == 2):
try:
data = zlib.decompress(compresseddata)
except Exception as e:
errors.append("Error decompressing chunk %d,%d using zlib: %s" % (x, z, str(e)))
if data:
c.uncompressedlength = len(data)
if data[0] != 10:
errors.append("chunk %d,%d is not a valid NBT file: outer object is not a TAG_Compound, but %r" % (x, z, data[0]))
elif data[-1] != 0:
errors.append("chunk %d,%d is not a valid NBT file: files does not end with a TAG_End." % (x, z))
else:
(length, ) = unpack(">H", data[1:3])
name = data[3:3+length]
try:
name.decode("utf-8", "strict")
except Exception as e:
errors.append("Error decompressing chunk %d,%d using unknown compression: %s" % (x, z, str(e)))
if warnings:
# Read the unused bytes in a sector and check if all bytes are zeroed.
unusedlen = 4096*c.sectorlen - (c.length+4)
if unusedlen > 0:
try:
region.file.seek(4096*c.sectorstart + 4 + c.length)
unused = region.file.read(unusedlen)
zeroes = unused.count(b'\x00')
if zeroes < unusedlen:
errors.append("%d of %d unused bytes are not zeroed in sector %d after chunk %d,%d" % \
(unusedlen-zeroes, unusedlen, c.sectorstart + c.sectorlen - 1, x, z))
except Exception as e:
errors.append("Error reading tail of chunk %d,%d: %s" % (x, z, str(e)))
except RegionFileFormatError:
pass
if c.sectorlen and c.sectorstart:
# Check for overlapping chunks
for b in range(c.sectorlen):
m = "chunk %-2d,%-2d part %d/%d" % (x, z, b+1, c.sectorlen)
p = c.sectorstart + b
if p > sectorsize:
errors.append("%s outside file" % (m))
break
if sectors[p] != None:
errors.append("overlap in sector %d: %s and %s" % (p, sectors[p], m))
if (b == 0):
if (c.uncompressedlength > 0):
m += " (4+1+%d bytes compressed: %d bytes uncompressed)" % (c.length-1, c.uncompressedlength)
elif c.length:
m += " (4+1+%d bytes compressed)" % (c.length-1)
else:
m += " (4+1+0 bytes)"
if sectors[p] != None:
m += " (overlapping!)"
sectors[p] = m
e = sectors.count(None)
if e > 0:
if warnings:
errors.append("Fragmentation: %d of %d sectors are unused" % (e, sectorsize))
for sector, content in enumerate(sectors):
if content == None:
sectors[sector] = "empty"
if warnings:
region.file.seek(4096*sector)
unused = region.file.read(4096)
zeroes = unused.count(b'\x00')
if zeroes < 4096:
errors.append("%d bytes are not zeroed in unused sector %d" % (4096-zeroes, sector))
return errors, statuscounts, sectors, chunks
def debug_regionfile(filename, warnings=True):
print(filename)
errors, statuscounts, sectors, chunks = analyse_regionfile(filename, warnings)
print("File size is %d sectors" % (len(sectors)))
print("Chunk statuses (as reported by nbt.region.RegionFile):")
for value, count, name in statuscounts.results():
print("status %2d %-21s%4d chunks" % (value, ("(%s):" % name), count))
print("%d chunks in total" % statuscounts.total()) #q should be 1024
if len(errors) == 0:
print("No errors or warnings found")
elif warnings:
print("Errors and Warnings:")
else:
print("Errors:")
for error in errors:
print(error)
print("File content by sector:")
for i,s in enumerate(sectors):
print("sector %03d: %s" % (i, s))
def print_errors(filename, warnings=True):
errors, statuscounts, sectors, chunks = analyse_regionfile(filename, warnings)
print(filename)
for error in errors:
print(error)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-v", "--verbose", dest="verbose", default=False,
action="store_true", help="Show detailed info about region file")
parser.add_option("-q", "--quiet", dest="warnings", default=True,
action="store_false", help="Only show errors, no warnings")
(options, args) = parser.parse_args()
if (len(args) == 0):
print("No region file specified! Use -v for verbose results; -q for errors only (no warnings)")
sys.exit(64) # EX_USAGE
for filename in args:
try:
if options.verbose:
debug_regionfile(filename, options.warnings)
else:
print_errors(filename, options.warnings)
except IOError as e:
sys.stderr.write("%s: %s\n" % (e.filename, e.strerror))
# sys.exit(72) # EX_IOERR
sys.exit(0)
```
#### File: NBT/nbt/__init__.py
```python
__all__ = ["nbt", "world", "region", "chunk"]
from . import *
# Documentation only automatically includes functions specified in __all__.
# If you add more functions, please manually include them in doc/index.rst.
VERSION = (1, 5, 0)
"""NBT version as tuple. Note that the major and minor revision number are
always present, but the patch identifier (the 3rd number) is only used in 1.4."""
def _get_version():
"""Return the NBT version as string."""
return ".".join([str(v) for v in VERSION])
```
#### File: NBT/tests/alltests.py
```python
import sys
import logging
import unittest
try:
from unittest import skip as _skip
except ImportError:
# Python 2.6 has an older unittest API. The backported package is available from pypi.
import unittest2 as unittest
testmodules = ['examplestests', 'nbttests', 'regiontests']
"""Files to check for test cases. Do not include the .py extension."""
def get_testsuites_in_module(module):
"""
Return a list of unittest.TestSuite subclasses defined in module.
"""
suites = []
for name in dir(module):
obj = getattr(module, name)
if isinstance(obj, type) and issubclass(obj, unittest.TestSuite):
suites.append(obj)
return suites
def load_tests_in_modules(modulenames):
"""
Given a list of module names, import the modules, load and run the
test cases in these modules. The modules are typically files in the
current directory, but this is not a requirement.
"""
loader = unittest.TestLoader()
suites = []
for name in modulenames:
module = __import__(name)
suite = loader.loadTestsFromModule(module)
for suiteclass in get_testsuites_in_module(module):
# Wrap suite in TestSuite classes
suite = suiteclass(suite)
suites.append(suite)
suite = unittest.TestSuite(suites)
return suite
if __name__ == "__main__":
logger = logging.getLogger("nbt.tests")
if len(logger.handlers) == 0:
# Logging is not yet configured. Configure it.
logging.basicConfig(level=logging.INFO, stream=sys.stderr, format='%(levelname)-8s %(message)s')
testresult = unittest.TextTestRunner(verbosity=2).run(load_tests_in_modules(testmodules))
sys.exit(0 if testresult.wasSuccessful() else 1)
```
#### File: NBT/tests/downloadsample.py
```python
import sys
import os
try:
import urllib.request as urllib # Python 3
except ImportError:
import urllib2 as urllib # Python 2
import logging
import subprocess
import tarfile
import hashlib
import glob
import tempfile
import shutil
URL = "https://github.com/downloads/twoolie/NBT/Sample_World.tar.gz"
"""URL to retrieve"""
workdir = os.path.dirname(__file__)
"""Directory for download and extracting the sample files"""
worlddir = os.path.join(workdir, 'Sample World')
"""Destination folder for the sample world."""
checksums = {
'Sample World': None,
'Sample World/data': None,
'Sample World/level.dat': 'f252cf8b938fa1e41c9335ea1bdc70fca73ac5c63c2cf2db4b2ddc4cb2fa4d91',
'Sample World/level.dat_mcr': '933238e89a9f7f94c72f236da0d81d44d966c7a1544490e51e682ab42ccc50ff',
'Sample World/level.dat_old': 'c4b5a5c355d4f85c369604ca27ee349dba41adc4712a43a6f8c8399fe44071e7',
'Sample World/region': None,
'Sample World/region/r.-1.0.mca': '6e8ec8698e2e68ca3ee2090da72e4f24c85f9db3f36191e5e33ebc8cafb209f2',
'Sample World/region/r.-1.0.mcr': '3a9ccafc6f64b98c0424814f44f1d0d3429cbb33448ff97e2e84ca649bfa16ae',
'Sample World/region/r.-1.1.mca': 'c5f6fb5c72ca534d0f73662f2199dca977d2de1521b4823f73384aa6826c4b74',
'Sample World/region/r.-1.1.mcr': '8e8b545b412a6a2bb519aee0259a63e6a918cd25a49538451e752e3bf90d4cf1',
'Sample World/region/r.0.0.mca': 'd86e51c2adf35f82492e974f75fe83e9e5db56a267a3fe76150dc42f0aeb07c7',
'Sample World/region/r.0.0.mcr': 'a8e7fea4e40a70e0d70dea7ebb1328c7623ed01b77d8aff34d01e530fbdad9d5',
'Sample World/region/r.0.1.mca': '8a03d910c7fd185ae5efb1409c592e4a9688dfab1fbd31f8c736461157a7675d',
'Sample World/region/r.0.1.mcr': '08fcd50748d4633a3b1d52e1b323c7dd9c4299a28ec095d0261fd195d3c9a537',
'Sample World/session.lock': 'd05da686dd04cd2ad1f660ddaa7645abc9fd9af396357a5b3256b437af0d7dba',
}
"""SHA256 checksums for each file in the tar file.
Directories MUST also be included (without trailing slash), with None as the checksum"""
def download(url, destination):
"""
Download the file from the specified URL, and extract the contents.
Uses urllib2.
WARNING: urllib2 does not verify the certificate for Python
earlier than 2.7.9 or 3.4.2 (!). Verify the checksums before using
the downloaded files.
Warning: Before Python 2.7.9, urllib2 can't download over HTTPS, since
it effectively only supports SSLv3, which is nowadays deprecated by websites.
In these cases, the following SSLError is raised:
error:14077410:SSL routines:SSL23_GET_SERVER_HELLO:sslv3 alert handshake failure
May raise an IOError or SSLError.
"""
logger = logging.getLogger("nbt.tests.downloadsample")
localfile = None
remotefile = None
try:
logger.info("Downloading %s" % url)
request = urllib.Request(url)
remotefile = urllib.urlopen(request)
localfile = open(destination, 'wb')
chunksize = 524288 # 0.5 MiB
size = 0
while True:
data = remotefile.read(chunksize)
if not data: break
localfile.write(data)
size += len(data)
logging.info("Downloaded %0.1f MiByte..." % (float(size)/1048576))
finally:
try:
localfile.close()
except (IOError, AttributeError):
pass
logging.info("Download complete")
def download_with_external_tool(url, destination):
"""
Download the file from the specified URL, and extract the contents.
Uses the external curl program.
wget fails if it is compiled with older version of OpenSSL. Hence we use curl.
May raise an OSError (or one of it's subclasses).
"""
logger = logging.getLogger("nbt.tests.downloadsample")
logger.info("Downloading %s (with curl)" % url)
# command = ['wget', '-O', destination, url]
command = ['curl', '-o', destination, '-L', '-#', url]
# Open a subprocess, wait till it is done, and get the STDOUT result
exitcode = subprocess.call(command)
if exitcode != 0:
raise OSError("%s returned exit code %d" % (" ".join(command), exitcode))
def extract(filename, workdir, filelist):
"""
Extract contents of a tar file in workdir. The tar file may be compressed
using gzip or bzip2.
For security, only files listed in filelist are extracted.
Extraneous files will be logged as warning.
"""
logger = logging.getLogger("nbt.tests.downloadsample")
logger.info("Extracting %s" % filename)
def filefilter(members):
for tarinfo in members:
if tarinfo.name in filelist:
logger.info("Extract %s" % tarinfo.name)
yield tarinfo
else:
logger.warning("Skip %s" % tarinfo.name)
# r:* means any compression (gzip or bz2 are supported)
files = tarfile.open(filename, 'r:*')
files.extractall(workdir, filefilter(files.getmembers()))
files.close()
def verify(checksums):
"""
Verify if all given files are present and their SHA256
checksum is correct. Any files not explicitly listed are deleted.
checksums is a dict of file => checksum, with file a file relative to dir.
Returns a boolean indicating that all checksums are correct, and all files
are present.
    Any warnings and errors are printed to the logger.
Errors or exceptions result in a return value of False.
"""
logger = logging.getLogger("nbt.tests.downloadsample")
success = True
for path in checksums.keys():
try:
check = checksums[path]
if check == None: continue # Skip folders
localfile = open(path, 'rb')
h = hashlib.sha256()
chunksize = 524288 # 0.5 MiB
while True:
data = localfile.read(chunksize)
if not data: break
h.update(data)
localfile.close()
calc = h.hexdigest()
if calc != check:
logger.error("Checksum failed %s: %s found, %s expected" % (path, calc, check))
success = False
except IOError as e:
if e.errno == 2:
                logger.error('Checksum verification failed: file %s not found' % e.filename)
else:
                logger.error('Checksum verification of %s failed: errno %d: %s' % \
(e.filename, e.errno, e.strerror))
return False
logger.info("Checksum of %d files verified" % len(checksums))
return success
def install(url=URL, workdir=workdir, checksums=checksums):
"""
Download and extract a sample world, used for testing.
The download file and sample world are stored in workdir.
Verifies the checksum of all files. Files without a checksum are not
extracted.
"""
# the paths in checksum are relative to the working dir, and UNIX-based.
# Normalise them to support Windows; and create the following three derivates:
# - posixpaths: list of relative posix paths -- to filter tar extraction
# - nchecksums: as checksum, but with normalised absolute paths
# - files: list of normalised absolute path of files (non-directories)
logger = logging.getLogger("nbt.tests.downloadsample")
posixpaths = checksums.keys()
nchecksums = dict([(os.path.join(workdir, os.path.normpath(path)), checksums[path]) \
for path in posixpaths if checksums[path] != None])
files = nchecksums.keys()
tar_file = os.path.join(workdir, os.path.basename(url))
if not any(map(os.path.exists, files)):
# none of the destination files exist. We can safely download/extract without overwriting.
if not os.path.exists(tar_file):
has_ssl_error = False
try:
download(url=URL, destination=tar_file)
except urllib.URLError as e:
# e.reason may be a socket.error. If so, print e.reason.strerror.
logger.error('Download %s failed: %s' % \
(url, e.reason.strerror if hasattr(e.reason, "strerror") else e.reason))
has_ssl_error = "sslv3" in ("%s" % e)
except urllib.HTTPError as e:
# urllib.HTTPError.reason does not have a reason in Python 2.6 (perhaps neither in 2.7).
logger.error('Download %s failed: HTTP Error %d: %s' % (url, e.code, \
e.reason if hasattr(e, "reason") else e))
return False
except Exception as e:
logger.error('Download %s failed: %s' % (url, e))
return False
if has_ssl_error:
try:
download_with_external_tool(url=URL, destination=tar_file)
except Exception as e:
logger.error('Download %s failed: %s' % (url, e))
return False
try:
extract(filename=tar_file, workdir=workdir, filelist=posixpaths)
except tarfile.TarError as e:
logger.error('Extract %s failed: %s' % (tar_file, e.message))
return False
except Exception as e:
logger.error('Extract %s failed: %s' % (tar_file, e))
return False
return verify(checksums=nchecksums)
def _mkdir(dstdir, subdir):
"""Helper function: create folder /dstdir/subdir"""
os.mkdir(os.path.join(dstdir, os.path.normpath(subdir)))
def _copyglob(srcdir, destdir, pattern):
"""Helper function: copies files from /srcdir/pattern to /destdir/pattern.
pattern is a glob pattern."""
for fullpath in glob.glob(os.path.join(srcdir, os.path.normpath(pattern))):
relpath = os.path.relpath(fullpath, srcdir)
shutil.copy2(fullpath, os.path.join(destdir, relpath))
def _copyrename(srcdir, destdir, src, dest):
"""Helper function: copy file from /srcdir/src to /destdir/dest."""
shutil.copy2(os.path.join(srcdir, os.path.normpath(src)), \
os.path.join(destdir, os.path.normpath(dest)))
def temp_mcregion_world(worldfolder=worlddir):
"""Create a McRegion worldfolder in a temporary directory, based on the
files in the given mixed worldfolder. Returns the temporary directory path."""
logger = logging.getLogger("nbt.tests.downloadsample")
tmpfolder = tempfile.mkdtemp(prefix="nbtmcregion")
logger.info("Create temporary McRegion world folder at %s" % tmpfolder)
_mkdir(tmpfolder, 'region')
_copyglob(worldfolder, tmpfolder, "region/*.mcr")
_copyrename(worldfolder, tmpfolder, "level.dat_mcr", "level.dat")
return tmpfolder
def temp_anvil_world(worldfolder=worlddir):
"""Create a Anvil worldfolder in a temporary directory, based on the
files in the given mixed worldfolder. Returns the temporary directory path."""
logger = logging.getLogger("nbt.tests.downloadsample")
tmpfolder = tempfile.mkdtemp(prefix="nbtanvil")
logger.info("Create temporary Anvil world folder at %s" % tmpfolder)
_mkdir(tmpfolder, 'region')
_copyglob(worldfolder, tmpfolder, "region/*.mca")
_copyrename(worldfolder, tmpfolder, "level.dat", "level.dat")
return tmpfolder
def cleanup_temp_world(tmpfolder):
"""Remove a temporary directory"""
logger = logging.getLogger("nbt.tests.downloadsample")
logger.info("Remove temporary world folder at %s" % tmpfolder)
shutil.rmtree(tmpfolder, ignore_errors=True)
if __name__ == '__main__':
logger = logging.getLogger("nbt.tests.downloadsample")
if len(logger.handlers) == 0:
# Logging is not yet configured. Configure it.
logging.basicConfig(level=logging.INFO, stream=sys.stderr, format='%(levelname)-8s %(message)s')
success = install()
sys.exit(0 if success else 1)
``` |
{
"source": "jlsanders/genfunc",
"score": 2
} |
#### File: jlsanders/genfunc/toy_potentials.py
```python
import numpy as np
from scipy.optimize import fmin_bfgs, leastsq, fmin_powell,fmin
# in units kpc, km/s and 10^11 M_solar
Grav = 430091.5694
# Triaxial harmonic
def H_ho(x,omega):
""" Simple harmonic oscillator Hamiltonian = 0.5 * omega**2 * x**2"""
return 0.5*np.sum(x[3:]**2+(omega*x[:3])**2)
def angact_ho(x,omega):
""" Calculate angle and action variable in sho potential with
parameter omega """
action = (x[3:]**2+(omega*x[:3])**2)/(2.*omega)
angle = np.array([np.arctan(-x[3+i]/omega[i]/x[i]) if x[i]!=0. else -np.sign(x[3+i])*np.pi/2. for i in range(3)])
for i in range(3):
if(x[i]<0):
angle[i]+=np.pi
return np.concatenate((action,angle % (2.*np.pi)))
def deltaH_ho(omega,xsamples):
if(np.any(omega<1e-5)):
return np.nan
H = 0.5*np.sum(xsamples.T[3:]**2,axis=0)+0.5*np.sum((omega[:3]*xsamples.T[:3].T)**2,axis=1)
return H-np.mean(H)
def Jac_deltaH_ho(omega,xsamples):
dHdparams = omega[:3]*xsamples.T[:3].T**2
return dHdparams-np.mean(dHdparams,axis=0)
def findbestparams_ho(xsamples):
""" Minimize sum of square differences of H_sho-<H_sho> for timesamples """
return np.abs(leastsq(deltaH_ho,np.array([10.,10.,10.]), Dfun = Jac_deltaH_ho, args=(xsamples,))[0])[:3]
# Isochrone
def cart2spol(X):
""" Performs coordinate transformation from cartesian
to spherical polar coordinates with (r,phi,theta) having
usual meanings. """
x,y,z,vx,vy,vz=X
r=np.sqrt(x*x+y*y+z*z)
p=np.arctan2(y,x)
t=np.arccos(z/r)
vr=(vx*np.cos(p)+vy*np.sin(p))*np.sin(t)+np.cos(t)*vz
vp=-vx*np.sin(p)+vy*np.cos(p)
vt=(vx*np.cos(p)+vy*np.sin(p))*np.cos(t)-np.sin(t)*vz
return np.array([r,p,t,vr,vp,vt])
def H_iso(x,params):
""" Isochrone Hamiltonian = -GM/(b+sqrt(b**2+(r-r0)**2))"""
#r = (np.sqrt(np.sum(x[:3]**2))-params[2])**2
r = np.sum(x[:3]**2)
return 0.5*np.sum(x[3:]**2)-Grav*params[0]/(params[1]+np.sqrt(params[1]**2+r))
def angact_iso(x,params):
""" Calculate angle and action variable in isochrone potential with
parameters params = (M,b) """
GM = Grav*params[0]
E = H_iso(x,params)
r,p,t,vr,vphi,vt=cart2spol(x)
st=np.sin(t)
Lz=r*vphi*st
L=np.sqrt(r*r*vt*vt+Lz*Lz/st/st)
if(E>0.): # Unbound
return (np.nan,np.nan,np.nan,np.nan,np.nan,np.nan)
Jr=GM/np.sqrt(-2*E)-0.5*(L+np.sqrt(L*L+4*GM*params[1]))
action = np.array([Jr,Lz,L-abs(Lz)])
c=GM/(-2*E)-params[1]
e=np.sqrt(1-L*L*(1+params[1]/c)/GM/c)
eta=np.arctan2(r*vr/np.sqrt(-2.*E),params[1]+c-np.sqrt(params[1]**2+r*r))
OmR=np.power(-2*E,1.5)/GM
Omp=0.5*OmR*(1+L/np.sqrt(L*L+4*GM*params[1]))
thetar=eta-e*c*np.sin(eta)/(c+params[1])
if(abs(vt)>1e-10):
psi=np.arctan2(np.cos(t),-np.sin(t)*r*vt/L)
else:
psi=np.pi/2.
a=np.sqrt((1+e)/(1-e))
ap=np.sqrt((1+e+2*params[1]/c)/(1-e+2*params[1]/c))
F = lambda x,y: np.pi/2.-np.arctan(np.tan(np.pi/2.-0.5*y)/x) if y>np.pi/2. \
else -np.pi/2.+np.arctan(np.tan(np.pi/2.+0.5*y)/x) if y<-np.pi/2. \
else np.arctan(x*np.tan(0.5*y))
thetaz=psi+Omp*thetar/OmR-F(a,eta)-F(ap,eta)/np.sqrt(1+4*GM*params[1]/L/L)
LR=Lz/L
sinu = LR/np.sqrt(1.-LR**2)/np.tan(t)
u = 0
if(sinu>1.):
u=np.pi/2.
elif(sinu<-1.):
u = -np.pi/2.
else:
u = np.arcsin(sinu)
if(vt>0.):
u=np.pi-u
thetap=p-u+np.sign(Lz)*thetaz
angle = np.array([thetar,thetap,thetaz])
return np.concatenate((action,angle % (2.*np.pi)))
def deltaH_iso(params,p,r):
deltaH = p-Grav*params[0]/(params[1]+np.sqrt(params[1]**2+r))
if(params[0]<0. or params[1]<0. or np.any(deltaH>0.)):
return np.nan
return (deltaH-np.mean(deltaH))
# return JR-np.mean(JR)
def Jac_deltaH_iso(params,p,r):
H_o = -Grav/(params[1]+np.sqrt(params[1]**2+r))
H_1 = Grav*params[0]*(1.+params[1]/np.sqrt(params[1]**2+r))/(params[1]+np.sqrt(params[1]**2+r))**2
return np.array([(H_o-np.mean(H_o)),(H_1-np.mean(H_1))])
def findbestparams_iso(xsamples):
""" Minimize sum of square differences of H_iso-<H_iso> for timesamples"""
p = 0.5*np.sum(xsamples.T[3:]**2,axis=0)
r = np.sum(xsamples.T[:3]**2,axis=0)
return np.abs(leastsq(deltaH_iso,np.array([10.,10.]), Dfun = None , col_deriv=1,args=(p,r,))[0]) #Jac_deltaH_iso
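# --- Illustrative usage (not part of the original module) ---
# A minimal sketch with arbitrary test values (omega in km/s/kpc, params = (M, b)
# in units of 10^11 M_solar and kpc); run in an interpreter after importing this file:
# >>> w = np.array([50., 40., 30.])
# >>> X = np.array([1., 0.5, 0.2, 10., -5., 3.])   # phase-space point (x, y, z, vx, vy, vz)
# >>> H_ho(X, w)                                   # SHO energy
# >>> angact_ho(X, w)                              # (J_1, J_2, J_3, theta_1, theta_2, theta_3)
# >>> H_iso(X, np.array([1., 2.]))                 # isochrone energy for M = 1, b = 2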
``` |
{
"source": "JlsBssmnn/ALiPy",
"score": 3
} |
#### File: alipy/index/multi_label_tools.py
```python
import collections
import numpy as np
from ..utils.interface import BaseCollection
from ..utils.misc import check_matrix
__all__ = ['check_index_multilabel',
'infer_label_size_multilabel',
'flattern_multilabel_index',
'integrate_multilabel_index',
'get_labelmatrix_in_multilabel',
'get_Xy_in_multilabel',
]
def check_index_multilabel(index):
"""check if the given indexes are legal.
Parameters
----------
index: list or np.ndarray
index of the data.
"""
if isinstance(index, BaseCollection):
return index
if not isinstance(index, (list, np.ndarray)):
index = [index]
datatype = collections.Counter([type(i) for i in index])
if len(datatype) != 1:
raise TypeError("Different types found in the given indexes.")
if not datatype.popitem()[0] == tuple:
raise TypeError("Each index should be a tuple.")
return index
def infer_label_size_multilabel(index_arr, check_arr=True):
"""Infer the label size from a set of index arr.
raise if all index are example index only.
Parameters
----------
index_arr: list or np.ndarray
index array.
Returns
-------
label_size: int
the inferred label size.
"""
if check_arr:
index_arr = check_index_multilabel(index_arr)
data_len = np.array([len(i) for i in index_arr])
if np.any(data_len == 2):
label_size = np.max([i[1] for i in index_arr if len(i) == 2]) + 1
elif np.all(data_len == 1):
raise Exception(
"Label_size can not be induced from fully labeled set, label_size must be provided.")
else:
raise ValueError(
"All elements in indexes should be a tuple, with length = 1 (example_index, ) "
"to query all labels or length = 2 (example_index, [label_indexes]) to query specific labels.")
return label_size
def flattern_multilabel_index(index_arr, label_size=None, check_arr=True):
"""
    Flatten the multilabel index to a one-dimensional list of (example, label) pairs.
Parameters
----------
index_arr: list or np.ndarray
index array.
label_size: int
the inferred label size.
check_arr: bool
if True,check the index_arr is a legal multilabel_index.
Returns
-------
decomposed_data: list
        the decomposed data after flattening.
"""
if check_arr:
index_arr = check_index_multilabel(index_arr)
if label_size is None:
label_size = infer_label_size_multilabel(index_arr)
else:
assert (label_size > 0)
decomposed_data = []
for item in index_arr:
if len(item) == 1:
for i in range(label_size):
decomposed_data.append((item[0], i))
else:
if isinstance(item[1], collections.Iterable):
label_ind = [i for i in item[1] if 0 <= i < label_size]
else:
assert (0 <= item[1] < label_size)
label_ind = [item[1]]
for j in range(len(label_ind)):
decomposed_data.append((item[0], label_ind[j]))
return decomposed_data
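# Illustrative example (not from the original source): querying all three labels of
# example 0 and labels 0 and 2 of example 1 expands to five (example, label) pairs.
# >>> flattern_multilabel_index([(0,), (1, [0, 2])], label_size=3)
# [(0, 0), (0, 1), (0, 2), (1, 0), (1, 2)]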
def integrate_multilabel_index(index_arr, label_size=None, check_arr=True):
""" Integrated the indexes of multi-label.
Parameters
----------
index_arr: list or np.ndarray
multi-label index array.
label_size: int, optional (default = None)
the size of label set. If not provided, an inference is attempted.
raise if the inference is failed.
check_arr: bool, optional (default = True)
whether to check the validity of index array.
Returns
-------
array: list
the integrated array.
"""
if check_arr:
index_arr = check_index_multilabel(index_arr)
if label_size is None:
label_size = infer_label_size_multilabel(index_arr)
else:
assert (label_size > 0)
integrated_arr = []
integrated_dict = {}
for index in index_arr:
example_ind = index[0]
if len(index) == 1:
integrated_dict[example_ind] = set(range(label_size))
else:
# length = 2
if example_ind in integrated_dict.keys():
integrated_dict[example_ind].update(
set(index[1] if isinstance(index[1], collections.Iterable) else [index[1]]))
else:
integrated_dict[example_ind] = set(
index[1] if isinstance(index[1], collections.Iterable) else [index[1]])
for item in integrated_dict.items():
if len(item[1]) == label_size:
integrated_arr.append((item[0],))
else:
# -------------------------------------------------------------------------------------------
# integrated_arr.append((item[0], tuple(item[0])))
integrated_arr.append((item[0], tuple(item[1])))
return integrated_arr
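# Illustrative example (not from the original source): the inverse direction of
# flattern_multilabel_index -- fully queried examples collapse back to a 1-tuple.
# >>> integrate_multilabel_index([(0, 0), (0, 1), (0, 2), (1, 0), (1, 2)], label_size=3)
# [(0,), (1, (0, 2))]    # label order inside the tuple may vary, it comes from a set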
def get_labelmatrix_in_multilabel(index, data_matrix, unknown_element=0):
"""get data matrix by giving index in multi-label setting.
Note:
Each index should be a tuple, with the first element representing instance index.
e.g.
        queried_index = (1, [3,4]) # 1st instance, 3rd and 4th labels
        queried_index = (1, [3])   # 1st instance, 3rd label
        queried_index = (1, 3)
        queried_index = (1, (3))
        queried_index = (1, (3,4))
        queried_index = (1, )      # query all labels
Parameters
----------
index: {list, np.ndarray, tuple, MultiLabelIndexCollection}
if only one index, a tuple is expected.
Otherwise, it should be a list type with n tuples.
data_matrix: array-like
matrix with [n_samples, n_features] or [n_samples, n_classes].
unknown_element: object
value to fill up the unknown part of the matrix_clip.
Returns
-------
Matrix_clip: np.ndarray
data matrix given index
index_arr: list
index of examples correspond to the each row of Matrix_clip
"""
# check validity
index = check_index_multilabel(index)
data_matrix = check_matrix(data_matrix)
ins_bound = data_matrix.shape[0]
ele_bound = data_matrix.shape[1]
index_arr = [] # record if a row is already constructed
current_rows = 0 # record how many rows have been constructed
label_indexed = None
for k in index:
# k is a tuple with 2 elements
k_len = len(k)
if k_len != 1 and k_len != 2:
raise ValueError(
"A single index should only have 1 element (example_index, ) to query all _labels or"
"2 elements (example_index, [label_indexes]) to query specific _labels. But found %d in %s" %
(len(k), str(k)))
example_ind = k[0]
assert (example_ind < ins_bound)
if example_ind in index_arr:
ind_row = index_arr.index(example_ind)
else:
index_arr.append(example_ind)
ind_row = -1 # new row
current_rows += 1
if k_len == 1: # all _labels
label_ind = [i for i in range(ele_bound)]
else:
if isinstance(k[1], collections.Iterable):
label_ind = [i for i in k[1] if 0 <= i < ele_bound]
else:
assert (0 <= k[1] < ele_bound)
label_ind = [k[1]]
# construct mat
if ind_row == -1:
tmp = np.zeros((1, ele_bound)) + unknown_element
tmp[0, label_ind] = data_matrix[example_ind, label_ind]
if label_indexed is None:
label_indexed = tmp.copy()
else:
label_indexed = np.append(label_indexed, tmp, axis=0)
else:
label_indexed[ind_row, label_ind] = data_matrix[example_ind, label_ind]
return label_indexed, index_arr
def get_Xy_in_multilabel(index, X, y, unknown_element=0):
"""get data matrix by giving the indexes of known instance-label pairs in multi-label setting.
Note:
Each index should be a tuple, with the first element representing instance index.
e.g.
queried_index = (1, [3,4]) # 1st instance, 3rd,4th labels
queried_index = (1, [3]) # 1st instance, 3rd labels
queried_index = (1, 3)
queried_index = (1, (3))
queried_index = (1, (3,4))
queried_index = (1, ) # query all labels
Parameters
----------
index: {list, np.ndarray, tuple, MultiLabelIndexCollection}
The indexes of known instance-label pairs.
if only one index, a tuple is expected.
Otherwise, it should be a list type with n tuples or MultiLabelIndexCollection object.
X: array-like
array with [n_samples, n_features].
y: array-like
array with [n_samples, n_classes].
unknown_element: object
value to fill up the unknown part of the matrix_clip.
Returns
-------
X_clip: np.ndarray
Data matrix of the retrieved data.
y_clip: np.ndarray
Label matrix of the retrieved data.
ins_index: np.ndarray
Index of each retrieved data.
"""
# check validity
X = check_matrix(X)
if not len(X) == len(y):
raise ValueError("Different length of instances and labels found.")
label_matrix, ins_index = get_labelmatrix_in_multilabel(index, y, unknown_element=unknown_element)
return X[ins_index, :], label_matrix, ins_index
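# Illustrative usage (made-up arrays, not from the original source): retrieve the
# known parts of X and y for example 0 (all labels) and example 2 (label 1 only),
# filling the unknown entries of the label matrix with -1.
# >>> X = np.random.rand(4, 5); y = np.random.randint(2, size=(4, 3))
# >>> X_lab, y_lab, ins = get_Xy_in_multilabel([(0,), (2, [1])], X, y, unknown_element=-1)
# >>> ins
# [0, 2]
# >>> y_lab.shape
# (2, 3)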
# np.unravel_index
# np.ravel_multi_index
```
#### File: alipy/query_strategy/noisy_oracles.py
```python
from __future__ import division
import collections
from abc import ABCMeta, abstractmethod
import copy
import numpy as np
import scipy.stats
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import NearestNeighbors
from .base import BaseNoisyOracleQuery
from .query_labels import QueryInstanceUncertainty
from .query_labels import _get_proba_pred
from ..oracle import Oracles, Oracle
__all__ = ['majority_vote',
'get_query_results',
'get_majority_vote',
'QueryNoisyOraclesCEAL',
'QueryNoisyOraclesIEthresh',
'QueryNoisyOraclesAll',
'QueryNoisyOraclesRandom',
]
def majority_vote(labels, weight=None):
"""Perform majority vote to determine the true label from
multiple noisy oracles.
Parameters
----------
labels: list
A list with length=k, which contains the labels provided by
k noisy oracles.
weight: list, optional (default=None)
The weights of each oracle. It should have the same length with
labels.
Returns
-------
vote_count: int
The number of votes.
vote_result: object
The label of the selected_instance, produced by majority voting
of the selected oracles.
"""
oracle_weight = np.ones(len(labels)) if weight is None else weight
assert len(labels) == len(oracle_weight)
vote_result = collections.Counter(labels)
most_votes = vote_result.most_common(n=1)
return most_votes[0][1], most_votes[0][0]
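# Illustrative example (not from the original source): three oracles, two of which
# report label 1. Note that in the implementation above `weight` is only checked
# for length; the vote itself is unweighted.
# >>> majority_vote([1, 1, 0])
# (2, 1)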
def get_query_results(selected_instance, oracles, names=None):
"""Get the query results from oracles of the selected instance.
Parameters
----------
selected_instance: int
The index of the selected sample. Should be a member of the unlabeled set.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracles object that contains all the
available oracles, or a list of oracles.
Each oracle should be an alipy.oracle.Oracle object.
names: list, optional (default=None)
A list of str which contains the names of oracles to query from.
If not provided, it will query from all oracles.
Each name should be in oracles.names().
Returns
-------
query_labels: list
The queried labels.
query_costs: list
The total cost of query.
"""
costs = []
if isinstance(oracles, list):
oracle_type = 'list'
for oracle in oracles:
assert isinstance(oracle, Oracle)
elif isinstance(oracles, Oracles):
oracle_type = 'oracles'
else:
raise TypeError("The type of parameter oracles must be a list or alipy.oracle.Oracles object.")
labeling_results = []
if oracle_type == 'list':
for i in range(len(oracles)):
lab, co = oracles[i].query_by_index(selected_instance)
labeling_results.append(lab[0])
costs.append(np.sum(co))
else:
results = oracles.query_from_s(selected_instance, oracles_name=names)
labeling_results = [res[0][0] for res in results]
costs = [np.sum(res[1]) for res in results]
return labeling_results, costs
def get_majority_vote(selected_instance, oracles, names=None):
"""Get the majority vote results of the selected instance.
Parameters
----------
selected_instance: int
The index of the selected sample. Should be a member of the unlabeled set.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracles object that contains all the
available oracles, or a list of oracles.
Each oracle should be an alipy.oracle.Oracle object.
names: list, optional (default=None)
A list of str which contains the names of oracles to query from.
If not provided, it will query from all oracles.
Each name should be in oracles.names().
Returns
-------
vote_count: int
The number of votes.
vote_result: object
The label of the selected_instance, produced by majority voting
of the selected oracles.
query_costs: int
The total cost of query.
"""
labeling_results, cost = get_query_results(selected_instance, oracles, names)
majority_vote_result = majority_vote(labeling_results)
return majority_vote_result[0], majority_vote_result[1], np.sum(cost)
class QueryNoisyOraclesCEAL(BaseNoisyOracleQuery):
"""Cost-Effective Active Learning from Diverse Labelers (CEAL) method assumes
that different oracles have different expertise. Even the very noisy oracle
may perform well on some kind of examples. The cost of a labeler is proportional
to its overall labeling quality and it is thus necessary to query from the right oracle
according to the selected instance.
This method will select an instance-labeler pair (x, a), and queries the label of x
from a, where the selection of both the instance and labeler is based on a
evaluation function Q(x, a).
The selection of instance is depend on its uncertainty. The selection of oracle is
depend on the oracle's performance on the nearest neighbors of selected instance.
The cost of each oracle is proportional to its overall labeling quality.
Parameters
----------
X: 2D array, optional (default=None)
Feature matrix of the whole dataset. It is a reference which will not use additional memory.
y: array-like, optional (default=None)
Label matrix of the whole dataset. It is a reference which will not use additional memory.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracles object that contains all the
available oracles, or a list of oracles.
Each oracle should be an alipy.oracle.Oracle object.
initial_labeled_indexes: {list, np.ndarray, IndexCollection}
The indexes of initially labeled samples. Used for initializing the scores of each oracle.
References
----------
[1] <NAME>, <NAME>, <NAME>, <NAME>. 2017.
Cost-Effective Active Learning from Diverse Labelers. In The
Proceedings of the 26th International Joint Conference
on Artificial Intelligence (IJCAI-17), 1879-1885.
"""
def __init__(self, X, y, oracles, initial_labeled_indexes):
super(QueryNoisyOraclesCEAL, self).__init__(X, y, oracles=oracles)
# ytype = type_of_target(self.y)
# if 'multilabel' in ytype:
# warnings.warn("This query strategy does not support multi-label.",
# category=FunctionWarning)
assert (isinstance(initial_labeled_indexes, collections.Iterable))
self._ini_ind = np.asarray(initial_labeled_indexes)
# construct a nearest neighbor object implemented by scikit-learn
self._nntree = NearestNeighbors(metric='euclidean')
self._nntree.fit(self.X[self._ini_ind])
def select(self, label_index, unlabel_index, eval_cost=False, model=None, **kwargs):
"""Query from oracles. Return the index of selected instance and oracle.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
eval_cost: bool, optional (default=False)
To evaluate the cost of oracles or use the cost provided by oracles.
model: object, optional (default=None)
Current classification model, should have the 'predict_proba' method for probabilistic output.
If not provided, LogisticRegression with default parameters implemented by sklearn will be used.
n_neighbors: int, optional (default=10)
How many neighbors of the selected instance will be used
to evaluate the oracles.
Returns
-------
selected_instance: int
The index of selected instance.
selected_oracle: int or str
The index of selected oracle.
If a list is given, the index of oracle will be returned.
If an Oracles object is given, the oracle name will be returned.
"""
if model is None:
model = LogisticRegression(solver='liblinear')
model.fit(self.X[label_index], self.y[label_index])
pred_unlab, _ = _get_proba_pred(self.X[unlabel_index], model)
n_neighbors = min(kwargs.pop('n_neighbors', 10), len(self._ini_ind) - 1)
return self.select_by_prediction_mat(label_index, unlabel_index, pred_unlab,
n_neighbors=n_neighbors, eval_cost=eval_cost)
def select_by_prediction_mat(self, label_index, unlabel_index, predict, **kwargs):
"""Query from oracles. Return the index of selected instance and oracle.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
predict: 2d array, shape [n_samples, n_classes]
The probabilistic prediction matrix for the unlabeled set.
n_neighbors: int, optional (default=10)
How many neighbors of the selected instance will be used
to evaluate the oracles.
eval_cost: bool, optional (default=False)
To evaluate the cost of oracles or use the cost provided by oracles.
Returns
-------
selected_instance: int
The index of selected instance.
selected_oracle: int or str
The index of selected oracle.
If a list is given, the index of oracle will be returned.
If an Oracles object is given, the oracle name will be returned.
"""
n_neighbors = min(kwargs.pop('n_neighbors', 10), len(self._ini_ind)-1)
eval_cost = kwargs.pop('eval_cost', False)
Q_table, oracle_ind_name_dict = self._calc_Q_table(label_index, unlabel_index, self._oracles, predict,
n_neighbors=n_neighbors, eval_cost=eval_cost)
# get the instance-oracle pair
selected_pair = np.unravel_index(np.argmax(Q_table, axis=None), Q_table.shape)
sel_ora = oracle_ind_name_dict[selected_pair[0]]
if not isinstance(sel_ora, list):
sel_ora = [sel_ora]
return [unlabel_index[selected_pair[1]]], sel_ora
def _calc_Q_table(self, label_index, unlabel_index, oracles, pred_unlab, n_neighbors=10, eval_cost=False):
"""Query from oracles. Return the Q table and the oracle name/index of each row of Q_table.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracles object that contains all the
available oracles, or a list of oracles.
Each oracle should be an alipy.oracle.Oracle object.
predict: 2d array, shape [n_samples, n_classes]
The probabilistic prediction matrix for the unlabeled set.
n_neighbors: int, optional (default=10)
How many neighbors of the selected instance will be used
to evaluate the oracles.
eval_cost: bool, optional (default=False)
To evaluate the cost of oracles or use the cost provided by oracles.
Returns
-------
Q_table: 2D array
The Q table.
oracle_ind_name_dict: dict
The oracle name/index of each row of Q_table.
"""
# Check parameter and initialize variables
if self.X is None or self.y is None:
raise Exception('Data matrix is not provided, use select_by_prediction_mat() instead.')
assert (isinstance(unlabel_index, collections.Iterable))
assert (isinstance(label_index, collections.Iterable))
unlabel_index = np.asarray(unlabel_index)
label_index = np.asarray(label_index)
num_of_neighbors = n_neighbors
if len(unlabel_index) <= 1:
return unlabel_index
Q_table = np.zeros((len(oracles), len(unlabel_index))) # row:oracle, col:ins
spv = np.shape(pred_unlab)
# calc least_confident
rx = np.partition(pred_unlab, spv[1] - 1, axis=1)
rx = 1 - rx[:, spv[1] - 1]
for unlab_ind, unlab_ins_ind in enumerate(unlabel_index):
# evaluate oracles for each instance
nn_dist, nn_of_selected_ins = self._nntree.kneighbors(X=self.X[unlab_ins_ind].reshape(1, -1),
n_neighbors=num_of_neighbors,
return_distance=True)
nn_dist = nn_dist[0]
nn_of_selected_ins = nn_of_selected_ins[0]
nn_of_selected_ins = self._ini_ind[nn_of_selected_ins] # map to the original population
oracles_score = []
for ora_ind, ora_name in enumerate(self._oracles_iterset):
# calc q_i(x), expertise of this instance
oracle = oracles[ora_name]
labels, cost = oracle.query_by_index(nn_of_selected_ins)
oracles_score.append(sum([nn_dist[i] * (labels[i] == self.y[nn_of_selected_ins[i]]) for i in
range(num_of_neighbors)]) / num_of_neighbors)
# calc c_i, cost of each labeler
labels, cost = oracle.query_by_index(label_index)
if eval_cost:
oracles_cost = sum([labels[i] == self.y[label_index[i]] for i in range(len(label_index))]) / len(label_index)
else:
oracles_cost = cost[0]
Q_table[ora_ind, unlab_ind] = oracles_score[ora_ind] * rx[unlab_ind] / max(oracles_cost, 0.0001)
return Q_table, self._oracle_ind_name_dict
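# Added note on the scores computed above: each Q table entry is roughly
# Q(x, a) = q_a(x) * (1 - max_prob(x)) / cost_a, where q_a(x) is the oracle's
# (distance-weighted) agreement with the true labels on x's nearest
# initially-labeled neighbors, (1 - max_prob(x)) is the least-confident
# uncertainty of x, and cost_a is either the oracle's stated cost or its
# estimated accuracy when eval_cost is True. select_by_prediction_mat then
# takes the argmax over all (oracle, instance) pairs.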
class QueryNoisyOraclesSelectInstanceUncertainty(BaseNoisyOracleQuery, metaclass=ABCMeta):
"""This class implement select and select_by_prediction_mat by uncertainty."""
def __init__(self, X=None, y=None, oracles=None):
super(QueryNoisyOraclesSelectInstanceUncertainty, self).__init__(X=X, y=y, oracles=oracles)
def select(self, label_index, unlabel_index, model=None, **kwargs):
"""Select an instance and a batch of oracles to label it.
The instance is selected by uncertainty; the oracles are
selected by the difference between their
labeling results and the majority vote results.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
Returns
-------
selected_instance: int
The index of selected instance. Selected by uncertainty.
selected_oracles: list
The selected oracles for querying.
"""
if model is None:
model = LogisticRegression(solver='liblinear')
model.fit(self.X[label_index], self.y[label_index])
pred_unlab, _ = _get_proba_pred(self.X[unlabel_index], model)
return self.select_by_prediction_mat(label_index, unlabel_index, pred_unlab)
def select_by_prediction_mat(self, label_index, unlabel_index, predict):
"""Query from oracles. Return the index of selected instance and oracle.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
predict: 2d array, shape [n_samples, n_classes]
The probabilistic prediction matrix for the unlabeled set.
Returns
-------
selected_instance: int
The index of selected instance. Selected by uncertainty.
selected_oracles: list
The selected oracles for querying.
"""
# Check parameter and initialize variables
assert (isinstance(unlabel_index, collections.Iterable))
assert (isinstance(label_index, collections.Iterable))
unlabel_index = np.asarray(unlabel_index)
label_index = np.asarray(label_index)
if len(unlabel_index) <= 1:
return unlabel_index
# select instance and oracle
unc = QueryInstanceUncertainty(measure='least_confident')
selected_instance = unc.select_by_prediction_mat(unlabel_index=unlabel_index, predict=predict, batch_size=1)[0]
return [selected_instance], self.select_by_given_instance(selected_instance)
@abstractmethod
def select_by_given_instance(self, selected_instance):
pass
class QueryNoisyOraclesIEthresh(QueryNoisyOraclesSelectInstanceUncertainty):
"""IEthresh will select a batch of oracles to label the selected instance.
It will score for each oracle according to the difference between their
labeling results and the majority vote results.
At each iteration, a batch of oracles whose scores are larger than a threshold will be selected.
Oracle with a higher score is more likely to be selected.
Parameters
----------
X: 2D array, optional (default=None)
Feature matrix of the whole dataset. It is a reference which will not use additional memory.
y: array-like, optional (default=None)
Label matrix of the whole dataset. It is a reference which will not use additional memory.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracles object that contains all the
available oracles, or a list of oracles.
Each oracle should be an alipy.oracle.Oracle object.
initial_labeled_indexes: {list, np.ndarray, IndexCollection}
The indexes of initially labeled samples. Used for initializing the scores of each oracle.
epsilon: float, optional (default=0.8)
The value to determine how many oracles will be selected.
S_t = {a|UI(a) >= epsilon * max UI(a)}
References
----------
[1] <NAME>, <NAME>, <NAME>. Efficiently learning the accuracy of labeling
sources for selective sampling.[C] ACM SIGKDD International Conference on
Knowledge Discovery & Data Mining. ACM, 2009.
"""
def __init__(self, X, y, oracles, initial_labeled_indexes, **kwargs):
super(QueryNoisyOraclesIEthresh, self).__init__(X, y, oracles=oracles)
self._ini_ind = np.asarray(initial_labeled_indexes)
# record the labeling history of each oracle
self._oracles_history = dict()
for i in range(len(self._oracles_iterset)):
self._oracles_history[i] = dict()
# record the results of majority vote
self._majority_vote_results = dict()
# calc initial UI(a) for each oracle a
self._UI = np.ones(len(self._oracles_iterset))
self.epsilon = kwargs.pop('epsilon', 0.8)
def _calc_uia(self, oracle_history, majority_vote_result, alpha=0.05):
"""Calculate the UI(a) by providing the labeling history and the majority vote results.
Parameters
----------
oracle_history: dict
The labeling history of an oracle. The key is the index of instance, the value is the
label given by the oracle.
majority_vote_result: dict
The results of majority vote of instances. The key is the index of instance,
the value is the label given by the oracle.
alpha: float, optional (default=0.05)
Used for calculating the critical value for the Student’s t-distribution with n−1
degrees of freedom at the alpha/2 confidence level.
Returns
-------
uia: float
The UI(a) value.
"""
n = len(self._oracles_iterset)
t_crit_val = scipy.stats.t.isf([alpha / 2], n - 1)[0]
reward_arr = []
for ind in oracle_history.keys():
if oracle_history[ind] == majority_vote_result[ind]:
reward_arr.append(1)
else:
reward_arr.append(0)
mean_a = np.mean(reward_arr)
std_a = np.std(reward_arr)
uia = mean_a + t_crit_val * std_a / np.sqrt(n)
return uia
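# Added worked note: UI(a) is the upper bound of a confidence interval on the
# oracle's agreement rate with the majority vote. For a reward history of
# [1, 1, 0, 1] (mean 0.75, std ~0.433) and n oracles, UI(a) = 0.75 +
# t_{alpha/2, n-1} * 0.433 / sqrt(n); oracles with UI(a) >= epsilon * max UI
# are re-queried in select_by_given_instance below.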
def select_by_given_instance(self, selected_instance):
"""Select oracle to query by providing the index of selected instance.
Parameters
----------
selected_instance: int
The index of the selected sample. Should be a member of the unlabeled set.
Returns
-------
selected_oracles: list
The selected oracles for querying.
"""
selected_oracles = np.nonzero(self._UI >= self.epsilon * np.max(self._UI))
selected_oracles = selected_oracles[0]
# update UI(a) for each selected oracle
labeling_results = []
for i in selected_oracles:
lab, _ = self._oracles[self._oracle_ind_name_dict[i]].query_by_index(selected_instance)
labeling_results.append(lab[0])
self._oracles_history[i][selected_instance] = copy.copy(lab[0])
_, majority_vote_result = majority_vote(labeling_results)
reward_arr = np.zeros(len(selected_oracles))
same_ind = np.nonzero(labeling_results == majority_vote_result)[0]
reward_arr[same_ind] = 1
self._majority_vote_results[selected_instance] = majority_vote_result
for i in selected_oracles:
self._UI[i] = self._calc_uia(self._oracles_history[i], self._majority_vote_results)
# return results
return [self._oracle_ind_name_dict[i] for i in selected_oracles]
class QueryNoisyOraclesAll(QueryNoisyOraclesSelectInstanceUncertainty):
"""This strategy will select instance by uncertainty and query from all
oracles and return the majority vote result.
Parameters
----------
X: 2D array, optional (default=None)
Feature matrix of the whole dataset. It is a reference which will not use additional memory.
y: array-like, optional (default=None)
Label matrix of the whole dataset. It is a reference which will not use additional memory.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracles object that contains all the
available oracles, or a list of oracles.
Each oracle should be an alipy.oracle.Oracle object.
"""
def __init__(self, oracles, X=None, y=None):
super(QueryNoisyOraclesAll, self).__init__(X=X, y=y, oracles=oracles)
def select_by_given_instance(self, selected_instance):
"""Select oracle to query by providing the index of selected instance.
Parameters
----------
selected_instance: int
The index of the selected sample. Should be a member of the unlabeled set.
Returns
-------
oracles_ind: list
The indexes of selected oracles.
"""
return list(self._oracle_ind_name_dict.values())
class QueryNoisyOraclesRandom(QueryNoisyOraclesSelectInstanceUncertainty):
"""Select a random oracle to query."""
def select_by_given_instance(self, selected_instance):
"""Select oracle to query by providing the index of selected instance.
Parameters
----------
selected_instance: int
The index of the selected sample. Should be a member of the unlabeled set.
Returns
-------
oracles_ind: list
The indexes of selected oracles.
"""
return [self._oracle_ind_name_dict[np.random.randint(0, len(self._oracles), 1)[0]]]
```
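As a rough end-to-end sketch of how these strategies might be wired together (not taken from the repository; the Oracle constructor arguments and the 0/1 toy labels are assumptions based on how the class is used above):
```python
import numpy as np
from alipy.oracle import Oracle
from alipy.query_strategy.noisy_oracles import QueryNoisyOraclesCEAL

X = np.random.rand(50, 4)
y = (np.random.rand(50) > 0.5).astype(int)
noisy_y = y.copy()
noisy_y[:10] = 1 - noisy_y[:10]          # the second oracle is wrong on 10 instances

# A plain list of Oracle objects is accepted, as the type checks above show.
oracles = [Oracle(labels=y), Oracle(labels=noisy_y)]
initial_labeled = list(range(10))
ceal = QueryNoisyOraclesCEAL(X, y, oracles=oracles,
                             initial_labeled_indexes=initial_labeled)
# Both return values are lists: the selected instance index and the selected oracle.
selected_instance, selected_oracle = ceal.select(label_index=initial_labeled,
                                                 unlabel_index=list(range(10, 50)))
```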
#### File: examples/AL_settings/cost_sensitive.py
```python
import numpy as np
import copy
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from alipy import ToolBox
from alipy.index.multi_label_tools import get_Xy_in_multilabel
from alipy.query_strategy.cost_sensitive import QueryCostSensitiveHALC, QueryCostSensitivePerformance, QueryCostSensitiveRandom
from alipy.query_strategy.cost_sensitive import hierarchical_multilabel_mark
# the num of classes of the classification problem
NUM_CLASS = 5
NUM_SAMPLES = 2000
X, y = make_multilabel_classification(n_samples=NUM_SAMPLES, n_features=20, n_classes=NUM_CLASS,
n_labels=3, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None)
y[y == 0] = -1
# the cost of each class
cost = [1, 3, 3, 7, 10]
# if node_i is the parent of node_j , then label_tree(i,j)=1 else 0
label_tree = np.zeros((5, 5), dtype=int)
label_tree[0, 1] = 1
label_tree[0, 2] = 1
label_tree[1, 3] = 1
label_tree[2, 4] = 1
alibox = ToolBox(X=X, y=y, query_type='PartLabels')
# Split data
alibox.split_AL(test_ratio=0.3, initial_label_rate=0.1, split_count=10, all_class=True)
# train one model for each label on dataset
# the base model using SVC in sklearn
models = []
for __ in range(NUM_CLASS):
models.append(SVC(decision_function_shape='ovr', gamma='auto'))
# The budget of query
budget = 40
# The cost budget is 500
stopping_criterion = alibox.get_stopping_criterion('cost_limit', 500)
performance_result = []
halc_result = []
random_result = []
def main_loop(alibox, strategy, round):
# Get the data split of one fold experiment
train_idx, test_idx, label_ind, unlab_ind = alibox.get_split(round)
# Get intermediate results saver for one fold experiment
saver = alibox.get_stateio(round)
# initializing the models
train_target = label_ind.get_matrix_mask((NUM_SAMPLES, NUM_CLASS), sparse=False)
for j in np.arange(NUM_CLASS):
j_target = train_target[:, j]
i_samples = np.where(j_target!=0)[0]
m = models[j]
m.fit(X[i_samples, :], y[i_samples, j])
while not stopping_criterion.is_stop():
# Select a subset of Uind according to the query strategy
select_ind = strategy.select(label_ind, unlab_ind, cost=cost, budget=budget, models=models)
select_ind = hierarchical_multilabel_mark(select_ind, label_ind, label_tree, y)
label_ind.update(select_ind)
unlab_ind.difference_update(select_ind)
# Update model and calc performance according to the model you are using
train_target = label_ind.get_matrix_mask((NUM_SAMPLES, NUM_CLASS), sparse=False)
for j in np.arange(NUM_CLASS):
j_target = train_target[:, j]
i_samples = np.where(j_target!=0)[0]
m = models[j]
m.fit(X[i_samples, :], y[i_samples, j])
pred = None
for j in np.arange(NUM_CLASS):
model = models[j]
pred_j = model.predict(X[test_idx])
if pred is None:
pred = pred_j.reshape((len(test_idx), 1))
else:
pred = np.hstack((pred, pred_j.reshape((len(test_idx), 1))))
performance = alibox.calc_performance_metric(y_true=y[test_idx], y_pred=pred, performance_metric='hamming_loss')
# Save intermediate results to file
st = alibox.State(select_index=select_ind.index, performance=performance, cost=budget)
saver.add_state(st)
# Passing the current progress to stopping criterion object
stopping_criterion.update_information(saver)
# Reset the progress in stopping criterion object
stopping_criterion.reset()
return saver
for round in range(5):
train_idx, test_idx, label_ind, unlab_ind = alibox.get_split(round)
# Use pre-defined strategy
random = QueryCostSensitiveRandom(X,y)
perf = QueryCostSensitivePerformance(X, y)
halc = QueryCostSensitiveHALC(X, y,label_tree=label_tree)
random_result.append(copy.deepcopy(main_loop(alibox, random, round)))
performance_result.append(copy.deepcopy(main_loop(alibox, perf, round)))
halc_result.append(copy.deepcopy(main_loop(alibox, halc, round)))
analyser = alibox.get_experiment_analyser(x_axis='cost')
analyser.add_method(method_name='random', method_results=random_result)
analyser.add_method(method_name='performance', method_results=performance_result)
analyser.add_method(method_name='HALC', method_results=halc_result)
print(analyser)
analyser.plot_learning_curves(title='Example of cost-sensitive', std_area=False)
```
#### File: examples/tools/aceThreading_usage.py
```python
from sklearn.datasets import load_iris
from alipy.data_manipulate import split
from alipy.utils.multi_thread import aceThreading
# Get the data
X, y = load_iris(return_X_y=True)
# Split the data
train, test, lab, unlab = split(X=X, y=y, test_ratio=0.3, initial_label_rate=0.05,
split_count=10)
# init the aceThreading
acethread = aceThreading(examples=X, labels=y,
train_idx=train, test_idx=test,
label_index=lab, unlabel_index=unlab,
max_thread=None, refresh_interval=1, saving_path='.')
# You can also use a ToolBox object to initialize an aceThreading() object without passing redundant parameters.
# # initializing a ToolBox object first here.
# acethread = alibox.get_ace_threading(target_function=target_func)
# acethread.start_all_threads()
from sklearn import linear_model
from alipy.experiment import State
from alipy.query_strategy import QueryInstanceQBC
# define the custom function
# Specifically, the parameters of the custom function must be:
# (round, train_id, test_id, Ucollection, Lcollection, saver, examples, labels, global_parameters)
def target_func(round, train_id, test_id, Lcollection, Ucollection, saver, examples, labels, global_parameters):
# your query strategy
qs = QueryInstanceQBC(examples, labels, disagreement='vote_entropy')
# your model
reg = linear_model.LogisticRegression(solver='liblinear')
reg.fit(X=examples[Lcollection.index, :], y=labels[Lcollection.index])
# stopping criterion
while len(Ucollection) > 30:
select_index = qs.select(Lcollection, Ucollection, reg, n_jobs=1)
Ucollection.difference_update(select_index)
Lcollection.update(select_index)
# update model
reg.fit(X=examples[Lcollection.index, :], y=labels[Lcollection.index])
pred = reg.predict(examples[test_id, :])
accuracy = sum(pred == labels[test_id]) / len(test_id)
# save intermediate results
st = State(select_index=select_index, performance=accuracy)
saver.add_state(st)
saver.save()
# set the target function
acethread.set_target_function(target_func)
# start all the threads
acethread.start_all_threads(global_parameters=None)
# get the result,return list of stateIO
stateIO_list = acethread.get_results()
# save the state of multi_thread to the saving_path in pkl form
acethread.save()
# or Recover the multi_thread_state from path.
recover_acethread = aceThreading.recover("./multi_thread_state.pkl")
```
#### File: ALiPy/test/test_repository.py
```python
from __future__ import division
import numpy as np
import pytest
from alipy.oracle.knowledge_repository import ElementRepository, MatrixRepository
from alipy.utils.ace_warnings import *
# initialize
X = np.array(range(100)) # 100 instances in total with 2 features
X = np.tile(X, (2, 1))
X = X.T
# print(X)
y = np.array([0] * 50 + [1] * 50) # 0 for first 50, 1 for the others.
# print(y)
label_ind = [11, 32, 0, 6, 74]
ele_exa = ElementRepository(labels=y[label_ind], indexes=label_ind, examples=X[label_ind])
ele = ElementRepository(labels=y[label_ind], indexes=label_ind)
def test_ele_raise_no_example():
with pytest.warns(ValidityWarning, match=r'.*is not in the repository.*'):
ele.discard(index=7)
with pytest.raises(ValueError, match=r'Different length of parameters found.*'):
ele.update_query(labels=[1], indexes=[10, 9])
with pytest.warns(ValidityWarning, match=r'.*is not in the repository.*'):
ele.retrieve_by_indexes(indexes=7)
with pytest.raises(Exception, match=r'This repository do not have the instance information.*'):
ele.retrieve_by_examples(examples=[4,4])
def test_ele_raise_example():
with pytest.raises(Exception, match=r'This repository has the instance information.*'):
ele_exa.update_query(labels=[1], indexes=[9])
with pytest.warns(ValidityWarning, match=r'.*is not in the repository.*'):
ele_exa.discard(index=7)
with pytest.raises(ValueError, match=r'Different length of parameters found.*'):
ele_exa.update_query(labels=[1], indexes=[10, 9])
with pytest.warns(ValidityWarning, match=r'.*is not in the repository.*'):
ele_exa.retrieve_by_indexes(indexes=7)
with pytest.warns(ValidityWarning, match=r'Example for retrieving is not in the repository.*'):
ele_exa.retrieve_by_examples(examples=[4,4])
def test_ele_basic_no_example():
ele.add(select_index=1, label=0)
assert (1 in ele)
ele.update_query(labels=[1], indexes=[60])
ele.update_query(labels=[1], indexes=61)
assert (60 in ele)
assert (61 in ele)
ele.update_query(labels=[1, 1], indexes=[63, 64])
assert (63 in ele)
assert (64 in ele)
ele.discard(index=61)
assert (61 not in ele)
_, a = ele.retrieve_by_indexes(60)
assert (a == 1)
_, b = ele.retrieve_by_indexes([63, 64])
assert (np.all(b == [1, 1]))
print(ele.get_training_data())
print(ele.full_history())
"""
(array([], dtype=float64), array([0, 0, 0, 0, 1, 0, 1, 1, 1]), array([11, 32, 0, 6, 74, 1, 60, 63, 64]))
+----------------+----------------+----------------------+---------------------+
| 0 | 1 | 2 | in all |
+----------------+----------------+----------------------+---------------------+
| query_index:60 | query_index:61 | query_index:[63, 64] | number_of_queries:3 |
| response:1 | response:1 | response:[1, 1] | cost:0 |
| cost:None | cost:None | cost:None | |
+----------------+----------------+----------------------+---------------------+
"""
def test_ele_basic_example():
ele_exa.add(select_index=1, label=0, example=X[1])
assert 1 in ele_exa
exa,lab = ele_exa.retrieve_by_indexes(1)
assert np.all(exa == [1, 1])
assert lab == [0]
exa, lab = ele_exa.retrieve_by_examples(examples=[1,1])
assert np.all(exa == [1, 1])
assert lab == [0]
#################################
# Test MatrixRepository
#################################
mr = MatrixRepository(labels=y[label_ind], indexes=label_ind, examples=X[label_ind])
mr2 = MatrixRepository(labels=y[label_ind], indexes=label_ind, examples=X[label_ind])
def test_mat_raise_example():
with pytest.raises(ValueError, match=r'Different length of the given parameters found.*'):
mr3 = MatrixRepository(labels=y[label_ind], indexes=label_ind, examples=X[label_ind[0:3]])
with pytest.raises(Exception, match=r'This repository has the instance information.*'):
mr2.update_query(labels=[1], indexes=[9])
with pytest.warns(ValidityWarning, match=r'.*is not in the repository.*'):
mr2.discard(index=7)
with pytest.raises(ValueError, match=r'Different length of parameters found.*'):
mr2.update_query(labels=[1], indexes=[10, 9])
with pytest.warns(ValidityWarning, match=r'.*is not in the repository.*'):
mr2.retrieve_by_indexes(indexes=7)
with pytest.warns(ValidityWarning, match=r'.*or retrieving is not in the repository.*'):
mr2.retrieve_by_examples(examples=[4,4])
def test_mat_basic_example():
mr.add(select_index=1, label=0, example=[1, 1])
assert (1 in mr)
mr.update_query(labels=[1], indexes=[60], examples=[[60,60]])
mr.update_query(labels=[1], indexes=61, examples=[[61, 61]])
assert (60 in mr)
assert (61 in mr)
mr.update_query(labels=[1, 1], indexes=[63, 64], examples=X[[63,64]])
assert (63 in mr)
assert (64 in mr)
mr.discard(index=61)
assert (61 not in mr)
_, a = mr.retrieve_by_indexes(60)
assert (a == 1)
_, b = mr.retrieve_by_indexes([63, 64])
assert (np.all(b == [1, 1]))
print(mr.get_training_data())
print(mr.full_history())
exa,lab = mr.retrieve_by_indexes(1)
assert np.all(exa == [1, 1])
assert lab == [0]
exa, lab = mr.retrieve_by_examples(examples=[1,1])
assert np.all(exa == [1, 1])
assert lab == [0]
cost1 = ElementRepository(labels=y[label_ind], indexes=label_ind, examples=X[label_ind])
cost2 = MatrixRepository(labels=y[label_ind], indexes=label_ind, examples=X[label_ind])
def test_cost():
cost1.add(select_index=2, label=0, example=[2,2], cost=1)
cost2.add(select_index=2, label=0, example=[2,2], cost=1)
cost1.discard(index=2)
cost1.update_query(labels=[1, 1], indexes=[63, 64], examples=X[[63, 64]], cost=[1, 1])
cost2.update_query(labels=[1, 1], indexes=[63, 64], examples=X[[63,64]], cost=[1,1])
# if __name__ == '__main__':
# test_mat_basic_example()
``` |
{
"source": "jlsche/data.taipei.tagConceptionize",
"score": 3
} |
#### File: jlsche/data.taipei.tagConceptionize/dataset_getter.py
```python
import json
import codecs
import pandas as pd
import sys
from collections import defaultdict
df = pd.read_csv('./input.csv', encoding='big5')
df = df.drop_duplicates(subset='fieldDescription', take_last=True)
df = df[(df['category']==u'求學及進修') | (df['category']==u'交通及通訊') | (df['category']==u'生活安全及品質') | (df['category']==u'就醫') | (df['category']==u'休閒旅遊')]
def matchAttri(tag_defs):
#counter = 0
for idx, row in df.iterrows(): # iterate all datasets
try:
field_description = row['fieldDescription'].encode('big5')
field_description = field_description.replace(', ', '`')
field_description = field_description.replace(',', '`')
field_description = field_description.replace('\r', '')
field_description = field_description.replace('\n', '')
field_description = field_description.replace(',', '`')
field_description = field_description.replace('、 ', '`')
field_description = field_description.replace('、', '`')
field_description = field_description.replace(' ', '`')
field_description = field_description.replace(' ', '`')
except:
print 'error when encoding row to big5'
split_result = field_description.split('`')
for field in split_result: # iterate all attribute in a dataset
for tag in tag_defs:
# tag is now unicode, encode('utf8')
# field is now string encode('utf8') or decode('big5')
try:
if tag in field.decode('big5'):
#counter += 1
f.write(row['title'])
f.write(',')
f.write(row['data_link'])
f.write(',')
f.write(field.decode('big5'))
f.write('\n')
except:
print 'error'
#print counter, 'attributes matched'
##########################################
geo_def_list = [u'緯度',u'經度',u'經緯度',u'地址',u'區域',u'座標',u'位置','lat','latitude','Lat','Latitude','lng','longitude','Lng','Longitude']
price_def_list = [u'價錢',u'權利金',u'利息',u'費用',u'收入']
##########################################
f_r = codecs.open('./wish_list', 'r', encoding='utf8')
input_str = f_r.read()
wish_list = [ele for ele in input_str.split(',')]
for ele in wish_list:
print ele.encode('utf8'),
print '\n'
#'''
f = codecs.open('./tagMatchingResult.csv', 'w', encoding='utf8')
f.write('dataset_title,dataset_link,attri_matched\n')
matchAttri(wish_list)
f.close()
temp_df = pd.read_csv('./tagMatchingResult.csv')
temp_df = temp_df.drop_duplicates()
temp_df.to_csv('./tagMatchingResult.csv',index=False)
#'''
``` |
{
"source": "JLSchoolWork/12SDD-FinanceHelper",
"score": 4
} |
#### File: 12SDD-FinanceHelper/Python/compound.py
```python
import cmd
import math
import random
class InterestCalculator(cmd.Cmd):
# MARK Global Variables
validArgFlags = ["i", "p", "r", "n", "c"]
# MARK Convenience Functions
def arrayContains(self, array, item):
"""
Check to see if an item exists in an array.
:param array: Any list of values
:param item: A value to check if exists in array.
:return: Boolean, if exists: True, else False
"""
try:
array.index(item)
return True
except:
return False
# MARK Print Functions
def printNumberOfPeriods(self, argDict):
"""
Reads the number of periods from the dictionary and prints it.
:param argDict: {"flag":value}
"""
print "Number of Periods: " + str(argDict["n"])
def printInterestRate(self, argDict):
"""
Reads the interest rate from the dictionary and prints it.
:param argDict: {"flag":value}
"""
print "Interest Rate: " + str(argDict["r"]) + "%"
def printAmountLoaned(self, argDict):
"""
Reads the amount loaned (principle) from the dictionary and prints it.
:param argDict: {"flag":value}
"""
print "Amount Loaned: $" + str(argDict["p"])
def printAmountOwed(self, argDict):
"""
Reads the amount owed from the dictionary and prints it.
:param argDict: {"flag":value}
"""
print "Amount Owed: $" + str(round(argDict["i"], 2))
def printAllCompound(self, argDict):
"""
Calls all of the print functions to print all 4 pro-numerals.
This is just a convenience method.
:param argDict: {"flag":value}
"""
self.printAmountLoaned(argDict)
self.printInterestRate(argDict)
self.printNumberOfPeriods(argDict)
self.printAmountOwed(argDict)
def printLine(self):
"""
Prints a line with 50 -s.
Used for visually separating lines of prints.
"""
line = ""
for _ in range(0, 50):
line += "-"
print line
# MARK Calculations
def getRate(self, argDict):
"""
Corrects the % inputted by the user to a number usable by the formulas.
:rtype: float
:param argDict: {"flag":value}
:return: The rate to be used in formulas.
"""
return (argDict["r"] / 100) + 1
def calculateInterestRate(self, argDict):
"""
Calculates the interest rate by reading the other 3 values from the dictionary.
:rtype: float
:param argDict: {"flag":value}
:return: The interest rate as a float
"""
amountBorrowed = argDict["p"]
amountOwed = argDict["i"]
numPeriods = math.floor(argDict["n"])
return ((math.e ** (math.log(amountOwed / amountBorrowed) / numPeriods)) - 1) * 100
def calculateNumberOfPeriods(self, argDict):
"""
Calculates the interest rate by reading the other 3 values from the dictionary.
:rtype: float
:param argDict: {"flag":value}
:return: The interest rate as a float
"""
amountBorrowed = argDict["p"]
rate = self.getRate(argDict)
amountOwed = argDict["i"]
return math.ceil(math.log(amountOwed / amountBorrowed) / math.log(
rate)) # Round up to the nearest whole as cannot have half periods
def calculateAmountBorrowed(self, argDict):
"""
Calculates the interest rate by reading the other 3 values from the dictionary.
:rtype: float
:param argDict: {"flag":value}
:return: The interest rate as a float
"""
amountOwed = argDict["i"]
rate = self.getRate(argDict)
numPeriods = math.floor(argDict["n"])
return amountOwed * (rate ** -numPeriods)
def calculateAmountOwed(self, argDict):
"""
Calculates the interest rate by reading the other 3 values from the dictionary.
:rtype: float
:param argDict: {"flag":value}
:return: The interest rate as a float
"""
amountBorrowed = argDict["p"]
rate = self.getRate(argDict)
numPeriods = math.floor(argDict["n"])
return amountBorrowed * (rate ** numPeriods)
# MARK Parsing Input
def adjustDateFormat(self, value, currentFormat, targetFormat):
"""
Converts a given time unit to another one, and rounds down. i.e. 7 days = 1 week
:rtype: float
:param value: The number of units to be converted to another unit.
:param currentFormat: The unit of the value being passed in. i.e. D, W, M
:param targetFormat: The date unit which we want the value to be changed to. i.e. D, W, M
:return: A floored adjusted date, ie 8 days -> 1 week, as compounding only occurs every full number.
"""
adjustmentValues = {
"s": 60.0,
"m": 60.0,
"h": 24.0,
"D": 7.0,
"W": 2.0,
"F": 30.0 / 14.0,
"M": 3.0,
"Q": 4.0,
"Y": 1.0,
}
keys = "<KEY>".split(" ")
currentIndex = keys.index(currentFormat)
targetIndex = keys.index(targetFormat)
adjustedValue = float(value)
if currentIndex < targetIndex:
for i in range(currentIndex, targetIndex):
adjustedValue /= adjustmentValues[keys[i]]
elif currentIndex > targetIndex:
for i in range(currentIndex, targetIndex, -1):
adjustedValue *= adjustmentValues[keys[i - 1]]
return math.floor(adjustedValue)
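# Added worked note: adjustDateFormat(21, "D", "W") walks the keys list from
# "D" up to "W", dividing by 7.0 and flooring, giving 3.0 weeks; converting
# the other way multiplies, so adjustDateFormat(2, "W", "D") gives 14.0 days.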
def argDictFromInput(self, input):
"""
Parses the argument string from the command to a dictionary and also checks for any abnormalities.
Also converts the time units if need be.
:rtype: Dictionary
:param input: The string argument from the command.
:return: {"flag":value}
"""
args = input.replace(" ", "").split("-")
# A coathanger fix to stitch back together arguments that have a hyphen which is meant to be a negative sign
for i in range(0, len(args)):
try:
int(args[i][0])
args[i - 1] = args[i - 1] + "-" + args[i]
del args[i]
except:
pass
dict = {}
shouldAdjust = None
adjustTo = None
for arg in args:
if arg != "":
try:
if arg[0] == "n":
try:
dict[arg[0]] = float(arg[1:])
except:
dict[arg[0]] = float(arg[1:-1])
shouldAdjust = arg[-1:]
elif arg[0] == "c":
adjustTo = arg[1:]
elif self.arrayContains(self.validArgFlags, arg[0]):
dict[arg[0]] = float(arg[1:])
else:
print "Invalid Argument Flag: " + arg[0]
return None
except:
print arg[1:] + " is not a valid number."
return None
# If the number of arguments is not right
if len(dict) != 3:
print "Invalid amount of arguments."
return None
# Check for any negative argument values.
for key, value in dict.iteritems():
if key != "c":
if value < 0:
print "Cannot have value for argument " + key + " be less than 0."
return None
# If amount owed is less that principle
if self.isOwedgtPrinciple(dict):
print "Principle cannot be greater than amount owed."
return None
# Adjusting the time format if required
if shouldAdjust != None and adjustTo != None:
try:
dict["n"] = self.adjustDateFormat(dict["n"], shouldAdjust, adjustTo)
except:
print "Invalid time flag, refer to 'help' for all of the valid time flags."
return None
elif shouldAdjust != None and adjustTo == None:
pass # No need to adjust, assuming the user was being verbose
elif shouldAdjust == None and adjustTo != None:
print "Period time format not specified. Add either a d,m,q,y after the number to specify."
return None
return dict
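# Added parsing note: a command such as "compound -p 1000 -r 5 -n 24M -c Y"
# arrives here as "-p 1000 -r 5 -n 24M -c Y" and is parsed into
# {"p": 1000.0, "r": 5.0, "n": 24.0} with shouldAdjust="M" and adjustTo="Y";
# adjustDateFormat then converts the 24 months into 2.0 yearly periods.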
def isOwedgtPrinciple(self, argDict):
"""
Returns True if the principle is greater than the amount owed; moves the somewhat ugly try/except block out of argDictFromInput.
:param argDict: {"flag":value}
:return: Boolean
"""
try:
return argDict["p"] > argDict["i"]
except:
return False
# MARK Commands
def do_compound(self, line):
"""
Called when the command 'compound' is entered into the terminal.
Calls other functions to parse the input, then finds which parameter of the 4 is missing and calculates it.
Prints all the parameters and the calculated value.
:param line: The argument string following the command.
"""
argDict = self.argDictFromInput(line)
if argDict == None:
print "Invalid Input"
return
if not argDict.has_key("p"):
print "Calculating Principle..."
argDict["p"] = self.calculateAmountBorrowed(argDict)
if not argDict.has_key("r"):
print "Calculating Rate..."
argDict["r"] = self.calculateInterestRate(argDict)
if not argDict.has_key("n"):
print "Calculating Number of Periods..."
argDict["n"] = self.calculateNumberOfPeriods(argDict)
if not argDict.has_key("i"):
print "Caclulating Amount Owed..."
argDict["i"] = self.calculateAmountOwed(argDict)
self.printAllCompound(argDict)
def do_test(self, arg):
"""
Called when the command 'test' is typed into the terminal.
It will generate a random set of numbers, run all of the calculations on them, and print the results.
:param arg: The string of arguments after the command. Does not do anything in this function.
"""
rate = float(random.randrange(1, 100, 1)) / 10
numPeriods = random.randrange(1, 40, 1)
principle = float(random.randrange(1000, 100000, 1)) / 100
argDict = {
"r": rate,
"n": numPeriods,
"p": principle
}
owed = self.calculateAmountOwed(argDict)
print "Starting Test...."
self.printLine()
print "Preset Rate: " + str(rate)
print "Preset Periods: " + str(numPeriods)
print "Preset Principle: " + str(principle)
print "Preset Owed: " + str(round(owed, 2))
command = "p " + str(principle) + " -r " + str(rate) + " -n " + str(numPeriods) + " -i " + str(owed)
for i in range(0, 4):
parts = command.split("-")
parts.remove(parts[i])
newCommand = ""
for part in parts:
newCommand += "-" + part
self.printLine()
print "compound " + newCommand
self.do_compound(newCommand)
self.printLine()
print "Now check to see if all of the calculated numbers match or are extremely close to the original."
def do_help(self, arg):
"""
Called when the command 'help' is typed into the terminal.
:param arg: The string of arguments after the command. Does not do anything in this function.
"""
print "compound -i <float> -p <float> -r <float> -n <float><s,m,h,D,W,F,M,Q,Y> -f <s,m,h,D,W,F,M,Q,Y>"
print "-i : How much owed at the end."
print "-p : The principle amount loaned."
print "-r : The interest rate, in %."
print "-n : The amount of time."
print "-c : How often the interest is compounded"
print " s : Seconds"
print " m : Minutes"
print " h : Hours"
print " D : Days"
print " W : Weeks"
print " F : Fortnights"
print " M : Months"
print " Q : Quarters"
print " Y : Years"
print "Enter 3 of the first 4 flags to find the value of the missing one. -f is optional, if it is not used, this will use the amount of time as the number of periods."
print "Type quit to exit the program."
def do_quit(self, args):
"""
Register the command 'quit' so that it can terminate the command line process.
:param args: The string of arguments after the command. Does not do anything in this function.
:return: True to stop the command line app
"""
return True
def emptyline(self):
"""
Overriden to stop the default action of repeating the previous command if there is an empty line.
"""
pass
def do_EOF(self, line):
"""
When there is an end of line character, kill the current command line process.
:param line: The string of arguments after the command. Does not do anything in this function.
:return: True
"""
return True
if __name__ == '__main__':
print "Type 'help' to show the list of flags and how to use this command line tool."
InterestCalculator().cmdloop()
```
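A small standalone check (not part of the original file) of the compound-interest identities that the calculator above rearranges, namely A = P(1 + r)^n and its inverses for P, n and r:
```python
import math

P, r, n = 1000.0, 0.05, 10
A = P * (1 + r) ** n                                          # amount owed, ~1628.89
assert abs(P - A * (1 + r) ** -n) < 1e-9                      # principle from A
assert abs(n - math.log(A / P) / math.log(1 + r)) < 1e-9      # periods from A and P
assert abs(r - (math.e ** (math.log(A / P) / n) - 1)) < 1e-9  # rate from A and P
```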
#### File: Python/tax/ClothingSection.py
```python
from TkHelper import *
from Section import *
from GridLocation import *
class ClothingDeductionSection(Section):
def onChangeDoOwnLaundry(self):
self.doesLaundryAtHome = not self.doesLaundryAtHome
self.toggleField(self.doesLaundryAtHome, self.workLaundry, self.workLaundryLabel)
self.toggleField(self.doesLaundryAtHome, self.combinedLaundry, self.combinedLaundryLabel)
def onChangePaidForLaundry(self):
self.doesLaundryOutside = not self.doesLaundryOutside
self.toggleField(self.doesLaundryOutside, self.totalLaundryExpenses, self.totalLaundryExpensesLabel)
def __init__(self, controller):
super(self.__class__, self).__init__(controller, "Clothing Deductions")
helper = TkHelper()
# Create the UI Elements
helper.createCheckBox(self, "Do you do the laundry yourself?", True, GridLocation(0, 1),
self.onChangeDoOwnLaundry)
(self.workLaundryLabel, self.workLaundry) = helper.createNumericalField(self, "Number of work laundry only runs:",
(0, 999), GridLocation(0, 2), "Home work laundry")
(self.combinedLaundryLabel, self.combinedLaundry) = helper.createNumericalField(self,
"Number of mixed (work & personal) laundry runs:",
(0, 999), GridLocation(0, 3),
"Home combined laundry")
helper.createCheckBox(self, "Have you paid for any work related laundry?", False, GridLocation(0, 4),
self.onChangePaidForLaundry)
(self.totalLaundryExpensesLabel, self.totalLaundryExpenses) = helper.createNumericalField(self,
"Total external laundry expenses:",
(0, 9999999), GridLocation(0, 5), "External laundry expenses")
helper.createListBox(self, "Did you buy any of the following?", [
"Compulsory Work Uniform",
"Non-compulsory Work Uniform",
"Occupation Specific Clothing",
"Protective Clothing"
], GridLocation(0, 6))
# Apply any rules such as requiring ints or floats and also if fields are required or hidden
self.doesLaundryAtHome = True
self.doesLaundryOutside = False
self.toggleField(self.doesLaundryOutside, self.totalLaundryExpenses, self.totalLaundryExpensesLabel)
self.requiredInt.append(self.workLaundry)
self.requiredInt.append(self.combinedLaundry)
self.requiredFloat.append(self.totalLaundryExpenses)
```
#### File: Python/tax/CompletionSection.py
```python
from Section import *
from TkHelper import *
class CompletedSection(Section):
def calculateTaxable(self, income):
if income <= 18200:
return 0
if 18201 <= income <= 37000:
return (income - 18200) * 0.19
if 37001 <= income <= 80000:
return ((income - 37000) * 0.325) + 3572
if 80001 <= income <= 180000:
return ((income - 80000) * 0.37) + 17547
if income >= 180001:
return ((income - 180000) * 0.47) + 54547
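# Added worked note: a taxable income of $90,000 falls in the
# 80,001-180,000 bracket above, so the tax payable is
# (90000 - 80000) * 0.37 + 17547 = $21,247.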
def doPersonal(self):
self.statements.append("First Name: " + self.allData["fname"])
self.statements.append("Last Name: " + self.allData["lname"])
self.statements.append("Phone Number: " + self.allData["phone"])
self.statements.append("Bank Account Number: " + self.allData["bank"])
self.statements.append("Medicare Number: " + self.allData["medi"])
def doAddress(self):
self.statements.append("Street Address 1: " + self.allData["str1"])
self.statements.append("Street Address 2: " + self.allData["str2"])
self.statements.append("Suburb: " + self.allData["Suburb"])
self.statements.append("Post Code: " + self.allData["post"])
self.statements.append("State: " + self.allData["State"])
def doTransit(self):
try:
kms = int(self.allData["How many kms total?"])
self.totalDeductible += kms * 0.66
del self.allData["How many kms total?"]
self.statements.append("Total deductible from driving for work: $" + str(kms * 0.66))
except:
pass
self.statements.append("Claimable from Public Transport Costs: $" + self.allData["public transport"])
try:
self.totalDeductible += float(self.allData["public transport"])
except:
pass
self.statements.append("Claimable from Other Expenses: $" + self.allData["Other Transport Expenses"])
try:
self.totalDeductible += float(self.allData["Other Transport Expenses"])
except:
pass
def doUniform(self):
selectedValues = self.allData["Did you buy any of the following?"]
del self.allData["Did you buy any of the following?"]
boughtClothingCodes = ""
codes = {
"Compulsory Work Uniform": "C",
"Non-compulsory Work Uniform": "N",
"Occupation Specific Clothing": "S",
"Protective Clothing": "P",
}
for value in selectedValues:
key = codes.keys()[value]
boughtClothingCodes += codes[key] + ", "
self.statements.append("Clothing letters for items bought: " + boughtClothingCodes[:-2])
try:
numWorkOnly = int(self.allData["Home work laundry"])
self.totalDeductible += numWorkOnly
del self.allData["Home work laundry"]
numComb = int(self.allData["Home combined laundry"])
self.totalDeductible += numComb * 0.5
del self.allData["Home combined laundry"]
self.statements.append("Total deductible from laundry: $" + str(numWorkOnly + (numComb * 0.5)))
except:
pass
self.statements.append("Claimable from external laundry: $" + self.allData["External laundry expenses"])
try:
self.totalDeductible += float(self.allData["External laundry expenses"])
except:
print "Failed to add total external laundry expenses into total deductible!!!"
def doDonations(self):
self.statements.append("Claimable from donations: $" + self.allData["donated"])
try:
self.totalDeductible += float(self.allData["donated"])
except:
pass
def doTotalClaimable(self):
totalIncome = float(self.allData["income"])
taxableIncome = totalIncome - self.totalDeductible
if taxableIncome < 0:
helper = TkHelper()
helper.showAlert("You have more deductions than your income.")
self.statements.append("Total Income: $" + str(totalIncome))
self.statements.append("Total Amount Deductible: $" + str(self.totalDeductible))
self.statements.append("Taxable Income: $" + str(taxableIncome))
self.statements.append("Tax Payable: $" + str(self.calculateTaxable(taxableIncome)))
self.statements.append("Medicare Levy: $" + str(taxableIncome * 0.02))
def addLine(self):
line = ''
for _ in range (0, 20):
line += "-"
self.statements.append(line)
def processData(self):
self.statements = []
self.totalDeductible = 0.0
print self.allData
self.doPersonal()
self.addLine()
self.doAddress()
self.addLine()
self.doTransit()
self.addLine()
self.doUniform()
self.addLine()
self.doDonations()
self.addLine()
self.doTotalClaimable()
print self.statements
helper = TkHelper()
for i in range(0, len(self.statements)):
helper.createLabel(self, self.statements[i], GridLocation(0, i))
def __init__(self, controller, dataDict):
super(self.__class__, self).__init__(controller, "Receipt")
self.allData = dataDict
self.processData()
```
#### File: Python/tax/DonationSection.py
```python
from TkHelper import *
from Section import *
from GridLocation import *
class DonationDeductionSection(Section):
def onChangeDonated(self):
"""
Called when the "has donated > $2" is checked or unchecked
"""
self.hasDonated = not self.hasDonated
self.toggleField(self.hasDonated, self.donated, self.donatedLabel)
def __init__(self, controller):
super(self.__class__, self).__init__(controller, "Donation Deductions")
helper = TkHelper()
# Create the UI Elements
helper.createCheckBox(self, "Have you donated $2 or more to an approved organisation?", False,
GridLocation(0, 1), self.onChangeDonated)
(self.donatedLabel, self.donated) = helper.createNumericalField(self, "How much have you donated in total? $",
(2, 99999999999999),GridLocation(0, 2), "donated")
# Apply any rules such as requiring ints or floats and also if fields are required or hidden
self.hasDonated = False
self.toggleField(self.hasDonated, self.donated, self.donatedLabel)
self.requiredFloat.append(self.donated)
```
#### File: Python/tax/TkHelper.py
```python
from Tkinter import *
from GridLocation import *
import tkMessageBox
class TkHelper(object):
_wraplength = 350
def showAlert(self, message):
"""
Show a new window as an alert.
:param message: The message in the label to be shown
"""
tkMessageBox.showinfo("Error", message)
def createHeading(self, parent, title, location=GridLocation(0, 0)):
"""
Creates and adds a heading label to the top of a frame
:param parent: A class containing the parent frame of the header label as a variable 'frame'
:param title: The header text
:param location: The GridLocation to place the header if it should not be at the top by default
:return: The created label instance
"""
label = Label(parent.frame, text=title)
label.pack(side=LEFT)
label.grid(row=location.row, column=location.column)
return label
def createField(self, parent, name, location, key=None):
"""
Creates and adds a textfield and a label
:param parent: A class containing the parent frame of the textfield as a variable 'frame'
:param name: The name of the field, will be used as a key if key is not explicitly assigned
:param location: The GridLocation to place the textfield
:param key: An optional shorter key if the name is too long or inappropriate to use
:return: (instance of label, instance of the textfield)
"""
label = self.createLabel(parent, name, location)
field = Entry(parent.frame)
field.grid(row=location.row, column=location.column + 1)
if key == None:
parent.widgetsDict[name] = field
else:
parent.widgetsDict[key] = field
return (label, field)
def createLabel(self, parent, title, location):
"""
Creates a label and adds it to the parent's frame
:param parent: A class containing the parent frame of the label as a variable 'frame'
:param title: The context of the label
:param location: The GridLocation to place the location
:return: An instance of the label that was created
"""
label = Label(parent.frame, text=title, wraplength=self._wraplength, anchor=W, justify=RIGHT)
label.grid(row=location.row, column=location.column)
return label
def createNumericalField(self, parent, name, bounds, location, key=None):
"""
Creates and adds a spinbox and a label
:param parent: A class containing the parent frame of the spinbox as a variable 'frame'
:param name: The name of the field, will be used as a key if key is not explicitly assigned
:param location: The GridLocation to place the textfield
:param key: An optional shorter key if the name is too long or inappropriate to use
:return: (instance of label, instance of the spinbox)
"""
label = self.createLabel(parent, name, location)
field = Spinbox(parent.frame, from_=bounds[0], to=bounds[1], width=18)
field.grid(row=location.row, column=location.column + 1)
if key == None:
parent.widgetsDict[name] = field
else:
parent.widgetsDict[key] = field
return label, field
def createDropdown(self, parent, name, options, location):
"""
Creates and adds a dropdown menu and a label to a frame
:param parent: A class containing the parent frame of the dropdown menu as a variable 'frame'
:param name: The name of the dropdown menu; it is also used as the key in the parent's widgetsDict.
:param options: An array of strings which will become shown as options in the dropdown menu
:param location: The GridLocation to place the dropdown menu
:return: (instance of the label, a string variable bound to the value of the selected item in the dropdown menu.
"""
label = self.createLabel(parent, name, location)
value = StringVar(parent.frame)
value.set(options[0])
dropdown = apply(OptionMenu, (parent.frame, value) + tuple(options))
dropdown.grid(row=location.row, column=location.column + 1)
parent.widgetsDict[name] = dropdown
return label, value
def createCheckBox(self, parent, name, default, location, onChange):
"""
Creates and adds a checkbox to a frame
:param parent: A class containing the parent frame of the checkbox as a variable 'frame'
:param name: The label text for the checkbox
:param default: The default value of the checkbox, True for ticked and vice versa.
:param location: The GridLocation to place the checkbox
:param onChange: The function to be called when the value of the checkbox changes.
:return: An instance of the checkbox which was added to the frame
"""
checkBox = Checkbutton(parent.frame, text=name, command=onChange, wraplength=self._wraplength)
checkBox.grid(row=location.row, column=location.column, columnspan=2)
if default:
checkBox.select()
return checkBox
def createButton(self, parent, title, action, location):
"""
Creates and adds a button to a frame
:param parent: A class containing the parent frame of the button as a variable 'frame'
:param title: The title of the button
:param action: The function to be called when the button is pressed.
:param location: The GridLocation to place the button
:return: An instance of the button that was added to the frame
"""
button = Button(parent.frame, text=title, command=action)
button.grid(row=location.row, column=location.column)
return button
def createListBox(self, parent, title, options, location):
"""
Creates and adds a listbox to a frame
:param parent: A class containing the parent frame of the listbox as a variable 'frame'
:param title: The heading/title of the list box
:param options: An array of strings to be the available options to pick from
:param location: The GridLocation to place the listbox
:return: Instance of the listbox
"""
label = self.createLabel(parent, title, location)
list = Listbox(parent.frame, selectmode=MULTIPLE, height=len(options))
for i in range(0, len(options)):
list.insert(i, options[i])
list.grid(row=location.row, column=location.column + 1)
parent.widgetsDict[title] = list
return list
``` |
{
"source": "jlscs/FastNN",
"score": 2
} |
#### File: image_models/datasets/dataset_factory.py
```python
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import cifar10
import flowers
import mnist
from flags import FLAGS
from utils import dataset_utils
datasets_map = {
'cifar10': cifar10,
'flowers': flowers,
'mnist': mnist,
}
def get_dataset_iterator(dataset_name, train_image_size, preprocessing_fn=None, data_sources=None, reader=None):
with tf.device("/cpu:0"):
        if not dataset_name:
            raise ValueError('expect dataset_name not None.')
        # Handle the mock dataset before the lookup, since it has no entry in datasets_map.
        if dataset_name == 'mock':
            return dataset_utils._create_mock_iterator(train_image_size)
        if dataset_name not in datasets_map:
            raise ValueError('Name of dataset unknown %s' % dataset_name)
def parse_fn(example):
with tf.device("/cpu:0"):
image, label = datasets_map[dataset_name].parse_fn(example)
if preprocessing_fn is not None:
image = preprocessing_fn(image, train_image_size, train_image_size)
if FLAGS.use_fp16:
image = tf.cast(image, tf.float16)
label -= FLAGS.labels_offset
return image, label
return dataset_utils._create_dataset_iterator(data_sources, parse_fn, reader)
```
#### File: image_models/utils/cluster_utils.py
```python
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from flags import FLAGS
def create_config_proto():
"""Returns session config proto."""
config = tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement,
inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,
intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads,
allow_soft_placement=True,
gpu_options=tf.GPUOptions(
force_gpu_compatible=True,
allow_growth=True))
return config
def get_cluster_manager():
"""Returns the cluster manager to be used."""
return GrpcClusterManager(create_config_proto())
class BaseClusterManager(object):
"""The manager for the cluster of servers running the fast-nn."""
def __init__(self):
assert FLAGS.job_name in ['worker'], 'job_name must be worker'
if FLAGS.job_name and FLAGS.worker_hosts:
cluster_dict = {'worker': FLAGS.worker_hosts.split(',')}
else:
cluster_dict = {'worker': ['127.0.0.1:0']}
self._num_workers = len(cluster_dict['worker'])
self._cluster_spec = tf.train.ClusterSpec(cluster_dict)
self._device_exp = tf.train.replica_device_setter(
worker_device="/job:worker/task:%d/" % FLAGS.task_index,
cluster=self._cluster_spec)
def get_target(self):
"""Returns a target to be passed to tf.Session()."""
raise NotImplementedError('get_target must be implemented by subclass')
def get_cluster_spec(self):
return self._cluster_spec
def num_workers(self):
return self._num_workers
def device_exp(self):
return self._device_exp
class GrpcClusterManager(BaseClusterManager):
"""A cluster manager for a cluster networked with gRPC."""
def __init__(self, config_proto):
super(GrpcClusterManager, self).__init__()
self._server = tf.train.Server(self._cluster_spec,
job_name=FLAGS.job_name,
task_index=FLAGS.task_index,
config=config_proto,
protocol=FLAGS.protocol)
self._target = self._server.target
def get_target(self):
return self._target
``` |
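A sketch of how the manager might be wired into a TF 1.x training loop, assuming the `FLAGS` this module relies on (`job_name`, `worker_hosts`, `task_index`, ...) have already been parsed:
```python
import tensorflow as tf

manager = get_cluster_manager()            # also starts this task's gRPC server
with tf.device(manager.device_exp()):      # place variables via replica_device_setter
    global_step = tf.train.get_or_create_global_step()
    # ... build the model, loss and train_op here ...

with tf.train.MonitoredTrainingSession(master=manager.get_target(),
                                       is_chief=(FLAGS.task_index == 0),
                                       config=create_config_proto()) as sess:
    pass  # sess.run(train_op) in the real training loop
```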
{
"source": "jlsepulveda/dynamodb-copy-table",
"score": 2
} |
#### File: jlsepulveda/dynamodb-copy-table/dynamodb-data-transformation.py
```python
import boto3
import json
import decimal
import base64
import os
import sys
if len(sys.argv) != 3:
    print('Usage: %s <source_table_name>'
          ' <destination_table_name>' % sys.argv[0])
sys.exit(1)
src_table = sys.argv[1]
dst_table = sys.argv[2]
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
if o % 1 > 0:
return float(o)
else:
return int(o)
return super(DecimalEncoder, self).default(o)
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
src=dynamodb.Table(src_table)
dest=dynamodb.Table(dst_table)
def transform_and_copy(item):
    # Copy every attribute and add the derived classroomCourseUUID attribute
    # (base64 of "<classroomId>|0"; encode/decode so the stored value stays a str).
    new_item = dict(item)
    new_item['classroomCourseUUID'] = base64.b64encode(
        (item['classroomId'] + '|0').encode('utf-8')).decode('utf-8')
    # print(json.dumps(new_item, cls=DecimalEncoder))  # uncomment to inspect copied items
    dest.put_item(Item=new_item)
response = src.scan()
for i in response['Items']:
    transform_and_copy(i)
# scan() returns at most 1 MB per call, so keep paginating while a LastEvaluatedKey is returned.
while 'LastEvaluatedKey' in response:
    response = src.scan(ExclusiveStartKey=response['LastEvaluatedKey'])
    for i in response['Items']:
        transform_and_copy(i)
``` |
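Calling `put_item` once per row works but is slow on large tables; boto3's `batch_writer` buffers writes and flushes them in batches of up to 25 items. A sketch under the same `src`/`dest` setup as above (the per-item transformation would be applied the same way before `put_item`):
```python
with dest.batch_writer() as batch:
    response = src.scan()
    while True:
        for item in response['Items']:
            batch.put_item(Item=item)  # apply the classroomCourseUUID transformation here if needed
        if 'LastEvaluatedKey' not in response:
            break
        response = src.scan(ExclusiveStartKey=response['LastEvaluatedKey'])
```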
{
"source": "jlsheehan/pybuilder-docker-build",
"score": 2
} |
#### File: jlsheehan/pybuilder-docker-build/build.py
```python
from pybuilder.core import use_plugin, init, Project, Author
use_plugin("python.core")
use_plugin("python.flake8")
use_plugin("python.distutils")
use_plugin("pypi:pybuilder_pytest")
use_plugin('pypi:pybuilder_pytest_coverage')
use_plugin("pypi:pybuilder_git_version")
authors = [Author("<NAME>", "<EMAIL>")]
summary = "A Docker build plugin for PyBuilder"
url = "https://github.com/jlsheehan/pybuilder-docker-build"
license = "MIT License"
name = "pybuilder-docker-build"
default_task = "publish"
@init
def set_properties(project: Project):
project.depends_on("docker")
project.set_property("distutils_readme_description", True)
project.set_property("distutils_description_overwrite", True)
```
#### File: python/pybuilder_docker_build/util.py
```python
import os
from docker import DockerClient
from pybuilder.core import Project, Logger
docker_client_singleton = None
def _full_image_tag(project):
return "{docker_image_repo}:{docker_image_tag}".format(
docker_image_repo=project.get_property("docker_image_repo"),
docker_image_tag=project.get_property("docker_image_tag")
)
def _build_args(project: Project, logger: Logger):
build_args_dict = {
"PROJECT_NAME": project.name,
"PROJECT_VERSION": project.version,
"PROJECT_DIST_VERSION": project.dist_version,
"PROJECT_DIST": os.path.relpath(project.expand_path(project.get_property("dir_dist")), start=project.get_property("docker_build_path"))
}
if project.has_property("docker_build_args"):
build_args_dict.update(project.get_property("docker_build_args"))
logger.debug("Created build args: %s", build_args_dict)
return build_args_dict
def _get_docker_client():
global docker_client_singleton
if docker_client_singleton is None:
docker_client_singleton = DockerClient.from_env()
return docker_client_singleton
``` |
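A sketch of how these helpers could be combined into a PyBuilder task; the `docker_build_path` property and the helper names are taken from the code above, while the task itself is an assumption rather than the plugin's exact implementation:
```python
from pybuilder.core import task

@task
def docker_build(project: Project, logger: Logger):
    client = _get_docker_client()
    image, _ = client.images.build(
        path=project.get_property("docker_build_path"),
        tag=_full_image_tag(project),
        buildargs=_build_args(project, logger),
    )
    logger.info("Built docker image %s", image.id)
```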
{
"source": "jlshix/nowcoder",
"score": 3
} |
#### File: nowcoder/python/09_jump_stage_2.py
```python
class Solution:
def jumpFloorII(self, number):
# write code here
res = 1
if number >= 2:
for _ in range(number-1):
res *= 2
return res
```
#### File: nowcoder/python/13_odd_before_even.py
```python
class Solution:
def reOrderArray(self, array):
# write code here
        # the key function decides the order: odd numbers first, since the sort is stable
return sorted(array, key=lambda x: x%2==0)
``` |
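The `reOrderArray` one-liner relies on Python's sort being stable: the key maps odd numbers to `False` (0) and even numbers to `True` (1), so odds move to the front while both groups keep their original relative order. A quick check:
```python
values = [2, 1, 4, 3, 5]
print(sorted(values, key=lambda x: x % 2 == 0))  # [1, 3, 5, 2, 4]
```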
{
"source": "jlskuz/otio-drp-adapter",
"score": 3
} |
#### File: otio_drp_adapter/adapters/drp.py
```python
import json
import opentimelineio as otio
from os.path import basename
def read_from_file(filepath, main_mix=False, full_tracks=False):
# We read the .drp file directly
with open(filepath) as source:
# First line contains metadata and the starting settings
metadata = json.loads(source.readline().strip())
# Next ones are the scene switches, let's decode them right away
timeline_data = []
for line in source:
timeline_data += [json.loads(line.strip())]
# We use the filename as the timeline name (without the .drp suffix)
timeline_name = basename(filepath[:-4])
track_name = "Main Mix"
timeline = otio.schema.Timeline(timeline_name)
# We will use the masterTimecode as a 0 reference
mt = metadata["masterTimecode"]
# BlackMagic's ATEM seems to be only 1080p25, but well
widths, rates = metadata["videoMode"].split("p")
rate = int(rates)
# For now, the timeline is a single track.
main_track = otio.schema.Track(track_name)
# If we don't have sources, the .drp file is probably broken.
if "sources" not in metadata:
raise Exception("No sources in drp file")
tc_ref = otio.opentime.from_timecode(mt, rate)
current_tc = otio.opentime.RationalTime(value=0, rate=rate)
# Let's compute the duration of the full scene based on the last switch
last_tc = timeline_data[-1]["masterTimecode"]
end_frame = otio.opentime.from_timecode(last_tc, rate)
duration = end_frame - tc_ref
# And make it available for the ext ref
available_range = otio.opentime.TimeRange(
start_time=current_tc,
duration=duration,
)
# Let's create an hash with all the indices as the key for later
# and create the external reference for the files
# (it may be more clever to generate one for each source)
extrefs = dict()
for src in metadata["sources"]:
src["ref"] = None
# If it's an actual file, generate a ref for it
if "file" in src:
ref = otio.schema.ExternalReference(
target_url=src["file"], available_range=available_range
)
# add it to the src dict from JSON
src["ref"] = ref
src_track = otio.schema.Track(src["name"])
src["track"] = src_track
# add our entry to extrefs, with _index_ as the key (it's an int.)
extrefs[src["_index_"]] = src
# Let's append the interesting tracks in reverse order
for k, src in sorted(
extrefs.items(), key=lambda x: x[1]["name"], reverse=True
):
if "track" in src:
timeline.tracks.append(src["track"])
if full_tracks:
sclip = otio.schema.Clip(
src["name"],
media_reference=src["ref"],
source_range=available_range,
)
src["track"].append(sclip)
# Loosely try to get the scene chosen before the show starts
try:
current_source = metadata["mixEffectBlocks"][0]["source"]
except KeyError:
current_source = 0
if main_mix:
timeline.tracks.append(main_track)
# Let's loop over the switches in the timeline
for c in timeline_data:
# End of current clip is there, and it has that many frames
next_clip_tc = otio.opentime.from_timecode(
c["masterTimecode"], rate
)
next_clip_frames = next_clip_tc - tc_ref
tr = otio.opentime.TimeRange(
current_tc,
next_clip_frames,
)
# So let's figure out its name and ext ref from our hash
# and compute its length in frames
clip = otio.schema.Clip(
extrefs[current_source]["name"],
media_reference=extrefs[current_source]["ref"],
source_range=tr,
)
gap = otio.schema.Gap(source_range=tr)
# Add it to the track
if main_mix:
main_track.append(clip)
if not full_tracks:
for name, source in extrefs.items():
if "track" in source:
if name == current_source:
source["track"].append(clip.clone())
else:
source["track"].append(gap.clone())
# Prepare for the next round, let's move on at the end
# of the added clip, and set the sources for the next clip.
current_tc += next_clip_frames
if "source" in c["mixEffectBlocks"][0]:
current_source = c["mixEffectBlocks"][0]["source"]
else:
break
return timeline
```
#### File: otio-drp-adapter/tests/test_otio_drp_adapter.py
```python
import os
import opentimelineio as otio
SAMPLE_DATA_DIR = os.path.join(os.path.dirname(__file__), "sample_data")
DRP_EXAMPLE_PATH = os.path.join(SAMPLE_DATA_DIR, "sample.drp")
def test_adapter():
timeline = otio.adapters.read_from_file(
DRP_EXAMPLE_PATH, "otio_drp_adapter"
)
assert isinstance(timeline, otio.schema.Timeline)
``` |
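A sketch of using the adapter end to end, reading a `.drp` switch log and writing a standard `.otio` file (the file names are placeholders; the keyword arguments are forwarded to `read_from_file` above):
```python
import opentimelineio as otio

timeline = otio.adapters.read_from_file(
    "show.drp", "otio_drp_adapter", main_mix=True, full_tracks=False
)
otio.adapters.write_to_file(timeline, "show.otio")
```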
{
"source": "jlsneto/cerejeira",
"score": 2
} |
#### File: cereja/utils/_utils.py
```python
import ast
import gc
import math
import os
import threading
import time
from collections import OrderedDict, defaultdict
from importlib import import_module
import importlib
import sys
import types
import random
from typing import Any, Union, List, Tuple, Sequence, Iterable, Dict
import logging
import itertools
from copy import copy
import inspect
# Needed init configs
from ..config.cj_types import ClassType, FunctionType, Number
__all__ = ['CjTest', 'camel_to_snake', 'combine_with_all', 'fill', 'get_attr_if_exists',
'get_implements', 'get_instances_of', 'import_string',
'install_if_not', 'invert_dict', 'logger_level', 'module_references', 'set_log_level', 'time_format',
'string_to_literal', 'rescale_values', 'Source', 'sample', 'obj_repr', 'truncate', 'type_table_of',
'list_methods', 'can_do', 'chunk', 'is_iterable', 'is_indexable', 'is_sequence', 'is_numeric_sequence',
'clipboard',
'sort_dict', 'dict_append', 'to_tuple', 'dict_to_tuple', 'list_to_tuple', 'group_by', 'dict_values_len',
'dict_max_value', 'dict_min_value', 'dict_filter_value', 'get_zero_mask', 'get_batch_strides', 'Thread',
'prune_values']
logger = logging.getLogger(__name__)
_DICT_ITEMS_TYPE = type({}.items())
class Thread(threading.Thread):
def __init__(self, target, args=None, kwargs=None, name=None, daemon=None, callback=None):
while threading.active_count() > os.cpu_count() * 2:
time.sleep(0.1)
super().__init__(daemon=daemon, name=name)
if args is None:
args = ()
if kwargs is None:
kwargs = {}
self._func = target
self._args = args
self._kwargs = kwargs
self._callback = callback
def run(self):
res = self._func(*self._args, **self._kwargs)
if self._callback:
self._callback(res)
def is_indexable(v):
return hasattr(v, '__getitem__')
def chunk(data: Sequence, batch_size: int = None, fill_with: Any = None, is_random: bool = False,
max_batches: int = None) -> List[Union[Sequence, List, Tuple, Dict]]:
"""
e.g:
>>> import cereja as cj
>>> data = list(range(15))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
>>> cj.chunk(data, batch_size=4)
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14]]
>>> cj.chunk(data, batch_size=4, is_random=True, fill_with=0)
[[7, 2, 11, 4], [10, 6, 1, 13], [12, 9, 5, 0], [8, 3, 14, 0]]
>>> data = {"key1": 'value1', "key2": 'value2', "key3": 'value3', "key4": 'value4'}
>>> cj.chunk(data, batch_size=2,is_random=True)
[{'key3': 'value3', 'key2': 'value2'}, {'key1': 'value1', 'key4': 'value4'}]
@param data: Iterable data
@param batch_size: number of items per batch
@param fill_with: Any, but isn't valid for dict
@param is_random: shuffle data
@param max_batches: limit number of batches
@return: list of batches
"""
assert is_iterable(data) and len(data) > 0, f"Chunk isn't possible, because value {data} isn't valid."
if batch_size is None and max_batches is None:
return [data]
# used to return the same data type
__parser = None
if isinstance(data, (dict, tuple, set)):
__parser = type(data)
data = data.items() if isinstance(data, dict) else data
data = list(data) if isinstance(data, (set, tuple, str, bytes, bytearray, _DICT_ITEMS_TYPE)) else copy(data)
if not batch_size or batch_size > len(data) or batch_size < 1:
if isinstance(max_batches, (int, float)) and max_batches > 0:
batch_size = math.ceil(len(data) / max_batches)
else:
batch_size = len(data)
if is_random:
random.shuffle(data)
if max_batches is None:
max_batches = len(data) // batch_size if len(data) % batch_size == 0 else len(data) // batch_size + 1
batches = []
for i in range(0, len(data), batch_size):
result = data[i:i + batch_size]
if fill_with is not None and len(result) < batch_size:
result += [fill_with] * (batch_size - len(result))
batches.append(__parser(result) if __parser is not None else result)
max_batches -= 1
if not max_batches:
break
return batches
def _get_tkinter():
try:
from tkinter import Tk
except ImportError:
raise ValueError("Sorry. Isn't possible.")
return Tk()
def clipboard() -> str:
return _get_tkinter().clipboard_get()
def truncate(text: Union[str, bytes], k=15):
"""
Truncate text.
eg.:
>>> import cereja as cj
    >>> cj.utils.truncate("Cereja is fun.", k=10)
    'Cer....un.'
    @param text: string or bytes
    @param k: natural number, default is 15; if k <= 4 or k > len(text) the text is returned unchanged
"""
assert isinstance(text, (str, bytes)), TypeError(f"{type(text)} isn't valid. Expected str or bytes")
if k > len(text) or k <= 4:
return text
n = int((k - 4) / 2) # k is the max length of text, 4 is the length of truncate symbol
trunc_chars = '....' if isinstance(text, str) else b'....'
return text[:n] + trunc_chars + text[-n:]
def obj_repr(obj_, attr_limit=10, val_limit=3, show_methods=False, show_private=False, deep=3):
try:
if isinstance(obj_, (str, bytes)):
return truncate(obj_, k=attr_limit)
if isinstance(obj_, (bool, float, int, complex)):
return obj_
rep_ = []
if deep > 0:
for attr_ in dir(obj_):
if attr_.startswith('_') and not show_private:
continue
obj = obj_.__getattribute__(attr_)
if isinstance(obj, (str, bool, float, int, complex, bytes, bytearray)):
rep_.append(f'{attr_} = {obj_repr(obj)}')
continue
if callable(obj) and not show_methods:
continue
if is_iterable(obj):
temp_v = []
for k in obj:
if isinstance(obj, dict):
k = f'{k}:{type(obj[k])}'
elif is_iterable(k):
k = obj_repr(k, deep=deep)
deep -= 1
else:
k = str(k)
temp_v.append(k)
if len(temp_v) == val_limit:
break
temp_v = ', '.join(temp_v) # fix me, if bytes ...
obj = f'{obj.__class__.__name__}({temp_v} ...)'
rep_.append(f'{attr_} = {obj}')
if len(rep_) >= attr_limit:
rep_.append('...')
break
else:
return repr(obj_)
except Exception as err:
logger.error(err)
rep_ = []
rep_ = ',\n '.join(rep_)
__repr_template = f"""
{rep_}
"""
return f"{obj_.__class__.__name__} ({__repr_template})"
def can_do(obj: Any) -> List[str]:
"""
List methods and attributes of a Python object.
It is essentially the builtin `dir` function without the private methods and attributes
@param obj: Any
@return: list of attr names sorted by name
"""
return sorted([i for i in filter(lambda attr: not attr.startswith('_'), dir(obj))])
def sample(v: Sequence, k: int = None, is_random: bool = False) -> Union[list, dict, set, Any]:
"""
Get sample of anything
@param v: Any
@param k: int
@param is_random: default False
@return: sample iterable
"""
return chunk(v, batch_size=k, is_random=is_random, max_batches=1)[0]
def type_table_of(o: Union[list, tuple, dict]):
if isinstance(o, (list, tuple)):
type_table = {i: type(i) for i in o}
elif isinstance(o, dict):
type_table = {}
for k, v in o.items():
if isinstance(o, dict):
v = type_table_of(v)
type_table[k] = (v, type(v))
else:
type_table = {o: type(o)}
return type_table
def camel_to_snake(value: str):
snaked_ = []
for i, char in enumerate(value):
if not i == 0 and char.isupper():
char = f'_{char}'
snaked_.append(char)
return ''.join(snaked_).lower()
def get_implements(klass: type):
classes = klass.__subclasses__()
collected_classes = []
for k in classes:
k_classes = k.__subclasses__()
if k_classes:
collected_classes += get_implements(k)
if not k.__name__.startswith('_'):
collected_classes.append(k)
return collected_classes
def get_instances_of(klass: type):
return filter(lambda x: isinstance(x, klass), gc.get_objects())
def _invert_parser_key(key):
return to_tuple(key) if isinstance(key, (list, set, dict)) else key
def _invert_append(obj, k, v):
dict_append(obj, k, v)
if len(obj[k]) == 1:
obj[k] = obj[k][0]
def invert_dict(dict_: Union[dict, set]) -> dict:
"""
Inverts the key by value
e.g:
>>> example = {"a": "b", "c": "d"}
>>> invert_dict(example)
{"b" : "a", "d": "c"}
:return: dict
"""
if not isinstance(dict_, dict):
raise TypeError("Send a dict object.")
new_dict = {}
for key, value in dict_.items():
key = _invert_parser_key(key)
if isinstance(value, dict):
if key not in new_dict:
new_dict.update({key: invert_dict(value)})
else:
_invert_append(new_dict, key, invert_dict(value))
continue
if isinstance(value, (tuple, list, set)):
for k in dict_[key]:
k = _invert_parser_key(k)
_invert_append(new_dict, k, key)
continue
if value not in new_dict:
new_dict[value] = key
else:
value = _invert_parser_key(value)
_invert_append(new_dict, value, key)
return new_dict
def group_by(values, fn) -> dict:
"""
group items by result of fn (function)
eg.
>>> import cereja as cj
>>> values = ['joab', 'leite', 'da', 'silva', 'Neto', 'você']
>>> cj.group_by(values, lambda x: 'N' if x.lower().startswith('n') else 'OTHER')
# {'OTHER': ['joab', 'leite', 'da', 'silva', 'você'], 'N': ['Neto']}
@param values: list of values
@param fn: a function
"""
d = defaultdict(list)
for el in values:
d[fn(el)].append(el)
return dict(d)
def import_string(dotted_path):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import failed.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError as err:
raise ImportError(f"{dotted_path} doesn't look like a module path") from err
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError as err:
raise ImportError(f'Module {module_path} does not define a {class_name} attribute/class') from err
def get_attr_if_exists(obj: Any, attr: str) -> Union[object, None]:
if hasattr(obj, attr):
return getattr(obj, attr)
return None
def time_format(seconds: float, format_='%H:%M:%S') -> Union[str, float]:
"""
Default format is '%H:%M:%S'
>>> time_format(3600)
'01:00:00'
"""
    # NaN is neither >= 0 nor < 0, so this comparison filters it out
if seconds >= 0 or seconds < 0:
time_ = time.strftime(format_, time.gmtime(abs(seconds)))
if seconds < 0:
return f"-{time_}"
return time_
return seconds # NaN
def fill(value: Union[list, str, tuple], max_size, with_=' ') -> Any:
"""
    Pads `value` with `with_` until it reaches max_size.
"""
fill_values = [with_] * (max_size - len(value))
if isinstance(value, str):
        fill_values = ''.join(fill_values)
value = f"{value}{fill_values}"
elif isinstance(value, list):
value += fill_values
elif isinstance(value, tuple):
value += tuple(fill_values)
return value
def list_methods(klass) -> List[str]:
methods = []
for i in dir(klass):
if i.startswith('_') or not callable(getattr(klass, i)):
continue
methods.append(i)
return methods
def string_to_literal(val: Union[str, bytes]):
if isinstance(val, (str, bytes)):
try:
return ast.literal_eval(val)
        except Exception:
pass
return val
def module_references(instance: types.ModuleType, **kwargs) -> dict:
"""
dict of all functions and classes defined in the module.
To also list the variables it is necessary to define explicitly with the special variable on your module
_include
**kwargs:
_include -> to includes any definition and variables
_exclude -> to exclude any definition
:param instance:
:return: List[str]
"""
assert isinstance(instance, types.ModuleType), "You need to submit a module instance."
logger.debug(f"Checking module {instance.__name__}")
definitions = {}
for i in dir(instance):
if i.startswith('_'):
continue
exclude = get_attr_if_exists(instance, "_exclude") or kwargs.get("_exclude") or []
include = get_attr_if_exists(instance, "_include") or kwargs.get("_include") or []
obj = get_attr_if_exists(instance, i)
if i in include:
definitions[i] = obj
if obj is not None and i not in exclude and callable(obj):
if obj.__module__ == instance.__name__:
definitions[i] = obj
logger.debug(f"Collected: {definitions}")
return definitions
def install_if_not(lib_name: str):
from ..display import console
try:
importlib.import_module(lib_name)
        output = 'Already installed'
except ImportError:
from ..system.commons import run_on_terminal
command_ = f"{sys.executable} -m pip install {lib_name}"
output = run_on_terminal(command_)
console.log(output)
def set_log_level(level: Union[int, str]):
"""
Default log level is INFO
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
"""
log = logging.getLogger()
log.setLevel(level)
logger.info(f"Update log level to {level}")
def logger_level():
import logging
return logging.getLogger().level
def combine_with_all(a: list, b: list, n_a_combinations: int = 1, is_random: bool = False) -> List[Tuple[Any, ...]]:
"""
>>> a = [1, 2, 3]
>>> b = ['anything_a', 'anything_b']
>>> combine_with_all(a, b)
[(1, 'anything_a'), (1, 'anything_b'), (2, 'anything_a'), (2, 'anything_b'), (3, 'anything_a'), (3, 'anything_b')]
>>> combine_with_all(a, b, n_a_combinations=2)
[((1, 2), 'anything_a'), ((1, 2), 'anything_b'),
((1, 3), 'anything_a'), ((1, 3), 'anything_b'),
((2, 3), 'anything_a'), ((2, 3), 'anything_b')]
"""
if not isinstance(n_a_combinations, int):
raise TypeError(f"Please send {int}.")
n_a_combinations = len(a) if n_a_combinations > len(a) else abs(n_a_combinations)
combination = itertools.combinations(a, n_a_combinations) if n_a_combinations > 1 else a
product_with_b = list(itertools.product(combination, b))
if is_random:
random.shuffle(product_with_b)
return product_with_b
class CjTest(object):
__template_unittest_function = """
def test_{func_name}(self):
pass
"""
__template_unittest_class = """
class {class_name}Test(unittest.TestCase):
{func_tests}
"""
__template_unittest = """import unittest
{tests}
if __name__ == '__main__':
unittest.main()
"""
__prefix_attr_err = "Attr Check Error {attr_}."
def __init__(self, instance_obj: object):
self._prefix_attr = f"__{instance_obj.__class__.__name__}__"
self._instance_obj = instance_obj
self._set_attr_current_values()
self._checks = []
self._n_checks_passed = 0
@property
def checks(self):
return self._checks
@property
def n_checks(self):
return len(self._checks)
@property
def _instance_obj_attrs(self):
return filter(lambda attr_: attr_.__contains__('__') is False, dir(self._instance_obj))
def _get_attr_obj(self, attr_: str):
if not hasattr(self._instance_obj, attr_):
raise ValueError(f"Attr {attr_} not found.")
value = getattr(self._instance_obj, attr_)
return self._Attr(attr_, value)
def _set_attr_current_values(self):
for attr_ in self._instance_obj_attrs:
attr_obj = self._get_attr_obj(attr_)
attr_name = self.parse_attr(attr_)
setattr(self, attr_name, attr_obj)
def parse_attr(self, attr_: str):
attr_ = self._valid_attr(attr_)
return f'{self._prefix_attr}{attr_}'
def __getattr__(self, item):
return self.__getattribute__(self.parse_attr(item))
class _Attr(object):
def __init__(self, name: str, value: Any):
self.name = name
self.is_callable = callable(value)
self.is_private = self.name.startswith('_')
self.is_bool = value is True or value is False
self.is_class = isinstance(value, ClassType)
self.is_function = isinstance(value, FunctionType)
self.class_of_attr = value.__class__
self._operator_repr = None
self.tests_case = []
def __repr__(self):
return f"{self.name}"
def __str__(self):
return f"{self.name}"
def __len__(self):
return len(self.tests_case)
def __eq__(self, other):
""" ==value """
if isinstance(other, self.__class__):
return NotImplemented
self.tests_case = (other, self.class_of_attr.__eq__, '==')
return self
def __ge__(self, other):
""">=value """
if isinstance(other, self.__class__):
return NotImplemented
self.tests_case = (other, self.class_of_attr.__ge__, '>=')
return self
def __gt__(self, other):
""">value """
if isinstance(other, self.__class__):
return NotImplemented
self.tests_case = (other, self.class_of_attr.__gt__, '>')
return self
def __le__(self, other):
""" <=value. """
if isinstance(other, self.__class__):
return NotImplemented
self.tests_case = (other, self.class_of_attr.__le__, '<=')
return self
def __lt__(self, other):
""" <value. """
if isinstance(other, self.__class__):
return NotImplemented
self.tests_case = (other, self.class_of_attr.__lt__, '<')
return self
def __ne__(self, other):
""" !=value. """
if isinstance(other, self.__class__):
return NotImplemented
self.tests_case = (other, self.class_of_attr.__ne__, '!=')
return self
def copy(self):
return copy(self)
def run(self, current_value):
expected, operator, _ = self.tests_case
if not operator(current_value, expected):
return [f"{repr(current_value)} not {_} {repr(expected)}"]
return []
def _valid_attr(self, attr_name: str):
assert hasattr(self._instance_obj,
attr_name), f"{self.__prefix_attr_err.format(attr_=repr(attr_name))} isn't defined."
return attr_name
def add_check(self, *check_: _Attr):
for i in check_:
if i not in self._checks:
self._checks.append(i)
def remove_check(self, index: int):
self._checks.pop(index)
def check_attr(self, attr_name: Union[str, _Attr]):
if isinstance(attr_name, str):
stored_test = self.__getattribute__(self.parse_attr(attr_name))
else:
stored_test = attr_name
current_value = getattr(self._instance_obj, stored_test.name)
if not stored_test.is_callable:
tests_ = stored_test.run(current_value)
passed = not any(tests_)
self._n_checks_passed += len(stored_test) - len(tests_)
msg_err = f"{self.__prefix_attr_err.format(attr_=repr(stored_test.name))} {' '.join(tests_)}"
assert passed, msg_err
def check_all(self):
for attr_ in self._checks:
self.check_attr(attr_)
@classmethod
def _get_class_test(cls, ref):
func_tests = ''.join(cls.__template_unittest_function.format(func_name=i) for i in list_methods(ref))
return cls.__template_unittest_class.format(class_name=ref.__name__, func_tests=func_tests)
@classmethod
def _get_func_test(cls, ref):
return cls.__template_unittest_function.format(func_name=ref.__name__)
@classmethod
def _get_test(cls, ref):
if isinstance(ref, (FunctionType, types.MethodType)):
return cls._get_func_test(ref)
if isinstance(ref, type):
return cls._get_class_test(ref)
raise TypeError("send a function or class reference")
@classmethod
def build_test(cls, reference):
module_func_test = []
tests = []
if isinstance(reference, types.ModuleType):
for _, ref in module_references(reference).items():
if isinstance(ref, type):
tests.append(cls._get_test(ref))
continue
module_func_test.append(cls._get_test(ref))
else:
if isinstance(reference, type):
tests.append(cls._get_test(reference))
else:
module_func_test.append(cls._get_test(reference))
if module_func_test:
module_func_test = ''.join(module_func_test)
tests = [cls.__template_unittest_class.format(class_name='Module', func_tests=module_func_test)] + tests
return cls.__template_unittest.format(tests='\n'.join(tests))
def _add_license(base_dir, ext='.py'):
from cereja.file import FileIO
from cereja.config import BASE_DIR
licence_file = FileIO.load(BASE_DIR)
for file in FileIO.load_files(base_dir, ext=ext, recursive=True):
if 'Copyright (c) 2019 The Cereja Project' in file.string:
continue
file.insert('"""\n' + licence_file.string + '\n"""')
file.save(exist_ok=True)
def _rescale_down(input_list, size):
assert len(input_list) >= size, f'{len(input_list), size}'
skip = len(input_list) // size
for n, i in enumerate(range(0, len(input_list), skip), start=1):
if n > size:
break
yield input_list[i]
def _rescale_up(values, k, fill_with=None, filling='inner'):
size = len(values)
    assert size <= k, f'Error while resizing: size {size} is greater than target {k}'
clones = (math.ceil(abs(size - k) / size))
refill_values = abs(k - size * clones)
if filling == 'pre':
for i in range(abs(k - size)):
yield fill_with if fill_with is not None else values[0]
for value in values:
# guarantees that the original value will be returned
yield value
if filling != 'inner':
continue
        for i in range(clones - 1):  # -1 because the original value was already yielded above
# value original or fill_with.
yield fill_with if fill_with is not None else value
if refill_values > 0:
refill_values -= 1
yield fill_with if fill_with is not None else value
k -= 1
if k < 0:
break
if filling == 'post':
for i in range(abs(k - size)):
yield fill_with if fill_with is not None else values[-1]
def _interpolate(values, k):
if isinstance(values, list):
from ..array import Matrix
# because true_div ...
values = Matrix(values)
size = len(values)
first_position = 0
last_position = size - 1
step = (last_position - first_position) / (k - 1)
positions = [first_position]
previous_position = positions[-1]
for _ in range(k - 2):
positions.append(previous_position + step)
previous_position = positions[-1]
positions.append(last_position)
for position in positions:
previous_position = math.floor(position)
next_position = math.ceil(position)
if previous_position == next_position:
yield values[previous_position]
else:
delta = position - previous_position
yield values[previous_position] + (values[next_position] - values[previous_position]) / (
next_position - previous_position) * delta
def rescale_values(values: List[Any], granularity: int, interpolation: bool = False, fill_with=None, filling='inner') -> \
List[Any]:
"""
Resizes a list of values
eg.
>>> import cereja as cj
>>> cj.rescale_values(values=list(range(100)),granularity=12)
[0, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88]
>>> cj.rescale_values(values=list(range(5)),granularity=10)
[0, 0, 1, 1, 2, 2, 3, 3, 4, 4]
>>> cj.rescale_values(values=list(range(5)),granularity=10, filling='pre')
[0, 0, 0, 0, 0, 0, 1, 2, 3, 4]
>>> cj.rescale_values(values=list(range(5)),granularity=10, filling='post')
[0, 1, 2, 3, 4, 4, 4, 4, 4, 4]
    @note if you don't pass fill_with, the padding value is taken from the list itself (first, current or last element, depending on the filling type).
    If interpolation is set to True, the resized values are calculated by linear interpolation;
    otherwise they are sub- or upsampled from the original list.
@param values: Sequence of anything
@param granularity: is a integer
@param interpolation: is a boolean
@param fill_with: only scale up, send any value for filling
@param filling: in case of scale up, you can define how the filling will be (pre, inner, post). 'inner' is default.
@return: rescaled list of values.
"""
if interpolation:
result = list(_interpolate(values, granularity))
else:
if len(values) >= granularity:
result = list(_rescale_down(values, granularity))
else:
result = list(_rescale_up(values, granularity, fill_with=fill_with, filling=filling))
assert len(result) == granularity, f"Error while resizing the list size {len(result)} != {granularity}"
return result
class Source:
def __init__(self, reference: Any):
self._name = None
self._doc = inspect.getdoc(reference)
self._source_code = inspect.getsource(reference)
if hasattr(reference, '__name__'):
self._name = reference.__name__
@property
def source_code(self):
return self._source_code.lstrip()
@property
def name(self):
return self._name
@property
def doc(self):
return self._doc
def save(self, path_, **kwargs):
from cereja import FileIO, Path
path_ = Path(path_)
if path_.is_dir:
path_ = path_.join(f'{self.name}.py')
assert path_.suffix == '.py', "Only python source code."
FileIO.create(path_, self._source_code).save(**kwargs)
def is_iterable(obj: Any) -> bool:
"""
Return whether an object is iterable or not.
:param obj: Any object for check
"""
return isinstance(obj, Iterable)
def is_sequence(obj: Any) -> bool:
"""
Return whether an object a Sequence or not, exclude strings and empty obj.
:param obj: Any object for check
"""
return not isinstance(obj, (str, dict, bytes)) and is_iterable(obj)
def is_numeric_sequence(obj: Sequence[Number]) -> bool:
try:
from cereja.array import flatten
sum(flatten(obj))
except (TypeError, ValueError):
return False
return True
def sort_dict(obj: dict, by_keys=False, by_values=False, reverse=False, by_len_values=False, func_values=None,
func_keys=None) -> OrderedDict:
func_values = (lambda v: len(v) if by_len_values else v) if func_values is None else func_values
func_keys = (lambda k: k) if func_keys is None else func_keys
key_func = None
if (by_keys and by_values) or (not by_keys and not by_values):
key_func = (lambda x: (func_keys(x[0]), func_values(x[1])))
elif by_keys:
key_func = (lambda x: func_keys(x[0]))
elif by_values:
key_func = (lambda x: func_values(x[1]))
return OrderedDict(sorted(obj.items(), key=key_func, reverse=reverse))
def list_to_tuple(obj):
assert isinstance(obj, (list, set, tuple)), f"Isn't possible convert {type(obj)} into {tuple}"
result = []
for i in obj:
if isinstance(i, list):
i = list_to_tuple(i)
elif isinstance(i, (set, dict)):
i = dict_to_tuple(i)
result.append(i)
return tuple(result)
def dict_values_len(obj, max_len=None, min_len=None, take_len=False):
return {i: len(obj[i]) if take_len else obj[i] for i in obj if
(max_len is None or len(obj[i]) <= max_len) and (min_len is None or len(obj[i]) >= min_len)}
def dict_to_tuple(obj):
assert isinstance(obj, (dict, set)), f"Isn't possible convert {type(obj)} into {tuple}"
result = []
if isinstance(obj, set):
return tuple(obj)
for k, v in obj.items():
if isinstance(v, (dict, set)):
v = dict_to_tuple(v)
elif isinstance(v, list):
v = list_to_tuple(v)
result.append((k, v))
return tuple(result)
def to_tuple(obj):
if isinstance(obj, (set, dict)):
return dict_to_tuple(obj)
if isinstance(obj, (list, tuple)):
return list_to_tuple(obj)
return tuple(obj)
def dict_append(obj: Dict[Any, Union[list, tuple]], key, *v):
"""
Add items to a key, if the key is not in the dictionary it will be created with a list and the value sent.
e.g:
>>> import cereja as cj
>>> my_dict = {}
>>> cj.utils.dict_append(my_dict, 'key_eg', 1,2,3,4,5,6)
{'key_eg': [1, 2, 3, 4, 5, 6]}
>>> cj.utils.dict_append(my_dict, 'key_eg', [1,2])
{'key_eg': [1, 2, 3, 4, 5, 6, [1, 2]]}
@param obj: Any dict of list values
@param key: dict key
@param v: all values after key
@return:
"""
assert isinstance(obj, dict), 'Error on append values. Please send a dict object.'
if key not in obj:
obj[key] = []
if not isinstance(obj[key], (list, tuple)):
obj[key] = [obj[key]]
if isinstance(obj[key], tuple):
obj[key] = (*obj[key], *v,)
else:
for i in v:
obj[key].append(i)
return obj
def dict_filter_value(obj: Dict[Any, Any], f) -> Any:
"""
Results is a filtered dict by f func result
@param obj: is a dict
@param f: function filter
@return:
"""
inv_dict = invert_dict(obj)
filter_val = f(inv_dict)
res = inv_dict[filter_val]
if isinstance(res, list):
return dict(map(lambda x: (x, filter_val), res))
return {res: filter_val}
def dict_max_value(obj: Dict[Any, Any]) -> Any:
"""
Results is a filtered dict by max value
>>> import cereja as cj
>>> cj.dict_max_value({'oi': 10, 'aqui': 20, 'sei': 20})
{'aqui': 20, 'sei': 20}
@param obj: is a dict
@return: dict filtered
"""
return dict_filter_value(obj, max)
def dict_min_value(obj: Dict[Any, Any]) -> Any:
"""
Results is a filtered dict by min value
>>> import cereja as cj
>>> cj.dict_min_value({'oi': 10, 'aqui': 20, 'sei': 20})
{'oi': 10}
@param obj: is a dict
@return: dict filtered
"""
return dict_filter_value(obj, min)
def get_zero_mask(number: int, max_len: int = 3) -> str:
"""
Returns string of numbers formated with zero mask
eg.
>>> get_zero_mask(100, 4)
'0100'
>>> get_zero_mask(101, 4)
'0101'
"""
    return f'%0.{max_len}d' % number
def get_batch_strides(data, kernel_size, strides=1, fill_=True, take_index=False):
"""
Returns batches of fixed window size (kernel_size) with a given stride
@param data: iterable
@param kernel_size: window size
@param strides: default is 1
@param take_index: add number of index on items
@param fill_: padding last batch if it needs
"""
batches = []
for index, item in enumerate(data):
batches.append(item if not take_index else [index, item])
if index % strides == 0 and len(batches) >= kernel_size:
yield batches[:kernel_size]
batches = batches[strides:]
if len(batches):
yield rescale_values(batches, granularity=kernel_size, filling='post') if fill_ else batches
def prune_values(values: Sequence, factor=2):
assert is_indexable(values), TypeError('object is not subscriptable')
if len(values) <= factor:
return values
w = round(len(values) / 2)
k = int(round(w / factor))
res = values[w - k:w + k]
if len(res) == 0:
return values[k]
return res
``` |
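Most helpers above carry doctest-style examples, but `get_batch_strides` does not; here is a small sketch of its behaviour (the output was traced by hand from the code above, so treat it as illustrative):
```python
for window in get_batch_strides(list(range(6)), kernel_size=3):
    print(window)
# [0, 1, 2]
# [1, 2, 3]
# [2, 3, 4]
# [3, 4, 5]
# [4, 5, 5]   <- trailing partial window padded via rescale_values(..., filling='post')
```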
{
"source": "jlsneto/hmr",
"score": 3
} |
#### File: src/datasets/common.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities.
Taken from
https://github.com/tensorflow/models/blob/master/inception/inception/data/build_image_data.py
"""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(
image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(
self._decode_jpeg_data, channels=3)
self._encode_jpeg_data = tf.placeholder(dtype=tf.uint8)
self._encode_jpeg = tf.image.encode_jpeg(
self._encode_jpeg_data, format='rgb')
self._decode_png_data = tf.placeholder(dtype=tf.string)
self._decode_png = tf.image.decode_png(
self._decode_png_data, channels=3)
self._encode_png_data = tf.placeholder(dtype=tf.uint8)
self._encode_png = tf.image.encode_png(self._encode_png_data)
def png_to_jpeg(self, image_data):
return self._sess.run(
self._png_to_jpeg, feed_dict={
self._png_data: image_data
})
def decode_jpeg(self, image_data):
image = self._sess.run(
self._decode_jpeg, feed_dict={
self._decode_jpeg_data: image_data
})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def encode_jpeg(self, image):
image_data = self._sess.run(
self._encode_jpeg, feed_dict={
self._encode_jpeg_data: image
})
return image_data
def encode_png(self, image):
image_data = self._sess.run(
self._encode_png, feed_dict={
self._encode_png_data: image
})
return image_data
def decode_png(self, image_data):
image = self._sess.run(
self._decode_png, feed_dict={
self._decode_png_data: image_data
})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list) and not isinstance(value, np.ndarray):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list) and not isinstance(value, np.ndarray):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def convert_to_example(image_data, image_path, height, width, label, center):
"""Build an Example proto for an image example.
Args:
image_data: string, JPEG encoding of RGB image;
image_path: string, path to this image file
labels: 3 x 14 joint location + visibility --> This could be 3 x 19
height, width: integers, image shapes in pixels.
center: 2 x 1 center of the tight bbox
Returns:
Example proto
"""
from os.path import basename
image_format = 'JPEG'
add_face = False
if label.shape[1] == 19:
add_face = True
# Split and save facepts on it's own.
face_pts = label[:, 14:]
label = label[:, :14]
feat_dict = {
'image/height': int64_feature(height),
'image/width': int64_feature(width),
'image/center': int64_feature(center.astype(np.int)),
'image/x': float_feature(label[0, :].astype(np.float)),
'image/y': float_feature(label[1, :].astype(np.float)),
'image/visibility': int64_feature(label[2, :].astype(np.int)),
'image/format': bytes_feature(tf.compat.as_bytes(image_format)),
'image/filename': bytes_feature(
tf.compat.as_bytes(basename(image_path))),
'image/encoded': bytes_feature(tf.compat.as_bytes(image_data)),
}
if add_face:
# 3 x 5
feat_dict.update({
'image/face_pts':
float_feature(face_pts.ravel().astype(np.float))
})
example = tf.train.Example(features=tf.train.Features(feature=feat_dict))
return example
def convert_to_example_wmosh(image_data, image_path, height, width, label,
center, gt3d, pose, shape, scale_factors,
start_pt, cam):
"""Build an Example proto for an image example.
Args:
image_data: string, JPEG encoding of RGB image;
image_path: string, path to this image file
labels: 3 x 14 joint location + visibility
height, width: integers, image shapes in pixels.
center: 2 x 1 center of the tight bbox
gt3d: 14x3 3D joint locations
scale_factors: 2 x 1, scale factor used to scale image.
start_pt: the left corner used to crop the _scaled_ image to 300x300
cam: (3,), [f, px, py] intrinsic camera parameters.
Returns:
Example proto
"""
from os.path import basename
image_format = 'JPEG'
if label.shape[0] != 3:
label = label.T
if label.shape[1] > 14:
        print("This shouldn't be happening")
import ipdb
ipdb.set_trace()
if pose is None:
has_3d = 0
# Use -1 to save.
pose = -np.ones(72)
shape = -np.ones(10)
else:
has_3d = 1
example = tf.train.Example(
features=tf.train.Features(feature={
'image/height':
int64_feature(height),
'image/width':
int64_feature(width),
'image/center':
int64_feature(center.astype(np.int)),
'image/x':
float_feature(label[0, :].astype(np.float)),
'image/y':
float_feature(label[1, :].astype(np.float)),
'image/visibility':
int64_feature(label[2, :].astype(np.int)),
'image/format':
bytes_feature(tf.compat.as_bytes(image_format)),
'image/filename':
bytes_feature(tf.compat.as_bytes(basename(image_path))),
'image/encoded':
bytes_feature(tf.compat.as_bytes(image_data)),
'mosh/pose':
float_feature(pose.astype(np.float)),
'mosh/shape':
float_feature(shape.astype(np.float)),
'mosh/gt3d':
float_feature(gt3d.ravel().astype(np.float)),
'meta/scale_factors':
float_feature(np.array(scale_factors).astype(np.float)),
'meta/crop_pt':
int64_feature(start_pt.astype(np.int)),
'meta/has_3d':
int64_feature(has_3d),
'image/cam':
float_feature(cam.astype(np.float)),
}))
return example
def resize_img(img, scale_factor):
import cv2
import numpy as np
new_size = (np.floor(np.array(img.shape[0:2]) * scale_factor)).astype(int)
new_img = cv2.resize(img, (new_size[1], new_size[0]))
# This is scale factor of [height, width] i.e. [y, x]
actual_factor = [
new_size[0] / float(img.shape[0]), new_size[1] / float(img.shape[1])
]
return new_img, actual_factor
def read_images_from_tfrecords(tf_path, img_size=224, sess=None):
"""
Returns image, kp, and gt3d from the tf_paths
This returns a preprocessed image, cropped around img_size.
"""
from time import time
from os.path import exists
if not exists(tf_path):
        print("%s doesn't exist!" % tf_path)
exit(1)
if sess is None:
sess = tf.Session()
t0 = time()
all_images, all_kps, all_gt3ds = [], [], []
itr = 0
# Decode op graph
image_data_pl = tf.placeholder(dtype=tf.string)
decode_op = tf.image.decode_jpeg(image_data_pl)
for serialized_ex in tf.python_io.tf_record_iterator(tf_path):
example = tf.train.Example()
example.ParseFromString(serialized_ex)
image_data = example.features.feature['image/encoded'].bytes_list.value[0]
image = sess.run(decode_op, feed_dict={image_data_pl: image_data})
x = example.features.feature['image/x'].float_list.value
y = example.features.feature['image/y'].float_list.value
vis = example.features.feature['image/visibility'].int64_list.value
center = example.features.feature['image/center'].int64_list.value
x = np.array(x)
y = np.array(y)
vis = np.array(vis, dtype='bool')
center = np.array(center)
# Crop img_size.
# Pad in case.
margin = int(img_size/2)
image_pad = np.pad(image, ((margin,), (margin,), (0,)), mode='edge')
# figure out starting point
start_pt = center
end_pt = center + 2*margin
x_crop = x + margin - start_pt[0]
y_crop = y + margin - start_pt[1]
kp_crop = np.vstack([x_crop, y_crop])
kp_final = 2 * (kp_crop / img_size) - 1
kp_final = np.vstack((vis * kp_final, vis)).T
# crop:
crop = image_pad[start_pt[1]:end_pt[1], start_pt[0]:end_pt[0], :]
# Normalize image to [-1, 1]
crop = 2 * ((crop / 255.) - 0.5)
# Note: This says mosh but gt3d is the gt H3.6M joints & not from mosh.
gt3d = example.features.feature['mosh/gt3d'].float_list.value
gt3d = np.array(gt3d).reshape(-1, 3)
all_images.append(crop)
all_kps.append(kp_final)
all_gt3ds.append(gt3d)
itr += 1
images = np.stack(all_images)
kps = np.stack(all_kps)
gt3ds = np.stack(all_gt3ds)
print('Read %d images, %g secs' % (images.shape[0], time()-t0))
return images, kps, gt3ds
```
#### File: hmr/src/models.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.layers.python.layers.initializers import variance_scaling_initializer
def Encoder_resnet(x, is_training=True, weight_decay=0.001, reuse=False):
"""
Resnet v2-50
Assumes input is [batch, height_in, width_in, channels]!!
Input:
- x: N x H x W x 3
- weight_decay: float
- reuse: bool->True if test
Outputs:
- cam: N x 3
- Pose vector: N x 72
- Shape vector: N x 10
- variables: tf variables
"""
from tensorflow.contrib.slim.python.slim.nets import resnet_v2
with tf.name_scope("Encoder_resnet", values=[x]):
with slim.arg_scope(
resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
net, end_points = resnet_v2.resnet_v2_50(
x,
num_classes=None,
is_training=is_training,
reuse=reuse,
scope='resnet_v2_50')
net = tf.squeeze(net, axis=[1, 2])
variables = tf.contrib.framework.get_variables('resnet_v2_50')
return net, variables
def Encoder_fc3_dropout(x,
num_output=85,
is_training=True,
reuse=False,
name="3D_module"):
"""
3D inference module. 3 MLP layers (last is the output)
With dropout on first 2.
Input:
- x: N x [|img_feat|, |3D_param|]
- reuse: bool
Outputs:
- 3D params: N x num_output
if orthogonal:
either 85: (3 + 24*3 + 10) or 109 (3 + 24*4 + 10) for factored axis-angle representation
if perspective:
86: (f, tx, ty, tz) + 24*3 + 10, or 110 for factored axis-angle.
- variables: tf variables
"""
if reuse:
print('Reuse is on!')
with tf.variable_scope(name, reuse=reuse) as scope:
net = slim.fully_connected(x, 1024, scope='fc1')
net = slim.dropout(net, 0.5, is_training=is_training, scope='dropout1')
net = slim.fully_connected(net, 1024, scope='fc2')
net = slim.dropout(net, 0.5, is_training=is_training, scope='dropout2')
small_xavier = variance_scaling_initializer(
factor=.01, mode='FAN_AVG', uniform=True)
net = slim.fully_connected(
net,
num_output,
activation_fn=None,
weights_initializer=small_xavier,
scope='fc3')
variables = tf.contrib.framework.get_variables(scope)
return net, variables
def get_encoder_fn_separate(model_type):
"""
Retrieves diff encoder fn for image and 3D
"""
encoder_fn = None
threed_fn = None
if 'resnet' in model_type:
encoder_fn = Encoder_resnet
else:
print('Unknown encoder %s!' % model_type)
exit(1)
if 'fc3_dropout' in model_type:
threed_fn = Encoder_fc3_dropout
if encoder_fn is None or threed_fn is None:
        print("Don't know what encoder to use for %s" % model_type)
import ipdb
ipdb.set_trace()
return encoder_fn, threed_fn
def Discriminator_separable_rotations(
poses,
shapes,
weight_decay,
):
"""
23 Discriminators on each joint + 1 for all joints + 1 for shape.
To share the params on rotations, this treats the 23 rotation matrices
as a "vertical image":
Do 1x1 conv, then send off to 23 independent classifiers.
Input:
- poses: N x 23 x 1 x 9, NHWC ALWAYS!!
- shapes: N x 10
- weight_decay: float
Outputs:
- prediction: N x (1+23) or N x (1+23+1) if do_joint is on.
- variables: tf variables
"""
data_format = "NHWC"
with tf.name_scope("Discriminator_sep_rotations", values=[poses, shapes]):
with tf.variable_scope("D") as scope:
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope([slim.conv2d], data_format=data_format):
poses = slim.conv2d(poses, 32, [1, 1], scope='D_conv1')
poses = slim.conv2d(poses, 32, [1, 1], scope='D_conv2')
theta_out = []
for i in range(0, 23):
theta_out.append(
slim.fully_connected(
poses[:, i, :, :],
1,
activation_fn=None,
scope="pose_out_j%d" % i))
theta_out_all = tf.squeeze(tf.stack(theta_out, axis=1))
                    # Do shape on its own:
shapes = slim.stack(
shapes,
slim.fully_connected, [10, 5],
scope="shape_fc1")
shape_out = slim.fully_connected(
shapes, 1, activation_fn=None, scope="shape_final")
""" Compute joint correlation prior!"""
nz_feat = 1024
poses_all = slim.flatten(poses, scope='vectorize')
poses_all = slim.fully_connected(
poses_all, nz_feat, scope="D_alljoints_fc1")
poses_all = slim.fully_connected(
poses_all, nz_feat, scope="D_alljoints_fc2")
poses_all_out = slim.fully_connected(
poses_all,
1,
activation_fn=None,
scope="D_alljoints_out")
out = tf.concat([theta_out_all,
poses_all_out, shape_out], 1)
variables = tf.contrib.framework.get_variables(scope)
return out, variables
```
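A rough sketch of chaining the two encoder stages the way HMR's iterative error feedback works; the batch size, the zero initial parameters (the trainer uses a learned mean) and the model-type string are illustrative assumptions:
```python
import tensorflow as tf

batch_size = 16
images = tf.placeholder(tf.float32, shape=(batch_size, 224, 224, 3))
img_enc_fn, threed_enc_fn = get_encoder_fn_separate('resnet_fc3_dropout')
img_feat, enc_vars = img_enc_fn(images, is_training=False)   # N x 2048 ResNet-v2-50 features
theta = tf.zeros((batch_size, 85))   # stand-in for the learned mean parameters
for i in range(3):                   # a few refinement steps
    delta, threed_vars = threed_enc_fn(tf.concat([img_feat, theta], 1),
                                       num_output=85,
                                       is_training=False,
                                       reuse=(i > 0))
    theta = theta + delta            # theta = [cam(3), pose(72), shape(10)]
```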
#### File: src/util/image.py
```python
import numpy as np
import cv2
def resize_img(img, scale_factor):
new_size = (np.floor(np.array(img.shape[0:2]) * scale_factor)).astype(int)
new_img = cv2.resize(img, (new_size[1], new_size[0]))
# This is scale factor of [height, width] i.e. [y, x]
actual_factor = [
new_size[0] / float(img.shape[0]), new_size[1] / float(img.shape[1])
]
return new_img, actual_factor
def scale_and_crop(image, scale, center, img_size):
image_scaled, scale_factors = resize_img(image, scale)
# Swap so it's [x, y]
scale_factors = [scale_factors[1], scale_factors[0]]
center_scaled = np.round(center * scale_factors).astype(np.int)
margin = int(img_size / 2)
image_pad = np.pad(
image_scaled, ((margin, ), (margin, ), (0, )), mode='edge')
center_pad = center_scaled + margin
# figure out starting point
start_pt = center_pad - margin
end_pt = center_pad + margin
# crop:
crop = image_pad[start_pt[1]:end_pt[1], start_pt[0]:end_pt[0], :]
proc_param = {
'scale': scale,
'start_pt': start_pt,
'end_pt': end_pt,
'img_size': img_size
}
return crop, proc_param
``` |
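For context, a minimal preprocessing sketch built on `scale_and_crop`; 224 px is the usual HMR input size, while the image path and the assumption that the subject sits near the image centre are placeholders:
```python
import cv2
import numpy as np

img = cv2.imread("person.jpg")                      # placeholder path
center = np.array(img.shape[:2][::-1]) / 2.         # [x, y]; assume subject roughly centred
scale = 224. / np.max(img.shape[:2])                # shrink so the longer side fits in 224 px
crop, proc_param = scale_and_crop(img, scale, center, img_size=224)
crop = 2 * ((crop / 255.) - 0.5)                    # normalise to [-1, 1], as in common.py
```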
{
"source": "JLSteenwyk/BioKIT",
"score": 2
} |
#### File: BioKIT/biokit/biokit.py
```python
import logging
import sys
import textwrap
from .version import __version__
from argparse import (
ArgumentParser,
SUPPRESS,
RawDescriptionHelpFormatter,
)
from .services.alignment import (
AlignmentLength,
AlignmentRecoding,
AlignmentSummary,
ConsensusSequence,
ConstantSites,
ParsimonyInformativeSites,
PositionSpecificScoreMatrix,
VariableSites,
)
from .services.coding_sequences import (
GCContentFirstPosition,
GCContentSecondPosition,
GCContentThirdPosition,
GeneWiseRelativeSynonymousCodonUsage,
RelativeSynonymousCodonUsage,
TranslateSequence,
)
from .services.fastq import (
FastQReadLengths,
SubsetPEFastQReads,
SubsetSEFastQReads,
TrimPEAdaptersFastQ,
TrimPEFastQ,
TrimSEAdaptersFastQ,
TrimSEFastQ,
)
from .services.genome import (
GCContent,
GenomeAssemblyMetrics,
L50,
L90,
LongestScaffold,
N50,
N90,
NumberOfLargeScaffolds,
NumberOfScaffolds,
SumOfScaffoldLengths,
)
from .services.text import (
CharacterFrequency,
Faidx,
FileFormatConverter,
MultipleLineToSingleLineFasta,
RemoveFastaEntry,
RemoveShortSequences,
RenameFastaEntries,
ReorderBySequenceLength,
SequenceComplement,
SequenceLength,
SingleLineToMultipleLineFasta,
)
logger = logging.getLogger(__name__)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
help_header = fr"""
____ _ _ _______ _______
| _ \(_) | |/ /_ _|__ __|
| |_) |_ ___ | ' / | | | |
| _ <| |/ _ \| < | | | |
| |_) | | (_) | . \ _| |_ | |
|____/|_|\___/|_|\_\_____| |_|
Version: {__version__}
Citation: Steenwyk et al. 2021, bioRxiv. DOI: 10.1101/2021.10.02.462868
https://www.biorxiv.org/content/10.1101/2021.10.02.462868v1
""" # noqa
translation_table_codes = f"""
Codes for which translation table to use
=====================================================
1. The Standard Code
2. The Vertebrate Mitochondrial Code
3. The Yeast Mitochondrial Code
4. The Mold, Protozoan, and Coelenterate Mitochondrial
Code and the Mycoplasma/Spiroplasma Code
5. The Invertebrate Mitochondrial Code
6. The Ciliate, Dasycladacean and Hexamita Nuclear Code
9. The Echinoderm and Flatworm Mitochondrial Code
10. The Euplotid Nuclear Code
11. The Bacterial, Archaeal and Plant Plastid Code
12. The Alternative Yeast Nuclear Code
13. The Ascidian Mitochondrial Code
14. The Alternative Flatworm Mitochondrial Code
16. Chlorophycean Mitochondrial Code
21. Trematode Mitochondrial Code
22. Scenedesmus obliquus Mitochondrial Code
23. Thraustochytrium Mitochondrial Code
24. Rhabdopleuridae Mitochondrial Code
25. Candidate Division SR1 and Gracilibacteria Code
26. Pachysolen tannophilus Nuclear Code
27. Karyorelict Nuclear Code
28. Condylostoma Nuclear Code
29. Mesodinium Nuclear Code
30. Peritrich Nuclear Code
31. Blastocrithidia Nuclear Code
33. Cephalodiscidae Mitochondrial UAA-Tyr Code
50. CUG-Ala Code
More information about genetic codes can be obtained from NCBI:
https://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi?chapter=tgencodes.
The only codon table not described by NCBI is 50, CUG-Ala wherein CUG encodes
for alanine.
""" # noqa
adapters_available = f"""
Adapters available
=====================================================
NexteraPE-PE
TruSeq2-PE
TruSeq2-SE
TruSeq3-PE-2
TruSeq3-PE
TruSeq3-SE
""" # noqa
class Biokit(object):
help_header = fr"""
____ _ _ _______ _______
| _ \(_) | |/ /_ _|__ __|
| |_) |_ ___ | ' / | | | |
| _ <| |/ _ \| < | | | |
| |_) | | (_) | . \ _| |_ | |
|____/|_|\___/|_|\_\_____| |_|
Version: {__version__}
Citation: Steenwyk et al. 2021, bioRxiv. DOI: 10.1101/2021.10.02.462868
https://www.biorxiv.org/content/10.1101/2021.10.02.462868v1
""" # noqa
def __init__(self):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{self.help_header}
BioKIT is a broadly applicable command-line toolkit for bioinformatics research.
Usage: biokit <command> [optional command arguments]
Command specific help messages can be viewed by adding a
-h/--help argument after the command. For example, to see the
to see the help message for the command 'get_entry', execute
"biokit get_entry -h" or "biokit get_entry --help".
Lastly, each function comes with aliases to save the user some
key strokes. For example, to get the help message for the 'get_entry'
function, you can type "biokit ge -h". All aliases are specified
in parentheses after the long form of the function name.
Commands for alignments
=======================
alignment_length (alias: aln_len)
- calculates the length of an alignment
alignment_recoding (alias: aln_recoding, recode)
- recode alignments using reduced character schemes
alignment_summary (alias: aln_summary)
- calculate summary statistics for an alignment
consensus_sequence (alias: con_seq)
- create a consensus sequence from an alignment
constant_sites (alias: con_sites)
- calculate the number of constant sites in an alignment
parsimony_informative_sites (alias: pi_sites, pis)
- calculate the number of parsimony informative sites in an alignment
position_specific_score_matrix (alias: pssm)
- create a position specific score matrix for an alignment
variable_sites (alias: var_sites, vs)
- calculate the number of variable sites in an alignment
Commands for coding sequences
=============================
gc_content_first_position (alias: gc1)
- calculate the GC content of the first position
among coding sequences
gc_content_second_position (alias: gc2)
- calculate the GC content of the second position
among coding sequences
gc_content_third_position (alias: gc3)
- calculate the GC content of the third position
among coding sequences
gene_wise_relative_synonymous_codon_usage (alias: gene_wise_rscu; gw_rscu; grscu)
- calculates relative synonymous codon usage
that has been adapted for single genes to
assess codon usage bias on individual genes
relative_synonymous_codon_usage (alias: rscu)
- calculate relative synonymous codon usage
to evaluate potential codon usage biases
translate_sequence (alias: translate_seq, trans_seq)
- translate coding sequences to amino acids
Commands for fastq files
========================
fastq_read_lengths (alias: fastq_read_lens)
- determine the lengths of fastq reads
subset_pe_fastq_reads (alias: subset_pe_fastq)
- subset paired-end fastq reads and
maintain pairing information
subset_se_fastq_reads (alias: subset_se_fastq)
- subset single-end fastq reads
trim_pe_adapters_fastq
- trim adapters from paired-end fastq reads
trim_pe_fastq
- quality trim paired-end fastq reads
and maintain pairing information
trim_se_adapters_fastq
- trim adapters from single-end fastq reads
trim_se_fastq
- quality trim single-end fastq reads
Commands for genomes
====================
gc_content (alias: gc)
- calculate the GC content of a FASTA file
genome_assembly_metrics (alias: assembly_metrics)
- calculate various genome assembly metrics
l50
- calculate the L50 of a genome assembly
l90
- calculate the L90 of a genome assembly
longest_scaffold (alias: longest_scaff, longest_contig, longest_cont)
- determine the length of the longest
scaffold of a genome assembly
n50
- calculate the N50 of a genome assembly
n90
- calculate the N90 of a genome assembly
number_of_scaffolds (alias: num_of_scaffolds, number_of_contigs, num_of_cont)
- calculate the number of scaffolds in a
genome assembly
number_of_large_scaffolds (alias: num_of_lrg_scaffolds, number_of_large_contigs, num_of_lrg_cont)
- calculate the number of large scaffolds
sum_of_scaffold_lengths (alias: sum_of_contig_lengths)
- calculate sum of scaffold/contig lengths
Commands for sequence files
===========================
character_frequency (alias: char_freq)
- determine the frequency of all observed characters
faidx (alias: get_entry; ge)
- extract query fasta entry from multi-fasta file
file_format_converter (alias: format_converter; ffc)
- convert a multiple sequence file from one format
to another
multiple_line_to_single_line_fasta (alias: ml2sl)
- reformats sequences that occur on multiple
lines to be represented in a single line
remove_short_sequences (alias: remove_short_seqs)
- remove short sequences from a FASTA file
remove_fasta_entry
- remove entry in a FASTA file
rename_fasta_entries (alias: rename_fasta)
- rename entries in a FASTA file
reorder_by_sequence_length (alias: reorder_by_seq_len)
- reorder sequences from longest to shortest in a FASTA file
sequence_complement (alias: seq_comp)
- generate the complementary sequence for an alignment
sequence_length (alias: seq_len)
- calculate the length of each FASTA entry
single_line_to_multiple_line_fasta (alias: sl2ml)
- reformats sequences so that there are 60
characters per sequence line
""" # noqa
),
)
parser.add_argument("command", help=SUPPRESS)
args = parser.parse_args(sys.argv[1:2])
# if the command matches one of the long-form commands, run it.
# Otherwise, assume it is an alias and dispatch through the
# run_alias function
try:
if hasattr(self, args.command):
getattr(self, args.command)(sys.argv[2:])
else:
self.run_alias(args.command, sys.argv[2:], parser)
except NameError as e:
print(e)
sys.exit()
# aliases # noqa
def run_alias(self, command, argv, parser):
# version
if command in ["v"]:
return self.version()
# aliases for alignments
elif command in ["aln_len"]:
return self.alignment_length(argv)
elif command in ["aln_recoding", "recode"]:
return self.alignment_recoding(argv)
elif command in ["aln_summary"]:
return self.alignment_summary(argv)
elif command in ["con_seq"]:
return self.consensus_sequence(argv)
elif command in ["con_sites"]:
return self.constant_sites(argv)
elif command in ["parsimony_informative_sites", "pi_sites", "pis"]:
return self.parsimony_informative_sites(argv)
elif command in ["pssm"]:
return self.position_specific_score_matrix(argv)
elif command in ["var_sites", "vs"]:
return self.variable_sites(argv)
# aliases for coding sequences
elif command in ["gc1"]:
return self.gc_content_first_position(argv)
elif command in ["gc2"]:
return self.gc_content_second_position(argv)
elif command in ["gc3"]:
return self.gc_content_third_position(argv)
elif command in ["gene_wise_rscu", "gw_rscu", "grscu"]:
return self.gene_wise_relative_synonymous_codon_usage(argv)
elif command in ["rscu"]:
return self.relative_synonymous_codon_usage(argv)
elif command in ["translate_seq", "trans_seq"]:
return self.translate_sequence(argv)
# aliases for fastq files
elif command in ["fastq_read_lens"]:
return self.fastq_read_lengths(argv)
elif command in ["subset_pe_fastq"]:
return self.subset_pe_fastq_reads(argv)
elif command in ["subset_se_fastq"]:
return self.subset_se_fastq_reads(argv)
elif command in ["trim_pe_adapters_fastq_reads"]:
return self.trim_pe_adapters_fastq(argv)
elif command in ["trim_pe_fastq_reads"]:
return self.trim_pe_fastq(argv)
elif command in ["trim_se_adapters_fastq_reads"]:
return self.trim_se_adapters_fastq(argv)
elif command in ["trim_se_fastq_reads"]:
return self.trim_se_fastq(argv)
# aliases for genomes
elif command in ["gc"]:
return self.gc_content(argv)
elif command in ["assembly_metrics"]:
return self.genome_assembly_metrics(argv)
elif command in ["longest_scaff", "longest_contig", "longest_cont"]:
return self.longest_scaffold(argv)
elif command in [
"num_of_lrg_scaffolds",
"number_of_large_contigs",
"num_of_lrg_cont",
]:
return self.number_of_large_scaffolds(argv)
elif command in ["num_of_scaffolds", "number_of_contigs", "num_of_cont"]:
return self.number_of_scaffolds(argv)
elif command in ["sum_of_contig_lengths"]:
return self.sum_of_scaffold_lengths(argv)
# alias for sequence files
elif command in ["char_freq"]:
return self.character_frequency(argv)
elif command in ["get_entry", "ge"]:
return self.faidx(argv)
elif command in ["format_converter", "ffc"]:
return self.file_format_converter(argv)
elif command in ["ml2sl"]:
return self.multiple_line_to_single_line_fasta(argv)
elif command in ["remove_short_seqs"]:
return self.remove_short_sequences(argv)
elif command in ["rename_fasta"]:
return self.rename_fasta_entries(argv)
elif command in ["reorder_by_seq_len"]:
return self.reorder_by_sequence_length(argv)
elif command in ["seq_comp"]:
return self.sequence_complement(argv)
elif command in ["seq_len"]:
return self.sequence_length(argv)
elif command in ["sl2ml"]:
return self.single_line_to_multiple_line_fasta(argv)
else:
print(
"Invalid command option. See help for a complete list of commands and aliases."
)
parser.print_help()
sys.exit(1)
# print version
def version(self):
print(
textwrap.dedent(
f"""\
{self.help_header}
"""
)
)
# alignment functions
@staticmethod
def alignment_length(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Calculate the length of an alignment.
Aliases:
alignment_length, aln_len
Command line interfaces:
bk_alignment_length, bk_aln_len
Usage:
biokit alignment_length <fasta>
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
AlignmentLength(args).run()
@staticmethod
def alignment_recoding(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Recode alignments using reduced character states.
Alignments can be recoded using established or
custom recoding schemes. Recoding schemes are
specified using the -c/--code argument. Custom
recoding schemes can be used and should be formatted
as a two column file wherein the first column is the
recoded character and the second column is the character
in the alignment.
Aliases:
alignment_recoding, aln_recoding, recode
Command line interfaces:
bk_alignment_recoding, bk_aln_recoding, bk_recode
Usage:
biokit alignment_recoding <fasta> -c/--code <code>
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
-c/--code recoding scheme to use
Codes for which recoding scheme to use
=====================================================
RY-nucleotide
R = purines (i.e., A and G)
Y = pyrimidines (i.e., T and C)
Dayhoff-6
0 = A, G, P, S, and T
1 = D, E, N, and Q
2 = H, K, and R
3 = I, L, M, and V
4 = F, W, and Y
5 = C
SandR-6
0 = A, P, S, and T
1 = D, E, N, and G
2 = Q, K, and R
3 = M, I, V, and L
4 = W and C
5 = F, Y, and H
KGB-6
0 = A, G, P, and S
1 = D, E, N, Q, H, K, R, and T
2 = M, I, and L
3 = W
4 = F and Y
5 = C and V
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
parser.add_argument("-c", "--code", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
AlignmentRecoding(args).run()
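# Example of a custom recoding file for -c/--code (two columns: the recoded
# character, then the alignment character it replaces); the values below simply
# spell out the RY-nucleotide scheme described in the help text:
#   R   A
#   R   G
#   Y   C
#   Y   T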
@staticmethod
def alignment_summary(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Summary statistics for an alignment. Reported
statistics include alignment length, number of taxa,
number of parsimony informative sites, number of variable sites,
number of constant sites, frequency of each character
(including gaps, which are considered to be '-' or '?').
Aliases:
alignment_summary, aln_summary
Command line interfaces:
bk_alignment_summary, bk_aln_summary
Usage:
biokit alignment_summary <fasta>
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
AlignmentSummary(args).run()
@staticmethod
def consensus_sequence(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Generates a consensus sequence from a multiple sequence alignment
file in FASTA format.
Aliases:
consensus_sequence, con_seq
Command line interfaces:
bk_consensus_sequence, bk_con_seq
Usage:
biokit consensus_sequence <fasta> [-t/--threshold <threshold>
-ac/--ambiguous_character <ambiguous character>]
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
-t/--threshold threshold for how common
a residue must be to be
represented
-ac/--ambiguous_character the ambiguity character to
use. Default is 'N'
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
parser.add_argument("-t", "--threshold", type=str, help=SUPPRESS)
parser.add_argument("-ac", "--ambiguous_character", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
ConsensusSequence(args).run()
@staticmethod
def constant_sites(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Calculate the number of constant sites in an
alignment.
Constant sites are defined as a site in an
alignment with the same nucleotide or amino
acid sequence (excluding gaps) among all taxa.
Aliases:
constant_sites, con_sites
Command line interfaces:
bk_constant_sites, bk_con_sites
Usage:
biokit constant_sites <fasta> [-v/--verbose]
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
-v/--verbose optional argument to print
site-by-site categorization
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
parser.add_argument("-v", "--verbose", action="store_true", required=False, help=SUPPRESS)
args = parser.parse_args(argv)
ConstantSites(args).run()
@staticmethod
def parsimony_informative_sites(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Calculate the number of parsimony informative
sites in an alignment.
Parsimony informative sites are defined as a
site in an alignment with at least two nucleotides
or amino acids that occur at least twice.
To obtain a site-by-site summary of an alignment,
use the -v/--verbose option.
Aliases:
parsimony_informative_sites, pi_sites, pis
Command line interfaces:
bk_parsimony_informative_sites, bk_pi_sites, bk_pis
Usage:
biokit parsimony_informative_sites <fasta> [-v/--verbose]
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
-v/--verbose optional argument to print
site-by-site categorization
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
parser.add_argument("-v", "--verbose", action="store_true", required=False, help=SUPPRESS)
args = parser.parse_args(argv)
ParsimonyInformativeSites(args).run()
@staticmethod
def position_specific_score_matrix(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Generates a position specific score matrix for an alignment.
Aliases:
position_specific_score_matrix, pssm
Command line interfaces:
bk_position_specific_score_matrix, bk_pssm
Usage:
biokit position_specific_score_matrix <fasta>
[-ac/--ambiguous_character <ambiguous character>]
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
-ac/--ambiguous_character the ambiguity character to
use. Default is 'N'
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
parser.add_argument("-ac", "--ambiguous_character", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
PositionSpecificScoreMatrix(args).run()
@staticmethod
def variable_sites(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Calculate the number of variable sites in an
alignment.
Variable sites are defined as a site in an
alignment with at least two nucleotide or amino
acid characters among all taxa.
Aliases:
variable_sites, var_sites, vs
Command line interfaces:
bk_variable_sites, bk_var_sites, bk_vs
Usage:
biokit variable_sites <fasta> [-v/--verbose]
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
-v/--verbose optional argument to print
site-by-site categorization
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
parser.add_argument("-v", "--verbose", action="store_true", required=False, help=SUPPRESS)
args = parser.parse_args(argv)
VariableSites(args).run()
# coding sequence functions
@staticmethod
def gc_content_first_position(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Calculate GC content of the first codon position.
The input must be the coding sequence of a gene or
genes. All genes are assumed to have sequence lengths
divisible by three.
Aliases:
gc_content_first_position, gc1
Command line interfaces:
bk_gc_content_first_position, bk_gc1
Usage:
biokit gc_content_first_position <fasta> [-v/--verbose]
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
-v, --verbose optional argument to print
the GC content of each fasta
entry
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
parser.add_argument(
"-v", "--verbose", action="store_true", required=False, help=SUPPRESS
)
args = parser.parse_args(argv)
GCContentFirstPosition(args).run()
@staticmethod
def gc_content_second_position(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Calculate GC content of the second codon position.
The input must be the coding sequence of a gene or
genes. All genes are assumed to have sequence lengths
divisible by three.
Aliases:
gc_content_second_position, gc2
Command line interfaces:
bk_gc_content_second_position, bk_gc2
Usage:
biokit gc_content_second_position <fasta> [-v/--verbose]
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
-v, --verbose optional argument to print
the GC content of each fasta
entry
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
parser.add_argument(
"-v", "--verbose", action="store_true", required=False, help=SUPPRESS
)
args = parser.parse_args(argv)
GCContentSecondPosition(args).run()
@staticmethod
def gc_content_third_position(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Calculate GC content of the third codon position.
The input must be the coding sequence of a gene or
genes. All genes are assumed to have sequence lengths
divisible by three.
Aliases:
gc_content_third_position, gc3
Command line interfaces:
bk_gc_content_third_position, bk_gc3
Usage:
biokit gc_content_third_position <fasta> [-v/--verbose]
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
-v, --verbose optional argument to print
the GC content of each fasta
entry
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
parser.add_argument(
"-v", "--verbose", action="store_true", required=False, help=SUPPRESS
)
args = parser.parse_args(argv)
GCContentThirdPosition(args).run()
@staticmethod
def gene_wise_relative_synonymous_codon_usage(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Calculate gene-wise relative synonymous codon usage (gw-RSCU).
Codon usage bias examines biases for codon usage of
a particular gene. We adapted RSCU to be applied to
individual genes rather than only codons. Specifically,
gw-RSCU is the mean (or median) RSCU value observed
in a particular gene. This provides insight into how
codon usage bias influences codon usage for a particular
gene. This function also outputs the standard deviation
of RSCU values for a given gene.
The output is col 1: the gene identifier; col 2: the
gw-RSCU based on the mean RSCU value observed in a gene;
col 3: the gw-RSCU based on the median RSCU value observed
in a gene; and col 4: the standard deviation of
RSCU values observed in a gene.
Custom genetic codes can be used as input and should
be formatted with the codon in first column and the
resulting amino acid in the second column.
Aliases:
gene_wise_relative_synonymous_codon_usage; gene_wise_rscu; gw_rscu; grscu
Command line interfaces:
bk_gene_wise_relative_synonymous_codon_usage; bk_gene_wise_rscu; bk_gw_rscu; bk_grscu
Usage:
biokit gene_wise_relative_synonymous_codon_usage <fasta>
[-tt/--translation_table <code>]
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
-tt/--translation_table Code for the translation table
to be used. Default: 1, which
is the standard code.
{translation_table_codes}
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
parser.add_argument(
"-tt", "--translation_table", type=str, required=False, help=SUPPRESS
)
args = parser.parse_args(argv)
GeneWiseRelativeSynonymousCodonUsage(args).run()
@staticmethod
def relative_synonymous_codon_usage(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Calculate relative synonymous codon usage.
Relative synonymous codon usage is the ratio
of the observed frequency of codons over the
expected frequency given that all the synonymous
codons for the same amino acids are used equally.
Custom genetic codes can be used as input and should
be formatted with the codon in first column and the
resulting amino acid in the second column.
Aliases:
relative_synonymous_codon_usage, rscu
Command line interfaces:
bk_relative_synonymous_codon_usage, bk_rscu
Usage:
biokit relative_synonymous_codon_usage <fasta>
[-tt/--translation_table <code>]
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
-tt/--translation_table Code for the translation table
to be used. Default: 1, which
is the standard code.
{translation_table_codes}
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
parser.add_argument(
"-tt", "--translation_table", type=str, required=False, help=SUPPRESS
)
args = parser.parse_args(argv)
RelativeSynonymousCodonUsage(args).run()
@staticmethod
def translate_sequence(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Translates coding sequences to amino acid
sequences. Sequences can be translated using
diverse genetic codes. For codons that can
encode two amino acids (e.g., TAG encodes
Glu or STOP in the Blastocrithidia Nuclear Code),
the standard genetic code is used.
Custom genetic codes can be used as input and should
be formatted with the codon in first column and the
resulting amino acid in the second column.
Aliases:
translate_sequence, translate_seq, trans_seq
Command line interfaces:
bk_translate_sequence, bk_translate_seq, bk_trans_seq
Usage:
biokit translate_sequence <fasta> [-tt/--translation_table <code>
-o/--output <output_file>]
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
-tt/--translation_table Code for the translation table
to be used. Default: 1, which
is the standard code.
-o/--output optional argument to write
the translated fasta file to.
Default output has the same
name as the input file with
the suffix ".translated.fa"
added to it.
{translation_table_codes}
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
parser.add_argument(
"-tt", "--translation_table", type=str, required=False, help=SUPPRESS
)
parser.add_argument("-o", "--output", type=str, required=False, help=SUPPRESS)
args = parser.parse_args(argv)
TranslateSequence(args).run()
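# Example of a custom translation table file for -tt/--translation_table
# (codon in the first column, encoded amino acid in the second):
#   ATG   M
#   TGG   W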
# fastq file functions
@staticmethod
def fastq_read_lengths(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Determine lengths of fastq reads.
Using default arguments, the average and
standard deviation of read lengths in a
fastq file will be reported. To obtain
the lengths of all fastq reads, use the
verbose option.
Aliases:
fastq_read_lengths, fastq_read_lens
Command line interfaces:
bk_fastq_read_lengths, bk_fastq_read_lens
Usage:
biokit fastq_read_lengths <fastq> [-v/--verbose]
Options
=====================================================
<fastq> first argument after
function name should be
a fastq file
-v/--verbose print length of each fastq
read
""" # noqa
),
)
parser.add_argument("fastq", type=str, help=SUPPRESS)
parser.add_argument(
"-v", "--verbose", action="store_true", required=False, help=SUPPRESS
)
args = parser.parse_args(argv)
FastQReadLengths(args).run()
@staticmethod
def subset_pe_fastq_reads(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Subset paired-end FASTQ data.
Subsetting FASTQ data may be helpful for
running test scripts or achieving equal
coverage between samples. A percentage of
total reads in paired-end FASTQ data can
be obtained with this function. Random
subsamples are obtained using seeds for
reproducibility. If no seed is specified,
a seed is generated based on the date
and time.
Output files will have the suffix "_subset.fq"
Aliases:
subset_pe_fastq_reads, subset_pe_fastq
Command line interfaces:
bk_subset_pe_fastq_reads, bk_subset_pe_fastq
Usage:
biokit subset_pe_fastq_reads <fastq1> <fastq2>
[-p/--percent <percent> -s/--seed <seed>]
Options
=====================================================
<fastq1> first argument after
function name should be
a fastq file
<fastq2> second argument after
function name should be
a fastq file
-p/--percent percentage of reads to
maintain in subsetted data.
Default: 10
-s/--seed seed for random sampling.
Default: date and time
""" # noqa
),
)
parser.add_argument("fastq1", type=str, help=SUPPRESS)
parser.add_argument("fastq2", type=str, help=SUPPRESS)
parser.add_argument("-p", "--percent", type=str, required=False, help=SUPPRESS)
parser.add_argument("-s", "--seed", type=str, required=False, help=SUPPRESS)
args = parser.parse_args(argv)
SubsetPEFastQReads(args).run()
@staticmethod
def subset_se_fastq_reads(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Subset single-end FASTQ data.
Output file will have the suffix "_subset.fq"
Aliases:
subset_se_fastq_reads, subset_se_fastq
Command line interfaces:
bk_subset_se_fastq_reads, bk_subset_se_fastq
Usage:
biokit subset_se_fastq_reads <fastq> [-p/--percent <percent>
-s/--seed <seed> -o/--output_file <output_file>]
Options
=====================================================
<fastq> first argument after
function name should be
a fastq file
-p/--percent percentage of reads to
maintain in subsetted data.
Default: 10
-s/--seed seed for random sampling.
Default: date and time
-o/--output_file output file name
""" # noqa
),
)
parser.add_argument("fastq", type=str, help=SUPPRESS)
parser.add_argument("-p", "--percent", type=str, required=False, help=SUPPRESS)
parser.add_argument("-s", "--seed", type=str, required=False, help=SUPPRESS)
parser.add_argument(
"-o", "--output_file", type=str, required=False, help=SUPPRESS
)
args = parser.parse_args(argv)
SubsetSEFastQReads(args).run()
@staticmethod
def trim_pe_adapters_fastq(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Trim adapters from paired-end FastQ data.
FASTQ data will be trimmed according to
exact match to known adapter sequences.
Output file has the suffix "_adapter_removed.fq"
or can be named by the user with the
output_file argument.
Aliases:
trim_pe_adapters_fastq_reads, trim_pe_adapters_fastq
Command line interfaces:
bk_trim_pe_adapters_fastq_reads, bk_trim_pe_adapters_fastq
Usage:
biokit trim_pe_adapters_fastq <fastq1> <fastq2>
[-a/--adapters TruSeq2-PE -l/--length 20]
Options
=====================================================
<fastq1> first argument after
function name should be
a fastq file
<fastq2> second argument after
function name should be
a fastq file
-a/--adapters adapter sequences to trim.
Default: TruSeq2-PE
-l/--length minimum length of read
to be kept. Default: 20
{adapters_available}
""" # noqa
),
)
parser.add_argument("fastq1", type=str, help=SUPPRESS)
parser.add_argument("fastq2", type=str, help=SUPPRESS)
parser.add_argument("-a", "--adapters", type=str, required=False, help=SUPPRESS)
parser.add_argument("-l", "--length", type=str, required=False, help=SUPPRESS)
args = parser.parse_args(argv)
TrimPEAdaptersFastQ(args).run()
@staticmethod
def trim_pe_fastq(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Quality trim paired-end FastQ data.
FASTQ data will be trimmed according to
quality score and length of the reads.
Specifically, the program will iterate
over a read and, once a base with a quality
below the quality threshold is found, the remainder
of the read will be trimmed. Thereafter,
the read is kept only if it is long enough.
Users can specify quality and length thresholds.
Paired reads that are maintained are saved
to files with the suffix "_paired_trimmed.fq."
Single reads that pass quality thresholds
are saved to files with the suffix
"_unpaired_trimmed.fq."
Aliases:
trim_pe_fastq_reads, trim_pe_fastq
Command line interfaces:
bk_trim_pe_fastq_reads, bk_trim_pe_fastq
Usage:
biokit trim_pe_fastq_reads <fastq1> <fastq2>
[-m/--minimum 20 -l/--length 20]
Options
=====================================================
<fastq1> first argument after
function name should be
a fastq file
<fastq2> second argument after
function name should be
a fastq file
-m/--minimum minimum quality of read
to be kept. Default: 20
-l/--length minimum length of read
to be kept. Default: 20
""" # noqa
),
)
parser.add_argument("fastq1", type=str, help=SUPPRESS)
parser.add_argument("fastq2", type=str, help=SUPPRESS)
parser.add_argument("-m", "--minimum", type=str, required=False, help=SUPPRESS)
parser.add_argument("-l", "--length", type=str, required=False, help=SUPPRESS)
args = parser.parse_args(argv)
TrimPEFastQ(args).run()
@staticmethod
def trim_se_adapters_fastq(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Trim adapters from single-end FastQ data.
FASTQ data will be trimmed according to
exact match to known adapter sequences.
Output file has the suffix "_adapter_removed.fq"
or can be named by the user with the
output_file argument.
Aliases:
trim_se_adapters_fastq_reads, trim_se_adapters_fastq
Command line interfaces:
bk_trim_se_adapters_fastq_reads, bk_trim_se_adapters_fastq
Usage:
biokit trim_se_adapters_fastq <fastq>
[-a/--adapters TruSeq2-SE -l/--length 20]
Options
=====================================================
<fastq> first argument after
function name should be
a fastq file
-a/--adapters adapter sequences to trim.
Default: TruSeq2-SE
-l/--length minimum length of read
to be kept. Default: 20
-o/--output_file output file name
{adapters_available}
""" # noqa
),
)
parser.add_argument("fastq", type=str, help=SUPPRESS)
parser.add_argument("-a", "--adapters", type=str, required=False, help=SUPPRESS)
parser.add_argument("-l", "--length", type=str, required=False, help=SUPPRESS)
parser.add_argument(
"-o", "--output_file", type=str, required=False, help=SUPPRESS
)
args = parser.parse_args(argv)
TrimSEAdaptersFastQ(args).run()
@staticmethod
def trim_se_fastq(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Quality trim single-end FastQ data.
FASTQ data will be trimmed according to
quality score and length of the reads.
Specifically, the program will iterate
over a read and, once a base with a quality
below the quality threshold is found, the remainder
of the read will be trimmed. Thereafter,
the read is kept only if it is long enough.
Users can specify quality and length thresholds.
Output file has the suffix "_trimmed.fq"
or can be named by the user with the
output_file argument.
Aliases:
trim_se_fastq_reads, trim_se_fastq
Command line interfaces:
bk_trim_se_fastq_reads, bk_trim_se_fastq
Usage:
biokit trim_se_fastq_reads <fastq>
[-m/--minimum 20 -l/--length 20]
Options
=====================================================
<fastq> first argument after
function name should be
a fastq file
-m/--minimum minimum quality of read
to be kept. Default: 20
-l/--length minimum length of read
to be kept. Default: 20
-o/--output_file output file name
""" # noqa
),
)
parser.add_argument("fastq", type=str, help=SUPPRESS)
parser.add_argument("-m", "--minimum", type=str, required=False, help=SUPPRESS)
parser.add_argument("-l", "--length", type=str, required=False, help=SUPPRESS)
parser.add_argument(
"-o", "--output_file", type=str, required=False, help=SUPPRESS
)
args = parser.parse_args(argv)
TrimSEFastQ(args).run()
# genome functions
@staticmethod
def gc_content(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Calculate GC content of a fasta file.
GC content is the fraction of bases that are
either guanines or cytosines.
Aliases:
gc_content, gc
Command line interfaces:
bk_gc_content, bk_gc
Usage:
biokit gc_content <fasta> [-v/--verbose]
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
-v, --verbose optional argument to print
the GC content of each fasta
entry
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
parser.add_argument(
"-v", "--verbose", action="store_true", required=False, help=SUPPRESS
)
args = parser.parse_args(argv)
GCContent(args).run()
@staticmethod
def genome_assembly_metrics(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Calculate L50, L90, N50, N90, GC content, assembly size,
number of scaffolds, number and sum length
of large scaffolds, frequency of A, T, C, and G.
L50: The smallest number of contigs whose length sum makes up half of the genome size.
L90: The smallest number of contigs whose length sum makes up 90% of the genome size.
N50: The sequence length of the shortest contig at half of the genome size.
N90: The sequence length of the shortest contig at 90% of the genome size.
GC content: The fraction of bases that are either guanines or cytosines.
Assembly size: The sum length of all contigs in an assembly.
Number of scaffolds: The total number of scaffolds in an assembly.
Number of large scaffolds: The total number of scaffolds that are greater than the threshold for small scaffolds.
Sum length of large scaffolds: The sum length of all large scaffolds.
Frequency of A: The number of occurrences of A corrected by assembly size.
Frequency of T: The number of occurrences of T corrected by assembly size.
Frequency of C: The number of occurrences of C corrected by assembly size.
Frequency of G: The number of occurrences of G corrected by assembly size.
Aliases:
genome_assembly_metrics, assembly_metrics
Command line interfaces:
bk_genome_assembly_metrics, bk_assembly_metrics
Usage:
biokit genome_assembly_metrics <fasta>
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
-t/--threshold threshold for what is considered
a large scaffold. Only scaffolds
with a length greater than this
value will be counted.
Default: 500
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
parser.add_argument("-t", "--threshold", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
GenomeAssemblyMetrics(args).run()
@staticmethod
def l50(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Calculates L50 for a genome assembly.
L50 is the smallest number of contigs whose length sum
makes up half of the genome size.
Aliases:
l50
Command line interfaces:
bk_l50
Usage:
biokit l50 <fasta>
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
L50(args).run()
@staticmethod
def l90(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Calculates L90 for a genome assembly.
L90 is the smallest number of contigs whose length sum
makes up 90% of the genome size.
Aliases:
l90
Command line interfaces:
bk_l90
Usage:
biokit l90 <fasta>
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
L90(args).run()
@staticmethod
def longest_scaffold(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Determine the length of the longest scaffold in a genome assembly.
Aliases:
longest_scaffold, longest_scaff, longest_contig, longest_cont
Command line interfaces:
bk_longest_scaffold, bk_longest_scaff, bk_longest_contig, bk_longest_cont
Usage:
biokit longest_scaffold <fasta>
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
LongestScaffold(args).run()
@staticmethod
def n50(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Calculates N50 for a genome assembly.
N50 is the sequence length of the shortest contig at half of the genome size.
Aliases:
n50
Command line interfaces:
bk_n50
Usage:
biokit n50 <fasta>
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
N50(args).run()
@staticmethod
def n90(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Calculates N90 for a genome assembly.
N90 is the sequence length of the shortest contig at 90% of the genome size.
Aliases:
n90
Command line interfaces:
bk_n90
Usage:
biokit n90 <fasta>
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
N90(args).run()
@staticmethod
def number_of_large_scaffolds(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Calculate number and total sequence length of
large scaffolds. These values are reported in
columns 1 and 2 of the output, respectively.
Aliases:
number_of_large_scaffolds, num_of_lrg_scaffolds,
number_of_large_contigs, num_of_lrg_cont
Command line interfaces:
bk_number_of_large_scaffolds, bk_num_of_lrg_scaffolds,
bk_number_of_large_contigs, bk_num_of_lrg_cont
Usage:
biokit number_of_large_scaffolds <fasta> [-t/--threshold <int>]
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
-t/--threshold threshold for what is considered
a large scaffold. Only scaffolds
with a length greater than this
value will be counted.
Default: 500
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
parser.add_argument("-t", "--threshold", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
NumberOfLargeScaffolds(args).run()
@staticmethod
def number_of_scaffolds(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Calculate the number of scaffolds or entries
in a FASTA file. In this way, a user can also
determine the number of predicted genes in a
coding sequence or protein FASTA file with this
function.
Aliases:
number_of_scaffolds, num_of_scaffolds,
number_of_contigs, num_of_cont
Command line interfaces:
bk_number_of_scaffolds, bk_num_of_scaffolds,
bk_number_of_contigs, bk_num_of_cont
Usage:
biokit number_of_scaffolds <fasta>
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
NumberOfScaffolds(args).run()
@staticmethod
def sum_of_scaffold_lengths(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Determine the sum of scaffold lengths.
The intended use of this function is to determine
the length of a genome assembly, but it can also be
used, for example, to determine the sum length
of all coding sequences.
Aliases:
sum_of_scaffold_lengths, sum_of_contig_lengths
Command line interfaces:
bk_sum_of_scaffold_lengths, bk_sum_of_contig_lengths
Usage:
biokit sum_of_scaffold_lengths <fasta>
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
SumOfScaffoldLengths(args).run()
# text functions
@staticmethod
def character_frequency(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Calculate the frequency of characters in a FASTA file.
Aliases:
character_frequency, char_freq
Command line interfaces:
bk_character_frequency, bk_char_freq
Usage:
biokit character_frequency <fasta>
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
CharacterFrequency(args).run()
@staticmethod
def faidx(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Extracts sequence entry from fasta file.
This function works similarly to the faidx function
in samtools, but does not require indexing the
sequence file.
Aliases:
faidx, get_entry, ge
Command line interfaces:
bk_faidx, bk_get_entry, bk_ge
Usage:
biokit faidx <fasta> -e/--entry <fasta entry>
Options
=====================================================
<fasta> first argument after
function name should be a
query fasta file
-e/--entry entry name to be extracted
from the inputted fasta file
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
parser.add_argument("-e", "--entry", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
Faidx(args).run()
@staticmethod
def file_format_converter(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Converts a multiple sequence file from one format to another.
Acceptable file formats include FASTA, Clustal, MAF, Mauve,
Phylip, Phylip-sequential, Phylip-relaxed, and Stockholm.
Input and output file formats are specified with the
--input_file_format and --output_file_format arguments; input
and output files are specified with the --input_file and
--output_file arguments.
Aliases:
file_format_converter, format_converter, ffc
Command line interfaces:
bk_file_format_converter, bk_format_converter, bk_ffc
Usage:
biokit file_format_converter -i/--input_file <input_file>
-iff/--input_file_format <input_file_format>
-o/--output_file <output_file>
-off/--output_file_format <output_file_format>
Options
=====================================================
-i/--input_file input file name
-iff/--input_file_format input file format
-o/--output_file output file name
-off/--output_file_format output file format
Input and output file formats are specified using one of
the following strings: fasta, clustal, maf, mauve, phylip,
phylip_sequential, phylip_relaxed, & stockholm.
""" # noqa
),
)
parser.add_argument("-i", "--input_file", type=str, help=SUPPRESS)
parser.add_argument("-off", "--output_file_format", type=str, help=SUPPRESS)
parser.add_argument("-iff", "--input_file_format", type=str, help=SUPPRESS)
parser.add_argument("-o", "--output_file", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
FileFormatConverter(args).run()
@staticmethod
def multiple_line_to_single_line_fasta(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Converts FASTA files with multiple lines
per sequence to a FASTA file with the sequence
represented on one line.
Aliases:
multiple_line_to_single_line_fasta, ml2sl
Command line interfaces:
bk_multiple_line_to_single_line_fasta, bk_ml2sl
Usage:
biokit multiple_line_to_single_line_fasta <fasta>
[-o/--output <output_file>]
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
-o/--output optional argument to name
the output file
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
MultipleLineToSingleLineFasta(args).run()
@staticmethod
def remove_fasta_entry(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Remove FASTA entry from multi-FASTA file.
Output will have the suffix "pruned.fa" unless
the user specifies a different output file name.
Aliases:
remove_fasta_entry
Command line interfaces:
bk_remove_fasta_entry
Usage:
biokit remove_fasta_entry <fasta> -e/--entry <entry>
[-o/--output <output_file>]
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
-e/--entry entry name to be removed
from the inputted fasta file
-o/--output optional argument to write
the renamed fasta file to.
Default output has the same
name as the input file with
the suffix "pruned.fa" added
to it.
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
parser.add_argument("-e", "--entry", type=str, help=SUPPRESS)
parser.add_argument("-o", "--output", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
RemoveFastaEntry(args).run()
@staticmethod
def remove_short_sequences(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Remove short sequences from a multi-FASTA file.
Short sequences are defined as having a length
less than 500. Users can specify their own threshold.
All sequences greater than the threshold will be
kept in the resulting file.
Output will have the suffix "long_seqs.fa" unless
the user specifies a different output file name.
Aliases:
remove_short_sequences; remove_short_seqs
Command line interfaces:
bk_remove_short_sequences; bk_remove_short_seqs
Usage:
biokit remove_short_sequences <fasta> -t/--threshold
<threshold> [-o/--output <output_file>]
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
-t/--threshold threshold for short sequences.
Sequences greater than this
value will be kept
-o/--output optional argument to write
the renamed fasta file to.
Default output has the same
name as the input file with
the suffix "long_seqs.fa" added
to it.
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
parser.add_argument("-t", "--threshold", type=str, help=SUPPRESS)
parser.add_argument("-o", "--output", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
RemoveShortSequences(args).run()
@staticmethod
def rename_fasta_entries(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Renames fasta entries.
Renaming fasta entries will follow the scheme of a tab-delimited
file wherein the first column is the current fasta entry name and
the second column is the new fasta entry name in the resulting
output file.
Aliases:
rename_fasta_entries, rename_fasta
Command line interfaces:
bk_rename_fasta_entries, bk_rename_fasta
Usage:
biokit rename_fasta_entries <fasta> -i/--idmap <idmap>
[-o/--output <output_file>]
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
-i/--idmap identifier map of current FASTA
names (col1) and desired FASTA
names (col2)
-o/--output optional argument to write
the renamed fasta file to.
Default output has the same
name as the input file with
the suffix ".renamed.fa" added
to it.
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
parser.add_argument("-i", "--idmap", type=str, help=SUPPRESS)
parser.add_argument("-o", "--output", type=str, required=False, help=SUPPRESS)
args = parser.parse_args(argv)
RenameFastaEntries(args).run()
@staticmethod
def reorder_by_sequence_length(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Reorder FASTA file entries from the longest entry
to the shortest entry.
Aliases:
reorder_by_sequence_length, reorder_by_seq_len
Command line interfaces:
bk_reorder_by_sequence_length, bk_reorder_by_seq_len
Usage:
biokit reorder_by_sequence_length <fasta> [-o/--output <output_file>]
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
-o/--output optional argument to write
the reordered fasta file to.
Default output has the same
name as the input file with
the suffix ".reordered.fa" added
to it.
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
parser.add_argument("-o", "--output", type=str, required=False, help=SUPPRESS)
args = parser.parse_args(argv)
ReorderBySequenceLength(args).run()
@staticmethod
def sequence_complement(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Generates the sequence complement for all entries
in a multi-FASTA file. To generate a reverse sequence
complement, add the -r/--reverse argument.
Aliases:
sequence_complement, seq_comp
Command line interfaces:
bk_sequence_complement, bk_seq_comp
Usage:
biokit sequence_complement <fasta> [-r/--reverse]
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
-r/--reverse if used, the reverse complement
sequence will be generated
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
parser.add_argument(
"-r", "--reverse", action="store_true", required=False, help=SUPPRESS
)
args = parser.parse_args(argv)
SequenceComplement(args).run()
@staticmethod
def sequence_length(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Calculate sequence length of each FASTA entry.
Aliases:
sequence_length, seq_len
Command line interfaces:
bk_sequence_length, bk_seq_len
Usage:
biokit sequence_length <fasta>
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
SequenceLength(args).run()
@staticmethod
def single_line_to_multiple_line_fasta(argv):
parser = ArgumentParser(
add_help=True,
usage=SUPPRESS,
formatter_class=RawDescriptionHelpFormatter,
description=textwrap.dedent(
f"""\
{help_header}
Converts FASTA files with single lines per
sequence to a FASTA file with the sequence
on multiple lines. Each line will have 60
characters, following standard NCBI format.
Aliases:
single_line_to_multiple_line_fasta, sl2ml
Command line interfaces:
bk_single_line_to_multiple_line_fasta, bk_sl2ml
Usage:
biokit single_line_to_multiple_line_fasta <fasta>
Options
=====================================================
<fasta> first argument after
function name should be
a fasta file
""" # noqa
),
)
parser.add_argument("fasta", type=str, help=SUPPRESS)
args = parser.parse_args(argv)
SingleLineToMultipleLineFasta(args).run()
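# Module-level wrappers: these back the bk_* command line interfaces listed
# in the help messages above by forwarding arguments to the matching Biokit
# command.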
def main(argv=None):
Biokit()
# Alignment-based functions
def alignment_length(argv=None):
Biokit.alignment_length(sys.argv[1:])
def alignment_recoding(argv=None):
Biokit.alignment_recoding(sys.argv[1:])
def alignment_summary(argv=None):
Biokit.alignment_summary(sys.argv[1:])
def consensus_sequence(argv=None):
Biokit.consensus_sequence(sys.argv[1:])
def constant_sites(argv=None):
Biokit.constant_sites(sys.argv[1:])
def parsimony_informative_sites(argv=None):
Biokit.parsimony_informative_sites(sys.argv[1:])
def position_specific_score_matrix(argv=None):
Biokit.position_specific_score_matrix(sys.argv[1:])
def variable_sites(argv=None):
Biokit.variable_sites(sys.argv[1:])
# Coding sequences-based functions
def gc_content_first_position(argv=None):
Biokit.gc_content_first_position(sys.argv[1:])
def gc_content_second_position(argv=None):
Biokit.gc_content_second_position(sys.argv[1:])
def gc_content_third_position(argv=None):
Biokit.gc_content_third_position(sys.argv[1:])
def gene_wise_relative_synonymous_codon_usage(argv=None):
Biokit.gene_wise_relative_synonymous_codon_usage(sys.argv[1:])
def relative_synonymous_codon_usage(argv=None):
Biokit.relative_synonymous_codon_usage(sys.argv[1:])
def translate_sequence(argv=None):
Biokit.translate_sequence(sys.argv[1:])
# FASTQ-based functions
def fastq_read_lengths(argv=None):
Biokit.fastq_read_lengths(sys.argv[1:])
def subset_pe_fastq_reads(argv=None):
Biokit.subset_pe_fastq_reads(sys.argv[1:])
def subset_se_fastq_reads(argv=None):
Biokit.subset_se_fastq_reads(sys.argv[1:])
def trim_pe_adapters_fastq(argv=None):
Biokit.trim_pe_adapters_fastq(sys.argv[1:])
def trim_pe_fastq(argv=None):
Biokit.trim_pe_fastq(sys.argv[1:])
def trim_se_adapters_fastq(argv=None):
Biokit.trim_se_adapters_fastq(sys.argv[1:])
def trim_se_fastq(argv=None):
Biokit.trim_se_fastq(sys.argv[1:])
# genome-based functions
def gc_content(argv=None):
Biokit.gc_content(sys.argv[1:])
def genome_assembly_metrics(argv=None):
Biokit.genome_assembly_metrics(sys.argv[1:])
def l50(argv=None):
Biokit.l50(sys.argv[1:])
def l90(argv=None):
Biokit.l90(sys.argv[1:])
def longest_scaffold(argv=None):
Biokit.longest_scaffold(sys.argv[1:])
def n50(argv=None):
Biokit.n50(sys.argv[1:])
def n90(argv=None):
Biokit.n90(sys.argv[1:])
def number_of_large_scaffolds(argv=None):
Biokit.number_of_large_scaffolds(sys.argv[1:])
def number_of_scaffolds(argv=None):
Biokit.number_of_scaffolds(sys.argv[1:])
def sum_of_scaffold_lengths(argv=None):
Biokit.sum_of_scaffold_lengths(sys.argv[1:])
# sequence-based functions
def character_frequency(argv=None):
Biokit.character_frequency(sys.argv[1:])
def faidx(argv=None):
Biokit.faidx(sys.argv[1:])
def file_format_converter(argv=None):
Biokit.file_format_converter(sys.argv[1:])
def multiple_line_to_single_line_fasta(argv=None):
Biokit.multiple_line_to_single_line_fasta(sys.argv[1:])
def remove_fasta_entry(argv=None):
Biokit.remove_fasta_entry(sys.argv[1:])
def remove_short_sequences(argv=None):
Biokit.remove_short_sequences(sys.argv[1:])
def rename_fasta_entries(argv=None):
Biokit.rename_fasta_entries(sys.argv[1:])
def reorder_by_sequence_length(argv=None):
Biokit.reorder_by_sequence_length(sys.argv[1:])
def sequence_complement(argv=None):
Biokit.sequence_complement(sys.argv[1:])
def sequence_length(argv=None):
Biokit.sequence_length(sys.argv[1:])
def single_line_to_multiple_line_fasta(argv=None):
Biokit.single_line_to_multiple_line_fasta(sys.argv[1:])
```
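The assembly metrics defined in the help text above (N50/N90 and L50/L90) are easier to grasp with a worked example. The following sketch is illustrative only and is not part of the BioKIT code; the function name `n_and_l_metric` is made up for this example.

```python
# Illustrative sketch (not part of BioKIT): compute Nx/Lx from contig lengths.
def n_and_l_metric(contig_lengths, fraction=0.5):
    """Return (Nx, Lx) for the given fraction, e.g. 0.5 gives (N50, L50)."""
    lengths = sorted(contig_lengths, reverse=True)
    target = sum(lengths) * fraction
    running = 0
    for count, length in enumerate(lengths, start=1):
        running += length
        if running >= target:
            # Nx: length of the shortest contig needed to reach the cutoff
            # Lx: smallest number of contigs whose lengths sum to the cutoff
            return length, count


# Contigs of length 80, 70, 50, 40, 30 and 20 sum to 290; half of that is 145.
# 80 + 70 = 150 >= 145, so N50 is 70 and L50 is 2.
print(n_and_l_metric([80, 70, 50, 40, 30, 20]))       # (70, 2)
print(n_and_l_metric([80, 70, 50, 40, 30, 20], 0.9))  # (30, 5), i.e. N90/L90
```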
#### File: services/alignment/base.py
```python
from ..base import BaseService
class Alignment(BaseService):
def __init__(
self,
*args,
code=None,
fasta=None,
ambiguous_character=None,
threshold=None,
verbose=None,
):
self.code = code
self.fasta = fasta
self.ambiguous_character = ambiguous_character
self.threshold = threshold
self.verbose = verbose
def determine_pis_vs_cs(self, alignment, alignment_length):
"""
determine number of parsimony informative,
variable, and constant sites in an alignment
"""
parsimony_informative_sites = 0
variable_sites = 0
constant_sites = 0
site_summary = []
for i in range(0, alignment_length):
temp = []
temp.append(i)
seq_at_position = ""
seq_at_position += alignment[:, i]
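# normalize the column: uppercase it and drop missing data ("?") and gap ("-") characters before counting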
seq_at_position = seq_at_position.upper()
seq_at_position = seq_at_position.replace("?", "")
seq_at_position = seq_at_position.replace("-", "")
num_occurences = {}
for char in set(seq_at_position):
num_occurences[char] = seq_at_position.count(char)
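# keep only characters observed at least twice; singletons cannot make a site parsimony informative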
d = dict((k, v) for k, v in num_occurences.items() if v >= 2)
# at least two distinct characters each occur two or more times -> parsimony informative
if len(d) >= 2:
parsimony_informative_sites += 1
temp.append("parsimony_informative_site")
# if exactly one character occurs at least twice (any remaining characters are singletons),
# the site is not parsimony informative but it is counted as constant
elif len(d) == 1 and len(num_occurences) >= 1:
constant_sites += 1
temp.append("constant_site")
else:
temp.append("Not_pis_vs_cs")
if len(d) > 1 and len(num_occurences) >= 2:
variable_sites += 1
if temp[1]:
new_str = temp[1] + "_and_variable_site"
temp[1] = new_str
else:
temp.append("variable_site")
site_summary.append(temp)
return parsimony_informative_sites, variable_sites, constant_sites, site_summary
```
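The per-column labelling used in `determine_pis_vs_cs` can be condensed into a small standalone sketch (plain Python, independent of the package; the separate variable-site counter tracked in the method above is omitted here):

```python
from collections import Counter

def classify_column(column: str) -> str:
    """Label one alignment column with the same counting rule as determine_pis_vs_cs."""
    cleaned = column.upper().replace("?", "").replace("-", "")
    counts = Counter(cleaned)
    # characters that appear at least twice in the column
    repeated = {char: n for char, n in counts.items() if n >= 2}
    if len(repeated) >= 2:
        return "parsimony_informative_site"
    if len(repeated) == 1 and len(counts) >= 1:
        return "constant_site"
    return "Not_pis_vs_cs"

# classify_column("AATTG") -> "parsimony_informative_site"
# classify_column("AAAA-") -> "constant_site"
# classify_column("AGTC")  -> "Not_pis_vs_cs"
```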
#### File: services/alignment/consensus_sequence.py
```python
import re
from Bio.Align import AlignInfo
from .base import Alignment
from ...helpers.files import read_alignment_alignio
class ConsensusSequence(Alignment):
def __init__(self, args) -> None:
super().__init__(**self.process_args(args))
def run(self):
alignment = read_alignment_alignio(self.fasta)
summary_align = AlignInfo.SummaryInfo(alignment)
if not self.ambiguous_character:
ambiguous_character = "N"
else:
ambiguous_character = self.ambiguous_character
if not self.threshold:
threshold = 0.7
else:
threshold = float(self.threshold)
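# dumb_consensus reports the most frequent residue at each column when its fraction clears the threshold, and the ambiguous character otherwise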
consensus = summary_align.dumb_consensus(
threshold=threshold, ambiguous=ambiguous_character
)
header = ">" + re.sub("^.*/", "", str(self.fasta)) + ".consensus"
print(f"{header}\n{consensus}")
def process_args(self, args):
return dict(
fasta=args.fasta,
threshold=args.threshold,
ambiguous_character=args.ambiguous_character,
)
```
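A minimal sketch of driving this service directly, assuming the package is installed; the module path is inferred from the file layout above and the FASTA filename is a placeholder:

```python
from types import SimpleNamespace

# Module path inferred from the repository layout shown above.
from biokit.services.alignment.consensus_sequence import ConsensusSequence

# The service expects an argparse-style namespace with these three attributes.
args = SimpleNamespace(fasta="alignment.fa", threshold=0.8, ambiguous_character="N")
ConsensusSequence(args).run()  # prints a FASTA-formatted consensus to stdout
```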
#### File: services/fastq/trim_pe_adapters_fastq.py
```python
from os import path
import re
from Bio.SeqIO.QualityIO import FastqGeneralIterator
from .base import FastQ
here = path.dirname(__file__)
class TrimPEAdaptersFastQ(FastQ):
def __init__(self, args) -> None:
super().__init__(**self.process_args(args))
def run(self):
adapter_table = self.read_adapter_table(self.adapters) # noqa
good_reads_paired_1 = []
good_reads_paired_2 = []
good_reads_unpaired_1 = []
good_reads_unpaired_2 = []
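# reads whose mate is dropped go to the unpaired buckets; the counters below feed the summary printed at the end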
cnt = 0
kept = 0
removed = 0
adapter_removed = 0
with open(self.fastq1) as in_handle1, open(self.fastq2) as in_handle2:
for (title1, seq1, qual1), (title2, seq2, qual2) in zip(
FastqGeneralIterator(in_handle1), FastqGeneralIterator(in_handle2)
):
# logic for keeping a read or not
keep_1 = True
keep_2 = True
seq_record_len = len(seq1) # cache this for later
if seq_record_len < self.length:
# Too short to keep
keep_1 = False
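# scan read 1 for every adapter; the UnboundLocalError branch handles the first pass, when index1 is not yet bound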
for _, adapter_seq in adapter_table.items():
try:
if index1 < seq1.find(adapter_seq) and index1 != -1: # noqa
index1 = seq1.find(adapter_seq)
len_adaptor = len(adapter_seq)
except UnboundLocalError:
index1 = seq1.find(adapter_seq)
len_adaptor = len(adapter_seq)
if seq_record_len - index1 - len_adaptor < self.length:
keep_1 = False
seq_record_len = len(seq2) # cache this for later
if seq_record_len < self.length:
# Too short to keep
keep_2 = False
for _, adapter_seq in adapter_table.items():
try:
if index2 < seq2.find(adapter_seq) and index2 != -1: # noqa
index2 = seq2.find(adapter_seq)
len_adaptor = len(adapter_seq)
except UnboundLocalError:
index2 = seq2.find(adapter_seq)
len_adaptor = len(adapter_seq)
if seq_record_len - index2 - len_adaptor < self.length:
keep_2 = False
# keep both reads
if keep_1 and keep_2:
if index1 == -1:
# adaptor not found, so won't trim
good_reads_paired_1.append("@" + title1)
good_reads_paired_1.append(seq1)
good_reads_paired_1.append("+" + title1)
good_reads_paired_1.append(qual1)
kept += 1
elif seq_record_len - index1 - len_adaptor >= self.length:
# after trimming this will still be long enough
good_reads_paired_1.append("@" + title1)
good_reads_paired_1.append(seq1[index1 + len_adaptor:])
good_reads_paired_1.append("+" + title1)
good_reads_paired_1.append(qual1)
kept += 1
adapter_removed += 1
if index2 == -1:
# adaptor not found, so won't trim
good_reads_paired_2.append("@" + title2)
good_reads_paired_2.append(seq2)
good_reads_paired_2.append("+" + title2)
good_reads_paired_2.append(qual2)
kept += 1
elif seq_record_len - index2 - len_adaptor >= self.length:
# after trimming this will still be long enough
good_reads_paired_2.append("@" + title2)
good_reads_paired_2.append(seq2[index2 + len_adaptor:])
good_reads_paired_2.append("+" + title2)
good_reads_paired_2.append(qual2)
kept += 1
adapter_removed += 1
# keep 1 but not 2
elif keep_1 and not keep_2:
if index1 == -1:
# adaptor not found, so won't trim
good_reads_unpaired_1.append("@" + title1)
good_reads_unpaired_1.append(seq1)
good_reads_unpaired_1.append("+" + title1)
good_reads_unpaired_1.append(qual1)
kept += 1
elif seq_record_len - index1 - len_adaptor >= self.length:
# after trimming this will still be long enough
good_reads_unpaired_1.append("@" + title1)
good_reads_unpaired_1.append(seq1[index1 + len_adaptor:])
good_reads_unpaired_1.append("+" + title1)
good_reads_unpaired_1.append(qual1)
kept += 1
adapter_removed += 1
# keep 2 but not 1
else:
if index2 == -1:
# adaptor not found, so won't trim
good_reads_unpaired_2.append("@" + title2)
good_reads_unpaired_2.append(seq2)
good_reads_unpaired_2.append("+" + title2)
good_reads_unpaired_2.append(qual2)
kept += 1
elif seq_record_len - index2 - len_adaptor >= self.length:
# after trimming this will still be long enough
good_reads_unpaired_2.append("@" + title2)
good_reads_unpaired_2.append(seq2[index2 + len_adaptor:])
good_reads_unpaired_2.append("+" + title2)
good_reads_unpaired_2.append(qual2)
kept += 1
adapter_removed += 1
cnt += 1
print(
f"Reads processed: {cnt}\nReads kept: {kept}\nReads removed: {removed}\nAdapaters removed: {adapter_removed}"
)
# write output files
# write paired output files
if len(good_reads_paired_1) != 0:
output_file_paired_1 = re.sub(
".fastq$|.fq$", "_paired_adapter_trimmed.fq", self.fastq1
)
with open(output_file_paired_1, "w") as output_fastq_file_name_1:
output_fastq_file_name_1.write("\n".join(good_reads_paired_1))
if len(good_reads_paired_2) != 0:
output_file_paired_2 = re.sub(
".fastq$|.fq$", "_paired_adapter_trimmed.fq", self.fastq2
)
with open(output_file_paired_2, "w") as output_fastq_file_name_2:
output_fastq_file_name_2.write("\n".join(good_reads_paired_2))
# write unpaired output files
if len(good_reads_unpaired_1) != 0:
output_file_unpaired_1 = re.sub(
".fastq$|.fq$", "_unpaired_adapter_trimmed.fq", self.fastq1
)
with open(output_file_unpaired_1, "w") as output_fastq_file_name_1:
output_fastq_file_name_1.write("\n".join(good_reads_unpaired_1))
if len(good_reads_unpaired_2) != 0:
output_file_unpaired_2 = re.sub(
".fastq$|.fq$", "_unpaired_adapter_trimmed.fq", self.fastq2
)
with open(output_file_unpaired_2, "w") as output_fastq_file_name_2:
output_fastq_file_name_2.write("\n".join(good_reads_unpaired_2))
def process_args(self, args):
if args.adapters is None:
adapters = "TruSeq2-PE"
else:
adapters = args.adapters
if args.length is None:
length = 20
else:
length = int(args.length)
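# defaults when not supplied: the TruSeq2-PE adapter set and a 20 bp minimum read length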
return dict(
fastq1=args.fastq1,
fastq2=args.fastq2,
adapters=adapters,
length=length,
)
```
#### File: services/text/remove_fasta_entry.py
```python
from Bio import SeqIO
from .base import Text
from ...helpers.files import read_and_parse_fasta_seqio
class RemoveFastaEntry(Text):
def __init__(self, args) -> None:
super().__init__(**self.process_args(args))
def run(self):
record_dict = read_and_parse_fasta_seqio(self.fasta)
out_records = []
for i in record_dict:
if i.name != self.entry:
out_records.append(i)
SeqIO.write(out_records, self.output, "fasta")
def process_args(self, args):
if args.output is None:
output = args.fasta + ".pruned.fa"
else:
output = args.output
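# reading from stdin ("-") would otherwise yield "-.pruned.fa", so fall back to a plain name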
if output == "-.pruned.fa":
output = "pruned.fa"
return dict(fasta=args.fasta, entry=args.entry, output=output)
```
#### File: services/text/remove_short_sequences.py
```python
from Bio import SeqIO
from .base import Text
from ...helpers.files import read_and_parse_fasta_seqio
class RemoveShortSequences(Text):
def __init__(self, args) -> None:
super().__init__(**self.process_args(args))
def run(self):
record_dict = read_and_parse_fasta_seqio(self.fasta)
out_records = []
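# keep only records strictly longer than the threshold (500 bp unless overridden)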
for i in record_dict:
if len(i.seq) > self.threshold:
out_records.append(i)
SeqIO.write(out_records, self.output, "fasta")
def process_args(self, args):
if args.threshold is None:
threshold = 500
else:
threshold = int(args.threshold)
if args.output is None:
output = args.fasta + ".long_seqs.fa"
else:
output = args.output
return dict(
fasta=args.fasta,
output=output,
threshold=threshold
)
```
#### File: services/text/sequence_complement.py
```python
from .base import Text
from ...helpers.files import read_and_parse_fasta_seqio
class SequenceComplement(Text):
def __init__(self, args) -> None:
super().__init__(**self.process_args(args))
def run(self):
records = read_and_parse_fasta_seqio(self.fasta)
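# self.reverse toggles between the plain complement and the reverse complement of each record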
if not self.reverse:
for seq_record in records:
print(f">{seq_record.id}\n{seq_record.seq.complement()}")
else:
for seq_record in records:
print(f">{seq_record.id}\n{seq_record.seq.reverse_complement()}")
def process_args(self, args):
return dict(
fasta=args.fasta,
reverse=args.reverse,
)
```
#### File: BioKIT/tests/conftest.py
```python
from pathlib import Path
here = Path(__file__)
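# register the custom "integration" marker so pytest does not warn when it is used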
def pytest_configure(config):
config.addinivalue_line("markers", "integration: mark as integration test")
```
#### File: integration/alignment/test_parsimony_informative_sites.py
```python
import pytest
from mock import patch, call
from pathlib import Path
import sys
from biokit.biokit import Biokit
here = Path(__file__)
@pytest.mark.integration
class TestParsimonyInformativeSites(object):
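# each test patches sys.argv and builtins.print, runs Biokit(), and asserts on what was printed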
@patch("builtins.print")
def test_parsimony_informative_sites_invalid_input(self, mocked_print): # noqa
with pytest.raises(SystemExit) as pytest_wrapped_e:
Biokit()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 2
@patch("builtins.print")
def test_parsimony_informative_sites_simple(self, mocked_print):
expected_result = 3
testargs = [
"biokit",
"parsimony_informative_sites",
f"{here.parent.parent.parent}/sample_files/simple.fa",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_parsimony_informative_sites_alias0(self, mocked_print):
expected_result = 3
testargs = [
"biokit",
"pi_sites",
f"{here.parent.parent.parent}/sample_files/simple.fa",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_parsimony_informative_sites_alias1(self, mocked_print):
expected_result = 3
testargs = [
"biokit",
"pis",
f"{here.parent.parent.parent}/sample_files/simple.fa",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
```
#### File: integration/coding_sequences/test_gene_wise_relative_synonymous_codon_usage.py
```python
import pytest
from mock import patch, call
from pathlib import Path
import sys
from biokit.biokit import Biokit
here = Path(__file__)
@pytest.mark.integration
class TestGeneWiseRelativeSynonymousCodonUsage(object):
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_invalid_input(self, mocked_print): # noqa
with pytest.raises(SystemExit) as pytest_wrapped_e:
Biokit()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 2
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1543\t1.2233\t0.4166\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.166\t1.2411\t0.4201\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1321\t1.2233\t0.3879\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1782\t1.2411\t0.4411\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.2074\t1.2749\t0.4157\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1584\t1.2411\t0.4245\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1528\t1.2411\t0.419\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.1722\t1.2411\t0.4286\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1532\t1.2411\t0.4178\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.167\t1.2411\t0.4159\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1703\t1.2411\t0.4139\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1522\t1.2411\t0.4257"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt1(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1543\t1.2233\t0.4166\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.166\t1.2411\t0.4201\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1321\t1.2233\t0.3879\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1782\t1.2411\t0.4411\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.2074\t1.2749\t0.4157\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1584\t1.2411\t0.4245\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1528\t1.2411\t0.419\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.1722\t1.2411\t0.4286\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1532\t1.2411\t0.4178\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.167\t1.2411\t0.4159\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1703\t1.2411\t0.4139\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1522\t1.2411\t0.4257"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"1",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt2(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1751\t1.2411\t0.424\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.1774\t1.2411\t0.4238\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1396\t1.2411\t0.3906\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1954\t1.2587\t0.4469\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.2245\t1.3265\t0.4233\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1806\t1.2411\t0.4346\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1724\t1.2411\t0.4257\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.188\t1.2411\t0.4351\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1757\t1.2411\t0.4283\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.188\t1.2587\t0.426\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.187\t1.2411\t0.4226\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1665\t1.2411\t0.4291"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"2",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt3(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1479\t1.2233\t0.394\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.1449\t1.2233\t0.3953\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1216\t1.2233\t0.362\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1606\t1.2411\t0.4179\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.1726\t1.2411\t0.3888\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1449\t1.2233\t0.3983\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1502\t1.2411\t0.4032\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.1508\t1.2233\t0.4085\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1506\t1.2233\t0.4003\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.1546\t1.2411\t0.3951\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1591\t1.2411\t0.3938\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.134\t1.2233\t0.4009"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"3",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt4(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1688\t1.2411\t0.428\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.1732\t1.2411\t0.4257\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1349\t1.2411\t0.3906\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1846\t1.2411\t0.4462\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.2126\t1.2952\t0.42\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1692\t1.2411\t0.4327\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1655\t1.2411\t0.4291\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.1817\t1.2411\t0.4361\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1674\t1.2411\t0.429\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.1776\t1.2411\t0.424\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1815\t1.2411\t0.4224\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1585\t1.2411\t0.4307"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"4",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt5(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1468\t1.2411\t0.3763\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.1477\t1.2411\t0.3749\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1253\t1.2411\t0.3636\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.157\t1.2411\t0.3817\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.1989\t1.2749\t0.3887\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1496\t1.2411\t0.3868\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.141\t1.2411\t0.3778\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.1562\t1.2411\t0.3854\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1489\t1.2411\t0.3831\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.1626\t1.2411\t0.3848\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.156\t1.2411\t0.3711\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1365\t1.2411\t0.3804"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"5",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt6(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.2\t1.2233\t0.4932\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.1959\t1.2411\t0.4703\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1704\t1.2233\t0.4683\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.212\t1.2411\t0.4952\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.2459\t1.2749\t0.4784\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.205\t1.2411\t0.4982\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.2022\t1.2411\t0.5036\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.2189\t1.2411\t0.4998\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1855\t1.2411\t0.4706\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.2091\t1.2411\t0.487\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.2087\t1.2411\t0.4787\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1884\t1.2411\t0.4876"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"6",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt9(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1452\t1.2463\t0.3785\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.1447\t1.2463\t0.3772\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1309\t1.2463\t0.3609\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1575\t1.2587\t0.3832\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.2017\t1.2946\t0.3885\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1476\t1.2463\t0.389\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.142\t1.2463\t0.3785\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.157\t1.2587\t0.3863\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1498\t1.2587\t0.3843\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.1637\t1.2587\t0.3851\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1566\t1.2587\t0.3727\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.135\t1.2463\t0.3825"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"9",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt10(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1629\t1.2411\t0.4194\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.173\t1.2411\t0.4243\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1371\t1.2411\t0.3892\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1838\t1.2411\t0.4442\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.2117\t1.2952\t0.4168\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1634\t1.2411\t0.4266\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1591\t1.2411\t0.4223\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.1756\t1.2411\t0.4311\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1584\t1.2411\t0.4203\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.1708\t1.2411\t0.4169\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1759\t1.2411\t0.4169\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1586\t1.2411\t0.4289"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"10",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt11(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1543\t1.2233\t0.4166\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.166\t1.2411\t0.4201\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1321\t1.2233\t0.3879\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1782\t1.2411\t0.4411\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.2074\t1.2749\t0.4157\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1584\t1.2411\t0.4245\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1528\t1.2411\t0.419\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.1722\t1.2411\t0.4286\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1532\t1.2411\t0.4178\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.167\t1.2411\t0.4159\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1703\t1.2411\t0.4139\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1522\t1.2411\t0.4257"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"11",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt12(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1484\t1.2233\t0.4063\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.1612\t1.2411\t0.4111\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1289\t1.2233\t0.3797\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1728\t1.2411\t0.4325\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.1992\t1.2749\t0.4044\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1516\t1.2411\t0.4137\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1475\t1.2411\t0.4115\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.1677\t1.2411\t0.419\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1506\t1.2411\t0.4089\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.1611\t1.2411\t0.4071\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1646\t1.2411\t0.4053\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1477\t1.2411\t0.4171"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"12",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt13(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.149\t1.2411\t0.38\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.1508\t1.2411\t0.3782\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1251\t1.2411\t0.3658\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1621\t1.2411\t0.3873\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.1971\t1.3265\t0.3862\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1548\t1.2411\t0.3903\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1439\t1.2411\t0.3795\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.158\t1.2411\t0.3879\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1492\t1.2411\t0.3858\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.1623\t1.2411\t0.3861\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1588\t1.2411\t0.3753\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1398\t1.2411\t0.3835"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"13",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt14(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1643\t1.2587\t0.3864\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.1623\t1.2587\t0.3865\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1515\t1.2749\t0.3696\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1756\t1.2749\t0.392\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.2114\t1.3265\t0.3912\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1649\t1.2587\t0.3951\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1578\t1.2587\t0.388\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.1715\t1.2749\t0.3911\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1684\t1.2587\t0.3935\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.1778\t1.2749\t0.3913\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1703\t1.2587\t0.3803\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.153\t1.2587\t0.3905"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"14",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt16(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.181\t1.2233\t0.4423\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.1918\t1.2411\t0.4451\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1546\t1.2233\t0.4087\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.2023\t1.2411\t0.4626\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.2363\t1.2749\t0.4442\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1853\t1.2411\t0.4504\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1765\t1.2411\t0.4382\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.1951\t1.2411\t0.4528\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1736\t1.2411\t0.438\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.191\t1.2411\t0.4389\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1935\t1.2411\t0.4366\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1752\t1.2411\t0.4497"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"16",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt21(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1455\t1.2463\t0.3766\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.1443\t1.2463\t0.3753\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1302\t1.2463\t0.3596\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1571\t1.2587\t0.3815\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.2\t1.2946\t0.3868\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1477\t1.2463\t0.3872\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.142\t1.2463\t0.3765\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.1559\t1.2587\t0.3848\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1497\t1.2587\t0.3823\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.1628\t1.2587\t0.3838\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1561\t1.2587\t0.3708\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1342\t1.2463\t0.3805"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"21",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt22(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.222\t1.2233\t0.5161\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.2511\t1.2411\t0.5485\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1965\t1.2233\t0.4882\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.2394\t1.2411\t0.525\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.265\t1.2749\t0.491\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.2187\t1.2411\t0.5099\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.2093\t1.2411\t0.4965\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.2326\t1.2411\t0.5159\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.2193\t1.2411\t0.5214\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.2258\t1.2411\t0.5005\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.2274\t1.2411\t0.4974\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.2249\t1.2411\t0.5354"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"22",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt23(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.2586\t1.2233\t0.6873\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.2623\t1.2411\t0.6688\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.2078\t1.2233\t0.5957\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.2717\t1.2411\t0.6768\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.321\t1.2749\t0.6996\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.2636\t1.2411\t0.692\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.2313\t1.2411\t0.6184\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.2649\t1.2411\t0.6736\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.2357\t1.2411\t0.6451\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.259\t1.2411\t0.6581\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.2609\t1.2411\t0.656\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.2435\t1.2411\t0.6674"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"23",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt24(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.165\t1.2233\t0.3934\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.1596\t1.2233\t0.3882\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1533\t1.2233\t0.3768\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1721\t1.2587\t0.3937\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.2147\t1.3397\t0.3981\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1645\t1.2233\t0.4006\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1588\t1.2233\t0.3941\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.1738\t1.2587\t0.3984\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1654\t1.2587\t0.3972\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.1824\t1.2587\t0.3974\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.181\t1.2587\t0.3891\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1475\t1.2233\t0.3938"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"24",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt25(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1643\t1.2411\t0.4235\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.1743\t1.2411\t0.4255\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1377\t1.2411\t0.3903\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1889\t1.2411\t0.4505\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.2228\t1.2749\t0.4338\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1653\t1.2411\t0.43\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1638\t1.2411\t0.4296\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.1844\t1.2411\t0.4395\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1655\t1.2411\t0.4276\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.1799\t1.2411\t0.4278\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1813\t1.2411\t0.4212\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1606\t1.2411\t0.4323"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"25",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt26(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1473\t1.2233\t0.4041\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.1589\t1.2411\t0.4085\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.128\t1.2233\t0.377\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1719\t1.2411\t0.4308\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.1997\t1.2749\t0.4033\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1517\t1.2411\t0.4119\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1468\t1.2411\t0.4096\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.1662\t1.2411\t0.4172\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1493\t1.2411\t0.4069\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.1605\t1.2411\t0.4057\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1632\t1.2411\t0.4037\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1455\t1.2411\t0.4151"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"26",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt27(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.2\t1.2233\t0.4932\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.1959\t1.2411\t0.4703\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1704\t1.2233\t0.4683\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.212\t1.2411\t0.4952\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.2459\t1.2749\t0.4784\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.205\t1.2411\t0.4982\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.2022\t1.2411\t0.5036\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.2189\t1.2411\t0.4998\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1855\t1.2411\t0.4706\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.2091\t1.2411\t0.487\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.2087\t1.2411\t0.4787\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1884\t1.2411\t0.4876"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"27",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt28(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1543\t1.2233\t0.4166\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.166\t1.2411\t0.4201\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1321\t1.2233\t0.3879\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1782\t1.2411\t0.4411\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.2074\t1.2749\t0.4157\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1584\t1.2411\t0.4245\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1528\t1.2411\t0.419\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.1722\t1.2411\t0.4286\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1532\t1.2411\t0.4178\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.167\t1.2411\t0.4159\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1703\t1.2411\t0.4139\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1522\t1.2411\t0.4257"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"28",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt29(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1928\t1.2411\t0.4513\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.2015\t1.2587\t0.455\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1729\t1.2411\t0.4287\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.2144\t1.2587\t0.4747\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.2268\t1.3265\t0.432\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1926\t1.2587\t0.4542\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1839\t1.2411\t0.4543\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.2013\t1.2587\t0.4523\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1906\t1.2411\t0.4554\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.1952\t1.2587\t0.4435\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1979\t1.2411\t0.4427\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1876\t1.2587\t0.4591"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"29",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt30(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.2261\t1.2233\t0.5256\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.2285\t1.2411\t0.5159\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.2633\t1.2233\t0.5909\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.2625\t1.2411\t0.5581\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.3005\t1.2749\t0.5519\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.2359\t1.2411\t0.5469\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.2391\t1.2411\t0.5448\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.2569\t1.2411\t0.5531\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.2309\t1.2411\t0.5401\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.2727\t1.2411\t0.5754\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.2404\t1.2411\t0.5208\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.2196\t1.2411\t0.5323"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"30",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt31(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1688\t1.2411\t0.428\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.1732\t1.2411\t0.4257\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.1349\t1.2411\t0.3906\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1846\t1.2411\t0.4462\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.2126\t1.2952\t0.42\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1692\t1.2411\t0.4327\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1655\t1.2411\t0.4291\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.1817\t1.2411\t0.4361\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1674\t1.2411\t0.429\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.1776\t1.2411\t0.424\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1815\t1.2411\t0.4224\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1585\t1.2411\t0.4307"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"31",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt33(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1841\t1.2587\t0.4\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.1773\t1.2587\t0.3966\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.174\t1.2749\t0.3839\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1902\t1.2749\t0.4016\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.2244\t1.3627\t0.4004\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1817\t1.2587\t0.4058\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1745\t1.2587\t0.4026\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.1883\t1.2749\t0.4025\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.184\t1.2587\t0.4054\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.1964\t1.2749\t0.4028\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1946\t1.2587\t0.3955\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1654\t1.2587\t0.401"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"33",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt50(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1473\t1.2233\t0.4041\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.1589\t1.2411\t0.4085\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.128\t1.2233\t0.377\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1719\t1.2411\t0.4308\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.1997\t1.2749\t0.4033\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1517\t1.2411\t0.4119\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1468\t1.2411\t0.4096\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.1662\t1.2411\t0.4172\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1493\t1.2411\t0.4069\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.1605\t1.2411\t0.4057\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1632\t1.2411\t0.4037\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1455\t1.2411\t0.4151"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
"50",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt_custom(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1473\t1.2233\t0.4041\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.1589\t1.2411\t0.4085\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.128\t1.2233\t0.377\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1719\t1.2411\t0.4308\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.1997\t1.2749\t0.4033\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1517\t1.2411\t0.4119\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1468\t1.2411\t0.4096\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.1662\t1.2411\t0.4172\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1493\t1.2411\t0.4069\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.1605\t1.2411\t0.4057\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1632\t1.2411\t0.4037\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1455\t1.2411\t0.4151"
testargs = [
"biokit",
"gene_wise_relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
f"{here.parent.parent.parent}/sample_files/CUG_ala_code.txt",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt_custom_alias0(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1473\t1.2233\t0.4041\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.1589\t1.2411\t0.4085\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.128\t1.2233\t0.377\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1719\t1.2411\t0.4308\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.1997\t1.2749\t0.4033\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1517\t1.2411\t0.4119\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1468\t1.2411\t0.4096\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.1662\t1.2411\t0.4172\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1493\t1.2411\t0.4069\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.1605\t1.2411\t0.4057\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1632\t1.2411\t0.4037\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1455\t1.2411\t0.4151"
testargs = [
"biokit",
"gene_wise_rscu",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
f"{here.parent.parent.parent}/sample_files/CUG_ala_code.txt",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gene_wise_relative_synonymous_codon_usage_tt_custom_alias1(self, mocked_print):
expected_result = "lcl|NC_001134.8_cds_NP_009465.2_119\t1.1473\t1.2233\t0.4041\nlcl|NC_001134.8_cds_NP_009698.3_345\t1.1589\t1.2411\t0.4085\nlcl|NC_001136.10_cds_NP_010434.1_1057\t1.128\t1.2233\t0.377\nlcl|NC_001136.10_cds_NP_010745.3_1362\t1.1719\t1.2411\t0.4308\nlcl|NC_001139.9_cds_NP_011320.3_1926\t1.1997\t1.2749\t0.4033\nlcl|NC_001140.6_cds_NP_011967.1_2560\t1.1517\t1.2411\t0.4119\nlcl|NC_001143.9_cds_NP_012980.1_3530\t1.1468\t1.2411\t0.4096\nlcl|NC_001144.5_cds_NP_013060.1_3608\t1.1662\t1.2411\t0.4172\nlcl|NC_001144.5_cds_NP_013188.1_3730\t1.1493\t1.2411\t0.4069\nlcl|NC_001144.5_cds_NP_013207.1_3749\t1.1605\t1.2411\t0.4057\nlcl|NC_001144.5_cds_NP_013559.1_4092\t1.1632\t1.2411\t0.4037\nlcl|NC_001147.6_cds_NP_014560.1_5058\t1.1455\t1.2411\t0.4151"
testargs = [
"biokit",
"gw_rscu",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.long_sequences.fa.long_seqs.fa",
"-tt",
f"{here.parent.parent.parent}/sample_files/CUG_ala_code.txt",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
```
#### File: integration/coding_sequences/test_relative_synonymous_codon_usage.py
```python
import pytest
from mock import patch, call
from pathlib import Path
import sys
from biokit.biokit import Biokit
here = Path(__file__)
@pytest.mark.integration
class TestRelativeSynonymousCodonUsage(object):
@patch("builtins.print")
def test_relative_synonymous_codon_usage_invalid_input(self, mocked_print): # noqa
with pytest.raises(SystemExit) as pytest_wrapped_e:
Biokit()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 2
@pytest.mark.slow
@patch("builtins.print")
def test_relative_synonymous_codon_usage(self, mocked_print):
expected_result = "AGA\t2.8477\nGGU\t1.8214\nUUA\t1.6781\nUUG\t1.6693\nCCA\t1.6278\nUCU\t1.5612\nGUU\t1.5466\nGCU\t1.4788\nGAA\t1.4019\nUAA\t1.3971\nAUU\t1.3842\nCAA\t1.3716\nACU\t1.3709\nGAU\t1.3034\nCAU\t1.2854\nAGG\t1.2769\nUCA\t1.2765\nUGU\t1.2441\nCCU\t1.2432\nACA\t1.2332\nAAU\t1.1952\nUUU\t1.1904\nGCA\t1.1875\nAAA\t1.1703\nUAU\t1.1366\nAUG\t1.0\nUGG\t1.0\nAGU\t0.9766\nUCC\t0.9386\nUGA\t0.929\nGGA\t0.9028\nGCC\t0.8833\nGUA\t0.8741\nUAC\t0.8634\nCUA\t0.853\nCGU\t0.848\nACC\t0.8439\nAUA\t0.84\nAAG\t0.8297\nUUC\t0.8096\nGUC\t0.8073\nAAC\t0.8048\nGGC\t0.7874\nCUU\t0.7781\nAUC\t0.7758\nGUG\t0.772\nUGC\t0.7559\nCAC\t0.7146\nGAC\t0.6966\nUAG\t0.6739\nCUG\t0.6719\nAGC\t0.6655\nCCC\t0.6323\nCAG\t0.6284\nGAG\t0.5981\nUCG\t0.5817\nACG\t0.552\nCCG\t0.4967\nGGG\t0.4885\nGCG\t0.4504\nCGA\t0.4208\nCGC\t0.358\nCUC\t0.3497\nCGG\t0.2485"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.fna",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt1(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nAGA\t2.2941\nUUA\t2.1892\nGCU\t2.0\nUCU\t1.9355\nAUU\t1.9\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nUUG\t1.5405\nAAA\t1.5\nAGG\t1.4118\nCAA\t1.3636\nGUU\t1.2881\nAGU\t1.2581\nUUU\t1.2558\nGAU\t1.2432\nCAU\t1.1667\nAAU\t1.0769\nCGU\t1.0588\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUAU\t1.0\nUAC\t1.0\nUGG\t1.0\nUCC\t0.9677\nUCA\t0.9677\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nCCU\t0.8571\nCAC\t0.8333\nGCA\t0.8148\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nCGG\t0.7059\nCUA\t0.6486\nCAG\t0.6364\nCUU\t0.5676\nCUG\t0.5676\nGGA\t0.5263\nGGG\t0.5263\nAAG\t0.5\nCUC\t0.4865\nUCG\t0.4839\nCCG\t0.4286\nAGC\t0.3871\nCGA\t0.3529\nAUA\t0.35\nUGC\t0.3333\nGGC\t0.3158\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nCGC\t0.1765\nUAA\t0\nUAG\t0\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"1",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt2(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nUUA\t2.1892\nAGA\t2.1667\nGCU\t2.0\nUGG\t2.0\nUCU\t1.9355\nCGU\t1.8462\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nUUG\t1.5405\nAAA\t1.5\nAUU\t1.434\nAUG\t1.3913\nCAA\t1.3636\nAGG\t1.3333\nGUU\t1.2881\nAGU\t1.2581\nUUU\t1.2558\nGAU\t1.2432\nCGG\t1.2308\nCAU\t1.1667\nAAU\t1.0769\nACC\t1.0435\nGUG\t1.0169\nUAU\t1.0\nUAC\t1.0\nUCC\t0.9677\nUCA\t0.9677\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nCCU\t0.8571\nCAC\t0.8333\nGCA\t0.8148\nGAC\t0.7568\nGUA\t0.7458\nUUC\t0.7442\nCUA\t0.6486\nCAG\t0.6364\nCGA\t0.6154\nAUA\t0.6087\nCUU\t0.5676\nCUG\t0.5676\nAUC\t0.566\nGGA\t0.5263\nGGG\t0.5263\nAAG\t0.5\nCUC\t0.4865\nUCG\t0.4839\nCCG\t0.4286\nAGC\t0.3871\nUAG\t0.3333\nUGC\t0.3333\nGGC\t0.3158\nCGC\t0.3077\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nUAA\t0.1667\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"2",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt3(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nAGA\t2.2941\nACU\t2.1622\nGCU\t2.0\nUGG\t2.0\nUCU\t1.9355\nGAA\t1.7714\nUGU\t1.6667\nAAA\t1.5\nAUU\t1.434\nAGG\t1.4118\nAUG\t1.3913\nCAA\t1.3636\nUAG\t1.3333\nACC\t1.2973\nGUU\t1.2881\nAGU\t1.2581\nUUU\t1.2558\nGAU\t1.2432\nACA\t1.1892\nUUA\t1.1739\nCAU\t1.1667\nAAU\t1.0769\nCGU\t1.0588\nGUG\t1.0169\nUAU\t1.0\nUAC\t1.0\nUCC\t0.9677\nUCA\t0.9677\nGCC\t0.963\nGUC\t0.9492\nAAC\t0.9231\nCUA\t0.8649\nCCU\t0.8571\nCAC\t0.8333\nUUG\t0.8261\nGCA\t0.8148\nCUU\t0.7568\nCUG\t0.7568\nGAC\t0.7568\nGUA\t0.7458\nUUC\t0.7442\nCGG\t0.7059\nUAA\t0.6667\nCUC\t0.6486\nCAG\t0.6364\nAUA\t0.6087\nAUC\t0.566\nGGA\t0.5263\nGGG\t0.5263\nAAG\t0.5\nUCG\t0.4839\nCCG\t0.4286\nAGC\t0.3871\nCGA\t0.3529\nUGC\t0.3333\nACG\t0.3243\nGGC\t0.3158\nCCC\t0.2857\nGAG\t0.2286\nGCG\t0.2222\nCGC\t0.1765\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"3",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt4(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nAGA\t2.2941\nUUA\t2.1892\nGCU\t2.0\nUGG\t2.0\nUCU\t1.9355\nAUU\t1.9\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nUUG\t1.5405\nAAA\t1.5\nAGG\t1.4118\nCAA\t1.3636\nUAG\t1.3333\nGUU\t1.2881\nAGU\t1.2581\nUUU\t1.2558\nGAU\t1.2432\nCAU\t1.1667\nAAU\t1.0769\nCGU\t1.0588\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUAU\t1.0\nUAC\t1.0\nUCC\t0.9677\nUCA\t0.9677\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nCCU\t0.8571\nCAC\t0.8333\nGCA\t0.8148\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nCGG\t0.7059\nUAA\t0.6667\nCUA\t0.6486\nCAG\t0.6364\nCUU\t0.5676\nCUG\t0.5676\nGGA\t0.5263\nGGG\t0.5263\nAAG\t0.5\nCUC\t0.4865\nUCG\t0.4839\nCCG\t0.4286\nAGC\t0.3871\nCGA\t0.3529\nAUA\t0.35\nUGC\t0.3333\nGGC\t0.3158\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nCGC\t0.1765\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"4",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt5(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nUUA\t2.1892\nGCU\t2.0\nUGG\t2.0\nUCU\t1.9277\nCGU\t1.8462\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nUUG\t1.5405\nAAA\t1.5\nAUU\t1.434\nAUG\t1.3913\nCAA\t1.3636\nUAG\t1.3333\nGUU\t1.2881\nUUU\t1.2558\nAGU\t1.253\nAGA\t1.253\nGAU\t1.2432\nCGG\t1.2308\nCAU\t1.1667\nAAU\t1.0769\nACC\t1.0435\nGUG\t1.0169\nUAU\t1.0\nUAC\t1.0\nUCC\t0.9639\nUCA\t0.9639\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nCCU\t0.8571\nCAC\t0.8333\nGCA\t0.8148\nAGG\t0.7711\nGAC\t0.7568\nGUA\t0.7458\nUUC\t0.7442\nUAA\t0.6667\nCUA\t0.6486\nCAG\t0.6364\nCGA\t0.6154\nAUA\t0.6087\nCUU\t0.5676\nCUG\t0.5676\nAUC\t0.566\nGGA\t0.5263\nGGG\t0.5263\nAAG\t0.5\nCUC\t0.4865\nUCG\t0.4819\nCCG\t0.4286\nAGC\t0.3855\nUGC\t0.3333\nGGC\t0.3158\nCGC\t0.3077\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"5",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt6(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nCAA\t2.4\nAGA\t2.2941\nUUA\t2.1892\nGCU\t2.0\nUCU\t1.9355\nAUU\t1.9\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nUUG\t1.5405\nAAA\t1.5\nAGG\t1.4118\nGUU\t1.2881\nAGU\t1.2581\nUUU\t1.2558\nGAU\t1.2432\nCAU\t1.1667\nCAG\t1.12\nAAU\t1.0769\nCGU\t1.0588\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUAU\t1.0\nUAC\t1.0\nUGG\t1.0\nUCC\t0.9677\nUCA\t0.9677\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nCCU\t0.8571\nCAC\t0.8333\nGCA\t0.8148\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nCGG\t0.7059\nCUA\t0.6486\nCUU\t0.5676\nCUG\t0.5676\nGGA\t0.5263\nGGG\t0.5263\nAAG\t0.5\nCUC\t0.4865\nUCG\t0.4839\nCCG\t0.4286\nAGC\t0.3871\nCGA\t0.3529\nAUA\t0.35\nUGC\t0.3333\nUAG\t0.32\nGGC\t0.3158\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nCGC\t0.1765\nUAA\t0.16\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"6",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt9(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nUUA\t2.1892\nGCU\t2.0\nUGG\t2.0\nUCU\t1.9277\nAUU\t1.9\nCGU\t1.8462\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nUUG\t1.5405\nAAA\t1.44\nCAA\t1.3636\nUAG\t1.3333\nGUU\t1.2881\nUUU\t1.2558\nAGU\t1.253\nAGA\t1.253\nGAU\t1.2432\nCGG\t1.2308\nCAU\t1.1667\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUAU\t1.0\nUAC\t1.0\nAAG\t1.0\nUCC\t0.9639\nUCA\t0.9639\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nCCU\t0.8571\nAAU\t0.84\nCAC\t0.8333\nGCA\t0.8148\nAGG\t0.7711\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nAAC\t0.72\nUAA\t0.6667\nCUA\t0.6486\nCAG\t0.6364\nCGA\t0.6154\nCUU\t0.5676\nCUG\t0.5676\nGGA\t0.5263\nGGG\t0.5263\nCUC\t0.4865\nUCG\t0.4819\nCCG\t0.4286\nAGC\t0.3855\nAUA\t0.35\nUGC\t0.3333\nGGC\t0.3158\nCGC\t0.3077\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"9",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt10(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nAGA\t2.2941\nUUA\t2.1892\nGCU\t2.0\nUCU\t1.9355\nAUU\t1.9\nGAA\t1.7714\nACU\t1.7391\nUUG\t1.5405\nAAA\t1.5\nAGG\t1.4118\nCAA\t1.3636\nUAG\t1.3333\nGUU\t1.2881\nAGU\t1.2581\nUUU\t1.2558\nGAU\t1.2432\nCAU\t1.1667\nAAU\t1.0769\nCGU\t1.0588\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUAU\t1.0\nUAC\t1.0\nUGG\t1.0\nUCC\t0.9677\nUCA\t0.9677\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nCCU\t0.8571\nCAC\t0.8333\nGCA\t0.8148\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nCGG\t0.7059\nUAA\t0.6667\nCUA\t0.6486\nCAG\t0.6364\nCUU\t0.5676\nCUG\t0.5676\nGGA\t0.5263\nGGG\t0.5263\nAAG\t0.5\nCUC\t0.4865\nUCG\t0.4839\nCCG\t0.4286\nAGC\t0.3871\nCGA\t0.3529\nAUA\t0.35\nGGC\t0.3158\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nCGC\t0.1765\nUGU\t0\nUGC\t0\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"10",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt11(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nAGA\t2.2941\nUUA\t2.1892\nGCU\t2.0\nUCU\t1.9355\nAUU\t1.9\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nUUG\t1.5405\nAAA\t1.5\nAGG\t1.4118\nCAA\t1.3636\nGUU\t1.2881\nAGU\t1.2581\nUUU\t1.2558\nGAU\t1.2432\nCAU\t1.1667\nAAU\t1.0769\nCGU\t1.0588\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUAU\t1.0\nUAC\t1.0\nUGG\t1.0\nUCC\t0.9677\nUCA\t0.9677\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nCCU\t0.8571\nCAC\t0.8333\nGCA\t0.8148\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nCGG\t0.7059\nCUA\t0.6486\nCAG\t0.6364\nCUU\t0.5676\nCUG\t0.5676\nGGA\t0.5263\nGGG\t0.5263\nAAG\t0.5\nCUC\t0.4865\nUCG\t0.4839\nCCG\t0.4286\nAGC\t0.3871\nCGA\t0.3529\nAUA\t0.35\nUGC\t0.3333\nGGC\t0.3158\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nCGC\t0.1765\nUAA\t0\nUAG\t0\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"11",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt12(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nAGA\t2.2941\nUCU\t2.029\nUUA\t2.0149\nGCU\t2.0\nAUU\t1.9\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nAAA\t1.5\nUUG\t1.4179\nAGG\t1.4118\nCAA\t1.3636\nAGU\t1.3188\nGUU\t1.2881\nUUU\t1.2558\nGAU\t1.2432\nCAU\t1.1667\nAAU\t1.0769\nCGU\t1.0588\nACC\t1.0435\nGUG\t1.0169\nUCC\t1.0145\nUCA\t1.0145\nAUG\t1.0\nUAU\t1.0\nUAC\t1.0\nUGG\t1.0\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nCCU\t0.8571\nCAC\t0.8333\nGCA\t0.8148\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nCUG\t0.7101\nCGG\t0.7059\nCAG\t0.6364\nCUA\t0.597\nGGA\t0.5263\nGGG\t0.5263\nCUU\t0.5224\nUCG\t0.5072\nAAG\t0.5\nCUC\t0.4478\nCCG\t0.4286\nAGC\t0.4058\nCGA\t0.3529\nAUA\t0.35\nUGC\t0.3333\nGGC\t0.3158\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nCGC\t0.1765\nUAA\t0\nUAG\t0\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"12",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt13(self, mocked_print):
expected_result = "GGU\t2.5424\nCCA\t2.4286\nUUA\t2.1892\nGCU\t2.0\nUGG\t2.0\nUCU\t1.9355\nCGU\t1.8462\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nUUG\t1.5405\nAAA\t1.5\nAUU\t1.434\nAUG\t1.3913\nCAA\t1.3636\nUAG\t1.3333\nAGA\t1.322\nGUU\t1.2881\nAGU\t1.2581\nUUU\t1.2558\nGAU\t1.2432\nCGG\t1.2308\nCAU\t1.1667\nAAU\t1.0769\nACC\t1.0435\nGUG\t1.0169\nUAU\t1.0\nUAC\t1.0\nUCC\t0.9677\nUCA\t0.9677\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nCCU\t0.8571\nCAC\t0.8333\nGCA\t0.8148\nAGG\t0.8136\nGAC\t0.7568\nGUA\t0.7458\nUUC\t0.7442\nUAA\t0.6667\nCUA\t0.6486\nCAG\t0.6364\nCGA\t0.6154\nAUA\t0.6087\nCUU\t0.5676\nCUG\t0.5676\nAUC\t0.566\nGGA\t0.5085\nGGG\t0.5085\nAAG\t0.5\nCUC\t0.4865\nUCG\t0.4839\nCCG\t0.4286\nAGC\t0.3871\nUGC\t0.3333\nCGC\t0.3077\nGGC\t0.3051\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"13",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt14(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nUUA\t2.1892\nGCU\t2.0\nUGG\t2.0\nUCU\t1.9277\nAUU\t1.9\nCGU\t1.8462\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nUUG\t1.5405\nUAU\t1.4681\nUAC\t1.4681\nAAA\t1.44\nCAA\t1.3636\nGUU\t1.2881\nUUU\t1.2558\nAGU\t1.253\nAGA\t1.253\nGAU\t1.2432\nCGG\t1.2308\nCAU\t1.1667\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUAG\t1.0\nAAG\t1.0\nUCC\t0.9639\nUCA\t0.9639\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nCCU\t0.8571\nAAU\t0.84\nCAC\t0.8333\nGCA\t0.8148\nAGG\t0.7711\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nAAC\t0.72\nCUA\t0.6486\nCAG\t0.6364\nCGA\t0.6154\nCUU\t0.5676\nCUG\t0.5676\nGGA\t0.5263\nGGG\t0.5263\nCUC\t0.4865\nUCG\t0.4819\nCCG\t0.4286\nAGC\t0.3855\nAUA\t0.35\nUGC\t0.3333\nGGC\t0.3158\nCGC\t0.3077\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nUAA\t0.0638\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"14",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt16(self, mocked_print):
expected_result = "GGU\t2.6316\nUUA\t2.4868\nCCA\t2.4286\nAGA\t2.2941\nGCU\t2.0\nUCU\t1.9355\nAUU\t1.9\nGAA\t1.7714\nUUG\t1.75\nACU\t1.7391\nUGU\t1.6667\nAAA\t1.5\nAGG\t1.4118\nCAA\t1.3636\nGUU\t1.2881\nAGU\t1.2581\nUUU\t1.2558\nGAU\t1.2432\nCAU\t1.1667\nAAU\t1.0769\nCGU\t1.0588\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUAU\t1.0\nUAC\t1.0\nUGG\t1.0\nUCC\t0.9677\nUCA\t0.9677\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nCCU\t0.8571\nCAC\t0.8333\nGCA\t0.8148\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nCUA\t0.7368\nCGG\t0.7059\nCUU\t0.6447\nCUG\t0.6447\nCAG\t0.6364\nCUC\t0.5526\nGGA\t0.5263\nGGG\t0.5263\nAAG\t0.5\nUCG\t0.4839\nCCG\t0.4286\nAGC\t0.3871\nCGA\t0.3529\nAUA\t0.35\nUGC\t0.3333\nGGC\t0.3158\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nUAG\t0.1842\nCGC\t0.1765\nUAA\t0\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"16",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt21(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nUUA\t2.1892\nGCU\t2.0\nUGG\t2.0\nUCU\t1.9277\nCGU\t1.8462\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nUUG\t1.5405\nAAA\t1.44\nAUU\t1.434\nAUG\t1.3913\nCAA\t1.3636\nUAG\t1.3333\nGUU\t1.2881\nUUU\t1.2558\nAGU\t1.253\nAGA\t1.253\nGAU\t1.2432\nCGG\t1.2308\nCAU\t1.1667\nACC\t1.0435\nGUG\t1.0169\nUAU\t1.0\nUAC\t1.0\nAAG\t1.0\nUCC\t0.9639\nUCA\t0.9639\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nCCU\t0.8571\nAAU\t0.84\nCAC\t0.8333\nGCA\t0.8148\nAGG\t0.7711\nGAC\t0.7568\nGUA\t0.7458\nUUC\t0.7442\nAAC\t0.72\nUAA\t0.6667\nCUA\t0.6486\nCAG\t0.6364\nCGA\t0.6154\nAUA\t0.6087\nCUU\t0.5676\nCUG\t0.5676\nAUC\t0.566\nGGA\t0.5263\nGGG\t0.5263\nCUC\t0.4865\nUCG\t0.4819\nCCG\t0.4286\nAGC\t0.3855\nUGC\t0.3333\nGGC\t0.3158\nCGC\t0.3077\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"21",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt22(self, mocked_print):
expected_result = "GGU\t2.6316\nUUA\t2.4868\nCCA\t2.4286\nAGA\t2.2941\nGCU\t2.0\nUCU\t1.9231\nAUU\t1.9\nGAA\t1.7714\nUUG\t1.75\nACU\t1.7391\nUGU\t1.6667\nAAA\t1.5\nAGG\t1.4118\nCAA\t1.3636\nGUU\t1.2881\nUUU\t1.2558\nAGU\t1.25\nGAU\t1.2432\nCAU\t1.1667\nAAU\t1.0769\nCGU\t1.0588\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUAU\t1.0\nUAC\t1.0\nUGG\t1.0\nGCC\t0.963\nUCC\t0.9615\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nCCU\t0.8571\nCAC\t0.8333\nGCA\t0.8148\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nCUA\t0.7368\nCGG\t0.7059\nCUU\t0.6447\nCUG\t0.6447\nCAG\t0.6364\nCUC\t0.5526\nGGA\t0.5263\nGGG\t0.5263\nAAG\t0.5\nUCG\t0.4808\nCCG\t0.4286\nAGC\t0.3846\nCGA\t0.3529\nAUA\t0.35\nUGC\t0.3333\nGGC\t0.3158\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nUAG\t0.1842\nCGC\t0.1765\nUCA\t0\nUAA\t0\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"22",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt23(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nAGA\t2.2941\nUUG\t2.0213\nGCU\t2.0\nUCU\t1.9355\nAUU\t1.9\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nAAA\t1.5\nAGG\t1.4118\nCAA\t1.3636\nGUU\t1.2881\nAGU\t1.2581\nUUU\t1.2558\nGAU\t1.2432\nCAU\t1.1667\nAAU\t1.0769\nCGU\t1.0588\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUAU\t1.0\nUAC\t1.0\nUGG\t1.0\nUCC\t0.9677\nUCA\t0.9677\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nCCU\t0.8571\nCUA\t0.8511\nCAC\t0.8333\nGCA\t0.8148\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nCUU\t0.7447\nCUG\t0.7447\nUUC\t0.7442\nCGG\t0.7059\nCUC\t0.6383\nCAG\t0.6364\nGGA\t0.5263\nGGG\t0.5263\nAAG\t0.5\nUCG\t0.4839\nCCG\t0.4286\nAGC\t0.3871\nCGA\t0.3529\nAUA\t0.35\nUGC\t0.3333\nGGC\t0.3158\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nCGC\t0.1765\nUUA\t0\nUAA\t0\nUAG\t0\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"23",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt24(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nUUA\t2.1892\nGCU\t2.0\nUGG\t2.0\nAAA\t1.9286\nAUU\t1.9\nUCU\t1.8667\nCGU\t1.8462\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nUUG\t1.5405\nCAA\t1.3636\nUAG\t1.3333\nGUU\t1.2881\nUUU\t1.2558\nGAU\t1.2432\nCGG\t1.2308\nAGU\t1.2133\nAGA\t1.2133\nCAU\t1.1667\nAAU\t1.0769\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUAU\t1.0\nUAC\t1.0\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nUCC\t0.9333\nUCA\t0.9333\nAAC\t0.9231\nCCU\t0.8571\nCAC\t0.8333\nGCA\t0.8148\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nUAA\t0.6667\nCUA\t0.6486\nAAG\t0.6429\nCAG\t0.6364\nCGA\t0.6154\nCUU\t0.5676\nCUG\t0.5676\nGGA\t0.5263\nGGG\t0.5263\nCUC\t0.4865\nUCG\t0.4667\nCCG\t0.4286\nAGG\t0.4286\nAGC\t0.3733\nAUA\t0.35\nUGC\t0.3333\nGGC\t0.3158\nCGC\t0.3077\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"24",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt25(self, mocked_print):
expected_result = "GGU\t3.2895\nCCA\t2.4286\nAGA\t2.2941\nUUA\t2.1892\nGCU\t2.0\nUCU\t1.9355\nAUU\t1.9\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nUUG\t1.5405\nAAA\t1.5\nAGG\t1.4118\nCAA\t1.3636\nUAG\t1.3333\nGUU\t1.2881\nAGU\t1.2581\nUUU\t1.2558\nGAU\t1.2432\nCAU\t1.1667\nAAU\t1.0769\nCGU\t1.0588\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUAU\t1.0\nUAC\t1.0\nUGG\t1.0\nUCC\t0.9677\nUCA\t0.9677\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nCCU\t0.8571\nCAC\t0.8333\nGCA\t0.8148\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nCGG\t0.7059\nUAA\t0.6667\nGGA\t0.6579\nGGG\t0.6579\nCUA\t0.6486\nCAG\t0.6364\nCUU\t0.5676\nCUG\t0.5676\nAAG\t0.5\nCUC\t0.4865\nUCG\t0.4839\nCCG\t0.4286\nGGC\t0.3947\nAGC\t0.3871\nCGA\t0.3529\nAUA\t0.35\nUGC\t0.3333\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nCGC\t0.1765\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"25",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt26(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nAGA\t2.2941\nGCU\t2.2131\nUUA\t2.0149\nUCU\t1.9355\nAUU\t1.9\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nAAA\t1.5\nUUG\t1.4179\nAGG\t1.4118\nCAA\t1.3636\nGUU\t1.2881\nAGU\t1.2581\nUUU\t1.2558\nGAU\t1.2432\nCAU\t1.1667\nAAU\t1.0769\nGCC\t1.0656\nCGU\t1.0588\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUAU\t1.0\nUAC\t1.0\nUGG\t1.0\nUCC\t0.9677\nUCA\t0.9677\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nGCA\t0.9016\nCCU\t0.8571\nCAC\t0.8333\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nCGG\t0.7059\nCAG\t0.6364\nCUA\t0.597\nCUG\t0.5738\nGGA\t0.5263\nGGG\t0.5263\nCUU\t0.5224\nAAG\t0.5\nUCG\t0.4839\nCUC\t0.4478\nCCG\t0.4286\nAGC\t0.3871\nCGA\t0.3529\nAUA\t0.35\nUGC\t0.3333\nGGC\t0.3158\nCCC\t0.2857\nACG\t0.2609\nGCG\t0.2459\nGAG\t0.2286\nCGC\t0.1765\nUAA\t0\nUAG\t0\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"26",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt27(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nCAA\t2.4\nAGA\t2.2941\nUUA\t2.1892\nGCU\t2.0\nUCU\t1.9355\nAUU\t1.9\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nUUG\t1.5405\nAAA\t1.5\nAGG\t1.4118\nGUU\t1.2881\nAGU\t1.2581\nUUU\t1.2558\nGAU\t1.2432\nCAU\t1.1667\nCAG\t1.12\nAAU\t1.0769\nCGU\t1.0588\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUAU\t1.0\nUAC\t1.0\nUGG\t1.0\nUCC\t0.9677\nUCA\t0.9677\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nCCU\t0.8571\nCAC\t0.8333\nGCA\t0.8148\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nCGG\t0.7059\nCUA\t0.6486\nCUU\t0.5676\nCUG\t0.5676\nGGA\t0.5263\nGGG\t0.5263\nAAG\t0.5\nCUC\t0.4865\nUCG\t0.4839\nCCG\t0.4286\nAGC\t0.3871\nCGA\t0.3529\nAUA\t0.35\nUGC\t0.3333\nUAG\t0.32\nGGC\t0.3158\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nCGC\t0.1765\nUAA\t0.16\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"27",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt28(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nAGA\t2.2941\nUUA\t2.1892\nGCU\t2.0\nUCU\t1.9355\nAUU\t1.9\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nUUG\t1.5405\nAAA\t1.5\nAGG\t1.4118\nCAA\t1.3636\nGUU\t1.2881\nAGU\t1.2581\nUUU\t1.2558\nGAU\t1.2432\nCAU\t1.1667\nAAU\t1.0769\nCGU\t1.0588\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUAU\t1.0\nUAC\t1.0\nUGG\t1.0\nUCC\t0.9677\nUCA\t0.9677\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nCCU\t0.8571\nCAC\t0.8333\nGCA\t0.8148\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nCGG\t0.7059\nCUA\t0.6486\nCAG\t0.6364\nCUU\t0.5676\nCUG\t0.5676\nGGA\t0.5263\nGGG\t0.5263\nAAG\t0.5\nCUC\t0.4865\nUCG\t0.4839\nCCG\t0.4286\nAGC\t0.3871\nCGA\t0.3529\nAUA\t0.35\nUGC\t0.3333\nGGC\t0.3158\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nCGC\t0.1765\nUAA\t0\nUAG\t0\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"28",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt29(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nAGA\t2.2941\nUUA\t2.1892\nGCU\t2.0\nUCU\t1.9355\nAUU\t1.9\nUAU\t1.8776\nUAC\t1.8776\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nUUG\t1.5405\nAAA\t1.5\nAGG\t1.4118\nCAA\t1.3636\nGUU\t1.2881\nAGU\t1.2581\nUUU\t1.2558\nGAU\t1.2432\nCAU\t1.1667\nAAU\t1.0769\nCGU\t1.0588\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUGG\t1.0\nUCC\t0.9677\nUCA\t0.9677\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nCCU\t0.8571\nCAC\t0.8333\nGCA\t0.8148\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nCGG\t0.7059\nCUA\t0.6486\nCAG\t0.6364\nCUU\t0.5676\nCUG\t0.5676\nGGA\t0.5263\nGGG\t0.5263\nAAG\t0.5\nCUC\t0.4865\nUCG\t0.4839\nCCG\t0.4286\nAGC\t0.3871\nCGA\t0.3529\nAUA\t0.35\nUGC\t0.3333\nGGC\t0.3158\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nCGC\t0.1765\nUAG\t0.1633\nUAA\t0.0816\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"29",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt30(self, mocked_print):
expected_result = "GAA\t3.2632\nGGU\t2.6316\nCCA\t2.4286\nAGA\t2.2941\nUUA\t2.1892\nGCU\t2.0\nUCU\t1.9355\nAUU\t1.9\nACU\t1.7391\nUGU\t1.6667\nUUG\t1.5405\nAAA\t1.5\nAGG\t1.4118\nCAA\t1.3636\nGUU\t1.2881\nAGU\t1.2581\nUUU\t1.2558\nGAU\t1.2432\nCAU\t1.1667\nAAU\t1.0769\nCGU\t1.0588\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUAU\t1.0\nUAC\t1.0\nUGG\t1.0\nUCC\t0.9677\nUCA\t0.9677\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nCCU\t0.8571\nCAC\t0.8333\nGCA\t0.8148\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nCGG\t0.7059\nCUA\t0.6486\nCAG\t0.6364\nCUU\t0.5676\nCUG\t0.5676\nGGA\t0.5263\nGGG\t0.5263\nAAG\t0.5\nCUC\t0.4865\nUCG\t0.4839\nCCG\t0.4286\nGAG\t0.4211\nAGC\t0.3871\nCGA\t0.3529\nAUA\t0.35\nUGC\t0.3333\nGGC\t0.3158\nCCC\t0.2857\nACG\t0.2609\nGCG\t0.2222\nUAG\t0.2105\nCGC\t0.1765\nUAA\t0.1053\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"30",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt31(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nAGA\t2.2941\nUUA\t2.1892\nGCU\t2.0\nUGG\t2.0\nUCU\t1.9355\nAUU\t1.9\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nUUG\t1.5405\nAAA\t1.5\nAGG\t1.4118\nCAA\t1.3636\nUAG\t1.3333\nGUU\t1.2881\nAGU\t1.2581\nUUU\t1.2558\nGAU\t1.2432\nCAU\t1.1667\nAAU\t1.0769\nCGU\t1.0588\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUAU\t1.0\nUAC\t1.0\nUCC\t0.9677\nUCA\t0.9677\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nCCU\t0.8571\nCAC\t0.8333\nGCA\t0.8148\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nCGG\t0.7059\nUAA\t0.6667\nCUA\t0.6486\nCAG\t0.6364\nCUU\t0.5676\nCUG\t0.5676\nGGA\t0.5263\nGGG\t0.5263\nAAG\t0.5\nCUC\t0.4865\nUCG\t0.4839\nCCG\t0.4286\nAGC\t0.3871\nCGA\t0.3529\nAUA\t0.35\nUGC\t0.3333\nGGC\t0.3158\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nCGC\t0.1765\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"31",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt33(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nUUA\t2.1892\nGCU\t2.0\nUGG\t2.0\nAAA\t1.9286\nAUU\t1.9\nUCU\t1.8667\nCGU\t1.8462\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nUUG\t1.5405\nUAU\t1.4681\nUAC\t1.4681\nCAA\t1.3636\nGUU\t1.2881\nUUU\t1.2558\nGAU\t1.2432\nCGG\t1.2308\nAGU\t1.2133\nAGA\t1.2133\nCAU\t1.1667\nAAU\t1.0769\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUAG\t1.0\nGCC\t0.963\nACA\t0.9565\nGUC\t0.9492\nUCC\t0.9333\nUCA\t0.9333\nAAC\t0.9231\nCCU\t0.8571\nCAC\t0.8333\nGCA\t0.8148\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nCUA\t0.6486\nAAG\t0.6429\nCAG\t0.6364\nCGA\t0.6154\nCUU\t0.5676\nCUG\t0.5676\nGGA\t0.5263\nGGG\t0.5263\nCUC\t0.4865\nUCG\t0.4667\nCCG\t0.4286\nAGG\t0.4286\nAGC\t0.3733\nAUA\t0.35\nUGC\t0.3333\nGGC\t0.3158\nCGC\t0.3077\nCCC\t0.2857\nACG\t0.2609\nGAG\t0.2286\nGCG\t0.2222\nUAA\t0.0638\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"33",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt50(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nAGA\t2.2941\nGCU\t2.2131\nUUA\t2.0149\nUCU\t1.9355\nAUU\t1.9\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nAAA\t1.5\nUUG\t1.4179\nAGG\t1.4118\nCAA\t1.3636\nGUU\t1.2881\nAGU\t1.2581\nUUU\t1.2558\nGAU\t1.2432\nCAU\t1.1667\nAAU\t1.0769\nGCC\t1.0656\nCGU\t1.0588\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUAU\t1.0\nUAC\t1.0\nUGG\t1.0\nUCC\t0.9677\nUCA\t0.9677\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nGCA\t0.9016\nCCU\t0.8571\nCAC\t0.8333\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nCGG\t0.7059\nCAG\t0.6364\nCUA\t0.597\nCUG\t0.5738\nGGA\t0.5263\nGGG\t0.5263\nCUU\t0.5224\nAAG\t0.5\nUCG\t0.4839\nCUC\t0.4478\nCCG\t0.4286\nAGC\t0.3871\nCGA\t0.3529\nAUA\t0.35\nUGC\t0.3333\nGGC\t0.3158\nCCC\t0.2857\nACG\t0.2609\nGCG\t0.2459\nGAG\t0.2286\nCGC\t0.1765\nUAA\t0\nUAG\t0\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
"50",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt_custom(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nAGA\t2.2941\nGCU\t2.2131\nUUA\t2.0149\nUCU\t1.9355\nAUU\t1.9\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nAAA\t1.5\nUUG\t1.4179\nAGG\t1.4118\nCAA\t1.3636\nGUU\t1.2881\nAGU\t1.2581\nUUU\t1.2558\nGAU\t1.2432\nCAU\t1.1667\nAAU\t1.0769\nGCC\t1.0656\nCGU\t1.0588\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUAU\t1.0\nUAC\t1.0\nUGG\t1.0\nUCC\t0.9677\nUCA\t0.9677\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nGCA\t0.9016\nCCU\t0.8571\nCAC\t0.8333\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nCGG\t0.7059\nCAG\t0.6364\nCUA\t0.597\nCUG\t0.5738\nGGA\t0.5263\nGGG\t0.5263\nCUU\t0.5224\nAAG\t0.5\nUCG\t0.4839\nCUC\t0.4478\nCCG\t0.4286\nAGC\t0.3871\nCGA\t0.3529\nAUA\t0.35\nUGC\t0.3333\nGGC\t0.3158\nCCC\t0.2857\nACG\t0.2609\nGCG\t0.2459\nGAG\t0.2286\nCGC\t0.1765\nUAA\t0\nUAG\t0\nUGA\t0"
testargs = [
"biokit",
"relative_synonymous_codon_usage",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
f"{here.parent.parent.parent}/sample_files/CUG_ala_code.txt",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_relative_synonymous_codon_usage_tt_custom_alias(self, mocked_print):
expected_result = "GGU\t2.6316\nCCA\t2.4286\nAGA\t2.2941\nGCU\t2.2131\nUUA\t2.0149\nUCU\t1.9355\nAUU\t1.9\nGAA\t1.7714\nACU\t1.7391\nUGU\t1.6667\nAAA\t1.5\nUUG\t1.4179\nAGG\t1.4118\nCAA\t1.3636\nGUU\t1.2881\nAGU\t1.2581\nUUU\t1.2558\nGAU\t1.2432\nCAU\t1.1667\nAAU\t1.0769\nGCC\t1.0656\nCGU\t1.0588\nACC\t1.0435\nGUG\t1.0169\nAUG\t1.0\nUAU\t1.0\nUAC\t1.0\nUGG\t1.0\nUCC\t0.9677\nUCA\t0.9677\nACA\t0.9565\nGUC\t0.9492\nAAC\t0.9231\nGCA\t0.9016\nCCU\t0.8571\nCAC\t0.8333\nGAC\t0.7568\nAUC\t0.75\nGUA\t0.7458\nUUC\t0.7442\nCGG\t0.7059\nCAG\t0.6364\nCUA\t0.597\nCUG\t0.5738\nGGA\t0.5263\nGGG\t0.5263\nCUU\t0.5224\nAAG\t0.5\nUCG\t0.4839\nCUC\t0.4478\nCCG\t0.4286\nAGC\t0.3871\nCGA\t0.3529\nAUA\t0.35\nUGC\t0.3333\nGGC\t0.3158\nCCC\t0.2857\nACG\t0.2609\nGCG\t0.2459\nGAG\t0.2286\nCGC\t0.1765\nUAA\t0\nUAG\t0\nUGA\t0"
testargs = [
"biokit",
"rscu",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
f"{here.parent.parent.parent}/sample_files/CUG_ala_code.txt",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
```
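Every RSCU integration test above follows the same pattern: patch `sys.argv` with the CLI arguments and instantiate `Biokit()`, with the `-tt` flag accepting either an NCBI translation-table number or a path to a custom genetic-code file. A minimal, hedged sketch of that pattern is shown below; the input path is an illustrative assumption, not a file shipped with the repository.
```python
# Hedged sketch of the CLI pattern the tests above exercise.
# "my_cds.fna" is an assumed input path, not a file from the repository.
import sys
from mock import patch
from biokit.biokit import Biokit

testargs = [
    "biokit",
    "rscu",            # alias for relative_synonymous_codon_usage
    "my_cds.fna",      # assumed FASTA of coding sequences
    "-tt",
    "1",               # NCBI table number, or a path to a custom code file
]
with patch.object(sys, "argv", testargs):
    Biokit()           # prints tab-separated codon/RSCU pairs, sorted by value
```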
#### File: integration/coding_sequences/test_translate_sequence.py
```python
import pytest
from mock import patch, call # noqa
from pathlib import Path
import sys
from biokit.biokit import Biokit
here = Path(__file__)
@pytest.mark.integration
class TestTranslateSequence(object):
@patch("builtins.print")
def test_translate_sequence_invalid_input(self, mocked_print): # noqa
with pytest.raises(SystemExit) as pytest_wrapped_e:
Biokit()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 2
@pytest.mark.slow
@patch("builtins.print")
def test_translate_sequence(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.fna_trans_seq_default",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt1(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"1",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt1",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt2(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"2",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt2",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt3(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"3",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt3",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt4(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"4",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt4",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt5(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"5",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt5",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt6(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"6",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt6",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt9(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"9",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt9",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt10(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"10",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt10",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt11(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"11",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt11",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt12(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"12",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt12",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt13(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"13",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt13",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt14(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"14",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt14",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt16(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"16",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt16",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt21(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"21",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt21",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt22(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"22",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt22",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt23(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"23",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt23",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt24(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"24",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt24",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt25(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"25",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt25",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt26(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"26",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt26",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt27(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"27",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt27",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt28(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"28",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt28",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt29(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"29",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt29",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt30(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"30",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt30",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt31(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"31",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt31",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt33(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"33",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt33",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt50(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
input_file,
"-tt",
"50",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt50",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt_custom(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_sequence",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
f"{here.parent.parent.parent}/sample_files/CUG_ala_code.txt",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt50",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt_custom_alias0(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"translate_seq",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
f"{here.parent.parent.parent}/sample_files/CUG_ala_code.txt",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt50",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
@patch("builtins.print")
def test_translate_sequence_tt_custom_alias1(self, mocked_print):
input_file = f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna"
testargs = [
"biokit",
"trans_seq",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-tt",
f"{here.parent.parent.parent}/sample_files/CUG_ala_code.txt",
]
with patch.object(sys, "argv", testargs):
Biokit()
with open(
f"{here.parent.parent}/expected/GCF_000146045.2_R64_cds_from_genomic.small.fna.tt50",
"r",
) as expected_fa:
expected_fa_content = expected_fa.read()
with open(f"{input_file}.translated.fa", "r") as out_fa:
out_fa_content = out_fa.read()
assert expected_fa_content == out_fa_content
```
#### File: integration/genome/test_gc_content.py
```python
import pytest
from mock import patch, call
from pathlib import Path
import sys
from biokit.biokit import Biokit
here = Path(__file__)
@pytest.mark.integration
class TestGCContent(object):
@patch("builtins.print")
def test_gc_content_invalid_input(self, mocked_print): # noqa
with pytest.raises(SystemExit) as pytest_wrapped_e:
Biokit()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 2
@patch("builtins.print")
def test_gc_content_simple(self, mocked_print):
expected_result = 0.2273
testargs = [
"biokit",
"gc_content",
f"{here.parent.parent.parent}/sample_files/simple.fa",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_gc_content_verbose(self, mocked_print):
expected_result_0 = "lcl|NC_001133.9_cds_NP_009332.1_1\t0.4959"
expected_result_1 = "lcl|NC_001133.9_cds_NP_878038.1_2\t0.4123"
expected_result_2 = "lcl|NC_001133.9_cds_NP_009333.1_3\t0.3608"
testargs = [
"biokit",
"gc_content",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_cds_from_genomic.small.fna",
"-v",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [
call(expected_result_0),
call(expected_result_1),
call(expected_result_2),
]
@pytest.mark.slow
@patch("builtins.print")
def test_gc_content_verbose_slow(self, mocked_print):
expected_result_00 = "NC_001133.9\t0.3927"
expected_result_01 = "NC_001134.8\t0.3834"
expected_result_02 = "NC_001135.5\t0.3853"
expected_result_03 = "NC_001136.10\t0.3791"
expected_result_04 = "NC_001137.3\t0.3851"
expected_result_05 = "NC_001138.5\t0.3873"
expected_result_06 = "NC_001139.9\t0.3806"
expected_result_07 = "NC_001140.6\t0.385"
expected_result_08 = "NC_001141.2\t0.389"
expected_result_09 = "NC_001142.9\t0.3837"
expected_result_10 = "NC_001143.9\t0.3807"
expected_result_11 = "NC_001144.5\t0.3848"
expected_result_12 = "NC_001145.3\t0.382"
expected_result_13 = "NC_001146.8\t0.3864"
expected_result_14 = "NC_001147.6\t0.3816"
expected_result_15 = "NC_001148.4\t0.3806"
expected_result_16 = "NC_001224.1\t0.1711"
testargs = [
"biokit",
"gc_content",
f"{here.parent.parent.parent}/sample_files/GCF_000146045.2_R64_genomic.fna",
"-v",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [
call(expected_result_00),
call(expected_result_01),
call(expected_result_02),
call(expected_result_03),
call(expected_result_04),
call(expected_result_05),
call(expected_result_06),
call(expected_result_07),
call(expected_result_08),
call(expected_result_09),
call(expected_result_10),
call(expected_result_11),
call(expected_result_12),
call(expected_result_13),
call(expected_result_14),
call(expected_result_15),
call(expected_result_16),
]
@patch("builtins.print")
def test_gc_content_simple_alias(self, mocked_print):
expected_result = 0.2273
testargs = [
"biokit",
"gc",
f"{here.parent.parent.parent}/sample_files/simple.fa",
]
with patch.object(sys, "argv", testargs):
Biokit()
assert mocked_print.mock_calls == [call(expected_result)]
``` |
{
"source": "JLSteenwyk/ClipKIT",
"score": 2
} |
#### File: ClipKIT/clipkit/clipkit.py
```python
import getopt
import logging
import os.path
import sys
import time
from Bio import AlignIO, SeqIO
from Bio.Align import MultipleSeqAlignment
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import numpy as np
from .args_processing import process_args
from .files import get_alignment_and_format, FileFormat
from .helpers import (
keep_trim_and_log,
write_keepD,
write_trimD
)
from .modes import TrimmingMode
from .parser import create_parser
from .smart_gap_helper import smart_gap_threshold_determination
from .warnings import (
check_if_all_sites_were_trimmed,
check_if_entry_contains_only_gaps,
)
from .write import write_determining_smart_gap_threshold, write_user_args, write_output_stats
logger = logging.getLogger(__name__)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
def execute(
input_file: str,
input_file_format: FileFormat,
output_file: str,
output_file_format: FileFormat,
gaps: float,
complement: bool,
mode: TrimmingMode,
use_log: bool,
):
"""
Master execute Function
This function executes the main functions and calls other
subfunctions to trim the input file
"""
# logic for whether or not to create a log file
if use_log:
# write INFO level logging to file for user
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(f"{output_file}.log", mode="w")
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
# record the start time for the output statistics
start_time = time.time()
# read in alignment and save the format of the alignment
alignment, input_file_format = get_alignment_and_format(
input_file, file_format=input_file_format
)
# set output file format if not specified
if not output_file_format:
output_file_format = input_file_format
else:
output_file_format = FileFormat[output_file_format]
# determine smart_gap threshold
if mode in {TrimmingMode.smart_gap, TrimmingMode.kpi_smart_gap, TrimmingMode.kpic_smart_gap}:
write_determining_smart_gap_threshold()
gaps = smart_gap_threshold_determination(alignment)
# Print to stdout the user arguments
write_user_args(
input_file,
input_file_format,
output_file,
output_file_format,
gaps,
mode,
complement,
use_log,
)
# create dictionaries of sequences to keep or trim from the alignment
keepD, trimD = keep_trim_and_log(
alignment, gaps, mode, use_log, output_file, complement
)
# check if resulting alignment length is 0
check_if_all_sites_were_trimmed(keepD)
# checking if any sequence entry contains only gaps
check_if_entry_contains_only_gaps(keepD)
# convert keepD and trimD to multiple sequence alignment objects
# and write out file
write_keepD(keepD, output_file, output_file_format)
# if the -c/--complementary argument was used,
# create an alignment of the trimmed sequences
if complement:
write_trimD(trimD, output_file, output_file_format)
# print out output statistics
write_output_stats(alignment, keepD, trimD, start_time)
def main(argv=None):
"""
Function that parses and collects arguments
"""
# parse and assign arguments
parser = create_parser()
args = parser.parse_args()
# pass to master execute function
execute(**process_args(args))
if __name__ == "__main__":
main(sys.argv[1:])
```
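The integration tests further down in this dump drive `execute` directly with keyword arguments rather than going through `main`. A minimal sketch of that programmatic call is shown below; the file paths and trimming settings are illustrative assumptions, not values taken from the repository.
```python
# Hedged sketch: call ClipKIT's execute() directly, mirroring how the
# integration tests invoke it. Paths below are assumptions for illustration.
from clipkit.clipkit import execute
from clipkit.modes import TrimmingMode

execute(
    input_file="alignment.fa",             # assumed input alignment
    input_file_format="fasta",
    output_file="alignment.fa.clipkit",    # trimmed alignment written here
    output_file_format="fasta",
    gaps=0.9,                              # site gappyness threshold
    complement=False,                      # also write the trimmed-away sites?
    mode=TrimmingMode.gappy,
    use_log=False,                         # write a .log file alongside output
)
```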
#### File: ClipKIT/clipkit/smart_gap_helper.py
```python
from collections import Counter
import numpy as np
from .helpers import get_sequence_at_position_and_report_features
def smart_gap_threshold_determination(
alignment
) -> float:
# loop through alignment and determine site-wise gappyness
alignment_length = alignment.get_alignment_length()
# get distribution of gaps rounded to the fourth decimal place
gaps_dist = get_gaps_distribution(alignment, alignment_length)
# count freq of gaps and convert to sorted np array
gaps_arr = count_and_sort_gaps(gaps_dist)
# calculate gap-to-gap slope
slopes = gap_to_gap_slope(gaps_arr, alignment_length)
# find the greatest difference in slopes and use the gappyness
# value at that point as the threshold
return greatest_diff_in_slopes(slopes, gaps_arr)
def greatest_diff_in_slopes(
slopes,
gaps_arr
):
diffs = []
# if there is only one slope, use that value to determine
# the threshold. Otherwise, calculate the greatest difference
# in slopes
if len(slopes) > 1:
for val0, val1 in zip(slopes, slopes[1:]):
diff0 = abs(val0-val1)
diffs.append(diff0)
elif len(slopes) == 0:
return 1
else:
diffs = slopes
return gaps_arr[diffs.index(max(diffs))][0]
def gap_to_gap_slope(
gaps_arr, alignment_length
):
sum_sites_current = gaps_arr[0][1]/alignment_length
sum_sites_previous = 0
slopes = []
for previous, current in zip(gaps_arr, gaps_arr[1:]):
sum_sites_previous+=(previous[1]/alignment_length)
sum_sites_current+=(current[1]/alignment_length)
slopes.append(abs((sum_sites_current-sum_sites_previous)/(current[0] - previous[0])))
# only use first half of slopes
return slopes[:(len(slopes)//2)]
def get_gaps_distribution(alignment, alignment_length: int):
gaps_dist = []
for i in range(0, alignment_length):
seqAtPosition, gappyness = get_sequence_at_position_and_report_features(alignment, i)
gappyness = round(gappyness, 4)
gaps_dist.append(gappyness)
return gaps_dist
def count_and_sort_gaps(gaps_dist: list):
gaps_count = dict(Counter(gaps_dist))
gaps_arr = np.array(list(gaps_count.items()))
return gaps_arr[np.argsort(-gaps_arr[:,0])].tolist()
```
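To make the slope-based threshold selection above concrete, the sketch below pushes a fabricated site-wise gappyness distribution through the pure helper functions; the numbers are invented solely for illustration and do not come from any alignment in the repository.
```python
# Hedged sketch: exercise the smart-gap helpers on a made-up gappyness
# distribution; no Bio.Align alignment object is needed for these functions.
from clipkit.smart_gap_helper import (
    count_and_sort_gaps,
    gap_to_gap_slope,
    greatest_diff_in_slopes,
)

gaps_dist = [0.0, 0.0, 0.5, 0.9, 0.9, 0.9]   # fabricated per-site gappyness
alignment_length = len(gaps_dist)

gaps_arr = count_and_sort_gaps(gaps_dist)    # [[0.9, 3.0], [0.5, 1.0], [0.0, 2.0]]
slopes = gap_to_gap_slope(gaps_arr, alignment_length)
threshold = greatest_diff_in_slopes(slopes, gaps_arr)
print(threshold)                             # 0.9 for this toy distribution
```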
#### File: tests/integration/test_gappy_mode.py
```python
import pytest
from pathlib import Path
from clipkit.clipkit import execute
from clipkit.files import FileFormat
from clipkit.modes import TrimmingMode
here = Path(__file__)
@pytest.mark.integration
class TestGappyMode(object):
def test_simple_no_change(self):
"""
test gappy where no changes are expected in the resulting
output alignment.
usage: clipkit simple.fa
"""
input_file = f"{here.parent}/samples/simple.fa"
output_file = "output/simpla.fa.clipkit"
kwargs = dict(
input_file=input_file,
output_file=output_file,
input_file_format='fasta',
output_file_format='fasta',
complement=False,
gaps=0.9,
mode=TrimmingMode.gappy,
use_log=False,
)
execute(**kwargs)
with open(input_file, "r") as expected:
expected_content = expected.read()
with open(output_file, "r") as out_file:
output_content = out_file.read()
assert expected_content == output_content
def test_12_YIL115C_Anc_2_253_codon_aln(self):
"""
test gappy with codon alignment of yeast sequences
usage: clipkit 12_YIL115C_Anc_2.253_codon_aln.fasta
"""
input_file = f"{here.parent}/samples/12_YIL115C_Anc_2.253_codon_aln.fasta"
output_file = "output/12_YIL115C_Anc_2.253_codon_aln.fasta.clipkit"
in_file_format = 'fasta'
out_file_format = 'fasta'
kwargs = dict(
input_file=input_file,
output_file=output_file,
input_file_format='fasta',
output_file_format='fasta',
complement=False,
gaps=0.9,
mode=TrimmingMode.gappy,
use_log=False,
)
execute(**kwargs)
with open(
f"{here.parent}/expected/12_YIL115C_Anc_2.253_codon_aln.fasta_gappy", "r"
) as expected:
expected_content = expected.read()
with open(output_file, "r") as out_file:
output_content = out_file.read()
assert expected_content == output_content
def test_12_YIL115C_Anc_2_253_aa_aln(self):
"""
test gappy with amino acid alignment of yeast sequences
usage: clipkit 12_YIL115C_Anc_2.253_aa_aln.fasta
"""
input_file = f"{here.parent}/samples/12_YIL115C_Anc_2.253_aa_aln.fasta"
output_file = "output/12_YIL115C_Anc_2.253_aa_aln.fasta.clipkit"
in_file_format = 'fasta'
out_file_format = 'fasta'
kwargs = dict(
input_file=input_file,
output_file=output_file,
input_file_format='fasta',
output_file_format='fasta',
complement=False,
gaps=0.9,
mode=TrimmingMode.gappy,
use_log=False,
)
execute(**kwargs)
with open(
f"{here.parent}/expected/12_YIL115C_Anc_2.253_aa_aln.fasta_gappy", "r"
) as expected:
expected_content = expected.read()
with open(output_file, "r") as out_file:
output_content = out_file.read()
assert expected_content == output_content
def test_24_ENSG00000163519_aa_aln(self):
"""
test gappy with amino acid alignment of mammalian sequences
usage: clipkit 24_ENSG00000163519_aa_aln.fasta
"""
input_file = f"{here.parent}/samples/24_ENSG00000163519_aa_aln.fasta"
output_file = "output/24_ENSG00000163519_aa_aln.fasta.clipkit"
in_file_format = 'fasta'
out_file_format = 'fasta'
kwargs = dict(
input_file=input_file,
output_file=output_file,
input_file_format='fasta',
output_file_format='fasta',
complement=False,
gaps=0.9,
mode=TrimmingMode.gappy,
use_log=False,
)
execute(**kwargs)
with open(
f"{here.parent}/expected/24_ENSG00000163519_aa_aln.fasta_gappy", "r"
) as expected:
expected_content = expected.read()
with open(output_file, "r") as out_file:
output_content = out_file.read()
assert expected_content == output_content
def test_24_ENSG00000163519_codon_aln(self):
"""
test gappy with codon alignment of mammalian sequences
usage: clipkit 24_ENSG00000163519_codon_aln.fasta
"""
input_file = f"{here.parent}/samples/24_ENSG00000163519_codon_aln.fasta"
output_file = "output/24_ENSG00000163519_codon_aln.fasta.clipkit"
in_file_format = 'fasta'
out_file_format = 'fasta'
kwargs = dict(
input_file=input_file,
output_file=output_file,
input_file_format='fasta',
output_file_format='fasta',
complement=False,
gaps=0.9,
mode=TrimmingMode.gappy,
use_log=False,
)
execute(**kwargs)
with open(
f"{here.parent}/expected/24_ENSG00000163519_codon_aln.fasta_gappy", "r"
) as expected:
expected_content = expected.read()
with open(output_file, "r") as out_file:
output_content = out_file.read()
assert expected_content == output_content
def test_EOG091N44M8_aa(self):
"""
test gappy with amino acid alignment of Penicillium sequences
usage: clipkit EOG091N44M8_aa.fa
"""
input_file = f"{here.parent}/samples/EOG091N44M8_aa.fa"
output_file = "output/EOG091N44M8_aa.fa.clipkit"
in_file_format = 'fasta'
out_file_format = 'fasta'
kwargs = dict(
input_file=input_file,
output_file=output_file,
input_file_format='fasta',
output_file_format='fasta',
complement=False,
gaps=0.9,
mode=TrimmingMode.gappy,
use_log=False,
)
execute(**kwargs)
with open(f"{here.parent}/expected/EOG091N44M8_aa.fa_gappy", "r") as expected:
expected_content = expected.read()
with open(output_file, "r") as out_file:
output_content = out_file.read()
assert expected_content == output_content
def test_EOG091N44M8_nt(self):
"""
test gappy with nucleotide alignment of Penicillium sequences
usage: clipkit EOG091N44M8_nt.fa
"""
input_file = f"{here.parent}/samples/EOG091N44M8_nt.fa"
output_file = "output/EOG091N44M8_nt.fa.clipkit"
in_file_format = 'fasta'
out_file_format = 'fasta'
kwargs = dict(
input_file=input_file,
output_file=output_file,
input_file_format='fasta',
output_file_format='fasta',
complement=False,
gaps=0.9,
mode=TrimmingMode.gappy,
use_log=False,
)
execute(**kwargs)
with open(f"{here.parent}/expected/EOG091N44M8_nt.fa_gappy", "r") as expected:
expected_content = expected.read()
with open(output_file, "r") as out_file:
output_content = out_file.read()
assert expected_content == output_content
@pytest.mark.slow
def test_EOG092C0CZK_aa(self):
"""
        test gappy with amino acid alignment of fungal sequences
usage: clipkit EOG092C0CZK_aa_aln.fasta
"""
input_file = f"{here.parent}/samples/EOG092C0CZK_aa_aln.fasta"
output_file = "output/EOG092C0CZK_aa_aln.fasta.clipkit"
in_file_format = 'fasta'
out_file_format = 'fasta'
kwargs = dict(
input_file=input_file,
output_file=output_file,
input_file_format='fasta',
output_file_format='fasta',
complement=False,
gaps=0.9,
mode=TrimmingMode.gappy,
use_log=False,
)
execute(**kwargs)
with open(
f"{here.parent}/expected/EOG092C0CZK_aa_aln.fasta_gappy", "r"
) as expected:
expected_content = expected.read()
with open(output_file, "r") as out_file:
output_content = out_file.read()
assert expected_content == output_content
def test_EOG092C4VOX_aa(self):
"""
        test gappy with amino acid alignment of fungal sequences
usage: clipkit EOG092C4VOX_aa_aln.fasta
"""
input_file = f"{here.parent}/samples/EOG092C4VOX_aa_aln.fasta"
output_file = "output/EOG092C4VOX_aa_aln.fasta.clipkit"
in_file_format = 'fasta'
out_file_format = 'fasta'
kwargs = dict(
input_file=input_file,
output_file=output_file,
input_file_format='fasta',
output_file_format='fasta',
complement=False,
gaps=0.9,
mode=TrimmingMode.gappy,
use_log=False,
)
execute(**kwargs)
with open(
f"{here.parent}/expected/EOG092C4VOX_aa_aln.fasta_gappy", "r"
) as expected:
expected_content = expected.read()
with open(output_file, "r") as out_file:
output_content = out_file.read()
assert expected_content == output_content
@pytest.mark.integration
class TestGappyModeCustomGapsParameter(object):
def test_simple(self):
"""
test gappy with a custom gaps parameter
usage: clipkit simple.fa -g 0.2
"""
input_file = f"{here.parent}/samples/simple.fa"
output_file = "output/simpla.fa.clipkit"
in_file_format = 'fasta'
out_file_format = 'fasta'
kwargs = dict(
input_file=input_file,
output_file=output_file,
input_file_format='fasta',
output_file_format='fasta',
complement=False,
gaps=0.2,
mode=TrimmingMode.gappy,
use_log=False,
)
execute(**kwargs)
with open(
f"{here.parent}/expected/simple.fa_gappy_gaps_set_to_0.2", "r"
) as expected:
expected_content = expected.read()
with open(output_file, "r") as out_file:
output_content = out_file.read()
assert expected_content == output_content
def test_12_YIL115C_Anc_2_253_codon_aln(self):
"""
test gappy with codon alignment of yeast sequences
usage: clipkit 12_YIL115C_Anc_2.253_codon_aln.fasta -g 0.3
"""
input_file = f"{here.parent}/samples/12_YIL115C_Anc_2.253_codon_aln.fasta"
output_file = "output/12_YIL115C_Anc_2.253_codon_aln.fasta.clipkit"
in_file_format = 'fasta'
out_file_format = 'fasta'
kwargs = dict(
input_file=input_file,
output_file=output_file,
input_file_format='fasta',
output_file_format='fasta',
complement=False,
gaps=0.3,
mode=TrimmingMode.gappy,
use_log=False,
)
execute(**kwargs)
with open(
f"{here.parent}/expected/12_YIL115C_Anc_2.253_codon_aln.fasta_gappy_custom_gaps",
"r",
) as expected:
expected_content = expected.read()
with open(output_file, "r") as out_file:
output_content = out_file.read()
assert expected_content == output_content
def test_24_ENSG00000163519_codon_aln(self):
"""
test gappy with codon alignment of mammalian sequences
usage: clipkit 24_ENSG00000163519_codon_aln.fasta -g .4
"""
input_file = f"{here.parent}/samples/24_ENSG00000163519_codon_aln.fasta"
output_file = "output/24_ENSG00000163519_codon_aln.fasta.clipkit"
kwargs = dict(
input_file=input_file,
output_file=output_file,
input_file_format='fasta',
output_file_format='fasta',
complement=False,
gaps=0.4,
mode=TrimmingMode.gappy,
use_log=False,
)
execute(**kwargs)
with open(
f"{here.parent}/expected/24_ENSG00000163519_codon_aln.fasta_gappy_custom_gaps",
"r",
) as expected:
expected_content = expected.read()
with open(output_file, "r") as out_file:
output_content = out_file.read()
assert expected_content == output_content
def test_EOG091N44M8_nt(self):
"""
test gappy with nucleotide alignment of Penicillium sequences
usage: clipkit EOG091N44M8_nt.fa -g .1
"""
input_file = f"{here.parent}/samples/EOG091N44M8_nt.fa"
output_file = "output/EOG091N44M8_nt.fa.clipkit"
kwargs = dict(
input_file=input_file,
output_file=output_file,
input_file_format='fasta',
output_file_format='fasta',
complement=False,
gaps=0.1,
mode=TrimmingMode.gappy,
use_log=False,
)
execute(**kwargs)
with open(
f"{here.parent}/expected/EOG091N44M8_nt.fa_gappy_custom_gaps", "r"
) as expected:
expected_content = expected.read()
with open(output_file, "r") as out_file:
output_content = out_file.read()
assert expected_content == output_content
@pytest.mark.slow
def test_EOG092C0CZK_aa(self):
"""
        test gappy with amino acid alignment of fungal sequences
usage: clipkit EOG092C0CZK_aa_aln.fasta -g .5
"""
input_file = f"{here.parent}/samples/EOG092C0CZK_aa_aln.fasta"
output_file = "output/EOG092C0CZK_aa_aln.fasta.clipkit"
kwargs = dict(
input_file=input_file,
output_file=output_file,
input_file_format='fasta',
output_file_format='fasta',
complement=False,
gaps=0.5,
mode=TrimmingMode.gappy,
use_log=False,
)
execute(**kwargs)
with open(
f"{here.parent}/expected/EOG092C0CZK_aa_aln.fasta_gappy_custom_gaps", "r"
) as expected:
expected_content = expected.read()
with open(output_file, "r") as out_file:
output_content = out_file.read()
assert expected_content == output_content
def test_EOG092C4VOX_aa(self):
"""
        test gappy with amino acid alignment of fungal sequences
usage: clipkit EOG092C4VOX_aa_aln.fasta -g .25
"""
input_file = f"{here.parent}/samples/EOG092C4VOX_aa_aln.fasta"
output_file = "output/EOG092C4VOX_aa_aln.fasta.clipkit"
kwargs = dict(
input_file=input_file,
output_file=output_file,
input_file_format='fasta',
output_file_format='fasta',
complement=False,
gaps=0.25,
mode=TrimmingMode.gappy,
use_log=False,
)
execute(**kwargs)
with open(
f"{here.parent}/expected/EOG092C4VOX_aa_aln.fasta_gappy_custom_gaps", "r"
) as expected:
expected_content = expected.read()
with open(output_file, "r") as out_file:
output_content = out_file.read()
assert expected_content == output_content
```
#### File: tests/unit/test_args_parsing.py
```python
import pytest
from argparse import Namespace
from clipkit.modes import TrimmingMode
from clipkit.args_processing import process_args
@pytest.fixture
def args():
kwargs = dict(
complementary=False,
gaps=None,
input="tests/integration/samples/simple.fa",
input_file_format=None,
log=False,
mode=None,
output="output/simple",
output_file_format=None,
)
return Namespace(**kwargs)
class TestArgsProcessing(object):
def test_process_args_input_file_dne(self, args):
args.input = "some/file/that/doesnt/exist"
with pytest.raises(SystemExit):
process_args(args)
def test_process_args_in_equals_out(self, args):
args.output = args.input
with pytest.raises(SystemExit):
process_args(args)
def test_process_args_default_mode(self, args):
res = process_args(args)
assert res["mode"] == TrimmingMode.smart_gap
def test_process_args_default_complementary(self, args):
args.complementary = None
res = process_args(args)
assert res["complement"] is False
def test_process_args_default_gaps(self, args):
res = process_args(args)
assert res["gaps"] == 0.9
def test_process_args_default_use_logs(self, args):
args.log = None
res = process_args(args)
assert res["use_log"] is False
def test_process_args_default_output_file(self, args):
args.output = None
res = process_args(args)
assert res["output_file"] == f"{args.input}.clipkit"
def test_process_args_expected_keywords(self, args):
res = process_args(args)
expected_keys = [
"input_file",
"output_file",
"input_file_format",
"output_file_format",
"complement",
"gaps",
"mode",
"use_log",
]
assert sorted(res.keys()) == sorted(expected_keys)
```
#### File: tests/unit/test_modes.py
```python
import pytest
from pathlib import Path
import numpy as np
from Bio import AlignIO
from clipkit.modes import TrimmingMode, trim, shouldKeep
here = Path(__file__)
class TestModes(object):
def test_shouldKeep_kpi_gappy_keep(self):
## setup
mode = TrimmingMode.kpi_gappy
gappyness = 0.00
gaps = 0.9
parsimony_informative = True
constant_site = False
assert True == shouldKeep(
mode, parsimony_informative, constant_site, gappyness, gaps
)
def test_shouldKeep_kpi_gappy_trim(self):
## setup
mode = TrimmingMode.kpi_gappy
gappyness = 0.00
gaps = 0.9
parsimony_informative = False
constant_site = False
assert False == shouldKeep(
mode, parsimony_informative, constant_site, gappyness, gaps
)
def test_shouldKeep_gappy_keep(self):
## setup
mode = TrimmingMode.gappy
gappyness = 0.00
gaps = 0.9
parsimony_informative = True
constant_site = False
assert True == shouldKeep(
mode, parsimony_informative, constant_site, gappyness, gaps
)
def test_shouldKeep_gappy_trim(self):
## setup
mode = TrimmingMode.gappy
gappyness = 0.95
gaps = 0.9
parsimony_informative = True
constant_site = False
assert False == shouldKeep(
mode, parsimony_informative, constant_site, gappyness, gaps
)
def test_shouldKeep_kpi_keep(self):
## setup
mode = TrimmingMode.kpi
gappyness = 0.00
gaps = 0.9
parsimony_informative = True
constant_site = False
assert True == shouldKeep(
mode, parsimony_informative, constant_site, gappyness, gaps
)
def test_shouldKeep_kpi_trim(self):
## setup
mode = TrimmingMode.kpi
gappyness = 0.95
gaps = 0.9
parsimony_informative = False
constant_site = False
assert False == shouldKeep(
mode, parsimony_informative, constant_site, gappyness, gaps
)
def test_shouldKeep_kpic_keep(self):
## setup
mode = TrimmingMode.kpic
gappyness = 0.95
gaps = 0.9
parsimony_informative = False
constant_site = True
assert True == shouldKeep(
mode, parsimony_informative, constant_site, gappyness, gaps
)
def test_shouldKeep_kpic_trim(self):
## setup
mode = TrimmingMode.kpic
gappyness = 0.95
gaps = 0.9
parsimony_informative = False
constant_site = False
assert False == shouldKeep(
mode, parsimony_informative, constant_site, gappyness, gaps
)
def test_shouldKeep_kpic_gappy_keep(self):
## setup
mode = TrimmingMode.kpic_gappy
gappyness = 0.70
gaps = 0.9
parsimony_informative = False
constant_site = True
assert True == shouldKeep(
mode, parsimony_informative, constant_site, gappyness, gaps
)
def test_shouldKeep_kpic_gappy_trim(self):
## setup
mode = TrimmingMode.kpic_gappy
gappyness = 0.95
gaps = 0.9
parsimony_informative = False
constant_site = True
assert False == shouldKeep(
mode, parsimony_informative, constant_site, gappyness, gaps
)
def test_gappy_mode(self):
## setup
gappyness = 0.00
parsimony_informative = True
keepD = {}
trimD = {}
i = 2
gaps = 0.9
alignment = AlignIO.read(f"{here.parent}/examples/simple.fa", "fasta")
use_log = False
constant_site = False
for entry in alignment:
keepD[entry.id] = np.empty([6], dtype=str)
trimD = {}
for entry in alignment:
trimD[entry.id] = np.empty([6], dtype=str)
## execution
keepD, trimD = trim(
gappyness,
parsimony_informative,
constant_site,
keepD,
trimD,
i,
gaps,
alignment,
TrimmingMode.gappy,
use_log,
)
## check results
expected_keepD = {
"1": np.array(["", "", "G", "", "", ""]),
"2": np.array(["", "", "G", "", "", ""]),
"3": np.array(["", "", "G", "", "", ""]),
"4": np.array(["", "", "A", "", "", ""]),
"5": np.array(["", "", "a", "", "", ""]),
}
expected_trimD = {
"1": np.array(["", "", "", "", "", ""]),
"2": np.array(["", "", "", "", "", ""]),
"3": np.array(["", "", "", "", "", ""]),
"4": np.array(["", "", "", "", "", ""]),
"5": np.array(["", "", "", "", "", ""]),
}
assert expected_keepD.keys() == keepD.keys()
assert all(
np.array_equal(expected_keepD[key], keepD[key]) for key in expected_keepD
)
assert expected_trimD.keys() == trimD.keys()
assert all(
np.array_equal(expected_trimD[key], trimD[key]) for key in expected_trimD
)
def test_kpi_gappy_mode(self):
## setup
gappyness = 0.6
parsimony_informative = False
constant_site = True
keepD = {}
trimD = {}
i = 1
gaps = 0.9
alignment = AlignIO.read(f"{here.parent}/examples/simple.fa", "fasta")
use_log = False
for entry in alignment:
keepD[entry.id] = np.empty([6], dtype=str)
trimD = {}
for entry in alignment:
trimD[entry.id] = np.empty([6], dtype=str)
## execution
keepD, trimD = trim(
gappyness,
parsimony_informative,
constant_site,
keepD,
trimD,
i,
gaps,
alignment,
TrimmingMode.kpi_gappy,
use_log,
)
## check results
expected_keepD = {
"1": np.array(["", "", "", "", "", ""]),
"2": np.array(["", "", "", "", "", ""]),
"3": np.array(["", "", "", "", "", ""]),
"4": np.array(["", "", "", "", "", ""]),
"5": np.array(["", "", "", "", "", ""]),
}
expected_trimD = {
"1": np.array(["", "-", "", "", "", ""]),
"2": np.array(["", "-", "", "", "", ""]),
"3": np.array(["", "-", "", "", "", ""]),
"4": np.array(["", "G", "", "", "", ""]),
"5": np.array(["", "C", "", "", "", ""]),
}
assert expected_keepD.keys() == keepD.keys()
assert all(
np.array_equal(expected_keepD[key], keepD[key]) for key in expected_keepD
)
assert expected_trimD.keys() == trimD.keys()
assert all(
np.array_equal(expected_trimD[key], trimD[key]) for key in expected_trimD
)
def test_kpi_mode(self):
## setup
gappyness = 0.2
parsimony_informative = True
constant_site = False
keepD = {}
trimD = {}
i = 5
gaps = 0.9
alignment = AlignIO.read(f"{here.parent}/examples/simple.fa", "fasta")
use_log = False
for entry in alignment:
keepD[entry.id] = np.empty([6], dtype=str)
trimD = {}
for entry in alignment:
trimD[entry.id] = np.empty([6], dtype=str)
## execution
keepD, trimD = trim(
gappyness,
parsimony_informative,
constant_site,
keepD,
trimD,
i,
gaps,
alignment,
TrimmingMode.kpi,
use_log,
)
## check results
expected_keepD = {
"1": np.array(["", "", "", "", "", "T"]),
"2": np.array(["", "", "", "", "", "T"]),
"3": np.array(["", "", "", "", "", "A"]),
"4": np.array(["", "", "", "", "", "A"]),
"5": np.array(["", "", "", "", "", "-"]),
}
expected_trimD = {
"1": np.array(["", "", "", "", "", ""]),
"2": np.array(["", "", "", "", "", ""]),
"3": np.array(["", "", "", "", "", ""]),
"4": np.array(["", "", "", "", "", ""]),
"5": np.array(["", "", "", "", "", ""]),
}
assert expected_keepD.keys() == keepD.keys()
assert all(
np.array_equal(expected_keepD[key], keepD[key]) for key in expected_keepD
)
assert expected_trimD.keys() == trimD.keys()
assert all(
np.array_equal(expected_trimD[key], trimD[key]) for key in expected_trimD
)
def test_kpic_mode(self):
## setup
gappyness = 0.2
parsimony_informative = False
constant_site = True
keepD = {}
trimD = {}
i = 0
gaps = 0.9
alignment = AlignIO.read(f"{here.parent}/examples/simple.fa", "fasta")
use_log = False
for entry in alignment:
keepD[entry.id] = np.empty([6], dtype=str)
trimD = {}
for entry in alignment:
trimD[entry.id] = np.empty([6], dtype=str)
## execution
keepD, trimD = trim(
gappyness,
parsimony_informative,
constant_site,
keepD,
trimD,
i,
gaps,
alignment,
TrimmingMode.kpic,
use_log,
)
## check results
expected_keepD = {
"1": np.array(["A", "", "", "", "", ""]),
"2": np.array(["A", "", "", "", "", ""]),
"3": np.array(["A", "", "", "", "", ""]),
"4": np.array(["A", "", "", "", "", ""]),
"5": np.array(["A", "", "", "", "", ""]),
}
expected_trimD = {
"1": np.array(["", "", "", "", "", ""]),
"2": np.array(["", "", "", "", "", ""]),
"3": np.array(["", "", "", "", "", ""]),
"4": np.array(["", "", "", "", "", ""]),
"5": np.array(["", "", "", "", "", ""]),
}
assert expected_keepD.keys() == keepD.keys()
assert all(
np.array_equal(expected_keepD[key], keepD[key]) for key in expected_keepD
)
assert expected_trimD.keys() == trimD.keys()
assert all(
np.array_equal(expected_trimD[key], trimD[key]) for key in expected_trimD
)
def test_kpic_gappy_mode(self):
## setup
gappyness = 0.2
parsimony_informative = False
constant_site = False
keepD = {}
trimD = {}
i = 3
gaps = 0.9
alignment = AlignIO.read(f"{here.parent}/examples/simple.fa", "fasta")
use_log = False
for entry in alignment:
keepD[entry.id] = np.empty([6], dtype=str)
trimD = {}
for entry in alignment:
trimD[entry.id] = np.empty([6], dtype=str)
## execution
keepD, trimD = trim(
gappyness,
parsimony_informative,
constant_site,
keepD,
trimD,
i,
gaps,
alignment,
TrimmingMode.kpic_gappy,
use_log,
)
## check results
expected_keepD = {
"1": np.array(["", "", "", "", "", ""]),
"2": np.array(["", "", "", "", "", ""]),
"3": np.array(["", "", "", "", "", ""]),
"4": np.array(["", "", "", "", "", ""]),
"5": np.array(["", "", "", "", "", ""]),
}
expected_trimD = {
"1": np.array(["", "", "", "T", "", ""]),
"2": np.array(["", "", "", "-", "", ""]),
"3": np.array(["", "", "", "-", "", ""]),
"4": np.array(["", "", "", "-", "", ""]),
"5": np.array(["", "", "", "-", "", ""]),
}
assert expected_keepD.keys() == keepD.keys()
assert all(
np.array_equal(expected_keepD[key], keepD[key]) for key in expected_keepD
)
assert expected_trimD.keys() == trimD.keys()
assert all(
np.array_equal(expected_trimD[key], trimD[key]) for key in expected_trimD
)
``` |
{
"source": "JLSteenwyk/orthofisher",
"score": 2
} |
#### File: tests/unit/test_args_parsing.py
```python
import pytest
from argparse import Namespace
from orthofisher.args_processing import process_args
from orthofisher.parser import create_parser
@pytest.fixture
def args():
kwargs = dict(
fasta="tests/samples/input.txt",
hmm="tests/samples/hmms.txt",
evalue=0.001,
bitscore=0.85
)
return Namespace(**kwargs)
class TestArgsProcessing(object):
def test_process_args_fasta_file_dne(self, args):
args.fasta = "some/file/that/doesnt/exist"
with pytest.raises(SystemExit):
process_args(args)
def test_process_args_hmm_file_dne(self, args):
args.hmm = "some/file/that/doesnt/exist"
with pytest.raises(SystemExit):
process_args(args)
def test_process_args_default_evalue(self, args):
args.evalue = None
res = process_args(args)
assert res["evalue"] == 0.001
def test_process_args_custom_evalue(self, args):
args.evalue = 1e-5
res = process_args(args)
assert res["evalue"] == 0.00001
def test_process_args_default_bitscore(self, args):
args.bitscore = None
res = process_args(args)
assert res["percent_bitscore"] == 0.85
def test_process_args_custom_bitscore(self, args):
args.bitscore = 0.5
res = process_args(args)
assert res["percent_bitscore"] == 0.5
def test_process_args_expected_keywords(self, args):
res = process_args(args)
expected_keys = [
"fasta_file_list",
"hmms_file_list",
"evalue",
"percent_bitscore"
]
assert sorted(res.keys()) == sorted(expected_keys)
class TestParser(object):
def test_create_parser(self, args):
parser = create_parser()
assert parser.add_help == False
assert parser.conflict_handler == 'error'
assert parser.prog == '__main__.py'
```
#### File: tests/unit/test_entrypoint.py
```python
import os
import pytest
import subprocess
class TestEntrypoint(object):
@pytest.mark.slow
def test_help(self):
cmd = "orthofisher --help"
exit_status = os.system(cmd)
assert exit_status == 0
@pytest.mark.slow
def test_no_args(self):
cmd = "orthofisher"
exit_status = os.system(cmd)
assert exit_status == 0
@pytest.mark.slow
def test_run(self):
cmd = "orthofisher -m tests/samples/hmms.txt -f tests/samples/input.txt"
exit_status = os.system(cmd)
assert exit_status == 0
@pytest.mark.slow
def test_hmm_list_input_error(self):
cmd = "orthofisher -m /file/doesnt/exist -f tests/samples/input.txt"
response = subprocess.check_output([cmd], stderr=subprocess.STDOUT, shell=True)
assert response == b"HMM file does not exist\n"
@pytest.mark.slow
def test_fasta_list_input_error(self):
cmd = "orthofisher -f /file/doesnt/exist -m tests/samples/hmms.txt"
response = subprocess.check_output([cmd], stderr=subprocess.STDOUT, shell=True)
assert response == b"Fasta file list does not exist\n"
@pytest.mark.slow
def test_evalue_input_error(self):
cmd = "orthofisher -f tests/samples/input.txt -m tests/samples/hmms.txt -e error"
exit_status = os.system(cmd)
assert exit_status == 0
@pytest.mark.slow
def test_bitscore_input_error(self):
cmd = "orthofisher -f tests/samples/input.txt -m tests/samples/hmms.txt -b error"
exit_status = os.system(cmd)
assert exit_status == 512
``` |
{
"source": "JLSteenwyk/orthosnap",
"score": 3
} |
#### File: orthosnap/orthosnap/helper.py
```python
from collections import Counter
import copy
import re
from Bio import Phylo
from Bio import SeqIO
def collapse_low_support_bipartitions(newtree, support: float):
"""
collapse bipartitions with support less than threshold
"""
newtree.collapse_all(lambda c: c.confidence is not None and c.confidence < support)
return newtree
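# Illustrative note (an assumption about typical use): with support=80, an internal
# bipartition with confidence 55 is collapsed into a polytomy, while bipartitions
# whose confidence is None are kept, per the lambda above.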
def determine_if_dups_are_sister(subtree_tips: list):
"""
determine if dups are sister to one another
"""
# get first set of subtree tips
first_set_of_subtree_tips = subtree_tips[0]
# set if duplicate sequences are sister as True
are_sisters = True
# check if duplicate sequences are sister
for set_of_subtree_tips in subtree_tips[1:]:
if first_set_of_subtree_tips != set_of_subtree_tips:
are_sisters = False
if not are_sisters:
break
return are_sisters
def get_all_tips_and_taxa_names(tree):
"""
get all taxa and tip names in a phylogeny
return lists with information from each
"""
taxa = []
all_tips = []
# loop through the tree and collect terminal names
for term in tree.get_terminals():
taxa_name = term.name[:term.name.index("|")]
if taxa_name not in taxa:
taxa.append(taxa_name)
all_tips.append(term.name)
return taxa, all_tips
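# Expected tip-name convention (inferred from the parsing above, shown here as an
# illustrative assumption): a tip labelled "speciesA|gene_1" contributes the taxon
# "speciesA" to `taxa` and the full label "speciesA|gene_1" to `all_tips`.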
def get_tips_and_taxa_names_and_taxa_counts_from_subtrees(inter):
"""
    get taxa, counts of each taxon, and all terminal names
"""
taxa_from_terms = []
terms = []
# get taxa and terminal names from subtree
for term in inter.get_terminals():
taxa_from_terms.append(term.name.split("|", 1)[0])
terms.append(term.name)
# count number of taxa in subtree
counts_of_taxa_from_terms = Counter(taxa_from_terms)
counts = []
# count number of times each taxon is present
for count in counts_of_taxa_from_terms.values():
counts.append(count)
return taxa_from_terms, terms, counts_of_taxa_from_terms, counts
def get_subtree_tips(terms: list, name: str, tree):
"""
get lists of subsubtrees from subtree
"""
# get the duplicate sequences
dups = [e for e in terms if e.startswith(name)]
subtree_tips = []
# for individual sequence among duplicate sequences
for dup in dups:
# create a copy of the tree
temptree = copy.deepcopy(tree)
# get the node path for the duplicate sequence
node_path = temptree.get_path(dup)
# for the terminals of the parent of the duplicate sequence
# get the terminal names and append them to temp
temp = []
for term in node_path[-2].get_terminals():
temp.append(term.name)
subtree_tips.append(temp)
return subtree_tips, dups
def handle_multi_copy_subtree(
all_tips: list,
terms: list,
newtree,
subgroup_counter: int,
fasta: str,
support: float,
fasta_dict: dict,
assigned_tips: list,
counts_of_taxa_from_terms,
tree,
):
"""
    handling case where subtree contains multi-copy genes
"""
# prune subtree to get subtree of interest
newtree = prune_subtree(all_tips, terms, newtree)
# collapse bipartition with low support
newtree = collapse_low_support_bipartitions(newtree, support)
# remove duplicate sequences if they are sister to one another
# following the approach in PhyloTreePruner
for name in counts_of_taxa_from_terms:
# if the taxon is represented by more than one sequence
if counts_of_taxa_from_terms[name] > 1:
# get subtree tips
subtree_tips, dups = get_subtree_tips(terms, name, tree)
# check if subtrees are sister to one another
are_sisters = determine_if_dups_are_sister(subtree_tips)
# if duplicate sequences are sister, get the longest sequence
if are_sisters:
# trim short sequences and keep long sequences in newtree
newtree, terms = keep_long_sequences(newtree, fasta_dict, dups, terms)
# if the resulting subtree has only single copy genes
# create a fasta file with sequences from tip labels
_, _, _, counts = get_tips_and_taxa_names_and_taxa_counts_from_subtrees(newtree)
if set(counts) == set([1]):
(
subgroup_counter,
assigned_tips,
) = write_output_fasta_and_account_for_assigned_tips_single_copy_case(
fasta, subgroup_counter, terms, fasta_dict, assigned_tips
)
return subgroup_counter, assigned_tips
def handle_single_copy_subtree(
all_tips: list,
terms: list,
newtree,
subgroup_counter: int,
fasta: str,
support: float,
fasta_dict: dict,
assigned_tips: list,
):
"""
handling case where subtree contains all single copy genes
"""
# prune subtree to get subtree of interest
newtree = prune_subtree(all_tips, terms, newtree)
# collapse bipartition with low support
newtree = collapse_low_support_bipartitions(newtree, support)
# add list of terms to assigned_tips list
# and create subgroup fasta files
(
subgroup_counter,
assigned_tips,
) = write_output_fasta_and_account_for_assigned_tips_single_copy_case(
fasta, subgroup_counter, terms, fasta_dict, assigned_tips
)
return subgroup_counter, assigned_tips
def keep_long_sequences(newtree, fasta_dict: dict, dups: list, terms: list):
"""
    remove short sequences among duplicates that are sister to one another
"""
seq_lengths = dict()
for dup in dups:
seq_lengths[dup] = len(re.sub("-", "", str(fasta_dict[dup].seq)))
longest_seq = max(seq_lengths, key=seq_lengths.get)
# trim shorter sequences from the tree
for seq_name, _ in seq_lengths.items():
if seq_name != longest_seq:
newtree.prune(seq_name)
terms.remove(seq_name)
return newtree, terms
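# Illustrative sketch with hypothetical names: given sister duplicates
# "speciesA|g1" (ungapped length 120) and "speciesA|g2" (ungapped length 95),
# "speciesA|g2" is pruned from `newtree` and dropped from `terms`, so only the
# longest sequence per taxon remains.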
def prune_subtree(all_tips: list, terms: list, newtree):
"""
prune tips not of interest for subtree
"""
tips_to_prune = [i for i in all_tips + terms if i not in all_tips or i not in terms]
for tip in tips_to_prune:
newtree.prune(tip)
return newtree
def read_input_files(tree: str, fasta: str):
"""
read input files and midpoint root tree
"""
tree = Phylo.read(tree, "newick")
tree.root_at_midpoint()
fasta = SeqIO.to_dict(SeqIO.parse(fasta, "fasta"))
return tree, fasta
def write_output_fasta_and_account_for_assigned_tips_single_copy_case(
fasta: str,
subgroup_counter: int,
terms: list,
fasta_dict: dict,
assigned_tips: list,
):
# write output
output_file_name = f"{fasta}.orthosnap.{subgroup_counter}.fa"
with open(output_file_name, "w") as output_handle:
for term in terms:
SeqIO.write(fasta_dict[term], output_handle, "fasta")
assigned_tips.append(term)
subgroup_counter += 1
return subgroup_counter, assigned_tips
```
#### File: tests/unit/test_parser.py
```python
import pytest
from orthosnap.parser import create_parser
@pytest.fixture
def parser():
return create_parser()
class TestParser(object):
def test_required_only(self, parser):
fasta = "my/input/file.fa"
tree = "my/input/tree.tree"
parsed = parser.parse_args(["-f", fasta, "-t", tree])
assert parsed.fasta == fasta
assert parsed.tree == tree
def test_occupancy(self, parser):
fasta = "my/input/file.fa"
tree = "my/input/tree.tree"
occupancy = ".2"
parsed = parser.parse_args(["-f", fasta, "-t", tree, "-o", occupancy])
assert parsed.fasta == fasta
assert parsed.tree == tree
assert parsed.occupancy == float(occupancy)
def test_support(self, parser):
fasta = "my/input/file.fa"
tree = "my/input/tree.tree"
support = "70"
parsed = parser.parse_args(["-f", fasta, "-t", tree, "-s", support])
assert parsed.fasta == fasta
assert parsed.tree == tree
assert parsed.support == float(support)
``` |
{
"source": "JLSteenwyk/PhyKIT",
"score": 2
} |
#### File: integration/tree/test_root_tree.py
```python
import pytest
import sys
from math import isclose
from mock import patch, call
from pathlib import Path
from textwrap import dedent
from phykit.phykit import Phykit
here = Path(__file__)
@pytest.mark.integration
class TestRootTree(object):
@patch("builtins.print")
def test_root_tree(self, mocked_print):
testargs = [
"phykit",
"root_tree",
f"{here.parent.parent.parent}/sample_files/tree_simple.tre",
"-r",
f"{here.parent.parent.parent}/sample_files/tree_simple.outgroup.txt",
]
with patch.object(sys, "argv", testargs):
Phykit()
with open(f"{here.parent.parent}/expected/tree_simple.tre.rooted", "r") as expected_tree:
expected_tree_content = expected_tree.read()
with open(f"{here.parent.parent.parent}/sample_files/tree_simple.tre.rooted", "r") as out_tree:
out_tree_content = out_tree.read()
assert expected_tree_content == out_tree_content
@patch("builtins.print")
def test_root_tree_alias0(self, mocked_print):
testargs = [
"phykit",
"root",
f"{here.parent.parent.parent}/sample_files/tree_simple.tre",
"-r",
f"{here.parent.parent.parent}/sample_files/tree_simple.outgroup.txt",
]
with patch.object(sys, "argv", testargs):
Phykit()
with open(f"{here.parent.parent}/expected/tree_simple.tre.rooted", "r") as expected_tree:
expected_tree_content = expected_tree.read()
with open(f"{here.parent.parent.parent}/sample_files/tree_simple.tre.rooted", "r") as out_tree:
out_tree_content = out_tree.read()
assert expected_tree_content == out_tree_content
@patch("builtins.print")
def test_root_tree_alias1(self, mocked_print):
testargs = [
"phykit",
"rt",
f"{here.parent.parent.parent}/sample_files/tree_simple.tre",
"-r",
f"{here.parent.parent.parent}/sample_files/tree_simple.outgroup.txt",
]
with patch.object(sys, "argv", testargs):
Phykit()
with open(f"{here.parent.parent}/expected/tree_simple.tre.rooted", "r") as expected_tree:
expected_tree_content = expected_tree.read()
with open(f"{here.parent.parent.parent}/sample_files/tree_simple.tre.rooted", "r") as out_tree:
out_tree_content = out_tree.read()
assert expected_tree_content == out_tree_content
@patch("builtins.print")
def test_root_tree_incorrect_tree_path(self, mocked_print):
testargs = [
"phykit",
"root_tree",
f"{here.parent.parent.parent}/sample_files/tree_simple.tr",
"-r",
f"{here.parent.parent.parent}/sample_files/tree_simple.outgroup.txt",
]
with pytest.raises(SystemExit) as pytest_wrapped_e:
Phykit()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 2
@patch("builtins.print")
def test_root_tree_incorrect_root_path(self, mocked_print):
testargs = [
"phykit",
"root_tree",
f"{here.parent.parent.parent}/sample_files/tree_simple.tre",
"-r",
f"{here.parent.parent.parent}/sample_files/tree_simple.outgroup.tx",
]
with patch.object(sys, "argv", testargs):
with pytest.raises(SystemExit) as pytest_wrapped_e:
Phykit()
assert pytest_wrapped_e.type == SystemExit
mocked_print.assert_has_calls([
call("Please check file name and pathing"),
])
@patch("builtins.print")
def test_root_tree_custom_output(self, mocked_print):
testargs = [
"phykit",
"root_tree",
f"{here.parent.parent.parent}/sample_files/tree_simple.tre",
"-r",
f"{here.parent.parent.parent}/sample_files/tree_simple.outgroup.txt",
"-o",
f"{here.parent.parent.parent}/sample_files/tree_simple_rooted_custom_out.tre"
]
with patch.object(sys, "argv", testargs):
Phykit()
with open(f"{here.parent.parent}/expected/tree_simple.tre.rooted", "r") as expected_tree:
expected_tree_content = expected_tree.read()
with open(f"{here.parent.parent.parent}/sample_files/tree_simple_rooted_custom_out.tre", "r") as out_tree:
out_tree_content = out_tree.read()
assert expected_tree_content == out_tree_content
```
#### File: services/alignment/test_alignment_length.py
```python
import pytest
from argparse import Namespace
from mock import patch, call
from phykit.services.alignment.alignment_length import AlignmentLength
from phykit.services.alignment.base import Alignment
@pytest.fixture
def args():
kwargs = dict(alignment="/some/path/to/file.fa")
return Namespace(**kwargs)
class TestAlignmentLength(object):
def test_init_sets_alignment_file_path(self, args):
aln = AlignmentLength(args)
assert aln.alignment_file_path == args.alignment
assert aln.output_file_path is None
def test_alignment_length_is_printed(self, mocker, args):
expected_length = "6"
aln = mocker.MagicMock(
get_alignment_length=mocker.MagicMock(return_value=expected_length)
)
mocked_print = mocker.patch("builtins.print")
mocked_get_alignment_and_format = mocker.patch("phykit.services.alignment.alignment_length.AlignmentLength.get_alignment_and_format", return_value=(aln, ''))
aln_len = AlignmentLength(args)
res = aln_len.run()
assert mocked_get_alignment_and_format.called
assert mocked_print.mock_calls == [
call(expected_length)
]
``` |
{
"source": "jlstevens/awesome-panel",
"score": 3
} |
#### File: templates/bootstrap_dashboard/bootstrap_dashboard.py
```python
import pathlib
import panel as pn
import awesome_panel.express as pnx
from awesome_panel.express.assets import SCROLLBAR_PANEL_EXPRESS_CSS
BOOTSTRAP_DASHBOARD_CSS = pathlib.Path(__file__).parent / "bootstrap_dashboard.css"
BOOTSTRAP_DASHBOARD_TEMPLATE = pathlib.Path(__file__).parent / "bootstrap_dashboard.html"
HEADER_HEIGHT = 58
SIDEBAR_WIDTH = 200
# Hack to make dynamically adding plotly work:
# See https://github.com/holoviz/panel/issues/840
pn.extension("plotly")
class BootstrapDashboardTemplate(pn.Template):
"""A Basic App Template"""
def __init__(self, app_title: str = "App Name", app_url="#"):
pn.config.raw_css.append(BOOTSTRAP_DASHBOARD_CSS.read_text())
pn.config.raw_css.append(SCROLLBAR_PANEL_EXPRESS_CSS.read_text())
pnx.Code.extend()
pnx.bootstrap.extend()
pnx.fontawesome.extend()
template = BOOTSTRAP_DASHBOARD_TEMPLATE.read_text()
app_title = pn.Row(
pn.layout.HSpacer(),
pn.pane.Markdown(f"[{app_title}]({app_url})", css_classes=["app-title"],),
pn.layout.HSpacer(),
width=SIDEBAR_WIDTH,
)
header = pn.Row(
app_title, pn.layout.HSpacer(), sizing_mode="stretch_width", height=HEADER_HEIGHT,
)
top_spacer = pn.layout.HSpacer(height=15)
self.header = header
self.sidebar = pn.Column(top_spacer, height_policy="max", width=SIDEBAR_WIDTH)
self.main = pn.Column(sizing_mode="stretch_width", margin=(25, 50, 25, 50))
items = {"header": header, "sidebar": self.sidebar, "main": self.main}
super().__init__(template=template, items=items)
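# Minimal usage sketch (illustrative only; the titles and page content below are
# assumptions, not taken from the real site):
if __name__.startswith("bk"):
    _demo = BootstrapDashboardTemplate(app_title="Demo App", app_url="https://example.com")
    _demo.sidebar.append(pn.pane.Markdown("## Navigation"))
    _demo.main.append(pn.pane.Markdown("# Hello from the Bootstrap dashboard template"))
    _demo.servable()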
```
#### File: scripts/ddd/algo.py
```python
from typing import Dict, List, Optional
class Person:
def __init__(self, name: str, drunk_factor: int, leader_name: Optional[str]):
self.name = name
self.drunk_factor = drunk_factor
self.leader_name = leader_name
self.subs: List["Person"] = []
self._max_drunk_factor_tree = None
self._max_communication_time_tree = None
@staticmethod
def create_from_line(line: str) -> "Person":
split = line.split(" ")
if len(split) == 2:
return Person(split[0], int(split[1]), None)
if len(split) == 3:
return Person(split[0], int(split[1]), split[2])
@staticmethod
def create_from_lines(lines: str) -> Dict[str, "Person"]:
persons = {}
for person in (Person.create_from_line(line) for line in lines.splitlines()):
persons[person.name] = person
for person in persons.values():
if person.leader_name:
persons[person.leader_name].subs.append(person)
return persons
@classmethod
def create_from_input(cls) -> Dict[str, "Person"]:
n = int(input())
lines = []
for _ in range(n):
lines.append(input())
return cls.create_from_lines("\n".join(lines))
@property
def max_drunk_factor_tree(self) -> int:
if self._max_drunk_factor_tree:
return self._max_drunk_factor_tree
if not self.subs:
return self.drunk_factor
self._max_drunk_factor_tree = self.drunk_factor + max(
(person.max_drunk_factor_tree) for person in self.subs
)
return self._max_drunk_factor_tree
@property
def max_communication_time_tree(self) -> int:
if self._max_communication_time_tree:
return self._max_communication_time_tree
if not self.subs:
return 0
if len(self.subs) == 1:
if self.subs[0].max_communication_time_tree > 0:
return self.subs[0].max_communication_time_tree
return self.max_drunk_factor_tree
drunk_factor_highest = 0
drunk_factor_second_highest = 0
max_communication_time_sub_tree = 0
for person in self.subs:
if person.max_drunk_factor_tree > drunk_factor_highest:
drunk_factor_second_highest = drunk_factor_highest
drunk_factor_highest = person.max_drunk_factor_tree
elif person.max_drunk_factor_tree > drunk_factor_second_highest:
drunk_factor_second_highest = person.max_drunk_factor_tree
if person.max_communication_time_tree > max_communication_time_sub_tree:
max_communication_time_sub_tree = person.max_communication_time_tree
max_communication_time_two_subs = (
self.drunk_factor + drunk_factor_highest + drunk_factor_second_highest
)
self._max_communication_time_tree = max(
max_communication_time_sub_tree, max_communication_time_two_subs
)
return self._max_communication_time_tree
@property
def is_leader(self) -> bool:
return self.leader_name is None
def __str__(self):
if self.is_leader:
return self.name + " " + str(self.drunk_factor)
return self.name + " " + str(self.drunk_factor) + " " + self.leader_name
def __repr__(self):
return self.__str__()
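def _example_usage():
    # Hypothetical sanity check, not part of the original solution: leader A with
    # drunk factor 1 and two direct subordinates B (2) and C (3). The best chain is
    # B -> A -> C, so the maximum communication time is 2 + 1 + 3 = 6.
    people = Person.create_from_lines("A 1\nB 2 A\nC 3 A")
    assert people["A"].max_communication_time_tree == 6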
if __name__ == "__main__":
persons = Person.create_from_input()
leader_name = list(persons)[0]
leader = persons[leader_name]
print(leader.max_communication_time_tree)
```
#### File: gallery/awesome_panel_express_tests/test_bootstrap_alerts.py
```python
import panel as pn
import awesome_panel.express as pnx
from awesome_panel.express.testing import TestApp
pnx.bootstrap.extend()
def test_info_alert():
"""We can show an InfoAlert
- Blue Div with normal and bold text
- Curently not full width
"""
return TestApp(
test_info_alert, pnx.InfoAlert("This is an **Info Alert**!"), sizing_mode="stretch_width",
)
def test_warning_alert():
"""We can show a Warning Alert
- Yellow Div with normal and bold text
    - Currently not full width
"""
return TestApp(
test_warning_alert,
pnx.WarningAlert("This is a **Warning Alert**!"),
sizing_mode="stretch_width",
)
def test_error_alert():
"""We can show an Error Alert
- Red Div with normal and bold text
- Curently not full width
"""
return TestApp(
test_error_alert,
pnx.ErrorAlert("This is an **Error Alert**!"),
sizing_mode="stretch_width",
)
def test_info_alert_height_problem():
"""The Bokeh Layout Engine does not layout the height of the Markdown Alerts very well.
We see that the height of the InfoAlert is much greater than it needs to be. This is a general
problem for the Panel Markdown pane. See [Issue 829](https://github.com/holoviz/panel/issues/829)
"""
text = """\
Navigate to the **Dashboard Page** via the **Sidebar** to see the result.
Or Navigate to the **Limitations Page** to learn of some of the limitations of Panel that
I've experienced."""
return TestApp(
test_info_alert_height_problem,
pnx.InfoAlert(text, sizing_mode="stretch_width"),
sizing_mode="stretch_width",
)
def view() -> pn.Column:
"""Wraps all tests in a Column that can be included in the Gallery or served independently
Returns:
pn.Column -- An Column containing all the tests
"""
return pn.Column(
pnx.Markdown(__doc__),
test_info_alert(),
test_error_alert(),
test_warning_alert(),
test_info_alert_height_problem(),
sizing_mode="stretch_width",
)
if __name__.startswith("bk"):
view().servable()
```
#### File: gallery/awesome_panel_express_tests/test_markdown.py
```python
import pathlib
import panel as pn
import awesome_panel.express as pnx
from awesome_panel.express.testing import TestApp
TEST_MD_FILE = pathlib.Path(__file__).parent / "data" / "test.md"
pnx.Code.extend()
def test_markdown():
"""We test that
- A "Header is shown"
- The background is blue
- The sizing_mode is "stretch_width" by default. DOES NOT WORK CURRENTLY
"""
return TestApp(
test_markdown,
pnx.Markdown("# Header", name="basic", background="lightblue"),
sizing_mode="stretch_width",
background="lightgray",
max_width=600,
)
def test_markdown_from_file():
"""We test that
    - A path to a markdown file can be used directly in one line
"""
return TestApp(
test_markdown_from_file,
pnx.Markdown(path=TEST_MD_FILE, name="file", background="lightblue"),
)
def test_markdown_indendation():
"""We test the Markdown pane
    - can handle leading spaces, i.e. this line shows as a bulleted list and not in mono-space
"""
return TestApp(test_markdown_indendation, sizing_mode="stretch_width",)
def test_markdown_code_block():
"""We test that
    - Code blocks are supported. Sort of. BUT THE INDENTATION IS CURRENTLY LOST!
- Indented markdown test from editors is supported. The Panel Markdown does not support this.
"""
code_block = """
This is not indented
```python
print("Hello Awesome Panel World")
return TestApp(
test_markdown_code_block,
pnx.Markdown(code_block, name="code block", background="lightblue"),
```
This is indented```
"""
return TestApp(
test_markdown_code_block,
pnx.Markdown(code_block, name="code block", background="lightblue"),
)
def view() -> pn.Column:
"""Wraps all tests in a Column that can be included in the Gallery or served independently
Returns:
pn.Column -- An Column containing all the tests
"""
return pn.Column(
pnx.Markdown(__doc__),
test_markdown,
test_markdown_from_file,
test_markdown_indendation,
test_markdown_code_block,
)
if __name__.startswith("bk"):
view().servable("test_markdown")
```
#### File: src/pages/home.py
```python
import pathlib
from panel import Column
from awesome_panel.express._pane._panes import Markdown
HOME_PATH = pathlib.Path(__file__).parent / "home.md"
def view() -> Column:
"""The home view of awesome-panel.org"""
return Column(Markdown(path=HOME_PATH), name="Home", sizing_mode="stretch_width")
```
#### File: src/pages/resources.py
```python
import pathlib
from panel import Column
from awesome_panel.express._pane._panes import Markdown
RESOURCES_PATH = pathlib.Path(__file__).parent / "resources.md"
def view() -> Column:
"""The resources view of awesome-panel.org"""
return Column(Markdown(path=RESOURCES_PATH), sizing_mode="stretch_width", name="Resources")
``` |
{
"source": "jlstevens/constructor",
"score": 3
} |
#### File: constructor/tests/test_utils.py
```python
from ..utils import make_VIProductVersion, fill_template, preprocess
def test_make_VIProductVersion():
f = make_VIProductVersion
assert f('3') == '3.0.0.0'
assert f('1.5') == '1.5.0.0'
assert f('2.71.6') == '2.71.6.0'
assert f('5.2.10.7') == '5.2.10.7'
assert f('5.2dev') == '5.0.0.0'
assert f('5.26.8.9.3') == '5.26.8.9'
assert f('x') == '0.0.0.0'
def test_fill_template():
template = """\
My name is __NAME__!
I am __AGE__ years old.
Sincerely __NAME__
"""
res = """\
My name is Hugo!
I am 44 years old.
Sincerely Hugo
"""
info = {'NAME': 'Hugo', 'AGE': '44', 'SEX': 'male'}
assert fill_template(template, info) == res
def test_preprocess():
code = """\
A
#if True
always True
another line
#endif
B
#if False
never see this
#endif
C
#if x == 0
x = 0
#else
x != 0
#endif
D
#if x != 0
x != 0
#endif
E
"""
res = """\
A
always True
another line
B
C
x != 0
D
x != 0
E
"""
assert preprocess(code, dict(x=1)) == res
def main():
test_make_VIProductVersion()
test_fill_template()
test_preprocess()
if __name__ == '__main__':
main()
``` |
{
"source": "jlstrick83/stix-shifter",
"score": 3
} |
#### File: modules/dummy/data_mapping.py
```python
from os import path
import json
from stix_shifter.stix_translation.src.exceptions import DataMappingException
def _fetch_mapping():
try:
basepath = path.dirname(__file__)
filepath = path.abspath(
path.join(basepath, "json", "from_stix_map.json"))
map_file = open(filepath).read()
map_data = json.loads(map_file)
return map_data
except Exception as ex:
        print('exception in _fetch_mapping():', ex)
return {}
class DataMapper:
def __init__(self, options):
mapping_json = options['mapping'] if 'mapping' in options else {}
self.map_data = mapping_json or _fetch_mapping()
def map_field(self, stix_object_name, stix_property_name):
if stix_object_name in self.map_data and stix_property_name in self.map_data[stix_object_name]["fields"]:
return self.map_data[stix_object_name]["fields"][stix_property_name]
else:
raise DataMappingException("Unable to map property `{}:{}` into data source query".format(
stix_object_name, stix_property_name))
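# Usage sketch (the inline mapping below is an assumption for illustration; real
# values are loaded from json/from_stix_map.json):
# mapper = DataMapper({'mapping': {'ipv4-addr': {'fields': {'value': ['src_ip', 'dest_ip']}}}})
# mapper.map_field('ipv4-addr', 'value')    # -> ['src_ip', 'dest_ip']
# mapper.map_field('ipv4-addr', 'unknown')  # raises DataMappingException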
```
#### File: stix_shifter/stix_translation/stix_translation_error_mapper.py
```python
from ..utils.error_mapper_base import ErrorMapperBase
from ..utils.error_response import ErrorCode
from .src.exceptions import DataMappingException
from ..stix_translation.stix_translation import StixValidationException
from .src.patterns.errors import SearchFeatureNotSupportedError
from ..stix_translation.stix_translation import TranslationResultException
error_mapping = {
NotImplementedError.__name__: ErrorCode.TRANSLATION_NOTIMPLEMENTED_MODE,
DataMappingException.__name__: ErrorCode.TRANSLATION_MAPPING_ERROR,
StixValidationException.__name__: ErrorCode.TRANSLATION_STIX_VALIDATION,
SearchFeatureNotSupportedError.__name__: ErrorCode.TRANSLATION_NOTSUPPORTED,
TranslationResultException.__name__: ErrorCode.TRANSLATION_RESULT
}
class ErrorMapper():
DEFAULT_ERROR = ErrorCode.TRANSLATION_MODULE_DEFAULT_ERROR
@staticmethod
def set_error_code(data_dict, return_obj):
exception = None
if 'exception' in data_dict:
exception = data_dict['exception']
error_code = ErrorMapper.DEFAULT_ERROR
error_message = 'Error when converting STIX pattern to data source query'
if exception is not None:
exception_type = type(exception).__name__
print("received exception => {}: {}".format(exception_type, exception))
if exception_type in error_mapping:
error_code = error_mapping[exception_type]
error_message = str(exception)
ErrorMapperBase.set_error_code(return_obj, error_code, message=error_message)
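# Usage sketch (assumption: `return_obj` is the translator's mutable result dict):
# return_obj = {}
# ErrorMapper.set_error_code({'exception': DataMappingException("bad field")}, return_obj)
# ErrorMapperBase then records ErrorCode.TRANSLATION_MAPPING_ERROR and the message on return_obj.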
```
#### File: modules/async_dummy/apiclient.py
```python
from ..utils.RestApiClient import RestApiClient
class APIClient():
def __init__(self, connection, configuration):
self.client = "data source API client"
def ping_data_source(self):
# Pings the data source
return "async ping"
def create_search(self, query_expression):
# Queries the data source
return {
"code": 200,
"query_id": "uuid_1234567890"
}
def get_search_status(self, search_id):
# Check the current status of the search
return {"code": 200, "search_id": search_id, "status": "COMPLETED"}
def get_search_results(self, search_id, range_start=None, range_end=None):
# Return the search results. Results must be in JSON format before being translated into STIX
return {"code": 200, "search_id": search_id, "data": "Results for search"}
def delete_search(self, search_id):
# Optional since this may not be supported by the data source API
# Delete the search
return "Deleted query: {}".format(search_id)
```
#### File: tests/stix_translation/test_splunk_stix_to_spl.py
```python
from stix_shifter.stix_translation import stix_translation
from stix_shifter.stix_translation.src.exceptions import DataMappingException
from stix_shifter.stix_translation.src.modules.splunk import stix_to_splunk
from stix_shifter.utils.error_response import ErrorCode
import unittest
import random
protocols = {
"tcp": "6",
"udp": "17",
"icmp": "1",
"idpr-cmtp": "38",
"ipv6": "40",
"rsvp": "46",
"gre": "47",
"esp": "50",
"ah": "51",
"narp": "54",
"ospfigp": "89",
"ipip": "94",
"any": "99",
"sctp": "132"
}
default_timerange_spl = '-' + str(stix_to_splunk.DEFAULT_TIMERANGE) + 'minutes'
translation = stix_translation.StixTranslation()
class TestStixToSpl(unittest.TestCase, object):
def test_ipv4_query(self):
stix_pattern = "[ipv4-addr:value = '192.168.122.83' OR ipv4-addr:value = '192.168.122.84']"
query = translation.translate('splunk', 'query', '{}', stix_pattern)
queries = 'search (((src_ip = "192.168.122.84") OR (dest_ip = "192.168.122.84")) OR ((src_ip = "192.168.122.83") OR (dest_ip = "192.168.122.83"))) earliest="-5minutes" | head 10000 | fields src_ip, src_port, src_mac, src_ipv6, dest_ip, dest_port, dest_mac, dest_ipv6, file_hash, user, url, protocol'
parsed_stix = [{'attribute': 'ipv4-addr:value', 'comparison_operator': '=', 'value': '192.168.122.84'}, {'attribute': 'ipv4-addr:value', 'comparison_operator': '=', 'value': '192.168.122.83'}]
assert query == {'queries': queries, 'parsed_stix': parsed_stix}
def test_ipv6_query(self):
stix_pattern = "[ipv6-addr:value = 'fe80::8c3b:a720:dc5c:2abf%19']"
query = translation.translate('splunk', 'query', '{}', stix_pattern)
queries = 'search ((src_ipv6 = "fe80::8c3b:a720:dc5c:2abf%19") OR (dest_ipv6 = "fe80::8c3b:a720:dc5c:2abf%19")) earliest="-5minutes" | head 10000 | fields src_ip, src_port, src_mac, src_ipv6, dest_ip, dest_port, dest_mac, dest_ipv6, file_hash, user, url, protocol'
parsed_stix = [{'attribute': 'ipv6-addr:value', 'comparison_operator': '=', 'value': 'fe80::8c3b:a720:dc5c:2abf%19'}]
assert query == {'queries': queries, 'parsed_stix': parsed_stix}
def test_url_query(self):
stix_pattern = "[url:value = 'http://www.testaddress.com']"
query = translation.translate('splunk', 'query', '{}', stix_pattern)
parsed_stix = [{'attribute': 'url:value', 'comparison_operator': '=', 'value': 'http://www.testaddress.com'}]
queries = 'search (url = "http://www.testaddress.com") earliest="-5minutes" | head 10000 | fields src_ip, src_port, src_mac, src_ipv6, dest_ip, dest_port, dest_mac, dest_ipv6, file_hash, user, url, protocol'
assert query == {'queries': queries, 'parsed_stix': parsed_stix}
def test_mac_address_query(self):
stix_pattern = "[mac-addr:value = '00-00-5E-00-53-00']"
query = translation.translate('splunk', 'query', '{}', stix_pattern)
queries = 'search ((src_mac = "00-00-5E-00-53-00") OR (dest_mac = "00-00-5E-00-53-00")) earliest="-5minutes" | head 10000 | fields src_ip, src_port, src_mac, src_ipv6, dest_ip, dest_port, dest_mac, dest_ipv6, file_hash, user, url, protocol'
parsed_stix = [{'attribute': 'mac-addr:value', 'comparison_operator': '=', 'value': '00-00-5E-00-53-00'}]
assert query == {'queries': queries, 'parsed_stix': parsed_stix}
def test_domain_query(self):
stix_pattern = "[domain-name:value = 'example.com']"
query = translation.translate('splunk', 'query', '{}', stix_pattern)
queries = 'search (url = "example.com") earliest="-5minutes" | head 10000 | fields src_ip, src_port, src_mac, src_ipv6, dest_ip, dest_port, dest_mac, dest_ipv6, file_hash, user, url, protocol'
parsed_stix = [{'attribute': 'domain-name:value', 'comparison_operator': '=', 'value': 'example.com'}]
assert query == {'queries': queries, 'parsed_stix': parsed_stix}
def test_query_from_multiple_observation_expressions_joined_by_AND(self):
stix_pattern = "[domain-name:value = 'example.com'] AND [mac-addr:value = '00-00-5E-00-53-00']"
query = translation.translate('splunk', 'query', '{}', stix_pattern)
# Expect the STIX AND to convert to an SPL OR.
queries = 'search (url = "example.com") OR ((src_mac = "00-00-5E-00-53-00") OR (dest_mac = "00-00-5E-00-53-00")) earliest="-5minutes" | head 10000 | fields src_ip, src_port, src_mac, src_ipv6, dest_ip, dest_port, dest_mac, dest_ipv6, file_hash, user, url, protocol'
parsed_stix = [{'attribute': 'domain-name:value', 'comparison_operator': '=', 'value': 'example.com'}, {'attribute': 'mac-addr:value', 'comparison_operator': '=', 'value': '00-00-5E-00-53-00'}]
assert query == {'queries': queries, 'parsed_stix': parsed_stix}
def test_query_from_multiple_comparison_expressions_joined_by_AND(self):
stix_pattern = "[domain-name:value = 'example.com' AND mac-addr:value = '00-00-5E-00-53-00']"
query = translation.translate('splunk', 'query', '{}', stix_pattern)
        # Expect the STIX AND to convert to an SPL AND.
queries = 'search (((src_mac = "00-00-5E-00-53-00") OR (dest_mac = "00-00-5E-00-53-00")) AND (url = "example.com")) earliest="-5minutes" | head 10000 | fields src_ip, src_port, src_mac, src_ipv6, dest_ip, dest_port, dest_mac, dest_ipv6, file_hash, user, url, protocol'
parsed_stix = [{'attribute': 'mac-addr:value', 'comparison_operator': '=', 'value': '00-00-5E-00-53-00'}, {'attribute': 'domain-name:value', 'comparison_operator': '=', 'value': 'example.com'}]
assert query == {'queries': queries, 'parsed_stix': parsed_stix}
def test_file_query(self):
stix_pattern = "[file:name = 'some_file.exe']"
query = translation.translate('splunk', 'query', '{}', stix_pattern)
queries = 'search (file_name = "some_file.exe") earliest="-5minutes" | head 10000 | fields src_ip, src_port, src_mac, src_ipv6, dest_ip, dest_port, dest_mac, dest_ipv6, file_hash, user, url, protocol'
parsed_stix = [{'attribute': 'file:name', 'comparison_operator': '=', 'value': 'some_file.exe'}]
assert query == {'queries': queries, 'parsed_stix': parsed_stix}
def test_port_queries(self):
stix_pattern = "[network-traffic:src_port = 12345 OR network-traffic:dst_port = 23456]"
query = translation.translate('splunk', 'query', '{}', stix_pattern)
queries = 'search ((dest_port = 23456) OR (src_port = 12345)) earliest="-5minutes" | head 10000 | fields src_ip, src_port, src_mac, src_ipv6, dest_ip, dest_port, dest_mac, dest_ipv6, file_hash, user, url, protocol'
parsed_stix = [{'attribute': 'network-traffic:dst_port', 'comparison_operator': '=', 'value': 23456}, {'attribute': 'network-traffic:src_port', 'comparison_operator': '=', 'value': 12345}]
assert query == {'queries': queries, 'parsed_stix': parsed_stix}
def test_unmapped_attribute(self):
stix_pattern = "[network-traffic:some_invalid_attribute = 'whatever']"
result = translation.translate('splunk', 'query', '{}', stix_pattern)
assert False == result['success']
assert ErrorCode.TRANSLATION_MAPPING_ERROR.value == result['code']
assert result['error'].startswith('Unable to map property')
def test_invalid_stix_pattern(self):
stix_pattern = "[not_a_valid_pattern]"
result = translation.translate('splunk', 'query', '{}', stix_pattern)
assert False == result['success']
assert ErrorCode.TRANSLATION_STIX_VALIDATION.value == result['code']
assert stix_pattern[1:-1] in result['error']
def test_network_traffic_protocols(self):
for key, value in protocols.items():
# Test for both upper and lower case protocols in the STIX pattern
if random.randint(0, 1) == 0:
key = key.upper()
stix_pattern = "[network-traffic:protocols[*] = '" + key + "']"
query = translation.translate('splunk', 'query', '{}', stix_pattern)
queries = 'search (protocol = "'+key+'") earliest="{}" | head {} | fields src_ip, src_port, src_mac, src_ipv6, dest_ip, dest_port, dest_mac, dest_ipv6, file_hash, user, url, protocol'.format(default_timerange_spl, stix_to_splunk.DEFAULT_LIMIT)
parsed_stix = [{'attribute': 'network-traffic:protocols[*]', 'comparison_operator': '=', 'value': key}]
assert query == {'queries': queries, 'parsed_stix': parsed_stix}
def test_network_traffic_start_stop(self):
stix_pattern = "[network-traffic:'start' = '2018-06-14T08:36:24.000Z' OR network-traffic:end = '2018-06-14T08:36:24.000Z']"
query = translation.translate('splunk', 'query', '{}', stix_pattern)
queries = 'search ((latest = "2018-06-14T08:36:24.000Z") OR (earliest = "2018-06-14T08:36:24.000Z")) earliest="-5minutes" | head 10000 | fields src_ip, src_port, src_mac, src_ipv6, dest_ip, dest_port, dest_mac, dest_ipv6, file_hash, user, url, protocol'
parsed_stix = [{'attribute': 'network-traffic:end', 'comparison_operator': '=', 'value': '2018-06-14T08:36:24.000Z'}, {'attribute': 'network-traffic:start', 'comparison_operator': '=', 'value': '2018-06-14T08:36:24.000Z'}]
assert query == {'queries': queries, 'parsed_stix': parsed_stix}
def test_start_stop_qualifiers(self):
stix_pattern = "[network-traffic:src_port = 37020] START t'2016-06-01T01:30:00.000Z' STOP t'2016-06-01T02:20:00.000Z' OR [ipv4-addr:value = '192.168.122.83'] START t'2016-06-01T03:55:00.000Z' STOP t'2016-06-01T04:30:00.000Z'"
query = translation.translate('splunk', 'query', '{}', stix_pattern)
queries = 'search (src_port = 37020) earliest="06/01/2016:01:30:00" latest="06/01/2016:02:20:00" OR ((src_ip = "192.168.122.83") OR (dest_ip = "192.168.122.83")) earliest="06/01/2016:03:55:00" latest="06/01/2016:04:30:00" | head 10000 | fields src_ip, src_port, src_mac, src_ipv6, dest_ip, dest_port, dest_mac, dest_ipv6, file_hash, user, url, protocol'
parsed_stix = [{'attribute': 'network-traffic:src_port', 'comparison_operator': '=', 'value': 37020}, {'attribute': 'ipv4-addr:value', 'comparison_operator': '=', 'value': '192.168.122.83'}]
assert query == {'queries': queries, 'parsed_stix': parsed_stix}
def test_start_stop_qualifiers_one_time(self):
stix_pattern = "[network-traffic:src_port = 37020] START t'2016-06-01T01:30:00.000Z' STOP t'2016-06-01T02:20:00.000Z'"
query = translation.translate('splunk', 'query', '{}', stix_pattern)
queries = 'search (src_port = 37020) earliest="06/01/2016:01:30:00" latest="06/01/2016:02:20:00" | head 10000 | fields src_ip, src_port, src_mac, src_ipv6, dest_ip, dest_port, dest_mac, dest_ipv6, file_hash, user, url, protocol'
parsed_stix = [{'attribute': 'network-traffic:src_port', 'comparison_operator': '=', 'value': 37020}]
assert query == {'queries': queries, 'parsed_stix': parsed_stix}
def test_issubset_operator(self):
stix_pattern = "[ipv4-addr:value ISSUBSET '198.51.100.0/24']"
query = translation.translate('splunk', 'query', '{}', stix_pattern)
queries = 'search ((src_ip = "198.51.100.0/24") OR (dest_ip = "198.51.100.0/24")) earliest="-5minutes" | head 10000 | fields src_ip, src_port, src_mac, src_ipv6, dest_ip, dest_port, dest_mac, dest_ipv6, file_hash, user, url, protocol'
parsed_stix = [{'attribute': 'ipv4-addr:value', 'comparison_operator': 'ISSUBSET', 'value': '198.51.100.0/24'}]
assert query == {'queries': queries, 'parsed_stix': parsed_stix}
def test_custom_time_limit_and_result_count(self):
stix_pattern = "[ipv4-addr:value = '192.168.122.83']"
timerange = 25
result_limit = 5000
options = {"timerange": timerange, "result_limit": result_limit}
query = translation.translate('splunk', 'query', '{}', stix_pattern, options)
queries = 'search ((src_ip = "192.168.122.83") OR (dest_ip = "192.168.122.83")) earliest="-25minutes" | head 5000 | fields src_ip, src_port, src_mac, src_ipv6, dest_ip, dest_port, dest_mac, dest_ipv6, file_hash, user, url, protocol'
parsed_stix = [{'attribute': 'ipv4-addr:value', 'comparison_operator': '=', 'value': '192.168.122.83'}]
assert query == {'queries': queries, 'parsed_stix': parsed_stix}
def test_custom_mapping(self):
stix_pattern = "[ipv4-addr:value = '192.168.122.83' AND mac-addr:value = '00-00-5E-00-53-00']"
timerange = 15
result_limit = 1000
options = {
"timerange": timerange,
"result_limit": result_limit,
"mapping": {
"mac-addr": {
"cim_type": "flow",
"fields": {
"value": "mac"
}
},
"ipv4-addr": {
"cim_type": "flow",
"fields": {
"value": ["src_ip", "dest_ip"]
}
}
},
"select_fields": {
"default":
[
"src_ip",
"src_port",
]
}
}
query = translation.translate('splunk', 'query', '{}', stix_pattern, options)
queries = 'search ((mac = "00-00-5E-00-53-00") AND ((src_ip = "192.168.122.83") OR (dest_ip = "192.168.122.83"))) earliest="-15minutes" | head 1000 | fields src_ip, src_port'
parsed_stix = [{'attribute': 'mac-addr:value', 'comparison_operator': '=', 'value': '00-00-5E-00-53-00'}, {'attribute': 'ipv4-addr:value', 'comparison_operator': '=', 'value': '192.168.122.83'}]
assert query == {'queries': queries, 'parsed_stix': parsed_stix}
def test_free_search(self):
stix_pattern = "[x-readable-payload:value = 'malware']"
timerange = 25
result_limit = 5000
options = {"timerange": timerange, "result_limit": result_limit}
query = translation.translate('splunk', 'query', '{}', stix_pattern, options)
queries = 'search _raw=*malware* earliest="-25minutes" | head 5000 | fields src_ip, src_port, src_mac, src_ipv6, dest_ip, dest_port, dest_mac, dest_ipv6, file_hash, user, url, protocol'
parsed_stix = [{'attribute': 'x-readable-payload:value', 'comparison_operator': '=', 'value': 'malware'}]
assert query == {'queries': queries, 'parsed_stix': parsed_stix}
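# --- Note (illustrative, not part of the original test module) ---
# All of the tests above exercise the same call shape:
#     result = translation.translate('splunk', 'query', '{}', stix_pattern, options)
# and expect a dict of the form {'queries': <SPL search string>, 'parsed_stix': [...]}.
# The optional `options` dict may carry 'timerange', 'result_limit', 'mapping' and
# 'select_fields' overrides, as demonstrated in test_custom_mapping above.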
``` |
{
"source": "jlsutherland/gpo_tools",
"score": 3
} |
#### File: lib/gpo_tools/scrape.py
```python
import json
import re
from urllib.request import urlopen
import psycopg2
from bs4 import BeautifulSoup
from psycopg2 import IntegrityError
from psycopg2.extras import Json
class Scraper:
def __init__(self, db, user, password, api_key, min_congress, max_congress,
host='localhost', update_stewart_meta=False):
"""
GPO scraper class, which also handles database setup.
"""
self.con = psycopg2.connect('dbname={} user={} password={} host={}'.format(db, user, password, host))
self.cur = self.con.cursor(cursor_factory=psycopg2.extras.DictCursor)
self._execute("SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' ")
table_names = [t[0] for t in self.cur.fetchall()]
if len(set(table_names)) == 0:
self._execute("""
CREATE TABLE members(
id integer PRIMARY KEY,
metadata json,
committee_membership json);
CREATE TABLE hearings(
id text PRIMARY KEY,
transcript text,
congress integer,
session integer,
chamber text,
date date,
committees text[],
subcommittees text[],
uri text,
url text,
sudoc text,
number text,
witness_meta json,
member_meta json,
parsed json);
""")
elif set(table_names) != {'members', 'hearings'}:
raise ValueError(""" Improperly configured postgresql database given! Please give either a blank database
or one that has been previously configured by this package.
""")
if update_stewart_meta:
self._update_stewart_meta()
self._execute('SELECT url FROM hearings;')
self.searched = [e[0] for e in self.cur.fetchall()]
self.api_key = api_key
self.congresses = range(int(min_congress), int(max_congress) + 1)
def scrape(self):
"""
Scrape data from the GPO website. Loops through the list of results until all pages are exhausted.
"""
print("Crawling and scraping the GPO website. As pages are scraped, page URLs will be printed in terminal. If "
"you're running the scraper for the first time, the initial crawl will take some time.")
results_page = 'https://api.govinfo.gov/collections/CHRG/1776-01-28T20%3A18%3A10Z?offset=0&pageSize=10000&' + \
'congress={1}&api_key={0}'
for congress in self.congresses:
hearings_list = json.loads(urlopen(results_page.format(self.api_key, congress)).read())
for hearing in hearings_list['packages']:
if hearing['packageLink'] not in self.searched:
print(hearing['packageLink'])
self._save_data(hearing)
self.searched.append(hearing['packageLink'])
def _extract_nav(self, url_element):
""" Helper function - grabs all unobserved links out of a given HTML element. """
url = 'http://www.gpo.gov' + re.search('(?<=\').*?(?=\')', url_element.get('onclick')).group(0)
if url not in self.searched:
page = urlopen('http://www.gpo.gov' + re.search('(?<=\').*?(?=\')', url_element.get('onclick')).group(0))
soup = BeautifulSoup(page.read(), 'lxml')
elements = [l for l in soup.find_all('a') if l.get('onclick') is not None]
self.searched.append(url)
return elements
else:
return []
def _save_data(self, hearing_json):
""" Dumps scraped text and metadata to the appropriate location in the document file structure. """
def extract_doc_meta(meta_html):
"""
Function to extract hearing metadata from the metadata file. It searches through the HTML metadata,
locates various features, and combines them into a json object.
"""
def locate_string(key, name=False):
""" Helper function. Checks for a unique match on a given metadata element, and returns the value. """
elements_from_meta = meta_html.find(key)
if elements_from_meta is not None:
elements = list(set(elements_from_meta))
if len(elements) == 1 and name is False:
return elements[0].string
elif len(elements) == 1 and name is True:
return elements[0].find('name').string
else:
return ''
else:
return ''
# gathering a few unusual variables
uri = [link.string for link in meta_html.find_all('identifier') if link.get('type') == 'uri'][0]
congress = re.search('(?<=-)[0-9]+', uri).group(0)
committee_meta = meta_html.find_all('congcommittee')
committee_names = []
subcommittee_names = []
# first pass, using short committee names
for committee in committee_meta:
if committee.find('name', type='authority-short') is not None:
committee_names.append(committee.find('name', type='authority-short').string)
if committee.find('subcommittee') is not None:
try:
subcommittee = committee.find('subcommittee')
subcommittee_names.append(subcommittee.find('name', type='authority-short').string)
except:
pass
# occasionally, short names are missing - fall back to standard names if no short ones are found
if len(committee_names) == 0:
for committee in committee_meta:
if committee.find('name', type='authority-standard') is not None:
committee_names.append(committee.find('name', type='authority-standard').string)
if meta_html.find('congserial') is not None:
serials = meta_html.find_all('congserial')
numbers = [serial.get('number') for serial in serials if serial.get('number') is not None]
else:
numbers = []
# the main variable collection and output construction.
meta_dictionary = {'Identifier': locate_string('recordidentifier'),
'Congress': congress,
'Session': locate_string('session'),
'Chamber': locate_string('chamber'),
'Date': locate_string('helddate'),
'Committees': committee_names,
'Subcommittees': subcommittee_names,
'Title': locate_string('title'),
'uri': uri,
'url': url,
'sudoc': locate_string('classification'),
'Number': numbers}
return meta_dictionary
def extract_member_meta(meta_html):
""" Function to extract member metadata from the metadata file. This information is often absent. """
import re
member_dictionary = {}
member_elements = [link for link in meta_html.find_all('congmember')]
# loop over all of the member elements in a given page, and get relevant data
for member in member_elements:
party = member.get('party')
state_short = member.get('state')
chamber = member.get('chamber')
bio_id = member.get('bioguideid')
name_elements = member.find_all('name')
name_parsed = [link.string for link in name_elements if link.get('type') == 'parsed'][0]
state_long = re.search('(?<= of ).*', name_parsed).group(0)
member_dictionary[name_parsed] = {'Name': name_parsed,
'State_Short': state_short,
'State_Long': state_long,
'Party': party,
'Chamber': chamber,
'GPO_ID': bio_id}
return member_dictionary
def extract_witness_meta(meta_html):
""" Function to extract witness metadata from the metadata file. This information is often absent. """
witness_list = [w.string for w in meta_html.find_all('witness') if w.string is not None]
return witness_list
htm_page = 'https://api.govinfo.gov/packages/{1}/htm?api_key={0}'
mods_page = 'https://api.govinfo.gov/packages/{1}/mods?api_key={0}'
hearing_id = hearing_json['packageId']
transcript = urlopen(htm_page.format(self.api_key, hearing_id)).read()
meta = urlopen(mods_page.format(self.api_key, hearing_id)).read()
transcript = re.sub('\x00', '', transcript.decode('utf8'))
meta_soup = BeautifulSoup(meta, 'lxml')  # specify the parser explicitly, as in _extract_nav
url = hearing_json['packageLink']
# Metadata is divided into three pieces: hearing info, member info, and witness info.
# See functions for details on each of these metadata elements.
hearing_meta = extract_doc_meta(meta_soup)
witness_meta = extract_witness_meta(meta_soup)
member_meta = extract_member_meta(meta_soup)
try:
self._execute('INSERT INTO hearings VALUES (' + ','.join(['%s'] * 14) + ')',
(hearing_meta['Identifier'],
transcript,
hearing_meta['Congress'],
hearing_meta['Session'],
hearing_meta['Chamber'],
hearing_meta['Date'],
hearing_meta['Committees'],
hearing_meta['Subcommittees'],
hearing_meta['uri'],
hearing_meta['url'],
hearing_meta['sudoc'],
hearing_meta['Number'],
Json(witness_meta),
Json(member_meta)))
except IntegrityError:
print('Duplicate key. Link not included.')
self.con.rollback()
def _update_stewart_meta(self):
"""
Generate the member table. The member table lists party seniority, majority status, leadership,
committee membership, congress, and state. All member data are drawn from Stewart's committee assignments data
(assumed to be saved as CSV files), which are available at the link below.
http://web.mit.edu/17.251/www/data_page.html
"""
import csv
def update(inputs, table, chamber):
"""
Helper function that updates a given member table with Stewart's committee-assignment metadata. Given
rows from a csv reader, it interprets each row and adds the data to a json-style output. See Stewart's
data and codebook for descriptions of the variables.
"""
def update_meta(meta_entry, datum):
meta_entry.append(datum)
meta_entry = [e for e in list(set(meta_entry)) if e != '']
return meta_entry
for row in inputs:
name = str(row[3].lower())
name = name.translate(str.maketrans(dict.fromkeys('!"#$%&\'()*+-./:;<=>?[\\]_`{|}~')))
congress = row[0].lower()
committee_code = row[1]
member_id = row[2]
majority = row[4].lower()
party_seniority = row[5].lower()
leadership = row[9]
committee_name = row[15]
state = row[18]
if row[6] == '100':
party = 'D'
elif row[6] == '200':
party = 'R'
else:
party = 'I'
entry = {'Party Seniority': party_seniority, 'Majority': majority, 'Leadership': leadership,
'Chamber': chamber, 'Party': party, 'State': state, 'Committee Name': committee_name}
if committee_code != '' and member_id != '':
if member_id in table:
member_meta = table[member_id]['Metadata']
member_membership = table[member_id]['Membership']
member_meta['Name'] = update_meta(member_meta['Name'], name)
member_meta['State'] = update_meta(member_meta['State'], state)
member_meta['Chamber'] = update_meta(member_meta['Chamber'], chamber)
member_meta['Party'] = update_meta(member_meta['Party'], party)
member_meta['Committee'] = update_meta(member_meta['Committee'], committee_name)
if congress in table[member_id]['Membership']:
member_membership[congress][committee_code] = entry
else:
member_membership[congress] = {committee_code: entry}
else:
table[member_id] = {'Metadata': {'Name': [name],
'State': [state],
'Chamber': [chamber],
'Party': [party],
'Committee': [committee_name]},
'Membership': {congress: {committee_code: entry}}}
self._execute('DELETE FROM members;')
member_table = {}
# Plain input() so the user can type a bare file path; the original eval(input(...))
# was a 2to3 conversion artifact and would have tried to evaluate the path as Python code.
house_path = input('Path to Stewart\'s House committee membership data (as csv): ')
senate_path = input('Path to Stewart\'s Senate committee membership data (as csv): ')
# Loop through the house and senate assignment files, and save the output.
with open(house_path, 'r', encoding='ascii', errors='ignore') as f:
house_inputs = list(csv.reader(f))[2:]
with open(senate_path, 'r', encoding='ascii', errors='ignore') as f:
senate_inputs = list(csv.reader(f))[2:]
update(house_inputs, member_table, 'HOUSE')
update(senate_inputs, member_table, 'SENATE')
for k, v in list(member_table.items()):
self._execute('INSERT INTO members VALUES (%s, %s, %s)', (k, Json(v['Metadata']), Json(v['Membership'])),
errors='strict')
def _execute(self, cmd, data=None, errors='strict'):
""" Wrapper function for pyscopg2 commands. """
if errors not in ['strict', 'ignore']:
raise ValueError("""errors argument must be \'strict\' (raise exception on bad command)
or \'ignore\' (return None on bad command). '""")
self.cur = self.con.cursor()
if errors == 'ignore':
try:
self.cur.execute(cmd, data)
except:
self.con.rollback()
elif errors == 'strict':
self.cur.execute(cmd, data)
self.con.commit()
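# --- Usage sketch (illustrative only, not part of the original module) ---
# The connection details and API key below are placeholders; substitute your own.
#
#     scraper = Scraper(db='gpo', user='postgres', password='...',
#                       api_key='YOUR_GOVINFO_API_KEY',
#                       min_congress=110, max_congress=115)
#     scraper.scrape()
#
# On a blank database the constructor creates the `members` and `hearings` tables;
# scrape() then walks the govinfo CHRG collection for the requested congresses,
# skipping any package links already recorded in `hearings`.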
``` |
{
"source": "JLTastet/autolux",
"score": 3
} |
#### File: autolux/autolux/opts.py
```python
import os
import time
import models
# BRIGHTNESS LEVELS (should be between 1 and 100)
MIN_LEVEL=5
MAX_LEVEL=100
# interpolate over our threshold (should be between 1 and 65K)
MAX_WHITE=60000
MIN_WHITE=5000
# interval between screenshots
SLEEP_TIME=67
SCREENSHOT_TIME=1200
TRANSITION_MS=800
RECALIBRATE_MS=60 * 1000
# EXAMPLE: 100x200+300+400
# 100 width, 200 height, 300 offset from left, 400 offset from top
CROP_SCREEN="10x100%+400+0"
HORIZ_CROP_SCREEN="90%x10+200+400"
SCREENSHOT_CMD='import -silent -colorspace gray -screen -w root -quality 20'
BRIGHTNESS_CMD='-format "%[mean]" info:'
# change brightness when PID changes or
# change brightness when window changes
CHECK_PID=False
CHECK_PID_CMD='xdotool getwindowfocus getwindowpid'
# change brightness when window name changes
CHECK_TITLE_CMD='xdotool getwindowfocus getwindowname'
# default to True, now that we can skip using xbacklight
LEARN_MODE=True
VIZ_LUMA_MAP=False
PLOT_LUMA=True
PLOT_BRIGHT=False
RUN_AS_DAEMON=False
# do we use software dimming or not
XRANDR_OUTPUT = None
ADJUSTMENT = None
RESET = False
VERBOSE=False
def load_options():
global MIN_LEVEL, MAX_LEVEL, MAX_WHITE, MIN_WHITE, CROP_SCREEN
global SLEEP_TIME, TRANSITION_MS, RECALIBRATE_MS, SCREENSHOT_TIME
global VERBOSE, CHECK_PID, LEARN_MODE, VIZ_LUMA_MAP
global PLOT_LUMA, PLOT_BRIGHT
global RUN_AS_DAEMON, XRANDR_OUTPUT
global ADJUSTMENT, RESET
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--daemon", dest="run_as_daemon", help="run autolux as a daemon",
default=RUN_AS_DAEMON, action="store_true")
parser.add_option("--file", dest="luma_file", help="luma file to load", default=models.LUMA_FILE_DEFAULT)
parser.add_option("--sleep-interval", dest="sleep_interval", type="int", default=SLEEP_TIME,
help="check for window change ever SLEEP_INTERVAL ms, default is %s" % SLEEP_TIME)
parser.add_option("--interval", dest="interval", type="int", default=SCREENSHOT_TIME,
help="take screen snapshot every INTERVAL ms and readjust the screen brightness, default is %s" % SCREENSHOT_TIME)
parser.add_option("--min", "--min-level", dest="min_level", type="int", default=MIN_LEVEL,
help="min brightness level (from 1 to 100, default is %s)" % MIN_LEVEL)
parser.add_option("--max", "--max-level", dest="max_level", type="int", default=MAX_LEVEL,
help="max brightness level (from 1 to 100, default is %s)" % MAX_LEVEL)
parser.add_option("--lower", "--lower-threshold", dest="min_white", type="int", default=MIN_WHITE,
help="lower whiteness threshold before setting screen to highest brightness (1K to 15K, default is %s)" % MIN_WHITE)
parser.add_option("--upper", "--upper-threshold", dest="max_white", type="int", default=MAX_WHITE,
help="upper whiteness threshold before setting screen to lowest brightness (45K to 65K, default is %s)" % MAX_WHITE)
parser.add_option("--recalibrate-time", dest="recalibrate", type="int",
default=RECALIBRATE_MS, help="ms before recalibrating even if the window hasn't changed. set to 0 to disable, default is 60K")
parser.add_option("--fade-time", dest="fade_time", type="int", default=TRANSITION_MS,
help="time to fade backlight in ms, default is %s" % TRANSITION_MS)
parser.add_option("--crop", dest="crop_screen", type='str', default=CROP_SCREEN,
help="area to inspect, use imagemagick geometry style string (f.e. 50%x20%+400+100 means 50% width, 20% height at offset 400x and 100y)")
parser.add_option("--pid", dest="check_pid", action="store_true", help="check screen brightness when PID changes")
parser.add_option("--title", dest="check_pid", action="store_false", help="check screen brightness when window changes")
parser.add_option("--horizontal", dest="horizontal", action="store_true", help="take a horizontal screenshot instead of vertical")
parser.add_option("--no-learn", dest="learn", action="store_false", help="disable learning", default=LEARN_MODE)
parser.add_option("--verbose", dest="verbose", action="store_true", help="turn on verbose output, including screenshot timing info")
parser.add_option("--visualize", dest="visualize", action="store_true", help="visualize your brightness model", default=VIZ_LUMA_MAP)
parser.add_option("--plot-luma", dest="plot_luma", action="store_true", help="plot screen luminence on y axis and predicted brightness as color, good for observing prefered brightness by time of day", default=PLOT_LUMA)
parser.add_option("--plot-brightness", dest="plot_luma", action="store_false", help="plot predicted brightness on y axis and input luminence as color, good for observing eye strain", default=not PLOT_LUMA)
parser.add_option("--xrandr", dest="xrandr_output", type="str", default=None)
parser.add_option("--adjust", dest="adjustment", type="float", default=None)
parser.add_option("--reset", dest="reset", action="store_true", default=None)
options, args = parser.parse_args()
MIN_LEVEL = options.min_level
MAX_LEVEL = options.max_level
RUN_AS_DAEMON = options.run_as_daemon
SCREENSHOT_TIME = options.interval
SLEEP_TIME = options.sleep_interval
MAX_LEVEL = options.max_level
TRANSITION_MS = options.fade_time
CROP_SCREEN = options.crop_screen
VERBOSE = options.verbose
RECALIBRATE_MS = options.recalibrate
CHECK_PID = options.check_pid
LEARN_MODE = options.learn
VIZ_LUMA_MAP = options.visualize
models.LUMA_FILE = options.luma_file
PLOT_BRIGHT = not options.plot_luma
PLOT_LUMA = options.plot_luma
XRANDR_OUTPUT=options.xrandr_output
ADJUSTMENT=options.adjustment
RESET=options.reset
MIN_WHITE = options.min_white
MAX_WHITE = options.max_white
if options.horizontal:
CROP_SCREEN = HORIZ_CROP_SCREEN
global SCREENSHOT_CMD
if CROP_SCREEN is not None:
SCREENSHOT_CMD += ' -crop %s' % CROP_SCREEN
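# Illustrative note (not in the original source): with the default vertical crop,
# the assembled screenshot command ends up as
#   import -silent -colorspace gray -screen -w root -quality 20 -crop 10x100%+400+0
# while --horizontal swaps in the HORIZ_CROP_SCREEN geometry instead.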
def print_config():
print "DAEMON MODE:", not not RUN_AS_DAEMON
print "CROPPING:", not not CROP_SCREEN
print "FADE TIME:", TRANSITION_MS
print "SLEEP TIME:", SCREENSHOT_TIME
print "DISPLAY RANGE:", MIN_LEVEL, MAX_LEVEL
print "LEARNING MODE:", LEARN_MODE
print "BRIGHTNESS RANGE:", MIN_WHITE, MAX_WHITE
print "RECALIBRATE EVERY:", RECALIBRATE_MS
print "FOLLOW WINDOW PID:", not not CHECK_PID
print "FOLLOW WINDOW TITLE:", not CHECK_PID
print "SCREENSHOT CMD", SCREENSHOT_CMD
``` |
{
"source": "JLTastet/scalar_portal",
"score": 2
} |
#### File: scalar_portal/api/branching_ratios.py
```python
from __future__ import absolute_import, division
from future.utils import viewitems, with_metaclass
import abc # Abstract Base Classes
# We use OrderedDict to obtain stable, deterministic results, and to make sure
# particle definitions always come before decay channel ones.
from collections import OrderedDict
import numpy as np
from ..api.channel import Channel
from ..data.constants import second, c_si, default_scalar_id
def format_pythia_particle_string(
pdg_id, name, antiname, spin_type, charge_type, mass, lifetime_si,
new=True, may_decay=True, is_visible=False):
lifetime_mm = 1e3 * lifetime_si * c_si
all_prop = '{}:{} = {} {} {} {} 0 {} 0.0 0.0 0.0 {:.12}'.format(
pdg_id, 'new' if new else 'all', name, antiname, spin_type, charge_type,
mass, lifetime_mm)
is_resonance = '{}:isResonance = false'.format(pdg_id)
may_decay = '{}:mayDecay = {}'.format(pdg_id, str(may_decay).lower())
is_visible = '{}:isVisible = {}'.format(pdg_id, str(is_visible).lower())
return '\n'.join([all_prop, is_resonance, may_decay, is_visible])
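# Illustrative example (not in the original source). A call such as
#     format_pythia_particle_string(9900025, 'S', 'void', spin_type=1, charge_type=0,
#                                   mass=1.0, lifetime_si=1e-10)
# returns four newline-joined settings: the all-properties line
#     9900025:new = S void 1 0 0 1.0 0.0 0.0 0.0 <c*tau in mm>
# followed by the isResonance, mayDecay and isVisible flags for that PDG id.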
class BranchingRatios(with_metaclass(abc.ABCMeta, object)):
'''
Represents a set of computed branching ratios.
'''
def __init__(self, channels, mass, couplings,
ignore_invalid=False,
scalar_id=default_scalar_id):
self._channels = OrderedDict((str(ch), ch) for ch in channels)
self._mS = np.asarray(mass, dtype='float')
try:
self._couplings = { k: np.array(v, dtype='float')
for k, v in viewitems(couplings) }
except AttributeError:
raise(ValueError("'couplings' should be a dictionary (e.g. `{'theta': 1}`)."))
try:
bc = np.broadcast(self._mS, *self._couplings.values())
except ValueError:
raise(ValueError('Mass and coupling arrays could not be broadcast together.'))
self._ndim = bc.nd
self._scalar_id = scalar_id
self._widths = OrderedDict((str(ch), ch.width(mass, self._couplings)) for ch in channels)
if ignore_invalid:
for w in self._widths.values():
w[np.isnan(w)] = 0
@property
def widths(self):
return self._widths
@property
@abc.abstractmethod
def branching_ratios(self):
pass # pragma: no cover
def pythia_strings(self):
if self._ndim > 0:
raise(ValueError('Can only generate PYTHIA strings for a single mass and coupling.'))
for ch, br in viewitems(self.branching_ratios):
if not np.isfinite(br):
raise(ValueError('Cannot generate PYTHIA string: invalid channel {} for m = {}.'.format(ch, self._mS)))
return OrderedDict(
(ch_str, channel.pythia_string(self.branching_ratios[ch_str], self._scalar_id))
for ch_str, channel in viewitems(self._channels))
class DecayBranchingRatios(BranchingRatios):
'''
Represents a set of decay branching ratios for the scalar.
'''
def __init__(self, *args, **kwargs):
super(DecayBranchingRatios, self).__init__(*args, **kwargs)
self._total_width = sum(self.widths.values())
with np.errstate(invalid='ignore'):
self._br = OrderedDict(
(ch_str, w / self._total_width) for ch_str, w in viewitems(self.widths))
@property
def total_width(self):
return self._total_width
@property
def lifetime_si(self):
tau = 1 / self._total_width
return tau / second
@property
def branching_ratios(self):
return self._br
def pythia_particle_string(self, new=True):
'''
Returns a string which can be directly read by the PYTHIA event
generator to add the scalar particle.
'''
if self._mS.ndim > 0:
raise(ValueError('Can only generate a PYTHIA string for a single scalar mass.'))
return format_pythia_particle_string(
pdg_id=self._scalar_id, name='S', antiname='void', spin_type=1,
charge_type=0, mass=self._mS, lifetime_si=self.lifetime_si,
new=new, may_decay=True, is_visible=False)
class ProductionBranchingRatios(BranchingRatios):
'''
Represents a set of production branching ratios for the scalar.
'''
def __init__(self, *args, **kwargs):
super(ProductionBranchingRatios, self).__init__(*args, **kwargs)
self._br = OrderedDict(
(st, w / self._channels[st].parent_width) for st, w in viewitems(self.widths))
@property
def branching_ratios(self):
return self._br
class BranchingRatiosResult(object):
'''
Utility class wrapping the result of a computation of both production and
decay branching ratios.
Aggregates `ProductionBranchingRatios` and `DecayBranchingRatios`, and
provides shortcuts for methods related to the scalar particle itself.
'''
def __init__(self, prod, decay):
self._prod = prod
self._decay = decay
@property
def production(self):
return self._prod
@property
def decay(self):
return self._decay
@property
def total_width(self):
return self._decay.total_width
@property
def lifetime_si(self):
return self._decay.lifetime_si
def pythia_particle_string(self, new=True):
return self._decay.pythia_particle_string(new)
def pythia_full_string(self):
particle_str = self.pythia_particle_string()
production_strs = self.production.pythia_strings()
decay_strs = self.decay.pythia_strings()
full_string = '\n'.join(
[particle_str] +
list(st for st in production_strs.values() if st is not None) +
list(st for st in decay_strs.values() if st is not None))
return full_string
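# --- Usage sketch (illustrative, not part of the original module) ---
# Given already-computed ProductionBranchingRatios and DecayBranchingRatios
# objects `prod` and `decay` (how they are built is up to the caller, e.g. the
# Model front end, which is not shown in this file):
#
#     result = BranchingRatiosResult(prod, decay)
#     result.total_width            # total scalar width, in GeV
#     result.lifetime_si            # scalar lifetime, in seconds
#     result.pythia_full_string()   # particle definition + all channel strings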
```
#### File: scalar_portal/data/particles.py
```python
from __future__ import division
import os
import pandas
import numpy as np
from particletools.tables import PYTHIAParticleData
from . import constants as cst
from . import qcd
_srcdir = os.path.dirname(__file__)
# Meson properties
# ----------------
# Special cases for K_S0 and K_L0
_k0_codes = {'K_S': 310, 'K_L': 130}
_k0_names = {val: key for key, val in _k0_codes.items()}
_meson_df = pandas.read_csv(os.path.join(_srcdir, 'meson_properties.dat'),
delim_whitespace=True)
_meson_names = list(_meson_df.Name) + list(_k0_codes.keys())
def _get_meson(feature, value):
query = _meson_df[_meson_df[feature] == value]
assert(len(query) <= 1)
if len(query) < 1:
raise(ValueError('No meson with {} == {}'.format(feature, value)))
return query.iloc[0]
def _split_meson_charge(meson_name):
# Separates the name of the QCD state and its charge
if len(meson_name) >= 4 and meson_name[-4:] == 'bar0':
qcd_state, charge = meson_name[:-4], meson_name[-4:]
elif len(meson_name) >= 1 and meson_name[-1:] in ['0', '+', '-']:
qcd_state, charge = meson_name[:-1], meson_name[-1:]
else:
raise(ValueError('Unrecognised meson name {}'.format(meson_name)))
return qcd_state, charge
def _get_meson_by_name(meson_name):
# Special case for neutral kaons
if meson_name in ['K_S0', 'K_L0']:
return _get_meson_by_name('K')
try:
# Handle mesons specified without the electric charge (e.g. `K*`)
return _get_meson('Name', meson_name)
except:
# Handle mesons specified with the charge (e.g. `K*0`)
qcd_state, charge = _split_meson_charge(meson_name)
return _get_meson('Name', qcd_state)
def _get_meson_by_id(pdg_id):
# Handle the special case of K_S0 and K_L0
if pdg_id in _k0_names:
return _k0_names[pdg_id] + '0'
# Handle the general case
query = _meson_df[(_meson_df['IdZero'] == abs(pdg_id))
| (_meson_df['IdPlus'] == abs(pdg_id))]
assert(len(query) <= 1)
if len(query) < 1:
raise(ValueError('No meson corresponding to PDG code {}'.format(pdg_id)))
else:
record = query.iloc[0]
qcd_state = record.Name
# Infer the charge (and 'bar') from the PDG code
if +record.IdZero == pdg_id:
charge = '0'
elif -record.IdZero == pdg_id:
charge = 'bar0'
elif +record.IdPlus == pdg_id:
charge = '+'
elif -record.IdPlus == pdg_id:
charge = '-'
else:
assert(False) # pragma: no cover
fullname = qcd_state + charge
return fullname
def _get_meson_pdg_id(meson_name):
qcd_state, charge = _split_meson_charge(meson_name)
# Handle special PDG codes for K_S0 and K_L0
if qcd_state in _k0_codes:
if charge != '0':
raise(ValueError('Invalid particle string {}'.format(meson_name)))
return _k0_codes[qcd_state]
# Now handle the generic case
record = _get_meson_by_name(qcd_state)
# Infer the PDG code from the charge (and the 'bar')
if charge == '0':
code = +record.IdZero
elif charge == 'bar0':
if record.SelfConjugate:
raise(ValueError('{}0 is self-conjugate!'.format(qcd_state)))
code = -record.IdZero
elif charge == '+':
code = +record.IdPlus
elif charge == '-':
code = -record.IdPlus
else:
assert(False) # pragma: no cover
if code == 0:
raise(ValueError('No PDG code for {}.'.format(meson_name)))
return code
def _get_meson_mass(meson_name):
record = _get_meson_by_name(meson_name)
return record.Mass
def _get_meson_spin_code(meson_name):
record = _get_meson_by_name(meson_name)
return record.SpinCode
_charges = {
'0' : 0,
'bar0': 0,
'+' : +1,
'-' : -1,
}
def _get_meson_charge(meson_name):
_, charge_str = _split_meson_charge(meson_name)
return _charges[charge_str]
def _get_meson_parity(meson_name):
record = _get_meson_by_name(meson_name)
return record.Parity
def _get_abs_meson_strangeness(meson_name):
record = _get_meson_by_name(meson_name)
return abs(record.S)
def _get_abs_meson_charm(meson_name):
record = _get_meson_by_name(meson_name)
return abs(record.C)
def _get_abs_meson_beauty(meson_name):
record = _get_meson_by_name(meson_name)
return abs(record.B)
def _get_meson_lifetime(meson_name):
try:
return cst.meson_lifetimes[meson_name]
except KeyError:
raise(ValueError('Lifetime of {} is unknown.'.format(meson_name)))
# Lepton properties
# -----------------
_lepton_masses = {
'e' : cst.m_e ,
'mu' : cst.m_mu ,
'tau': cst.m_tau,
}
def _get_lepton_mass(lepton_name):
if len(lepton_name) >= 1 and lepton_name[-1] in ['-', '+']:
basename = lepton_name[:-1]
else:
basename = lepton_name
try:
return _lepton_masses[basename]
except KeyError:
raise(ValueError('Unknown lepton {}.'.format(lepton_name)))
def _get_lepton_spin_code(lepton_name):
return 2
# Generic particle properties
# ---------------------------
_pdata = PYTHIAParticleData()
def _get_generic_pdg_id(particle):
try:
return _pdata.pdg_id(particle)
except:
raise(ValueError("Particle '{}' not found in PYTHIA database.".format(particle)))
def _get_generic_mass(particle):
try:
return _pdata.mass(particle)
except:
raise(ValueError("Particle '{}' not found in PYTHIA database.".format(particle)))
# Public API
# ----------
def is_meson(particle):
if particle in _meson_names:
return True
else:
try:
basename, charge = _split_meson_charge(particle)
if basename in _meson_names:
return True
except:
return False
return False
def get_qcd_state(particle):
if is_meson(particle):
qcd_state, charge = _split_meson_charge(particle)
return qcd_state
else:
raise(ValueError('{} is not a meson.'.format(particle)))
def is_lepton(particle):
if particle in _lepton_masses:
return True
elif len(particle) >= 1 and particle[:-1] in _lepton_masses:
return True
else:
return False
def get_pdg_id(particle):
if is_meson(particle):
# The PYTHIA database is sometimes inaccurate for mesons, so we
# manually override it in this specific case.
return _get_meson_pdg_id(particle)
else:
return _get_generic_pdg_id(particle)
def get_name(pdg_id):
# NOTE: Only mesons are handled so far.
return _get_meson_by_id(pdg_id)
def get_mass(particle):
if is_lepton(particle):
return _get_lepton_mass(particle)
elif is_meson(particle):
return _get_meson_mass(particle)
else:
try:
return _get_generic_mass(particle)
except ValueError:
raise(ValueError('Mass of {} is unknown.'.format(particle)))
def get_lifetime(particle):
"""
Returns the particle lifetime (average lifetime in its rest frame), in
natural units (GeV⁻¹).
"""
# NOTE: Only mesons are handled so far.
if is_meson(particle):
return _get_meson_lifetime(particle)
else:
raise(ValueError('Operation not supported for {}.'.format(particle)))
def get_spin_code(particle):
"""
Returns a positive integer 2S+1 representing the spin S of the particle.
"""
if is_lepton(particle):
return _get_lepton_spin_code(particle)
elif is_meson(particle):
return _get_meson_spin_code(particle)
else:
raise(ValueError('Spin of {} is unknown.'.format(particle)))
def get_charge(particle):
# NOTE: Only mesons are handled so far.
if is_meson(particle):
return _get_meson_charge(particle)
else:
raise(ValueError('Operation not supported for {}.'.format(particle)))
def get_parity(particle):
# NOTE: Only mesons are handled so far.
if is_meson(particle):
return _get_meson_parity(particle)
else:
raise(ValueError('Operation not supported for {}.'.format(particle)))
def get_abs_strangeness(particle):
# NOTE: Only mesons are handled so far.
if is_meson(particle):
return _get_abs_meson_strangeness(particle)
else:
raise(ValueError('Operation not supported for {}.'.format(particle)))
def get_abs_charm(particle):
# NOTE: Only mesons are handled so far.
if is_meson(particle):
return _get_abs_meson_charm(particle)
else:
raise(ValueError('Operation not supported for {}.'.format(particle)))
def get_abs_beauty(particle):
# NOTE: Only mesons are handled so far.
if is_meson(particle):
return _get_abs_meson_beauty(particle)
else:
raise(ValueError('Operation not supported for {}.'.format(particle)))
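# --- Illustrative examples (not part of the original module) ---
# A few representative calls; the PDG codes shown match the values used in the
# package's own tests (B+ -> 521, K*+ -> 323):
#
#     is_meson('K*+')        # True
#     is_lepton('mu-')       # True
#     get_pdg_id('B+')       # 521
#     get_pdg_id('K*+')      # 323
#     get_spin_code('e')     # 2, i.e. spin 1/2, since the code is 2S+1
#     get_mass('K')          # kaon mass in GeV, read from meson_properties.dat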
# Quark masses and strong coupling constant
# -----------------------------------------
def alpha_s(mu, nf):
"""
Computes the strong coupling constant α_s at scale μ with nf dynamical flavors
using the `rundec` package, through the `qcd` wrapper from Wilson.
Note: we use the *non-squared* scale μ, which has dimension +1, instead of μ².
Running is computed at 5 loops, and decoupling at 4 loops.
numpy.vectorize is used to emulate NumPy broadcast rules in `mu`, but is not
as fast as native vectorization.
RunDec references:
* <NAME>., <NAME>, and <NAME>.
“RunDec: A Mathematica Package for Running and Decoupling of the Strong Coupling and Quark Masses.”
Computer Physics Communications 133, no. 1 (December 2000): 43–65.
https://doi.org/10.1016/S0010-4655(00)00155-7.
* Schmidt, Barbara, and <NAME>.
“CRunDec: A C++ Package for Running and Decoupling of the Strong Coupling and Quark Masses.”
Computer Physics Communications 183, no. 9 (September 2012): 1845–48.
https://doi.org/10.1016/j.cpc.2012.03.023.
* Herren, Florian, and <NAME>.
“Version 3 of RunDec and CRunDec.”
Computer Physics Communications 224 (March 2018): 333–45.
https://doi.org/10.1016/j.cpc.2017.11.014.
Wilson references:
* Website: https://wilson-eft.github.io/
* Source code: https://github.com/wilson-eft/wilson
* Paper (for the Wilson RG running & matching, not used here):
Aebischer, Jason, <NAME>, and <NAME>.
“Wilson: A Python Package for the Running and Matching of Wilson Coefficients above and below the Electroweak Scale.”
The European Physical Journal C 78, no. 12 (December 19, 2018): 1026.
https://doi.org/10.1140/epjc/s10052-018-6492-7.
"""
return np.vectorize(lambda _mu: qcd.alpha_s(_mu, nf, alphasMZ=cst.alpha_s_MZ, loop=5), cache=True)(mu)
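# Illustrative usage (a sketch, not part of the original source): `mu` follows
# NumPy broadcasting via np.vectorize, so both scalars and arrays work.
#     alpha_s(2.0, nf=4)                   # scalar scale -> scalar coupling
#     alpha_s(np.array([2.0, 5.0]), nf=4)  # vectorized over the scale mu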
_pole_masses = {
'u': None,
'd': None,
's': None,
'c': 1.5,
'b': 4.8,
't': cst.m_t_os
}
def on_shell_mass(q):
"""
Returns the approximate pole mass of the chosen quark.
This really only makes sense for the top quark.
"""
try:
M_q = _pole_masses[q]
except KeyError:
raise(ValueError('Unknown quark {}.'.format(q)))
if M_q is None:
raise(ValueError('The pole mass is ill-defined for {}.'.format(q)))
else:
return M_q
def msbar_mass(q, mu, nf):
"""
Returns the running quark mass in the MSbar scheme at a scale μ, in a
theory with nf dynamical flavors.
We use CRunDec through a slightly modified version of the `wilson.util.qcd` wrapper.
"""
if q in ['u', 'd', 't']:
raise(ValueError('MSbar mass not implemented for {} quark.'.format(q)))
elif q == 's':
return np.vectorize(
lambda _mu: qcd.m_s(cst.m_s_msbar_2GeV, _mu, nf, alphasMZ=cst.alpha_s_MZ, loop=5),
cache=True)(mu)
elif q == 'c':
return np.vectorize(
lambda _mu: qcd.m_c(cst.m_c_si, _mu, nf, alphasMZ=cst.alpha_s_MZ, loop=5),
cache=True)(mu)
elif q == 'b':
return np.vectorize(
lambda _mu: qcd.m_b(cst.m_b_si, _mu, nf, alphasMZ=cst.alpha_s_MZ, loop=5),
cache=True)(mu)
else:
raise(ValueError('Unknown quark {}.'.format(q)))
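# Illustrative usage (a sketch; numerical values come from the RunDec backend):
#     msbar_mass('s', mu=2.0, nf=4)   # running s-quark mass at mu = 2 GeV
#     msbar_mass('c', mu=3.0, nf=4)   # running c-quark mass at mu = 3 GeV
#     msbar_mass('t', mu=200., nf=6)  # raises ValueError (not implemented)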
_si_masses = {
'c': cst.m_c_si,
'b': cst.m_b_si,
't': cst.m_t_si,
}
def scale_invariant_mass(q):
"""
Returns the scale-invariant mass of the heavy quark, in the MS-bar scheme.
For the c and b quarks, we use the 2018 PDG values [1], while for the top
quark we look for the fixed point of the MS-bar mass with Nf=6, using the
calculation from [2], which is accurate to order O(αs³) + O(α) + O(α αs),
and the Higgs mass from the PDG [1].
[1] <NAME> al. (Particle Data Group), Phys. Rev. D 98, 030001 (2018)
DOI: 10.1103/PhysRevD.98.030001
[2] Jegerlehner, Fred, <NAME>, and <NAME>.
“On the Difference between the Pole and the MSbar Masses of the Top Quark at the Electroweak Scale.”
Physics Letters B 722, no. 1–3 (May 2013): 123–29.
https://doi.org/10.1016/j.physletb.2013.04.012.
"""
if q in ['u', 'd', 's']:
raise(ValueError('Scale-invariant mass not implemented for the {} quark.'.format(q)))
elif q in ['c', 'b', 't']:
return _si_masses[q]
else:
raise(ValueError('Unknown quark {}.'.format(q)))
```
#### File: scalar_portal/decay/two_pions.py
```python
from __future__ import division
from __future__ import absolute_import
import os
import numpy as np
import scipy.interpolate as si
from ..api.channel import DecayChannel
_srcdir = os.path.dirname(__file__)
# Load the table containing the normalized S -> π π decay width.
# Source:
# <NAME>., 2019.
# Decay and Detection of a Light Scalar Boson Mixing with the Higgs.
# Physical Review D 99. https://doi.org/10.1103/PhysRevD.99.015018
# First column: scalar mass m_S in GeV.
# Second column: Γ(S -> π π) ratio evaluated at m_S.
# Note: The first column should not be assumed to be increasing.
_decay_width_table = np.loadtxt(os.path.join(_srcdir, 'winkler_pi.txt'))
# The calculation is not valid above 2.0 GeV, so we return NaN above this value.
_upper_lim = 2.0 # GeV
# Make sure the upper limit makes sense
assert(np.max(_decay_width_table[:,0]) >= _upper_lim)
# Workaround for SciPy 0.15.1
_itp = si.interp1d(
_decay_width_table[:,0], _decay_width_table[:,1],
kind='linear',
bounds_error=False,
fill_value=0.,
assume_sorted=False
)
def normalized_total_width(mS):
"""
Total decay width Γ(S → π π) = Γ(S → π⁰ π⁰) + Γ(S → π⁺ π⁻).
"""
return np.where(mS <= _upper_lim, _itp(mS), float('nan'))
def normalized_decay_width(final_state, mS):
"""
Decay width to two pions, for a specific final state.
Possible values for `final_state`:
`neutral`: Γ(S → π⁰ π⁰) = 1/3×Γ(S → π π)
`charged`: Γ(S → π⁺ π⁻) = 2/3×Γ(S → π π)
"""
if final_state == 'neutral':
fraction = 1/3
elif final_state == 'charged':
fraction = 2/3
else:
raise(ValueError('Unknown final state {}.'.format(final_state)))
return fraction * normalized_total_width(np.asarray(mS, dtype='float'))
class TwoPions(DecayChannel):
'''
Decay channel 'S -> π⁰ π⁰' or 'S -> π⁺ π⁻'.
'''
def __init__(self, final_state):
if final_state == 'neutral':
children = ['pi0', 'pi0']
elif final_state == 'charged':
children = ['pi+', 'pi-']
else:
raise(ValueError("Final state must be either 'neutral' (2 pi0) or 'charged' (pi+ pi-)."))
super(TwoPions, self).__init__(children)
self._final_state = final_state
def normalized_width(self, mS):
return normalized_decay_width(self._final_state, mS)
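# --- Usage sketch (illustrative, not part of the original module) ---
#     ch = TwoPions('charged')   # the S -> pi+ pi- channel
#     ch.normalized_width(1.0)   # width at unit mixing angle (theta = 1), in GeV
#     ch.normalized_width(0.1)   # 0: below the 2 m_pi threshold
#     ch.normalized_width(2.5)   # nan: above the 2 GeV validity limit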
```
#### File: scalar_portal/decay/two_quarks.py
```python
from __future__ import division
from __future__ import absolute_import
import numpy as np
from ..data.constants import *
from ..data.particles import *
from ..api.channel import DecayChannel, format_pythia_string
def _beta(mq, mS):
"""
Perturbative "velocity" of the two outgoing quarks.
"""
return (1 - 4*(mq/mS)**2)**(1/2)
def _Delta_QCD(aS, Nf):
"QCD corrections away from the threshold."
x = aS/pi
return 5.67*x + (35.94-1.36*Nf)*x**2 + (164.14-25.77*Nf+0.259*Nf**2)*x**3
def _Delta_t(aS, mq, mS):
"QCD correction arising from the top triangle, away from the threshold."
# We have to use the pole mass for the top quark.
mt = on_shell_mass('t')
return (aS/pi)**2 * (1.57 - (4/3)*np.log(mS/mt) + (4/9)*np.log(mq/mS)**2)
_lower_validity_bound = 2.0 # GeV
# Number of dynamical quarks
# Nf = 4 throughout the considered mass range.
_Nf = 4
# q qbar thresholds
_thresholds = {
's': 2 * get_mass('K'),
'c': 2 * get_mass('D'),
'b': 2 * get_mass('B'),
}
def _normalized_decay_width_large_mass(q, mS):
"""
Approximates the decay width of S -> q qbar above the Hq Hq threshold
(where Hq=K for q=s, D for c, B for b).
"""
mq = msbar_mass(q, mu=mS, nf=_Nf)
aS = alpha_s(mu=mS, nf=_Nf)
# It seems that Spira forgot the β³ in the paper, but it is needed to
# reproduce figure 4, on page 213, so we put it back.
# Moreover, to get the correct threshold in the full QCD, it makes sense to
# replace the phase-space factor β(m_q) (obtained from pQCD) with β(m_Hq).
beta = _beta(_thresholds[q]/2, mS)
w = 3*mS*mq**2/(8*pi*v**2) * beta**3 * (1 + _Delta_QCD(aS, _Nf) + _Delta_t(aS, mq, mS))
return w
def normalized_decay_width(q, mS):
"""
Computes the decay width into two quarks: S -> q qbar, for q ∈ {s, c}.
This computation is only valid above 2 GeV and below the b threshold. This
function will return NaNs outside this range.
"""
mS = np.asarray(mS, dtype='float')
if q not in ['s', 'c']:
raise(ValueError('S -> {} {}bar not implemented.'.format(q, q)))
w = np.zeros_like(mS, dtype='float')
valid = (mS >= _lower_validity_bound) & (mS < _thresholds['b'])
w[~valid] = np.nan
open_channels = valid & (mS >= _thresholds[q])
# Only do the calculation for open channels
mS_open = mS[open_channels]
if np.any(open_channels):
w[open_channels] = _normalized_decay_width_large_mass(q, mS_open)
return w
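# Illustrative behaviour (consistent with the package's own tests):
#     normalized_decay_width('s', 1.0)   # nan: below the 2 GeV validity bound
#     normalized_decay_width('s', 3.0)   # positive width
#     normalized_decay_width('c', 3.5)   # 0: valid mass, but below the 2 m_D threshold
#     normalized_decay_width('b', 3.0)   # raises ValueError (not implemented)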
def normalized_total_width(mS):
return sum(normalized_decay_width(q, mS) for q in ['s', 'c'])
class TwoQuarks(DecayChannel):
'''
Decay channel 'S -> q qbar'.
'''
def __init__(self, flavor):
if not flavor in ['s', 'c']:
raise(ValueError('S -> {} {}bar not implemented.'.format(flavor, flavor)))
super(TwoQuarks, self).__init__([flavor, flavor+'bar'])
self._q = flavor
def normalized_width(self, mS):
return normalized_decay_width(self._q, mS)
def pythia_string(self, branching_ratio, scalar_id):
id_q = get_pdg_id(self._q)
return format_pythia_string(
scalar_id, [id_q, -id_q], branching_ratio,
matrix_element=pythia_me_mode_hadronize)
def is_open(self, mS):
return mS > _thresholds[self._q]
```
#### File: scalar_portal/test/test_api_wildcard.py
```python
from __future__ import absolute_import
from nose.tools import assert_equals, assert_raises
from .. import *
def test_api_wildcard():
assert('Model' in globals())
assert('Channel' in globals())
assert('ProductionChannel' in globals())
assert('DecayChannel' in globals())
assert('ActiveProcesses' in globals())
assert('BranchingRatios' in globals())
assert('DecayBranchingRatios' in globals())
assert('ProductionBranchingRatios' in globals())
assert('format_pythia_string' in globals())
assert('format_pythia_particle_string' in globals())
```
#### File: scalar_portal/test/test_channel.py
```python
from __future__ import absolute_import
from nose.tools import assert_equals, assert_raises
import numpy as np
from ..api import channel as ch
from ..production import two_body_hadronic as hh
from ..production import two_body_quartic as q2
from ..production import three_body_quartic as q3
from ..decay import leptonic as lp
from ..decay import two_pions as pi
from ..decay import two_kaons as kk
from ..decay import multimeson as mm
from ..decay import two_gluons as gg
from ..decay import two_quarks as qq
def test_string():
assert_equals(ch._to_channel_str('B', ['S', 'K*']), 'B -> S K*')
assert_equals(ch._to_channel_str('S', ['c', 'cbar']), 'S -> c cbar')
assert_equals(ch._from_channel_str('B -> S K*_0(700)'), ('B', ['S', 'K*_0(700)']))
assert_raises(ValueError, lambda: ch._from_channel_str('e+ e- -> t tbar'))
assert_raises(ValueError, lambda: ch._from_channel_str('B -> S K* -> S K gamma'))
def test_lt():
assert(lp.Leptonic('mu') < lp.Leptonic('e'))
assert(not (lp.Leptonic('e') < lp.Leptonic('e')))
assert(hh.TwoBodyHadronic('B0', 'K0') < hh.TwoBodyHadronic('B+', 'K+'))
assert(not (hh.TwoBodyHadronic('B+', 'K+') < hh.TwoBodyHadronic('B0', 'K0')))
assert(hh.TwoBodyHadronic('B0', 'K0') < lp.Leptonic('e'))
def test_leptonic():
ch = lp.Leptonic('mu')
mS = np.array([0.1, 0.5, 1, 5, 10])
assert(np.all(ch.normalized_width(mS) == lp.normalized_decay_width('mu', mS)))
assert(np.all(ch.width(mS, {'theta': 0.25}) == 0.25**2 * lp.normalized_decay_width('mu', mS)))
assert(np.all(ch.is_open(mS) == [False, True, True, True, True]))
assert(np.all(ch.is_valid(mS)))
assert_equals(str(ch), 'S -> mu+ mu-')
assert_equals(ch.pythia_string(0.42, 9900025), '9900025:addChannel = 1 0.42 0 -13 13')
assert_raises(ValueError, lambda: lp.Leptonic("tau'"))
assert_raises(ValueError, lambda: lp.Leptonic('pi0' ))
def test_two_pions():
ch0 = pi.TwoPions('neutral')
ch1 = pi.TwoPions('charged')
mS = np.array([0.1, 0.25, 0.3, 1, 2])
assert(np.all(ch0.normalized_width(mS) == pi.normalized_decay_width('neutral', mS)))
assert(np.all(ch1.normalized_width(mS) == pi.normalized_decay_width('charged', mS)))
assert(np.all(ch0.is_open(mS) == [False, False, True, True, True]))
assert(np.all(ch0.is_valid(mS)))
assert(np.all(~ch0.is_valid([2.5, 5])))
assert_equals(str(ch0), 'S -> pi0 pi0')
assert_equals(str(ch1), 'S -> pi+ pi-')
assert_equals(ch0.pythia_string(0.42, 9900025), '9900025:addChannel = 1 0.42 0 111 111' )
assert_equals(ch1.pythia_string(0.42, 9900025), '9900025:addChannel = 1 0.42 0 211 -211')
assert_raises(ValueError, lambda: pi.TwoPions('pi0 pi0'))
def test_two_kaons():
ch0 = kk.TwoKaons('neutral')
ch1 = kk.TwoKaons('charged')
mS = np.array([0.9, 1, 1.4, 2])
assert(np.all(ch0.normalized_width(mS) == kk.normalized_decay_width(mS)))
# The widths should be equal since we use the same mass for K0 and K+.
assert(np.all(ch0.normalized_width(mS) == ch1.normalized_width(mS)))
assert(np.all(ch0.is_open(mS) == [False, True, True, True]))
assert(np.all(ch0.is_valid(mS)))
assert(np.all(~ch0.is_valid([2.5, 5])))
assert_equals(str(ch0), 'S -> K0 Kbar0')
assert_equals(str(ch1), 'S -> K+ K-' )
assert_equals(ch0.pythia_string(0.42, 9900025), '9900025:addChannel = 1 0.42 0 311 -311')
assert_equals(ch1.pythia_string(0.42, 9900025), '9900025:addChannel = 1 0.42 0 321 -321')
assert_raises(ValueError, lambda: kk.TwoKaons('K+ K-'))
def test_multimeson():
ch = mm.Multimeson()
mS = np.array([0, 0.5, 0.6, 1, 1.4, 1.7, 2])
assert(np.all(ch.normalized_width(mS) == mm.normalized_decay_width(mS)))
assert(np.all(ch.is_open(mS) == [False, False, True, True, True, True, True]))
assert(np.all(ch.is_valid(mS)))
assert(np.all(~ch.is_valid([2.5, 4])))
assert_equals(str(ch), 'S -> mesons...')
assert_equals(ch.pythia_string(0.42, 9900025), None)
def test_two_gluons():
ch = gg.TwoGluons()
mS = np.array([2, 3, 5, 10])
assert(np.all(ch.normalized_width(mS) == gg.normalized_decay_width(mS)))
assert(np.all(ch.is_open(mS)))
assert(np.all(ch.is_valid(mS)))
assert(np.all(~ch.is_valid([0.5, 1, 1.5])))
assert_equals(str(ch), 'S -> g g')
assert_equals(ch.pythia_string(0.42, 9900025), '9900025:addChannel = 1 0.42 91 21 21')
def test_two_quarks():
ch_s = qq.TwoQuarks('s')
mS = np.array([2, 3, 4, 10])
assert(np.all(ch_s.normalized_width(mS) == qq.normalized_decay_width('s', mS)))
assert(np.all(ch_s.is_open(mS)))
assert(np.all(ch_s.is_valid(mS)))
assert(np.all(~ch_s.is_valid([0.5, 1, 1.5, 11])))
assert_equals(str(ch_s), 'S -> s sbar')
assert_equals(ch_s.pythia_string(0.42, 9900025), '9900025:addChannel = 1 0.42 91 3 -3')
ch_c = qq.TwoQuarks('c')
assert(np.all(ch_c.is_open(mS) == [False, False, True, True]))
assert_raises(ValueError, lambda: qq.TwoQuarks('t'))
def test_hadronic_production():
ch = hh.TwoBodyHadronic('B+', 'K*+')
mS = np.array([0, 0.1, 0.5, 1, 2, 3, 5])
assert(np.all(ch.normalized_width(mS) == hh.normalized_decay_width('B', 'K*', mS)))
assert(np.all(ch.width(mS, {'theta': 0.25}) == 0.25**2 * hh.normalized_decay_width('B', 'K*', mS)))
assert(np.all(ch.is_open(mS) == [True, True, True, True, True, True, False]))
assert(np.all(ch.is_valid(mS)))
assert_equals(str(ch), 'B+ -> S K*+')
assert_equals(ch.pythia_string(0.42, 9900025), '521:addChannel = 1 0.42 0 9900025 323')
assert_raises(ValueError, lambda: hh.TwoBodyHadronic('B+', 'e+' ))
assert_raises(ValueError, lambda: hh.TwoBodyHadronic('B+', 'K*' ))
assert_raises(ValueError, lambda: hh.TwoBodyHadronic('B' , 'K*0'))
tau = hh.get_lifetime('B+')
wtot = 1 / tau
epsilon = 1e-14
assert(abs(ch.parent_width - wtot) <= epsilon * wtot)
br1 = ch.normalized_branching_ratio(mS)
w1 = ch.normalized_width(mS)
assert(np.all(np.abs(wtot*br1 - w1) <= epsilon * w1))
br = ch.branching_ratio(mS, {'theta': 0.25})
w = ch.width(mS, {'theta': 0.25})
assert(np.all(np.abs(wtot*br - w) <= epsilon * w))
def test_quartic_2body():
ch = q2.TwoBodyQuartic('B_s0')
mS = np.array([0, 0.1, 0.5, 1, 2.5, 3, 10])
assert(np.all(ch.normalized_width(mS) == q2.normalized_decay_width('B_s', mS)))
assert(np.all(ch.is_open(mS) == [True, True, True, True, True, False, False]))
assert(np.all(ch.is_valid(mS)))
assert_equals(str(ch), 'B_s0 -> S S')
assert_equals(ch.pythia_string(0.42, 9900025), '531:addChannel = 1 0.42 0 9900025 9900025')
assert_raises(ValueError, lambda: q2.TwoBodyQuartic('Z'))
assert_raises(ValueError, lambda: q2.TwoBodyQuartic('B+'))
assert(np.all(ch.width(mS, {'alpha': 0.5}) == 0.5**2 * ch.normalized_width(mS)))
assert(np.all(ch.branching_ratio(mS, {'alpha': 0.5}) == 0.5**2 * ch.normalized_branching_ratio(mS)))
# Test internals
assert_raises(ValueError, lambda: q2._get_decay_constant('D'))
def test_quartic_3body():
ch = q3.ThreeBodyQuartic('B+', 'K+')
mS = np.array([0, 0.5, 2.3, 2.5])
assert(np.all(ch.normalized_width(mS) == q3.normalized_decay_width('B', 'K', mS)))
assert(np.all(ch.is_open(mS) == [True, True, True, False]))
assert(np.all(ch.is_valid(mS)))
assert_equals(str(ch), 'B+ -> S S K+')
# FIXME: find correct matrix element.
assert_equals(ch.pythia_string(0.42, 9900025), '521:addChannel = 1 0.42 0 9900025 9900025 321')
assert_raises(ValueError, lambda: q3.ThreeBodyQuartic('B+', 'e+' ))
assert_raises(ValueError, lambda: q3.ThreeBodyQuartic('B+', 'K*' ))
assert_raises(ValueError, lambda: q3.ThreeBodyQuartic('B' , 'K*0'))
def test_neutral_kaons():
ch1 = hh.TwoBodyHadronic('K_L0', 'pi0', weak_eigenstate='K0')
mS = np.array([0, 0.1, 0.5, 1, 2, 3, 5])
assert(np.all(ch1.normalized_width(mS) == hh.normalized_decay_width('K', 'pi', mS)))
ch2 = hh.TwoBodyHadronic('B0', 'K0')
assert(np.all(ch2.normalized_width(mS) == hh.normalized_decay_width('B', 'K', mS)))
ch3 = q2.TwoBodyQuartic('K_L0', weak_eigenstate='K0')
assert(np.all(ch3.normalized_width(mS) == q2.normalized_decay_width('K', mS)))
ch4 = q3.ThreeBodyQuartic('K_L0', 'pi0', weak_eigenstate='K0')
assert(ch4.normalized_width(0.15) == q3.normalized_decay_width('K', 'pi', 0.15))
def test_vectorization():
# Check that lists are handled as well as NumPy arrays
def check_vectorization(channel, mS):
assert(np.all(channel.normalized_width(mS) == \
channel.normalized_width(np.asarray(mS, dtype='float'))))
for m in mS:
assert(channel.normalized_width(m) == \
channel.normalized_width(np.asarray(m, dtype='float')))
mS = [0.1, 0.5, 1, 2, 5, 10]
check_vectorization(hh.TwoBodyHadronic('B+', 'pi+' ), mS)
check_vectorization(hh.TwoBodyHadronic('B+', 'K+' ), mS)
check_vectorization(hh.TwoBodyHadronic('B+', 'K*_0(700)+' ), mS)
check_vectorization(hh.TwoBodyHadronic('B+', 'K*+' ), mS)
check_vectorization(hh.TwoBodyHadronic('B+', 'K_1(1270)+' ), mS)
check_vectorization(hh.TwoBodyHadronic('B+', 'K*_2(1430)+'), mS)
check_vectorization(q2.TwoBodyQuartic('B0') , mS)
check_vectorization(lp.Leptonic('e') , mS)
check_vectorization(q3.ThreeBodyQuartic('B+', 'K+'), [0, 1])
mS = [0.1, 0.5, 1]
check_vectorization(pi.TwoPions('neutral'), mS)
mS = [2, 3, 5]
check_vectorization(gg.TwoGluons() , mS)
check_vectorization(qq.TwoQuarks('c'), mS)
```
#### File: scalar_portal/test/test_constants.py
```python
from __future__ import absolute_import
from nose.tools import assert_equals, assert_raises
from ..data import constants as c
def test_ckm():
assert_equals(c.ckm(1,1), c.Vud)
assert_equals(c.ckm(1,2), c.Vus)
assert_equals(c.ckm(1,3), c.Vub)
assert_equals(c.ckm(2,1), c.Vcd)
assert_equals(c.ckm(2,2), c.Vcs)
assert_equals(c.ckm(2,3), c.Vcb)
assert_equals(c.ckm(3,1), c.Vtd)
assert_equals(c.ckm(3,2), c.Vts)
assert_equals(c.ckm(3,3), c.Vtb)
assert_raises(ValueError, lambda: c.ckm(0,1))
assert_raises(ValueError, lambda: c.ckm(4,1))
assert_raises(ValueError, lambda: c.ckm(1,0))
assert_raises(ValueError, lambda: c.ckm(1,4))
def test_VUD():
assert_equals(c.VUD('u','d'), c.Vud)
assert_equals(c.VUD('u','s'), c.Vus)
assert_equals(c.VUD('u','b'), c.Vub)
assert_equals(c.VUD('c','d'), c.Vcd)
assert_equals(c.VUD('c','s'), c.Vcs)
assert_equals(c.VUD('c','b'), c.Vcb)
assert_equals(c.VUD('t','d'), c.Vtd)
assert_equals(c.VUD('t','s'), c.Vts)
assert_equals(c.VUD('t','b'), c.Vtb)
assert_raises(ValueError, lambda: c.VUD('d', 'u'))
assert_raises(ValueError, lambda: c.VUD('a', 'd'))
assert_raises(ValueError, lambda: c.VUD('u', 'z'))
```
#### File: scalar_portal/test/test_decay.py
```python
from __future__ import absolute_import
from nose.tools import assert_equals, assert_raises
import numpy as np
from ..data.particles import *
from ..decay import leptonic as lp
from ..decay import two_pions as tp
from ..decay import two_kaons as kk
from ..decay import multimeson as mm
from ..decay import two_gluons as gg
from ..decay import two_quarks as qq
def test_leptonic_width():
eps = 1e-12
mS = np.array([0.01, 0.1, 0.5, 1.0, 2.0, 4.0, 10.0])
w = lp.normalized_decay_width('e' , mS)
assert(all(w[mS > 2*get_mass('e' )] > 0))
assert(all(w[mS < 2*get_mass('e' )] == 0))
target = np.array([
1.6900126864964634e-15, 1.7165714271571945e-14, 8.584148208244031e-14,
1.7168377110602744e-13, 3.433679456830234e-13, 6.867360931015804e-13,
1.7168403739688331e-12])
assert(np.all(np.abs(w - target) <= eps * target))
w = lp.normalized_decay_width('mu' , mS)
assert(all(w[mS > 2*get_mass('mu' )] > 0))
assert(all(w[mS < 2*get_mass('mu' )] == 0))
target = np.array([
0, 0, 2.732025043654036e-9, 6.853907708050072e-9, 1.443491869260822e-8,
2.9237286606176158e-8, 7.335112420583204e-8])
assert(np.all(np.abs(w - target) <= eps * target))
w = lp.normalized_decay_width('tau', mS)
assert(all(w[mS > 2*get_mass('tau')] > 0))
assert(all(w[mS < 2*get_mass('tau')] == 0))
target = np.array([
0, 0, 0, 0, 0, 8.028575393027576e-7, 0.00001695364846943955])
assert(np.all(np.abs(w - target) <= eps * target))
assert_raises(ValueError, lambda: lp.normalized_decay_width('K', mS))
def test_two_pion_width():
mS = np.array([0.01, 0.2, 0.3, 0.5, 0.9, 1.0, 1.5, 2.0, 2.5, 4.0, 10.0])
wn = tp.normalized_decay_width('neutral', mS)
assert(all(wn[mS < 2*get_mass('pi')] == 0))
assert(all(wn[(mS > 2*get_mass('pi')) & (mS <= 2.0)] > 0))
assert(all(np.isnan(wn[mS > 2.0])))
wc = tp.normalized_decay_width('charged', mS)
assert(all(wc[mS < 2*get_mass('pi')] == 0))
assert(all(wc[(mS > 2*get_mass('pi')) & (mS <= 2.0)] > 0))
assert(all(np.isnan(wc[mS > 2.0])))
# Now test that Γ(S -> pi+ pi-) = 2 Γ(S -> pi0 pi0)
eps = 1e-12
finite = np.isfinite(wn)
assert(np.all(np.abs(wc - 2*wn)[finite] <= eps * wc[finite]))
assert_raises(ValueError, lambda: tp.normalized_decay_width('test', mS))
# To test the numerical values, we use the masses at the interpolation knots.
# This way, the different interpolations between scipy and Mathematica do not
# introduce an additional error.
mS = np.array([
0.2808915847265062, 0.5011797711548195, 1.0036678784980348,
1.4995286492412911, 1.9618148657454992])
w = tp.normalized_decay_width('charged', mS)
target = np.array([
4.880143779482717e-10, 1.4857373356991338e-8, 3.604336514028455e-7,
2.319233832129673e-9, 2.310540370746613e-9])
assert(np.all(np.abs(w - target) <= eps * target))
# Test masses away from the interpolation knots.
# If using linear interpolation, we actually expect the same result as the
# Mathematica version.
eps = 1e-12
mS = np.array([0.2, 0.3, 0.5, 1.0, 1.4, 2.0])
w = tp.normalized_decay_width('charged', mS)
target = np.array([
0, 1.8927616589680657e-9, 1.475091932044681e-8, 4.019482951592148e-7,
6.6426603227451354e-9, 2.39245459053496e-9])
assert(np.all(np.abs(w - target) <= eps * target))
def test_two_kaon_width():
mS = np.array([0.01, 0.2, 0.3, 0.5, 0.9, 1.0, 1.5, 2.0, 2.5, 4.0, 10.0])
w = kk.normalized_decay_width(mS)
assert(np.all(w[ mS < 2*get_mass('K') ] == 0))
assert(np.all(w[(mS > 2*get_mass('K')) & (mS <= 2.0)] > 0))
assert(np.all(np.isnan(w[mS > 2.0])))
# Test numerical values at the interpolation knots.
eps = 1e-12
mS = np.array([1.0029956973063714, 1.5221480119166657, 1.992527315035567])
w = kk.normalized_decay_width(mS)
target = np.array([
5.3034678644569654e-8, 4.121879878937285e-8, 1.7589704763603936e-8])
assert(np.all(np.abs(w - target) <= eps * target))
# Test masses away from the interpolation knots.
eps = 1e-12 # For linear interpolation, we can use a small ε
mS = np.array([0.9, 1.0, 1.4, 2.0])
w = kk.normalized_decay_width(mS)
target = np.array([
0, 3.25398123949564e-8, 4.558339609849428e-8, 1.743794584685908e-8])
assert(np.all(np.abs(w - target) <= eps * target))
def test_multimeson():
threshold = 4 * get_mass('pi')
assert_equals(mm.normalized_decay_width(threshold), 0)
Lambda_S = 2.0
eps = 1e-14
total_width_below = (
tp.normalized_decay_width('neutral', Lambda_S) +
tp.normalized_decay_width('charged', Lambda_S) +
kk.normalized_decay_width(Lambda_S) * 2 +
mm.normalized_decay_width(Lambda_S)
)
total_width_above = (
gg.normalized_decay_width(Lambda_S) +
qq.normalized_decay_width('s', Lambda_S) +
qq.normalized_decay_width('c', Lambda_S)
)
assert(abs(total_width_below - total_width_above) <= eps * total_width_above)
mS = np.array([0.1, 0.5, 0.6, 1, 1.4, 1.7, 2])
w = mm.normalized_decay_width(mS)
assert(np.all(mm.normalized_total_width(mS) == w))
eps = 1e-8
target = np.array([
0, 0, 6.842325342810394e-10, 6.507037347327064e-9,
1.9642598915115103e-8, 3.6178523573490095e-8, 5.985102812537482e-8])
assert(np.all(np.abs(w - target) <= eps * target))
def test_two_gluon_width():
mS = np.array([0.01, 0.1, 0.5, 1.0, 2.0, 4.0, 10.0])
w = gg.normalized_decay_width(mS)
assert(all(w[mS >= 2.0] > 0))
assert(all(np.isnan(w[mS < 2.0])))
eps = 1e-8
target = np.array([
5.346506003380509e-8,3.366555346862895e-7,1.7387372280010782e-6])
# Here we compute the error relative to the maximum value, in order to
# account for numerical cancellations in small decay widths, which reduce
# the relative precision.
assert(np.all(np.abs(w[mS >= 2] - target) <= eps * np.max(target)))
def test_two_quark_width():
mS = np.array([0.01, 0.1, 0.5, 1.0, 2.0, 3.0, 4.0, 5.0, 10.0, 11.0])
valid = (mS >= 2.0) & (mS < 2*get_mass('B'))
# Test S -> s sbar (always far from the threshold).
w = qq.normalized_decay_width('s', mS)
assert(np.all(w[valid] > 0))
assert(np.all(np.isnan(w[~valid])))
eps = 1e-8
target = np.array([
4.485054167109034e-8, 6.273213385478923e-8, 7.557563898011938e-8,
8.669975909774289e-8, 1.339576192802357e-7])
assert(np.all(np.abs(w[valid] - target) <= eps * target))
# Test the range of validity of the formula for S -> c cbar.
w = qq.normalized_decay_width('c', mS)
assert(np.all(w[valid & (mS > 2*get_mass('D'))] > 0))
assert(np.all(np.isnan(w[~valid])))
# Test S -> c cbar, both near and away from the physical threshold.
mS = np.array([3.5, 3.75, 4, 4.5, 5, 10])
w = qq.normalized_decay_width('c', mS)
eps = 1e-8
target = np.array([
0, 8.653439606661616e-9, 5.215462624791676e-7, 2.071975664654039e-6,
3.6864436285061025e-6, 0.000014785007585122307])
assert(np.all(np.abs(w - target) <= eps * target))
assert_raises(ValueError, lambda: qq.normalized_decay_width('b', mS))
```
#### File: scalar_portal/test/test_form_factors.py
```python
from __future__ import absolute_import, division
from nose.tools import assert_equals, assert_raises
from ..data.form_factors import *
def test_form_factors_at_zero():
# Scalar form factors
f = get_form_factor('B', 'K')
assert(abs(f(0) - 0.33) < 1e-2)
f = get_form_factor('B', 'pi')
assert(abs(f(0) - 0.258) < 1e-3)
f = get_form_factor('K', 'pi')
assert(abs(f(0) - 0.96) < 1e-2)
# Pseudoscalar form factors
f = get_form_factor('B', 'K*_0(700)')
assert(abs(f(0) - 0.46) < 1e-2)
f = get_form_factor('B', 'K*_0(1430)')
assert(abs(f(0) - 0.17) < 1e-2)
# Vector form factors
f = get_form_factor('B', 'K*')
assert(abs(f(0) - 0.374) < 0.033)
f = get_form_factor('B', 'K*(1410)')
assert(abs(f(0) - 0.300) < 0.036)
f = get_form_factor('B', 'K*(1680)')
assert(abs(f(0) - 0.22) < 0.04)
# Pseudo-vector form factors
f = get_form_factor('B', 'K_1(1270)')
assert(abs(f(0) - (-0.52)) < 0.13)
f = get_form_factor('B', 'K_1(1400)')
assert(abs(f(0) - (-0.07)) < 0.033)
# Tensor form factors
f = get_form_factor('B', 'K*_2(1430)')
assert(abs(f(0) - 0.23) < 1e-2)
# Exceptions
assert_raises(ValueError, lambda: get_form_factor('K', 'B'))
assert_raises(ValueError, lambda: get_form_factor('B', 'D'))
```
#### File: scalar_portal/test/test_model.py
```python
from __future__ import absolute_import
from nose.tools import assert_equals, assert_raises
import numpy as np
from ..api.model import Model
from ..api.branching_ratios import BranchingRatiosResult
from ..data.constants import default_scalar_id
def test_model():
m = Model()
assert_equals(m.scalar_pdg_id, default_scalar_id)
m.decay.enable('LightScalar')
m.production.disable('K -> S pi')
def test_groups():
m_ref = Model()
m_ref.production.enable_all()
m_ref.decay.enable_all()
m = Model()
m.production.enable('K -> S pi')
m.production.enable('B -> S pi')
m.production.enable('B -> S K?')
m.production.enable('K -> S S')
m.production.enable('B -> S S')
m.production.enable('B -> S S pi')
m.production.enable('B -> S S K?')
m.decay.enable('LightScalar')
m.decay.enable('HeavyScalar')
lp = m.production.list_enabled()
lp_ref = m_ref.production.list_enabled()
assert_equals(set(lp), set(lp_ref))
ld = m.decay.list_enabled()
ld_ref = m_ref.decay.list_enabled()
assert_equals(set(ld), set(ld_ref))
def test_channels():
m = Model()
m.production.enable_all()
m.decay.enable_all()
prod_ch = m.production.get_active_processes()
decay_ch = m.decay.get_active_processes()
mS = np.array([0.5, 1.5, 3])
for ch in prod_ch:
ch.normalized_width(mS)
for ch in decay_ch:
ch.normalized_width(mS)
def test_result():
m = Model()
m.production.enable_all()
m.decay.enable('LightScalar')
mS = np.array([0.1, 0.5, 1])
res = m.compute_branching_ratios(mS, theta=0.25, alpha=0)
assert(isinstance(res, BranchingRatiosResult))
res2 = m.compute_branching_ratios(mS, {'theta': 0.25, 'alpha': 0})
assert(np.all(res.total_width == res2.total_width))
def test_toy_model_matching():
m = Model()
Lambda = 2.0 # Matching scale in GeV
m.production.enable_all()
m.decay.enable('LightScalar')
res_low = m.compute_branching_ratios(Lambda, theta=1, alpha=0)
m.decay.disable_all()
m.decay.enable('HeavyScalar')
res_high = m.compute_branching_ratios(Lambda, theta=1, alpha=0)
eps = 1e-8
assert(abs(res_low.total_width - res_high.total_width) <= eps * res_high.total_width)
``` |
{
"source": "Jlthompson96/Simple-BMI-Calculator",
"score": 4
} |
#### File: Jlthompson96/Simple-BMI-Calculator/bmi.py
```python
# input() returns strings, so cast to float before doing arithmetic.
weight = float(input("What is your weight (in lbs)? "))
height = float(input("What is your height (in inches)? "))
def calcBMI():
return 703 * weight/pow(height,2)
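    # Worked example (illustrative numbers, not from the original script):
    # 150 lbs at 65 inches -> 703 * 150 / 65**2 ≈ 24.96, which the checks below label "normal".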
bmi = calcBMI()
def output():
if bmi > 30:
print("You are obese")
elif bmi > 25:
print("You are overweight")
    elif bmi >= 18.5:
        print("You are normal")
    else:
        print("You are underweight")
output()
``` |
{
"source": "jltipton/tiptoe",
"score": 3
} |
#### File: tiptoe/tiptoe/localfiles.py
```python
import os
import logging
def list_files(folder_path):
list_of_files = list()
for (dirpath, dirnames, filenames) in os.walk(folder_path):
list_of_files += [os.path.join(dirpath, file) for file in filenames]
return list_of_files
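# Example (hypothetical path): list_files("/tmp/data") might return
# ["/tmp/data/a.txt", "/tmp/data/sub/b.txt", ...] -- every file under the tree, with full paths.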
def setup_path(folder_path):
if not os.path.exists(folder_path):
try:
os.makedirs(folder_path)
except OSError:
logging.info('failed to create {}'.format(folder_path))
else:
logging.info('created directory(s) {}'.format(folder_path))
return folder_path
```
#### File: tiptoe/tiptoe/twofactor.py
```python
import requests
import time
import json
from selenium.common.exceptions import NoSuchElementException  # needed for the except clauses below
def check_2factor_submit(browser):
try:
otp_submit = browser.find_element_by_id("auth-send-code")
except NoSuchElementException:
return ""
otp_submit.click()
def check_2factor_field(browser):
try:
otp_field = browser.find_element_by_id("auth-mfa-otpcode")
except NoSuchElementException:
return ""
otp = get_2factor()
time.sleep(8)
otp_field.send_keys(otp)
time.sleep(3)
otp_field.submit()
def check_2factor(browser):
check_2factor_submit(browser)
time.sleep(15)
check_2factor_field(browser)
return ""
def get_2factor():
    # NOTE: `response` (the HTTP reply carrying the SMS inbox) and `loggit` are
    # expected to be provided elsewhere in the package; they are not defined in
    # this module as shown.
    msgs = json.loads(response.content)
latest_msg = msgs["messages"][0]["body"]
otp_code = latest_msg.split(" ")[0]
loggit("2factor code: {}".format(otp_code))
return otp_code
``` |
{
"source": "JLTrincado/MxFinder",
"score": 3
} |
#### File: JLTrincado/MxFinder/extract_orfs.py
```python
__author__="jruiz"
__date__ ="$Sep 08, 2015 12:24:43 PM$"
'''Extract all ORFs in a transcript FASTA
'''
import sys
import os
from Bio import SeqIO
from Bio.Seq import Seq
fasta = sys.argv[1]
try:
threshold = sys.argv[2] #nucleotides
except:
threshold = 75
#OBJECTS
class orf_object:
def __init__(self, sequence, start, end):
self.sequence = sequence
self.start = start
self.end = end
#FUNCTIONS
def find_all(sequence, subsequence):
''' Returns a list of indexes within sequence that are the start of subsequence'''
start = 0
idxs = []
next_idx = sequence.find(subsequence, start)
while next_idx != -1:
idxs.append(next_idx)
        start = next_idx + 1  # Move past this on the next time around
next_idx = sequence.find(subsequence, start)
return idxs
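    # Example: find_all("ATGxxATG", "ATG") returns [0, 5]; overlapping matches are
    # included because the search restarts one position past each hit.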
def find_orfs(sequence, threshold):
""" Finds all valid open reading frames in the string 'sequence', and
returns them as a list"""
starts = find_all(sequence, 'ATG')
stop_amber = find_all(sequence, 'TAG')
stop_ochre = find_all(sequence, 'TAA')
stop_umber = find_all(sequence, 'TGA')
stops = stop_amber + stop_ochre + stop_umber
stops.sort()
orfs = []
for start in starts:
for stop in stops:
if start < stop \
and (start - stop) % 3 == 0: # Stop is in-frame
if len(sequence[start:stop+3]) >= int(threshold):
orf_obj = orf_object(sequence[start:stop+3], start, stop+3)
orfs.append(orf_obj)
break
orfs.sort(key=lambda x: len(x.sequence), reverse=True)
return orfs
cdna = SeqIO.index(fasta, "fasta")
for sequence in cdna:
ends = []
orfs = find_orfs(cdna[sequence].seq, threshold)
n = 1
for orf in orfs:
if orf.end in ends:
continue
ends.append(orf.end)
print(">" + sequence + "_" + str(n) + ":" + str(orf.start) + "-" + str(orf.end) + "\n" + str(orf.sequence))
n += 1
exit(0)
``` |
{
"source": "jltwheeler/fullstackfomo",
"score": 2
} |
#### File: architecture/architecture/api.py
```python
from diagrams import Cluster, Diagram
from diagrams.aws.compute import ECS, AutoScaling
from diagrams.aws.network import Route53, ElbApplicationLoadBalancer
from .utils import create_kwargs
def api(graph_attr: dict = None, path: str = None) -> None:
name = "NodeJS GraphQL API"
kwargs = create_kwargs(name, graph_attr, path)
with Diagram(**kwargs):
with Cluster("VPC"):
route53 = Route53("DNS")
elb = ElbApplicationLoadBalancer("ALB")
asg = AutoScaling("ASG")
with Cluster("ECS Tasks"):
ecs = [ECS("API 1"), ECS("API 2")]
route53 >> elb >> asg >> ecs
```
#### File: architecture/architecture/worker.py
```python
from diagrams import Diagram, Cluster
from diagrams.aws.analytics import ElasticsearchService
from diagrams.aws.compute import Lambda
from diagrams.aws.database import Dynamodb
from diagrams.aws.integration import SimpleQueueServiceSqs
from diagrams.aws.management import CloudwatchEventTimeBased
from diagrams.saas.social import Twitter
from .utils import create_kwargs
def worker(graph_attr=None, path=None) -> None:
name = "Data retrieval worker"
kwargs = create_kwargs(name, graph_attr, path)
with Diagram(**kwargs):
source = Twitter("Data source")
event = CloudwatchEventTimeBased("CloudWatch (1 min)")
_lambda = Lambda("Python lambdas")
dynamodb = Dynamodb("DynamoDB")
sqs = SimpleQueueServiceSqs("SQS")
es = ElasticsearchService("ElasticSearch")
source >> event >> sqs >> _lambda >> dynamodb >> es
```
#### File: workers/functions/reddit_handler.py
```python
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
import json
import os
import time
import boto3
from dotenv import load_dotenv
import praw
from .constants import ENDPOINT_URL, MAIN_TABLE_NAME
from .utils import (
compose_expression_attr_vals,
compose_update_expression,
copy_dict_partial,
)
if os.environ.get("PYTHON_ENV", "prod") == "dev":
load_dotenv()
dynamo_db = boto3.resource(
"dynamodb",
endpoint_url=ENDPOINT_URL,
aws_access_key_id="secret",
aws_secret_access_key="secret",
)
else:
dynamo_db = boto3.resource("dynamodb", region_name="eu-west-1")
client_id = os.environ["REDDIT_CLIENT_ID"]
client_secret = os.environ["REDDIT_CLIENT_SECRET"]
user_agent = "fullstack fomo"
reddit = praw.Reddit(
client_id=client_id, client_secret=client_secret, user_agent=user_agent
)
print("Connected to DynamoDB and Reddit client")
def _get_subreddit_data(subreddit: str) -> list:
return [
{
"id": submission.id,
"title": submission.title,
"dataSource": "reddit",
"score": submission.score,
"longText": submission.selftext,
"link": submission.shortlink,
"numComments": submission.num_comments,
"thumbnail": submission.url,
"author": submission.author_fullname,
"subreddit": submission.subreddit_name_prefixed,
}
for submission in reddit.subreddit(subreddit).hot(limit=5)
]
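# Example of a single entry returned by _get_subreddit_data (illustrative values only):
# {"id": "abc123", "title": "...", "dataSource": "reddit", "score": 1234,
#  "numComments": 56, "subreddit": "r/python", ...}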
def main(event, context) -> None:
start_time = time.time()
reddit_table = dynamo_db.Table("FomoSubreddits")
main_table = dynamo_db.Table(MAIN_TABLE_NAME)
items = [
item["subreddit"] for item in reddit_table.scan().get("Items", [])
]
if not items:
return {
"statusCode": 500,
"body": json.dumps({"message": "Not tracking any subreddits"}),
}
print("Fetching latest reddit data...")
with ThreadPoolExecutor(max_workers=8) as executor:
iterator = executor.map(_get_subreddit_data, items[:1])
results = [item for result in iterator for item in result]
print("Checking for updated data and updating main DynamoDB table.")
now_time = datetime.utcnow().isoformat()
for result in results:
key = {"id": result["id"], "dataSource": result["dataSource"]}
existing_item = main_table.get_item(Key=key).get("Item", False)
if existing_item:
print(
f"Updating entry for item ID: {result['id']}, source: {result['dataSource']}"
)
updated_item = copy_dict_partial(
result, exclude_keys=["id", "dataSource"]
)
updated_item["updatedAt"] = now_time
main_table.update_item(
Key=key,
UpdateExpression=compose_update_expression(updated_item),
ExpressionAttributeValues=compose_expression_attr_vals(
updated_item
),
)
else:
print(
f"Creating entry for item ID: {result['id']}, source: {result['dataSource']}"
)
result["createdAt"] = now_time
result["updatedAt"] = now_time
main_table.put_item(Item=result)
delta_time = time.time() - start_time
message = f"Function took {delta_time:.2f}secs to run"
if os.environ.get("PYTHON_ENV", "prod") == "dev":
return {"statusCode": 200, "body": json.dumps({"message": message})}
```
#### File: scripts/lib/config.py
```python
import os
import json
from decimal import Decimal
REGION_NAME = "eu-west-1"
FOMO_TABLES = [
{
"name": "FomoMain",
"definitions": [
{"AttributeName": "id", "AttributeType": "S"},
{"AttributeName": "dataSource", "AttributeType": "S"},
],
"key_schema": [
{"AttributeName": "id", "KeyType": "HASH"},
{"AttributeName": "dataSource", "KeyType": "RANGE"},
],
"units": {
"ReadCapacityUnits": 5,
"WriteCapacityUnits": 5,
},
},
{
"name": "FomoTwitterUsers",
"definitions": [
{"AttributeName": "userName", "AttributeType": "S"},
],
"key_schema": [
{"AttributeName": "userName", "KeyType": "HASH"},
],
"units": {
"ReadCapacityUnits": 1,
"WriteCapacityUnits": 1,
},
"file": "twitter.json",
},
{
"name": "FomoSubreddits",
"definitions": [
{"AttributeName": "subreddit", "AttributeType": "S"},
],
"key_schema": [
{"AttributeName": "subreddit", "KeyType": "HASH"},
],
"units": {
"ReadCapacityUnits": 1,
"WriteCapacityUnits": 1,
},
"file": "reddit.json",
},
]
def get_data(json_file: str) -> dict:
with open(os.path.join(os.getcwd(), "data", json_file), "rb") as file:
users = json.load(file, parse_float=Decimal)
return users
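# Example (assumed layout): get_data("reddit.json") reads ./data/reddit.json relative to the
# working directory and returns the parsed JSON with floats as Decimal (DynamoDB-friendly numbers).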
```
#### File: scripts/lib/drop_local_db.py
```python
import boto3
def main():
ENDPOINT_URL = "http://localhost:8000"
dynamo_db = boto3.resource(
"dynamodb",
endpoint_url=ENDPOINT_URL,
aws_access_key_id="secret",
aws_secret_access_key="secret",
region_name="eu-west-1",
)
print(f"Connected to local dynamodb database on {ENDPOINT_URL}")
existing_tables = list(dynamo_db.tables.all())
for table in existing_tables:
table.delete()
print(f"Deleted {table.name}.")
print(f"Finished dropping {len(existing_tables)} tables.")
if __name__ == "__main__":
main()
``` |
{
"source": "jlu5/Limnoria",
"score": 2
} |
#### File: plugins/NickAuth/test.py
```python
import supybot.ircdb as ircdb
from supybot.test import *
class NickAuthTestCase(PluginTestCase):
plugins = ('NickAuth', 'User')
prefix1 = '[email protected]'
def _procedure(self, nickserv_reply):
self.assertNotError('register foobar 123')
self.assertResponse('user list', 'foobar')
self.assertNotError('hostmask remove foobar %s' % self.prefix)
self.assertNotError('identify foobar 123')
self.assertNotError('nick add foobar baz')
self.assertNotError('unidentify')
self.prefix = self.prefix1
self.assertError('nick add foobar qux')
self.nick = self.prefix.split('!')[0]
self.assertError('hostmask list')
self.irc.feedMsg(ircmsgs.privmsg(self.irc.nick,
'auth',
prefix=self.prefix))
self.assertEqual(self.irc.takeMsg().command, 'WHOIS')
self.assertError('hostmask list')
self.irc.feedMsg(ircmsgs.privmsg(self.irc.nick,
'auth',
prefix=self.prefix))
self.assertEqual(self.irc.takeMsg().command, 'WHOIS')
if nickserv_reply:
self.irc.feedMsg(ircmsgs.IrcMsg(':leguin.freenode.net 330 pgjrgrg '
'%s baz :is logged in as' % self.nick))
msg = self.irc.takeMsg()
self.assertNotEqual(msg, None)
self.assertEqual(msg.args[1], 'You are now authenticated as foobar.')
self.assertResponse('hostmask list',
'foobar has no registered hostmasks.')
else:
msg = self.irc.takeMsg()
self.assertEqual(msg, None)
self.assertError('hostmask list')
def testAuth(self):
self._procedure(True)
def testNoAuth(self):
self._procedure(False)
def testList(self):
self.assertNotError('register foobar 123')
self.assertRegexp('nick list', 'You have no recognized nick')
self.assertNotError('nick add foo')
self.assertRegexp('nick list', 'foo')
self.assertNotError('nick add %s bar' % self.nick)
self.assertRegexp('nick list', 'foo and bar')
self.assertNotError('nick add %s %s baz' % (self.irc.network, self.nick))
self.assertRegexp('nick list', 'foo, bar, and baz')
self.assertRegexp('nick list %s' % self.irc.network, 'foo, bar, and baz')
self.assertRegexp('nick list %s foobar' % self.irc.network,
'foo, bar, and baz')
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
``` |
{
"source": "jlu5/Supybot-UrbanDictionary",
"score": 2
} |
#### File: jlu5/Supybot-UrbanDictionary/plugin.py
```python
from __future__ import unicode_literals
# my libs
import json
import re
# supybot libs
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('UrbanDictionary')
@internationalizeDocstring
class UrbanDictionary(callbacks.Plugin):
"""Add the help for "@plugin help UrbanDictionary" here
This should describe *how* to use this plugin."""
threaded = True
######################
# INTERNAL FUNCTIONS #
######################
def _red(self, string):
"""return a red string."""
return ircutils.mircColor(string, 'red')
def _bu(self, string):
"""bold and underline string."""
return ircutils.bold(ircutils.underline(string))
def cleanjson(self, s):
"""clean up json and return."""
s = s.replace('\n', '')
s = s.replace('\r', '')
s = s.replace('\t', '')
s = s.strip()
# return
return s
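        # Example: cleanjson(" too\r\n cool\t") -> "too cool"
        # (newlines, carriage returns, and tabs removed, then ends stripped).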
####################
# PUBLIC FUNCTIONS #
####################
def urbandictionary(self, irc, msg, args, optlist, optterm):
"""[--disableexamples | --showvotes | --num # | --showtags] <term>
Fetches definition for <term> on UrbanDictionary.com
Use --disableexamples to not display examples.
Use --showvotes to show votes [default: off]
Use --num # to limit the number of definitions. [default:10]
Use --showtags to display tags (if available)
"""
# default args for output. can manip via --getopts.
args = {'showExamples': True,
'numberOfDefinitions':self.registryValue('maxNumberOfDefinitions'),
'showVotes': False,
'showTags':False
}
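        # Example invocation on IRC (illustrative): "urbandictionary --num 2 --showvotes yolo"
        # returns at most two definitions, each annotated with its (+up/-down) vote counts.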
# optlist to change args.
if optlist:
for (key, value) in optlist:
if key == 'disableexamples':
args['showExamples'] = False
if key == 'showvotes':
args['showVotes'] = True
if key == 'showtags':
args['showTags'] = True
if key == 'num': # if number is >, default to config var.
if 0 <= value <= self.registryValue('maxNumberOfDefinitions'):
args['numberOfDefinitions'] = value
# build and fetch url.
url = 'http://api.urbandictionary.com/v0/define?term=%s' % utils.web.urlquote(optterm)
try:
html = utils.web.getUrl(url)
except utils.web.Error as e:
self.log.error("ERROR opening {0} message: {1}".format(url, e))
irc.error("could not open {0} message: {1}".format(url, e), Raise=True)
# try parsing json.
#irc.reply("{0}".format(self._repairjson(html.decode('utf-8'))))
try:
#jsondata = self._repairjson(html.decode('utf-8')) # decode utf-8. fix \r\n that ud puts in below.
jsondata = html.decode('utf-8')
jsondata = json.loads(jsondata) # odds chars in UD.
except Exception as e:
self.log.error("Error parsing JSON from UD: {0}".format(e))
irc.error("Failed to parse json data. Check logs for error", Raise=True)
# process json.
results = jsondata.get('result_type') # exact, no_results, fulltext .
if not results:
# assume exact i guess...
results = 'exact'
definitions = jsondata.get('list')
# prep output now depending on results.
if results == "exact": # we did not find anything.
outdefs = []
for i in definitions[0:args['numberOfDefinitions']]: # iterate through each def.
# clean these up.
definition = self.cleanjson(''.join(i['definition'])) #.encode('utf-8')
example = self.cleanjson(''.join(i['example']))
# now add
outputstring = "{0}".format(definition) # default string.
if args['showExamples']: # show examples?
outputstring += " {0} {1} {2}".format(self._bu("[ex:]"), example, self._bu("[/ex]"))
if args['showVotes']: # show votes?
outputstring += " (+{0}/-{1})".format(i['thumbs_up'], i['thumbs_down'])
outdefs.append(outputstring) # add to our list.
output = " | ".join([item for item in outdefs]) # create string with everything.
elif results == "fulltext": # not direct. yields related terms.
output = " | ".join(sorted(set([item['word'] for item in definitions]))) # sorted, unique words.
# output time.
if results == "no_results" or len(definitions) == 0: # NOTHING FOUND.
irc.error("'{0}' not defined on UrbanDictionary.".format(optterm), Raise=True)
else: # we have definitions, so we're gonna output.
# check if we should add tags.
if args['showTags']: # display tags.
tags = jsondata.get('tags')
if tags: # we have tags. add it to optterm.
tags = " | ".join([i for i in tags])
else:
tags = False
else:
tags = False
# now lets output.
if self.registryValue('disableANSI'): # disable formatting.
if tags:
irc.reply("{0} :: {1} :: {2}".format(optterm, tags, ircutils.stripFormatting(output)))
else:
irc.reply("{0} :: {1}".format(optterm, ircutils.stripFormatting(output)))
else: # colors.
if tags:
irc.reply("{0} :: {1} :: {2}".format(self._red(optterm), tags, output))
else:
irc.reply("{0} :: {1}".format(self._red(optterm), output))
urbandictionary = wrap(urbandictionary, [getopts({'showtags':'',
'showvotes':'',
'num':('int'),
'disableexamples':''}), ('text')])
Class = UrbanDictionary
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=250:
``` |
{
"source": "jlu5/terrabot",
"score": 3
} |
#### File: terrabot/terrabot/bot.py
```python
from . import packets
from . import client
from terrabot.data.player import Player
from terrabot.data.world import World
from .events import Events, EventManager
class TerraBot(object):
"""A class that handles basic functions of a terraria bot like movement and login"""
# Defaults to 7777, because that is the default port for the server
def __init__(self, ip, port=7777, protocol=194, name="Terrabot", password=''):
super(TerraBot, self).__init__()
self.protocol = protocol
self.world = World()
self.player = Player(name)
self.password = password
self.evman = EventManager()
self.client = client.Client(ip, port, self.player, self.world, self.evman)
self.evman.method_on_event(Events.PlayerID, self.received_player_id)
self.evman.method_on_event(Events.Initialized, self.initialized)
self.evman.method_on_event(Events.Login, self.logged_in)
self.evman.method_on_event(Events.ItemOwnerChanged, self.item_owner_changed)
self.evman.method_on_event(Events.PasswordRequested, self.send_password)
# self.event_manager.method_on_event(events.Events.)
def start(self):
self.client.start()
self.client.add_packet(packets.Packet1(self.protocol))
def item_owner_changed(self, id, data):
if self.player.logged_in:
self.client.add_packet(packets.Packet16(data[0], data[1]))
def received_player_id(self, event_id, data):
self.client.add_packet(packets.Packet4(self.player))
self.client.add_packet(packets.Packet10(self.player))
self.client.add_packet(packets.Packet2A(self.player))
self.client.add_packet(packets.Packet32(self.player))
for i in range(0, 83):
self.client.add_packet(packets.Packet5(self.player, i))
self.client.add_packet(packets.Packet6())
def initialized(self, event, data):
self.client.add_packet(packets.Packet8(self.player, self.world))
def logged_in(self, event, data):
self.client.add_packet(packets.PacketC(self.player, self.world))
def send_password(self, event, data):
if self.password:
self.client.add_packet(packets.Packet26(self.password))
else:
print('ERROR: Server needed password to login but none was given!')
self.stop()
def message(self, msg, color=None):
if self.player.logged_in:
if color:
hex_code = '%02x%02x%02x' % color
msg = "[c/" + hex_code + ":" + msg + "]"
self.client.add_packet(packets.Packet19(self.player, msg))
def get_event_manager(self):
return self.evman
def stop(self):
self.client.stop()
```
#### File: terrabot/packets/packet31.py
```python
import struct
class Packet31Parser(object):
def parse(self, world, player, data, ev_man):
player.x = world.spawnX
player.y = world.spawnY
``` |
{
"source": "jluastro/nirc2",
"score": 3
} |
#### File: nirc2/reduce/align_rms.py
```python
import numpy as np
import argparse
from gcwork import starset
import pdb
import sys
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
run(argv)
return
def run(args=None):
"""
align_rms main routine.
"""
options = parse_options(args)
_run = open(options.out_root + '.run', 'w')
_run.write('RUN: python nirc2.reduce.align_rms() run with args:\n')
_run.write(' '.join(args) + '\n\n')
_run.write('OPTIONS:\n')
_run.write('root_name = ' + options.root_name + '\n')
_run.write('N_required = ' + str(options.N_required) + '\n')
_run.write('out_root = ' + options.out_root + '\n')
_run.write('calc_err_mean = ' + str(options.calc_err_mean) + '\n')
_run.write('calc_rel_err = ' + str(options.calc_rel_err) + '\n')
_run.write('idx_min = ' + str(options.idx_min) + '\n')
_run.write('idx_max = ' + str(options.idx_max) + '\n')
_run.write('idx_ref = ' + str(options.idx_ref) + '\n')
_run.close()
# Read in the align output. Determine the number of
# individual starlists are in the stack.
s = starset.StarSet(options.root_name)
N_lists = len(s.years)
if options.idx_min == None:
options.idx_min = N_lists
if options.idx_max == None:
options.idx_max = N_lists
# Trim down the starlist to just those that are
# in the desired number of epochs and are detected
# in the reference epoch.
s.stars = trim_stars(s, options)
# Fetch the data off the starset
name = s.getArray('name')
x = s.getArrayFromAllEpochs('xpix')
y = s.getArrayFromAllEpochs('ypix')
m = s.getArrayFromAllEpochs('mag')
f = 10**(m/-2.5)
flux_dn = s.getArrayFromAllEpochs('fwhm')
corr = s.getArrayFromAllEpochs('corr')
nimg = s.getArrayFromAllEpochs('nframes')
snr = s.getArrayFromAllEpochs('snr')
# Identify where we have measurements and where are non-detections.
good = ((x > -1000) & (y > -1000) & (m != 0))
# Calculate the number of epochs the stars are detected in.
cnt = good[options.idx_min : options.idx_max, :].sum(axis=0)
# Mask the bad data.
x_msk = np.ma.masked_where(good == False, x, copy=True)
y_msk = np.ma.masked_where(good == False, y, copy=True)
f_msk = np.ma.masked_where(good == False, f, copy=True)
m_msk = np.ma.masked_where(good == False, m, copy=True)
flux_dn = np.ma.masked_where(good == False, flux_dn, copy=True)
corr_msk = np.ma.masked_where(good == False, corr, copy=True)
nimg_msk = np.ma.masked_where(good == False, nimg, copy=True)
snr_msk = np.ma.masked_where(good == False, snr, copy=True)
# Calculate the average x, y, m, f
if options.idx_ref != None:
print(( 'Using epoch {0} as average pos/flux'.format(options.idx_ref) ))
x_avg = x[options.idx_ref, :]
y_avg = y[options.idx_ref, :]
m_avg = m[options.idx_ref, :]
f_avg = f[options.idx_ref, :]
year = s.years[options.idx_ref]
corr_avg = corr[options.idx_ref, :]
flux_dn_avg = flux_dn[options.idx_ref, :]
nimg_avg = nimg[options.idx_ref, :]
snr_orig = snr[options.idx_ref, :]
else:
print( 'Calculate average pos/flux ' )
print(( 'from epochs {0} - {1}'.format(options.idx_min, options.idx_max) ))
x_avg = x_msk[options.idx_min : options.idx_max, :].mean(axis=0)
y_avg = y_msk[options.idx_min : options.idx_max, :].mean(axis=0)
f_avg = f_msk[options.idx_min : options.idx_max, :].mean(axis=0)
m_avg = -2.5 * np.log10(f_avg)
year = s.years[options.idx_min : options.idx_max].mean()
corr_avg = corr_msk[options.idx_min : options.idx_max, :].mean(axis=0)
flux_dn_avg = flux_dn[options.idx_min : options.idx_max, :].mean(axis=0)
nimg_avg = cnt
snr_orig = None
# Calculate the error on x, y, m, f
x_std = calc_error(x_msk, x_avg, cnt, options)
y_std = calc_error(y_msk, y_avg, cnt, options)
f_std = calc_error(f_msk, f_avg, cnt, options)
m_std = f_std / f_avg
# Estimate a new signal to noise ratio
new_snr = f_avg / f_std
if ((options.calc_rel_err == False) and
(isinstance(snr_orig, (list, tuple, np.ndarray)))):
new_snr = 1.0 / np.hypot(1.0/new_snr, 1.0/snr_orig)
# Fix up any infinities in the SNR. Set them to 0.
new_snr[np.isinf(new_snr)] = 0.0
## Check if new_snr has a mask before using it
if np.ma.is_masked(new_snr):
new_snr[new_snr.mask] = 0.0
_out = open(options.out_root + '.lis', 'w')
hdr = '{name:13s} {mag:>6s} {year:>8s} '
hdr += '{x:>9s} {y:>9s} {xe:>9s} {ye:>9s} '
hdr += '{snr:>20s} {corr:>6s} {nimg:>8s} {flux:>20s}\n'
_out.write(hdr.format(name='# name', mag='m', year='t',
x='x', y='y', xe='xe', ye='ye',
snr='snr', corr='corr', nimg='N_frames', flux='flux'))
fmt = '{name:13s} {mag:6.3f} {year:8.3f} '
fmt += '{x:9.3f} {y:9.3f} {xe:9.3f} {ye:9.3f} '
fmt += '{snr:20.4f} {corr:6.2f} {nimg:8d} {flux:20.0f}\n'
for ss in range(len(x_avg)):
_out.write(fmt.format(name=name[ss], mag=m_avg[ss], year=float(year),
x=x_avg[ss], y=y_avg[ss], xe=x_std[ss], ye=y_std[ss],
snr=new_snr[ss], corr=corr_avg[ss], nimg=int(nimg_avg[ss]),
flux=flux_dn_avg[ss]))
_out.close()
return s
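# Example (hypothetical align root and output name):
#   run(['my_align/align_d', '3', '-o', 'my_align/align_d_rms', '--errOnMean'])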
def trim_stars(s, options):
# Get the relevant variables off the starset
x = s.getArrayFromAllEpochs('xpix')
y = s.getArrayFromAllEpochs('ypix')
m = s.getArrayFromAllEpochs('mag')
# Mask where we have null detections and
# ID good stars where we have detections.
good = ((x > -1000) & (y > -1000) & (m != 0))
# Figure out the number of epochs each star is detected in.
# Trim out the stars that don't have enough detections.
cnt = good[options.idx_min : options.idx_max, :].sum(axis=0)
# Figure out if a stars is not detected in the reference epoch
# and set it for trimming (cnt = 0).
if options.idx_ref != None:
cnt[good[options.idx_ref, :] == False] = 0
# Trim our arrays to only the "good" stars that meet our number
# of epochs criteria.
idx = np.where(cnt >= options.N_required)[0]
new_stars = [s.stars[ii] for ii in idx]
return new_stars
def calc_error(v_msk, v_avg, cnt, options):
v_tmp = (v_msk[options.idx_min : options.idx_max, :] - v_avg)**2
v_tmp = v_tmp.sum(axis=0)
Ndof = cnt
if options.idx_ref == None:
Ndof -= 1
v_tmp /= Ndof
v_std = np.sqrt(v_tmp)
if options.calc_err_mean:
v_std /= np.sqrt(Ndof)
idx = np.where(cnt <= 1)[0]
v_std[idx] = 0.0
return v_std
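# Note on the divisor: with no reference epoch, Ndof = cnt - 1 (scatter about the stack mean),
# while with --refList set, Ndof = cnt (deviations are taken about an externally fixed position).
# E.g. 3 detections and no reference epoch -> the summed squares are divided by 2.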
def parse_options(args):
purpose = 'Combine a stack of aligned starlists to produce a new "average"\n'
purpose += 'starlist with astrometric and photometric errors estimated from\n'
purpose += 'the RMS error (or error on the mean) of the stack.\n'
purpose += '\n'
purpose += ''
##########
# Setup a Parser
##########
parser = argparse.ArgumentParser(description=purpose)
# root_name
help_str = 'The root of the align output files.'
parser.add_argument('root_name', type=str, help=help_str)
# N_required
help_str = 'The minimum number of starlists a star must be in to be included '
help_str += 'in the output list.'
parser.add_argument('N_required', type=int, help=help_str)
# out_root
help_str = 'Output root of files. Default is the input align '
help_str += 'root_name + "_rms".'
parser.add_argument('-o', '--outroot', dest='out_root', help=help_str)
# calc_err_mean
help_str = 'Include to calculate the error on the mean rather than the '
help_str += 'RMS error.'
parser.add_argument('--errOnMean', '-e', dest='calc_err_mean',
action='store_true', help=help_str)
# calc_rel_err
    help_str = 'Include to calculate the relative photometric error '
help_str += '(no zero-point error).'
parser.add_argument('--relPhotErr', '-r', dest='calc_rel_err',
action='store_true', help=help_str)
# stack_min
help_str = 'The index of the starlist that starts the stack over which '
help_str += 'to calculate averages and errors (def=1). Note that the '
    help_str += 'default use is that the first image (idx=0) contains the '
    help_str += '"average" star positions/fluxes and that the first stack '
    help_str += 'image is at --stackMin=1.'
parser.add_argument('--stackMin', dest='idx_min', help=help_str,
action='store', default=1, type=int)
# stack_max
help_str = 'The index of the starlist that ends the stack over which '
help_str += 'to calculate averages and errors (def=last). Note that the '
    help_str += 'default use is that the first image (idx=0) contains the '
    help_str += '"average" star positions/fluxes and that the last stack '
    help_str += 'image is at --stackMax=max.'
parser.add_argument('--stackMax', dest='idx_max', help=help_str, type=int)
# reference_list
help_str = 'Specify the index number (0-based) of the starlist that should be '
help_str += 'adopted as the reference. If a reference list is specified, the average '
help_str += 'positions and fluxes will come from this list and only the astrometric '
help_str += 'and photometric errors will be calculated from the remaining stack '
help_str += 'of images. If None is specified, then the average positions and fluxes '
help_str += 'will come from stacking the remaining set of images.'
parser.add_argument('--refList', dest='idx_ref', help=help_str,
action='store', default=0)
options = parser.parse_args(args)
# Define the root file name for the output files.
if options.out_root == None:
options.out_root = options.root_name + '_rms'
# Fix the reference epoch.
if options.idx_ref == 'None':
options.idx_ref = None
return options
if __name__ == "__main__":
main()
```
#### File: nirc2/reduce/calib.py
```python
import os, sys
from . import util
from astropy.io import fits
from astropy import stats
from pyraf import iraf as ir
from nirc2 import instruments
import numpy as np
from astropy import stats
import astropy
from datetime import datetime
module_dir = os.path.dirname(__file__)
def makedark(files, output,
raw_dir=None,
instrument=instruments.default_inst):
"""
Make dark image for imaging data. Makes a calib/ directory
and stores all output there. All output and temporary files
will be created in a darks/ subdirectory.
Parameters
----------
files : list of int
Integer list of the files. Does not require padded zeros.
output : str
Output file name. Include the .fits extension.
raw_dir : str, optional
Directory where raw files are stored. By default,
assumes that raw files are stored in '../raw'
instrument : instruments object, optional
Instrument of data. Default is `instruments.default_inst`
"""
redDir = os.getcwd() + '/' # Reduce directory.
curDir = redDir + 'calib/'
darkDir = util.trimdir(curDir + 'darks/')
# Set location of raw data
rawDir = util.trimdir(os.path.abspath(redDir + '../raw') + '/')
# Check if user has specified a specific raw directory
if raw_dir is not None:
rawDir = util.trimdir(os.path.abspath(raw_dir) + '/')
util.mkdir(curDir)
util.mkdir(darkDir)
_out = darkDir + output
_outlis = darkDir + 'dark.lis'
util.rmall([_out, _outlis])
darks = instrument.make_filenames(files, rootDir=rawDir)
# Write out the sources of the dark files
data_sources_file = open(redDir + 'data_sources.txt', 'a')
data_sources_file.write(
'---\n# Dark Files for {0} \n'.format(output))
for cur_file in darks:
out_line = '{0} ({1})\n'.format(cur_file, datetime.now())
data_sources_file.write(out_line)
data_sources_file.close()
f_on = open(_outlis, 'w')
f_on.write('\n'.join(darks) + '\n')
f_on.close()
ir.unlearn('imcombine')
ir.imcombine.combine = 'median'
ir.imcombine.reject = 'sigclip'
ir.imcombine.nlow = 1
ir.imcombine.nhigh = 1
ir.imcombine('@' + _outlis, _out)
def makeflat(onFiles, offFiles, output, normalizeFirst=False,
raw_dir=None,
instrument=instruments.default_inst):
"""
Make flat field image for imaging data. Makes a calib/ directory
and stores all output there. All output and temporary files
will be created in a flats/ subdirectory.
If only twilight flats were taken (as in 05jullgs), use these flats as
the onFiles, and use 0,0 for offFiles. So the reduce.py file should look
something like this: onFiles = range(22, 26+1) and offFiles = range(0,0)
The flat will then be made by doing a median combine using just the
twilight flats.
Parameters
----------
onFiles : list of int
Integer list of lamps ON files. Does not require padded zeros.
offFiles : list of int
Integer list of lamps OFF files. Does not require padded zeros.
output : str
Output file name. Include the .fits extension.
normalizeFirst : bool, default=False
If the individual flats should be normalized first,
such as in the case of twilight flats.
raw_dir : str, optional
Directory where raw files are stored. By default,
assumes that raw files are stored in '../raw'
instrument : instruments object, optional
Instrument of data. Default is `instruments.default_inst`
"""
redDir = os.getcwd() + '/'
curDir = redDir + 'calib/'
flatDir = util.trimdir(curDir + 'flats/')
# Set location of raw data
rawDir = util.trimdir(os.path.abspath(redDir + '../raw') + '/')
# Check if user has specified a specific raw directory
if raw_dir is not None:
rawDir = util.trimdir(os.path.abspath(raw_dir) + '/')
util.mkdir(curDir)
util.mkdir(flatDir)
_on = flatDir + 'lampsOn.fits'
_off = flatDir + 'lampsOff.fits'
_norm = flatDir + 'flatNotNorm.fits'
_out = flatDir + output
_onlis = flatDir + 'on.lis'
_offlis = flatDir + 'off.lis'
_onNormLis = flatDir + 'onNorm.lis'
util.rmall([_on, _off, _norm, _out, _onlis, _offlis, _onNormLis])
lampson = instrument.make_filenames(onFiles, rootDir=rawDir)
lampsoff = instrument.make_filenames(offFiles, rootDir=rawDir)
lampsonNorm = instrument.make_filenames(onFiles, rootDir=flatDir + 'norm')
util.rmall(lampsonNorm)
# Write out the sources of the dark files
data_sources_file = open(redDir + 'data_sources.txt', 'a')
data_sources_file.write(
'---\n# Flat Files for {0}, Lamps On\n'.format(output))
for cur_file in lampson:
out_line = '{0} ({1})\n'.format(cur_file, datetime.now())
data_sources_file.write(out_line)
data_sources_file.write(
'---\n# Flat Files for {0}, Lamps Off\n'.format(output))
for cur_file in lampsoff:
out_line = '{0} ({1})\n'.format(cur_file, datetime.now())
data_sources_file.write(out_line)
data_sources_file.close()
if (len(offFiles) != 0):
f_on = open(_onlis, 'w')
f_on.write('\n'.join(lampson) + '\n')
f_on.close()
f_on = open(_offlis, 'w')
f_on.write('\n'.join(lampsoff) + '\n')
f_on.close()
f_onn = open(_onNormLis, 'w')
f_onn.write('\n'.join(lampsonNorm) + '\n')
f_onn.close()
# Combine to make a lamps on and lamps off
ir.unlearn('imcombine')
ir.imcombine.combine = 'median'
ir.imcombine.reject = 'sigclip'
ir.imcombine.nlow = 1
ir.imcombine.nhigh = 1
ir.imcombine('@' + _offlis, _off)
# Check if we should normalize individual flats first
# such as in the case of twilight flats.
if normalizeFirst:
f_on = open(_offlis, 'w')
f_on.write('\n'.join(lampsoff) + '\n')
f_on.close()
# Subtract "off" from individual frames
ir.imarith('@'+_onlis, '-', _off, '@'+_onNormLis)
# Scale them and combine
ir.imcombine.scale = 'median'
ir.imcombine('@' + _onNormLis, _norm)
else:
# Combine all "on" frames
ir.imcombine('@' + _onlis, _on)
# Now do lampsOn - lampsOff
ir.imarith(_on, '-', _off, _norm)
# Normalize the final flat
ir.module.load('noao', doprint=0, hush=1)
ir.module.load('imred', doprint=0, hush=1)
ir.module.load('generic', doprint=0, hush=1)
orig_img = fits.getdata(_norm)
orig_size = (orig_img.shape)[0]
if (orig_size >= 1024):
flatRegion = '[100:900,513:950]'
else:
flatRegion = ''
ir.normflat(_norm, _out, sample=flatRegion)
else:
f_on = open(_onlis, 'w')
f_on.write('\n'.join(lampson) + '\n')
f_on.close()
# Combine twilight flats
ir.unlearn('imcombine')
ir.imcombine.combine = 'median'
ir.imcombine.reject = 'sigclip'
ir.imcombine.nlow = 1
ir.imcombine.nhigh = 1
if normalizeFirst:
# Scale them
ir.imcombine.scale = 'median'
ir.imcombine('@' + _onlis, _norm)
# Normalize the flat
ir.module.load('noao', doprint=0, hush=1)
ir.module.load('imred', doprint=0, hush=1)
ir.module.load('generic', doprint=0, hush=1)
flatRegion = '[100:900,513:950]'
ir.normflat(_norm, _out, sample=flatRegion)
def makemask(dark, flat, output, instrument=instruments.default_inst):
"""
Make bad pixel mask for imaging data. Makes a calib/ directory
and stores all output there. All output and temporary files
will be created in a masks/ subdirectory.
Parameters
----------
dark : str
The filename of the dark file (must be in the
calib/darks/ directory). This is used to
construct a hot pixel mask. Use a long (t>20sec) exposure dark.
flat : str
The filename of a flat file (must be in the
calib/flats/ directory). This is used to
construct a dead pixel mask. The flat should be normalized.
output : str
The output file name. This will be created in the masks/
subdirectory.
instrument : instruments object, optional
Instrument of data. Default is `instruments.default_inst`
"""
redDir = os.getcwd() + '/'
calDir = redDir + 'calib/'
maskDir = util.trimdir(calDir + 'masks/')
flatDir = util.trimdir(calDir + 'flats/')
darkDir = util.trimdir(calDir + 'darks/')
util.mkdir(calDir)
util.mkdir(maskDir)
_out = maskDir + output
_dark = darkDir + dark
_flat = flatDir + flat
_inst_mask = module_dir + '/masks/' + instrument.get_bad_pixel_mask_name()
util.rmall([_out])
##########
# Make hot pixel mask
##########
whatDir = redDir + dark
print(whatDir)
# Get the sigma-clipped mean and stddev on the dark
img_dk = fits.getdata(_dark)
if float(astropy.__version__) < 3.0:
dark_stats = stats.sigma_clipped_stats(img_dk,
sigma=3,
iters=10)
else:
dark_stats = stats.sigma_clipped_stats(img_dk,
sigma=3,
maxiters=10)
dark_mean = dark_stats[0]
dark_stddev = dark_stats[2]
# Clip out the very hot pixels.
hi = dark_mean + (10.0 * dark_stddev)
hot = img_dk > hi
##########
# Make dead pixel mask
##########
img_fl = fits.getdata(_flat)
if float(astropy.__version__) < 3.0:
        flat_stats = stats.sigma_clipped_stats(img_fl,
sigma=3,
iters=10)
else:
flat_stats = stats.sigma_clipped_stats(img_fl,
sigma=3,
maxiters=10)
flat_mean = flat_stats[0]
flat_stddev = flat_stats[2]
# Clip out the dead pixels
lo = 0.5
hi = flat_mean + (15.0 * flat_stddev)
dead = np.logical_or(img_fl > hi, img_fl < lo)
# We also need the original instrument mask (with cracks and such)
inst_mask = fits.getdata(_inst_mask)
# Combine into a final supermask. Use the flat file just as a template
# to get the header from.
ofile = fits.open(_flat)
if ((hot.shape)[0] == (inst_mask.shape)[0]):
mask = hot + dead + inst_mask
else:
mask = hot + dead
mask = (mask != 0)
unmask = (mask == 0)
ofile[0].data[unmask] = 0
ofile[0].data[mask] = 1
ofile[0].writeto(_out, output_verify='silentfix')
return
def make_instrument_mask(dark, flat, outDir, instrument=instruments.default_inst):
"""Make the static bad pixel mask for the instrument. This only needs to be
run once. This creates a file called nirc2mask.fits or osiris_img_mask.fits
which is subsequently used throughout the pipeline. The dark should be a long
integration dark.
Parameters
----------
dark : str
The full absolute path to a medianed dark file. This is
used to construct a hot pixel mask (4 sigma detection thresh).
flat : str
The full absolute path to a medianed flat file. This is
used to construct a dead pixel mask.
outDir : str
full path to output directory with '/' at the end.
instrument : instruments object, optional
Instrument of data. Default is `instruments.default_inst`
"""
_out = outDir + instrument.get_bad_pixel_mask_name()
_dark = dark
_flat = flat
util.rmall([_out])
##########
# Make hot pixel mask
##########
# Get the sigma-clipped mean and stddev on the dark
img_dk = fits.getdata(_dark)
if float(astropy.__version__) < 3.0:
dark_stats = stats.sigma_clipped_stats(img_dk,
sigma=3,
iters=10)
else:
dark_stats = stats.sigma_clipped_stats(img_dk,
sigma=3,
maxiters=10)
dark_mean = dark_stats[0]
dark_stddev = dark_stats[2]
# Clip out the very hot pixels.
hi = dark_mean + (15.0 * dark_stddev)
hot = img_dk > hi
print(('Found %d hot pixels' % (hot.sum())))
##########
# Make dead pixel mask
##########
img_fl = fits.getdata(_flat)
if float(astropy.__version__) < 3.0:
        flat_stats = stats.sigma_clipped_stats(img_fl,
sigma=3,
iters=10)
else:
flat_stats = stats.sigma_clipped_stats(img_fl,
sigma=3,
maxiters=10)
flat_mean = flat_stats[0]
flat_stddev = flat_stats[2]
# Clip out the dead pixels
lo = 0.5
hi = flat_mean + (15.0 * flat_stddev)
dead = np.logical_or(img_fl > hi, img_fl < lo)
print(('Found %d dead pixels' % (dead.sum())))
# Combine into a final supermask
new_file = fits.open(_flat)
mask = hot + dead
mask = (mask != 0)
unmask = (mask == 0)
new_file[0].data[unmask] = 0
new_file[0].data[mask] = 1
new_file[0].writeto(_out, output_verify='silentfix')
def analyzeDarkCalib(firstFrame, skipcombo=False):
"""
Reduce data from the dark_calib script that should be run once
a summer in order to test the dark current and readnoise.
This should be run in the reduce/calib/ directory for a particular
run.
"""
redDir = os.getcwd() + '/' # Reduce directory.
curDir = redDir + 'calib/'
darkDir = util.trimdir(curDir + 'darks/')
rawDir = util.trimdir(os.path.abspath(redDir + '../raw') + '/')
util.mkdir(curDir)
util.mkdir(darkDir)
def printStats(frame, tint, sampmode, reads):
files = list(range(frame, frame+3))
fileName = 'dark_%ds_1ca_%d_%dsm.fits' % (tint, sampmode, reads)
if (skipcombo == False):
makedark(files, fileName)
# Get the sigma-clipped mean and stddev on the dark
img_dk = fits.getdata(darkDir + fileName)
if float(astropy.__version__) < 3.0:
dark_stats = stats.sigma_clipped_stats(img_dk,
sigma=3,
iters=10)
else:
dark_stats = stats.sigma_clipped_stats(img_dk,
sigma=3,
maxiters=10)
darkMean = dark_stats[0]
darkStdv = dark_stats[2]
return darkMean, darkStdv
frame = firstFrame
lenDarks = 11
tints = np.zeros(lenDarks) + 12
tints[-3] = 10
tints[-2] = 50
tints[-1] = 100
reads = np.zeros(lenDarks)
reads[0] = 1
reads[1] = 2
reads[2] = 4
reads[3] = 8
reads[4] = 16
reads[5] = 32
reads[6] = 64
reads[7] = 92
reads[-3:] = 16
samps = np.zeros(lenDarks) + 3
samps[0] = 2
dMeans = np.zeros(lenDarks, dtype=float)
dStdvs = np.zeros(lenDarks, dtype=float)
for ii in range(lenDarks):
(dMeans[ii], dStdvs[ii]) = printStats(frame, tints[ii],samps[ii], reads[ii])
dStdvs[ii] *= np.sqrt(3)
frame += 3
# Calculate the readnoise
rdnoise = dStdvs * 4.0 * np.sqrt(reads) / (np.sqrt(2.0))
print(('READNOISE per read: ', rdnoise))
##########
# Print Stuff Out
##########
outFile = darkDir + 'analyzeDarkCalib.out'
util.rmall([outFile])
_out = open(outFile,'w')
    hdr = '%8s %5s %9s %9s %4s %6s'
print('Sampmode Reads Noise(DN) Noise(e-) Tint Coadds')
print('-------- ----- --------- --------- ---- ------')
_out.write('Sampmode Reads Noise(DN) Noise(e-) Tint Coadds\n')
_out.write('-------- ----- --------- --------- ---- ------\n')
for ii in range(lenDarks):
print(('%8d %5d %9.1f %9.1f %4d 1' % \
(samps[ii], reads[ii], dStdvs[ii], dStdvs[ii] * 4.0, tints[ii])))
for ii in range(lenDarks):
_out.write('%8d %5d %9.1f %9.1f %4d 1\n' % \
(samps[ii], reads[ii], dStdvs[ii], dStdvs[ii] * 4.0, tints[ii]))
_out.close()
return
```
#### File: nirc2/reduce/dar.py
```python
from pyraf import iraf
import glob
import numpy as np
import pylab as py
import math
from astropy.io import fits as pyfits
import datetime
try:
import urllib.request, urllib.parse, urllib.error
p2 = False
except:
import urllib
p2 = True # Python 2 is running, so the urllib command is different
import os, sys
from nirc2.reduce import nirc2_util
from nirc2.reduce import util
from nirc2.reduce import slalib
from nirc2 import instruments
from astropy.table import Table
module_dir = os.path.dirname(__file__)
def get_atm_conditions(year):
"""
Retrieve atmospheric conditions from CFHT archive website,
then calls dar.splitAtmosphereCFHT() to separate the data
by months.
"""
yearStr = str(year)
if p2: # Python 2 command, necessary for IRAF
_atm = urllib.urlopen("http://mkwc.ifa.hawaii.edu/archive/wx/cfht/cfht-wx.%s.dat" % yearStr)
else:
_atm = urllib.request.urlopen("http://mkwc.ifa.hawaii.edu/archive/wx/cfht/cfht-wx.%s.dat" % yearStr)
atm = _atm.read()
_atm.close()
root = module_dir + '/weather/'
if type(atm) == bytes:
# this is for python 3
atmfile = open(root + 'cfht-wx.' + yearStr + '.dat','wb')
else:
atmfile = open(root + 'cfht-wx.' + yearStr + '.dat','w')
atmfile.write(atm)
atmfile.close()
splitAtmosphereCFHT(str(year))
def keckDARcoeffs(lamda, year, month, day, hour, minute):
"""
Calculate the differential atmospheric refraction
for two objects observed at Keck.
Input:
lamda -- Effective wavelength (microns) assumed to be the same for both
year, month, day, hour, minute of observation (HST)
Output:
refA
refB
"""
iraf.noao()
# Set up Keck observatory info
foo = iraf.noao.observatory(command="set", obsid="keck", Stdout=1)
obs = iraf.noao.observatory
####################
# Setup all the parameters for the atmospheric refraction
# calculations. Typical values obtained from the Mauna Kea
# weather pages and from the web.
####################
#
# Temperature Lapse Rate (Kelvin/meter)
tlr = 0.0065
# Precision required to terminate the iteration (radian)
eps = 1.0e-9
# Height above sea level (meters)
hm = obs.altitude
# Latitude of the observer (radian)
phi = math.radians(obs.latitude)
# Pull from atmosphere logs.
logDir = module_dir + '/weather/'
logFile = logDir +'cfht-wx.'+ str(year) +'.'+ str(month).zfill(2) +'.dat'
_atm = Table.read(logFile, format='ascii', header_start=None)
atmYear = _atm['col1']
atmMonth = _atm['col2']
atmDay = _atm['col3']
atmHour = _atm['col4']
atmMin = _atm['col5'] # HST times
atmTemp = _atm['col8'] # Celsius
atmHumidity = _atm['col9'] # percent
atmPressure = _atm['col10'] # mb pressure
# Find the exact time match for year, month, day, hour
idx = (np.where((atmYear == year) & (atmMonth == month) &
(atmDay == day) & (atmHour == hour)))[0]
if (len(idx) == 0):
print(( 'Could not find DAR data for %4d-%2d-%2d %2d:%2d in %s' % \
(year, month, day, hour, minute, logFile)))
atmMin = atmMin[idx]
atmTemp = atmTemp[idx]
atmHumidity = atmHumidity[idx]
atmPressure = atmPressure[idx]
# Find the closest minute
minDiff = abs(atmMin - minute)
sdx = minDiff.argsort()
# Select out the closest in time.
# Ambient Temperature (Kelvin)
# Should be around 274.0 Kelvin
    tdk = atmTemp[sdx[0]] + 273.15  # Celsius to Kelvin (0 C = 273.15 K)
# Pressure at the observer (millibar)
# Should be around 760.0 millibars
pmb = atmPressure[sdx[0]]
# Relative humidity (%)
# Should be around 0.1 %
rh = atmHumidity[sdx[0]] / 100.0 #relative humidity should be between 0 and 1
print(hm, tdk, pmb, rh, lamda, phi, tlr, eps)
return slalib.refco(hm, tdk, pmb, rh, lamda, phi, tlr, eps)
def nirc2dar(fitsFile, instrument=instruments.default_inst):
"""
Use the FITS header to extract date, time, wavelength,
elevation, and image orientation information. This is everything
that is necessary to calculate the differential atmospheric
refraction. The differential atmospheric refraction
is applicable only along the zenith direction of an image.
This code calculates the predicted DAR using archived CFHT
atmospheric data and the elevation and wavelength of the observations.
Then the DAR correction is transformed into image coefficients that
can be applied in image coordinates.
"""
# Get header info
img, hdr = pyfits.getdata(fitsFile, header=True)
effWave = instrument.get_central_wavelength(hdr)
elevation = hdr[instrument.hdr_keys['elevation']]
airmass = hdr['AIRMASS']
parang = hdr['PARANG']
date = hdr['DATE-OBS'].split('-')
year = int(date[0])
month = int(date[1])
day = int(date[2])
utc = hdr['UTC'].split(':')
hour = int(utc[0])
minute = int(utc[1])
second = int(math.floor(float(utc[2])))
utc = datetime.datetime(year, month, day, hour, minute, second)
utc2hst = datetime.timedelta(hours=-10)
hst = utc + utc2hst
(refA, refB) = keckDARcoeffs(effWave, hst.year, hst.month, hst.day,
hst.hour, hst.minute)
tanz = math.tan(math.radians(90.0 - elevation))
tmp = 1.0 + tanz**2
darCoeffL = tmp * (refA + 3.0 * refB * tanz**2)
darCoeffQ = -tmp * (refA*tanz +
3.0 * refB * (tanz + 2.0*tanz**3))
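    # These coefficients follow (presumably) from expanding the standard refraction
    # model R(z) ~ refA*tan(z) + refB*tan(z)**3 in the zenith-angle offset about the
    # field center, keeping the linear and quadratic terms.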
# Convert DAR coefficients for use with units of NIRC2 pixels
scale = instrument.get_plate_scale(hdr)
darCoeffL *= 1.0
darCoeffQ *= 1.0 * scale / 206265.0
# Lets determine the zenith and horizon unit vectors for
# this image.
pos_ang = instrument.get_position_angle(hdr)
pa = math.radians(parang + pos_ang)
zenithX = -math.sin(pa)
zenithY = math.cos(pa)
    # Compute the predicted differential atmospheric refraction
    # over a 10'' separation along the zenith direction.
    # Remember the coefficients are only for deltaZ in pixels.
deltaZ = img.shape[0] * scale
deltaR = darCoeffL * (deltaZ/scale) + darCoeffQ * (deltaZ/scale)**2
deltaR *= scale # now in arcseconds
magnification = (deltaZ + deltaR) / deltaZ
print(( 'DAR FITS file = %s' % (fitsFile)))
print(('DAR over 10": Linear dR = %f" Quad dR = %f"' % \
(darCoeffL * deltaZ, darCoeffQ * deltaZ**2)))
print(('DAR Magnification = %f' % (magnification)))
print(('DAR Vertical Angle = %6.1f' % (math.degrees(pa))))
return (pa, darCoeffL, darCoeffQ)
def darPlusDistortion(inputFits, outputRoot, xgeoim=None, ygeoim=None, instrument=instruments.default_inst):
"""
Create lookup tables (stored as FITS files) that can be used
to correct DAR. Optionally, the shifts due to DAR can be added
to existing NIRC2 distortion lookup tables if the xgeoim/ygeoim
input parameters are set.
Inputs:
inputFits - a NIRC2 image for which to determine the DAR correction
outputRoot - the root name for the output. This will be used as the
root name of two new images with names, <outputRoot>_x.fits and
<outputRoot>_y.fits.
Optional Inputs:
xgeoim/ygeoim - FITS images used in Drizzle distortion correction
(lookup tables) will be modified to incorporate the DAR correction.
The order of the correction is 1. distortion, 2. DAR.
"""
# Get the size of the image and the half-points
hdr = pyfits.getheader(inputFits)
imgsizeX = int(hdr['NAXIS1'])
imgsizeY = int(hdr['NAXIS2'])
halfX = int(round(imgsizeX / 2.0))
halfY = int(round(imgsizeY / 2.0))
# First get the coefficients
(pa, darCoeffL, darCoeffQ) = nirc2dar(inputFits, instrument=instrument)
#(a, b) = nirc2darPoly(inputFits)
# Create two 1024 arrays (or read in existing ones) for the
# X and Y lookup tables
if ((xgeoim == None) or (xgeoim == '')):
x = np.zeros((imgsizeY, imgsizeX), dtype=float)
else:
x = pyfits.getdata(xgeoim)
if ((ygeoim == None) or (ygeoim == '')):
y = np.zeros((imgsizeY, imgsizeX), dtype=float)
else:
y = pyfits.getdata(ygeoim)
# Get proper header info.
fits = pyfits.open(inputFits)
axisX = np.arange(imgsizeX, dtype=float) - halfX
axisY = np.arange(imgsizeY, dtype=float) - halfY
xcoo2d, ycoo2d = np.meshgrid(axisX, axisY)
xnew1 = xcoo2d + x
ynew1 = ycoo2d + y
# Rotate coordinates clockwise by PA so that zenith is along +ynew2
# PA = parallactic angle (angle from +y to zenith going CCW)
sina = math.sin(pa)
cosa = math.cos(pa)
xnew2 = xnew1 * cosa + ynew1 * sina
ynew2 = -xnew1 * sina + ynew1 * cosa
# Apply DAR correction along the y axis
xnew3 = xnew2
ynew3 = ynew2*(1 + darCoeffL) + ynew2*np.abs(ynew2)*darCoeffQ
# Rotate coordinates counter-clockwise by PA back to original
xnew4 = xnew3 * cosa - ynew3 * sina
ynew4 = xnew3 * sina + ynew3 * cosa
#xnew2 = a[0] + a[1]*xnew1 + a[2]*ynew1 + \
# a[3]*xnew1**2 + a[4]*xnew1*ynew1 + a[5]*ynew1**2
#ynew2 = b[0] + b[1]*xnew1 + b[2]*ynew1 + \
# b[3]*xnew1**2 + b[4]*xnew1*ynew1 + b[5]*ynew1**2
x = xnew4 - xcoo2d
y = ynew4 - ycoo2d
xout = outputRoot + '_x.fits'
yout = outputRoot + '_y.fits'
util.rmall([xout, yout])
fits[0].data = x
fits[0].writeto(xout, output_verify='silentfix')
fits[0].data = y
fits[0].writeto(yout, output_verify='silentfix')
return (xout, yout)
def applyDAR(inputFits, spaceStarlist, plot=False, instrument=instruments.default_inst):
"""
    inputFits: (str) name of the FITS file associated with this starlist
Input a starlist in x=RA (+x = west) and y=Dec (arcseconds) taken from
space and introduce differential atmospheric refraction (DAR). The amount
of DAR that is applied depends on the header information in the input fits
file. The resulting output starlist should contain what was observed
after the starlight passed through the atmosphere, but before the
starlight passed through the telescope. Only achromatic DAR is
applied in this code.
The output file has the name <fitsFile>_acs.lis and is saved to the
current directory.
"""
# Get header info
#hdr = pyfits.getheader(fits)
#effWave = hdr['EFFWAVE']
#elevation = hdr['EL']
#lamda = hdr['CENWAVE']
#airmass = hdr['AIRMASS']
#parang = hdr['PARANG']
#
#date = hdr['DATE-OBS'].split('-')
#year = int(date[0])
#month = int(date[1])
#day = int(date[2])
#utc = hdr['UTC'].split(':')
#hour = int(utc[0])
#minute = int(utc[1])
#second = int(math.floor(float(utc[2])))
#utc = datetime.datetime(year, month, day, hour, minute, second)
#utc2hst = datetime.timedelta(hours=-10)
#hst = utc + utc2hst
#(refA, refB) = keckDARcoeffs(effWave, hst.year, hst.month, hst.day,
# hst.hour, hst.minute)
#tanz = math.tan(math.radians(90.0 - elevation))
#tmp = 1.0 + tanz**2
#darCoeffL = tmp * (refA + 3.0 * refB * tanz**2)
#darCoeffQ = -tmp * (refA*tanz +
# 3.0 * refB * (tanz + 2.0*tanz**3))
# Convert DAR coefficients for use with arcseconds
#darCoeffL *= 1.0
#darCoeffQ *= 1.0 / 206265.0
# Lets determine the zenith and horizon unit vectors for
# this image. The angle we need is simply the parallactic
# (or vertical) angle since ACS images are North Up already.
#pa = math.radians(parang)
#MS: presumably the above code is all replacable with this call (which uses the intrument object
(pa, darCoeffL, darCoeffQ) = nirc2dar(inputFits, instrument=instrument)
##########
#
# Read in the starlist
#
##########
_list = Table.read(spaceStarlist, format='ascii')
cols = list(_list.columns.keys())
names = [_list[ss][0].strip() for ss in range(len(_list))]
mag = _list[cols[1]]
date = _list[cols[2]]
x = _list[cols[3]] # RA in arcsec
y = _list[cols[4]]
xe = _list[cols[5]]
ye = _list[cols[6]]
# Magnify everything in the y (zenith) direction. Do it relative to
# the first star. Even though dR depends on dzObs (ground observed dz),
# it is a small mistake and results in less than a 10 micro-arcsec
# change in dR.
dx = x - x[0]
dy = y - y[0]
# Rotate coordinates CW so that the zenith angle is at +ynew
sina = math.sin(pa)
cosa = math.cos(pa)
xnew1 = dx * cosa + dy * sina
ynew1 = -dx * sina + dy * cosa
# Apply DAR
xnew2 = xnew1
ynew2 = ynew1 * (1.0 - darCoeffL) - ynew1 * np.abs(ynew1) * darCoeffQ
# Rotate coordinates CCW back to original angle
xnew3 = xnew2 * cosa - ynew2 * sina
ynew3 = xnew2 * sina + ynew2 * cosa
xnew = xnew3 + x[0]
ynew = ynew3 + y[0]
##########
#
# Write out the starlist
#
##########
# Save the current directory
newFits = inputFits.replace('.fits', '').split('/')[-1]
newList = newFits + '_acs.lis'
print(newList)
_new = open(newList, 'w')
for i in range(len(names)):
_new.write('%10s %7.3f %7.2f %10.4f %10.4f 0 0 10 1 1 8\n' % \
(names[i], mag[i], date[i], xnew[i], ynew[i]))
_new.close()
if (plot==True):
py.clf()
py.quiver(x, y, xnew - x, ynew - y, scale=0.02)
py.quiver([0], [0], [0.001], [0], color='r', scale=0.02)
py.axis([-5, 5, -5, 5])
py.show()
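# Example usage (illustrative sketch; file names are hypothetical):
# applyDAR('n0123.fits', 'hst_stars.lis', plot=False)
# writes a DAR-applied starlist named '<fitsRoot>_acs.lis' in the current directory.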
def splitAtmosphereCFHT(year):
"""
Take an original archive file containing atmospheric parameters and
split it up into separate files for individual months. This makes
later calls to calculate DAR parameters MUCH faster.
"""
yearStr = str(year)
logDir = module_dir + '/weather/'
logFile = logDir + '/cfht-wx.' + yearStr + '.dat'
_infile = open(logFile, 'r')
outfiles = []
for ii in range(1, 12+1):
monthStr = str(ii).zfill(2)
_month = open(logDir + '/cfht-wx.' +yearStr+ '.' +monthStr+ '.dat', 'w')
outfiles.append( _month )
for line in _infile:
fields = line.split()
month = int(fields[1])
# Check for wrong month number
if not (month > 0 and month <= 12):
continue
_outfile = outfiles[month-1]
_outfile.write(line)
for _month in outfiles:
_month.close()
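# Example usage (illustrative; assumes the cfht-wx archive for that year is present):
# splitAtmosphereCFHT(2008)
# writes one 'cfht-wx.2008.MM.dat' file per month into the weather directory.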
def test_darPlusDistortion():
data_dir = module_dir + '/distortion/'
file_geox_darunfix = data_dir + 'nirc2dist_xgeoim.fits'
file_geoy_darunfix = data_dir + 'nirc2dist_ygeoim.fits'
data_dir = '/u/ghezgroup/data/m92_test/08jul_new_on/'
file_geox_darfix = data_dir + 'reduce/kp/gc_f1/ce0249geo_x.fits'
file_geoy_darfix = data_dir + 'reduce/kp/gc_f1/ce0249geo_y.fits'
xon = pyfits.getdata(file_geox_darfix)
yon = pyfits.getdata(file_geoy_darfix)
xoff = pyfits.getdata(file_geox_darunfix)
yoff = pyfits.getdata(file_geoy_darunfix)
# Make arrays with the coordinates for each
imgsize = 1024
axisX = np.arange(imgsize, dtype=float)
axisY = np.arange(imgsize, dtype=float)
xcoo2d, ycoo2d = np.meshgrid(axisX, axisY)
# Lets trim so that we only keep every 20th pixel
idx = np.arange(25, imgsize, 50)
xon = xon.take(idx, axis=0).take(idx, axis=1)
yon = yon.take(idx, axis=0).take(idx, axis=1)
xoff = xoff.take(idx, axis=0).take(idx, axis=1)
yoff = yoff.take(idx, axis=0).take(idx, axis=1)
xcoo2d = xcoo2d.take(idx, axis=0).take(idx, axis=1)
ycoo2d = ycoo2d.take(idx, axis=0).take(idx, axis=1)
# Calculate differences
xdiff = xon - xoff
ydiff = yon - yoff
# Make vector plots
py.clf()
qvr = py.quiver([xcoo2d], [ycoo2d], [xdiff], [ydiff],
units='width', scale=5,
width=0.005, headwidth=3, headlength=3,
headaxislength=3)
py.quiverkey(qvr, 100, 1120, 1.0, '1 pixel', coordinates='data', color='r')
py.xlabel('NIRC2 X (pixel)')
py.ylabel('NIRC2 Y (pixel)')
py.title('Arrows point to DAR Fix')
#py.savefig('plots/vector_daroffon.png')
py.show()
``` |
{
"source": "jlubbersgeo/general_geochem",
"score": 2
} |
#### File: jlubbersgeo/general_geochem/magmatrace_current.py
```python
"""magmatrace is a python module for doing high temperature geochemistry calculations
"""
# import the dependencies we'll need in these functions:
import numpy as np
import pandas as pd
import warnings
import re
#%% Mixing model related functions
def mixing(c1, c2, f):
"""
mixing creates a mixing model between two endmembers
Inputs:
c1 = concentration of endmember 1
c2 = concentration of endmember 2
f = fraction of endmember 1 in the model
Returns:
cm = concentration of the mixture
"""
cm = c1 * f + c2 * (1 - f)
return cm
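# Example usage (hypothetical end-member concentrations, not from the source):
# mixing(c1=300.0, c2=150.0, f=0.3)  # -> 195.0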
# isotopic mixing model.
# fix this to add in a conditional to choose either one or two ratios
def isomix(rationum, c1, r1, c2, r2, *data):
"""
isomix uses equations 18.24-18.26 from Faure 1998 to calculate isotopic mixing
compositions for a given isotopic pair
Inputs:
rationum: use the input 'oneratio' or 'tworatios' to define how many isotopic
systems you are interested in
c1 = concentration of element for endmember 1
c2 = concentration of element for endmember 2
r1 = isotopic ratio for endmember 1
r2 = isotopic ratio for endmember 2
*data = repeat the first 4 inputs for the second isotopic system of interest
Returns:
cm = concentrations of mixture for various values of 'f' where
f is the fraction of endmember 1 in the mixture
rm = isotopic ratios of mixture for various values of 'f'
"""
# array of fractions of component 1
f = np.linspace(0, 1, 11)
# concentration of the mixture
# eq. 18.19
if rationum == "oneratio":
cm = c1 * f + c2 * (1 - f)
# eq. 18.25
a = (c1 * c2 * (r2 - r1)) / (c1 - c2)
# eq. 18.26
b = (c1 * r1 - c2 * r2) / (c1 - c2)
# eq. 18.24
rm = a / cm + b
return cm, rm
elif rationum == "tworatios":
cm = c1 * f + c2 * (1 - f)
# eq. 18.25
a = (c1 * c2 * (r2 - r1)) / (c1 - c2)
# eq. 18.26
b = (c1 * r1 - c2 * r2) / (c1 - c2)
# eq. 18.24
rm = a / cm + b
cm2 = data[0] * f + data[2] * (1 - f)
# eq 18.25
c = (data[0] * data[2] * (data[3] - data[1])) / (data[0] - data[2])
# eq 18.26
d = (data[0] * data[1] - data[2] * data[3]) / (data[0] - data[2])
rm2 = c / cm2 + d
return cm, rm, cm2, rm2
else:
print(
"Check your input. Ensure to specify rattionum and the correct amount of concentrations or ratios"
)
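# Example usage (hypothetical Sr concentrations and 87Sr/86Sr ratios):
# cm, rm = isomix('oneratio', 500.0, 0.7035, 150.0, 0.7150)
# cm and rm are 11-element arrays spanning f = 0 to 1 in steps of 0.1.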
def ratio_mixing(df, n_components, resolution=0.1):
"""
Mixing of ratios as described by Albarede 1995
Introduction to Geochemical Modeling equation 1.3.1
Inputs:
df | pandas DataFrame
DataFrame of inputs. should be formatted as follows:
For 2 component mixing:
Index|Element1_c|Element1_r|Element2_c|Element2_r
-------------------------------------------------
A | | | |
-------------------------------------------------
B | | | |
For 3 component mixing:
Index|Element1_c|Element1_r|Element2_c|Element2_r
-------------------------------------------------
A | | | |
-------------------------------------------------
B | | | |
-------------------------------------------------
C | | | |
Where the name of each component is the index of the dataframe and the
concentration and ratio columns for each elemental species contain "_c" and "_r"
somewhere in the column header, respectively.
n_components | int
Number of end-member components (either 2 or 3)
resolution | float
The resolution you want to run your mixing model at. This is a number between 0.01
and 0.5. This is how far apart to space points in the eventual mixing mesh
(e.g., 0.1 will return a mixing mesh spaced by 10% increments for each component)
Default is 0.1
Returns:
results | pandas DataFrame
The results of the mixing model that is n x 7 in shape:
f_A|f_B|f_C|Element1_c_mix|Element2_c_mix|Element1_r_mix|Element2_r_mix
-----------------------------------------------------------------------
Where f columns are fraction of each component in the mixture and other columns
Are for the concentrations and ratios of the mixture for each respective combination
of f values
"""
if n_components == 2:
if resolution < 0.01:
print(
"Please pick a lower resolution (e.g., bigger number).\nYou don't need it and it your computer may explode"
)
if resolution > 0.5:
print("Please pick a higher resolution (e.g., number < 0.5). \n")
else:
# generate an array for fraction of each component
f = np.arange(0, 1 + resolution, resolution)
# all possible combinations for three f arrays
a = np.array(np.meshgrid(f, f)).T.reshape(-1, 2)
# where the combinations sum to 1
f_vals = a[a.sum(axis=1) == 1]
# get names of components
components = df.index.tolist()
# get names of columns where concentrations and ratios are held
# IMPORTANT TO HAVE DATAFRAME IN THIS FORMAT
elements = [col for col in df.columns if "_c" in col]
ratios = [col for col in df.columns if "_r" in col]
# Concentration of mixture
if len(elements) == 1:
el1_mix_concentrations = (
df.loc[components[0], elements[0]] * f_vals[:, 0]
+ df.loc[components[1], elements[0]] * f_vals[:, 1]
)
# ratio values of the mixture using Albarede 1995 eq. 1.3.1
el1_mix_ratios = df.loc[components[0], ratios[0]] * (
(f_vals[:, 0] * df.loc[components[0], elements[0]])
/ el1_mix_concentrations
) + df.loc[components[1], ratios[0]] * (
(f_vals[:, 1] * df.loc[components[1], elements[0]])
/ el1_mix_concentrations
)
results = pd.DataFrame(
{
"f_{}".format(components[0]): f_vals[:, 0],
"f_{}".format(components[1]): f_vals[:, 1],
"{}_mix".format(elements[0]): el1_mix_concentrations,
"{}_mix".format(ratios[0]): el1_mix_ratios,
}
)
else:
el1_mix_concentrations = (
df.loc[components[0], elements[0]] * f_vals[:, 0]
+ df.loc[components[1], elements[0]] * f_vals[:, 1]
)
el2_mix_concentrations = (
df.loc[components[0], elements[1]] * f_vals[:, 0]
+ df.loc[components[1], elements[1]] * f_vals[:, 1]
)
# ratio values of the mixture using Albarede 1995 eq. 1.3.1
el1_mix_ratios = df.loc[components[0], ratios[0]] * (
(f_vals[:, 0] * df.loc[components[0], elements[0]])
/ el1_mix_concentrations
) + df.loc[components[1], ratios[0]] * (
(f_vals[:, 1] * df.loc[components[1], elements[0]])
/ el1_mix_concentrations
)
el2_mix_ratios = df.loc[components[0], ratios[1]] * (
(f_vals[:, 0] * df.loc[components[0], elements[1]])
/ el2_mix_concentrations
) + df.loc[components[1], ratios[1]] * (
(f_vals[:, 1] * df.loc[components[1], elements[1]])
/ el2_mix_concentrations
)
results = pd.DataFrame(
{
"f_{}".format(components[0]): f_vals[:, 0],
"f_{}".format(components[1]): f_vals[:, 1],
"{}_mix".format(elements[0]): el1_mix_concentrations,
"{}_mix".format(elements[1]): el2_mix_concentrations,
"{}_mix".format(ratios[0]): el1_mix_ratios,
"{}_mix".format(ratios[1]): el2_mix_ratios,
}
)
if n_components == 3:
if resolution < 0.01:
print(
"Please pick a lower resolution (e.g., bigger number).\nYou don't need it and it your computer may explode"
)
if resolution > 0.5:
print("Please pick a higher resolution (e.g., number < 0.5). \n")
else:
# generate an array for fraction of each component
f = np.arange(0, 1 + resolution, resolution)
# all possible combinations for three f arrays
a = np.array(np.meshgrid(f, f, f)).T.reshape(-1, 3)
# where the combinations sum to 1
f_vals = a[a.sum(axis=1) == 1]
# get names of components
components = df.index.tolist()
# get names of columns where concentrations and ratios are held
# IMPORTANT TO HAVE DATAFRAME IN THIS FORMAT
elements = [col for col in df.columns if "_c" in col]
ratios = [col for col in df.columns if "_r" in col]
if len(elements) == 1:
# Concentration of mixture using basic 3 component mixing
# of concentrations
el1_mix_concentrations = (
df.loc[components[0], elements[0]] * f_vals[:, 0]
+ df.loc[components[1], elements[0]] * f_vals[:, 1]
+ df.loc[components[2], elements[0]] * f_vals[:, 2]
)
# ratio values of the mixture using Albarede 1995 eq. 1.3.1
el1_mix_ratios = (
df.loc[components[0], ratios[0]]
* (
(f_vals[:, 0] * df.loc[components[0], elements[0]])
/ el1_mix_concentrations
)
+ df.loc[components[1], ratios[0]]
* (
(f_vals[:, 1] * df.loc[components[1], elements[0]])
/ el1_mix_concentrations
)
+ df.loc[components[2], ratios[0]]
* (
(f_vals[:, 2] * df.loc[components[2], elements[0]])
/ el1_mix_concentrations
)
)
results = pd.DataFrame(
{
"f_{}".format(components[0]): f_vals[:, 0],
"f_{}".format(components[1]): f_vals[:, 1],
"f_{}".format(components[2]): f_vals[:, 2],
"{}_mix".format(elements[0]): el1_mix_concentrations,
"{}_mix".format(ratios[0]): el1_mix_ratios,
}
)
else:
# Concentration of mixture using basic 3 component mixing
# of concentrations
el1_mix_concentrations = (
df.loc[components[0], elements[0]] * f_vals[:, 0]
+ df.loc[components[1], elements[0]] * f_vals[:, 1]
+ df.loc[components[2], elements[0]] * f_vals[:, 2]
)
el2_mix_concentrations = (
df.loc[components[0], elements[1]] * f_vals[:, 0]
+ df.loc[components[1], elements[1]] * f_vals[:, 1]
+ df.loc[components[2], elements[1]] * f_vals[:, 2]
)
# ratio values of the mixture using Albarede 1995 eq. 1.3.1
el1_mix_ratios = (
df.loc[components[0], ratios[0]]
* (
(f_vals[:, 0] * df.loc[components[0], elements[0]])
/ el1_mix_concentrations
)
+ df.loc[components[1], ratios[0]]
* (
(f_vals[:, 1] * df.loc[components[1], elements[0]])
/ el1_mix_concentrations
)
+ df.loc[components[2], ratios[0]]
* (
(f_vals[:, 2] * df.loc[components[2], elements[0]])
/ el1_mix_concentrations
)
)
el2_mix_ratios = (
df.loc[components[0], ratios[1]]
* (
(f_vals[:, 0] * df.loc[components[0], elements[1]])
/ el2_mix_concentrations
)
+ df.loc[components[1], ratios[1]]
* (
(f_vals[:, 1] * df.loc[components[1], elements[1]])
/ el2_mix_concentrations
)
+ df.loc[components[2], ratios[1]]
* (
(f_vals[:, 2] * df.loc[components[2], elements[1]])
/ el2_mix_concentrations
)
)
results = pd.DataFrame(
{
"f_{}".format(components[0]): f_vals[:, 0],
"f_{}".format(components[1]): f_vals[:, 1],
"f_{}".format(components[2]): f_vals[:, 2],
"{}_mix".format(elements[0]): el1_mix_concentrations,
"{}_mix".format(elements[1]): el2_mix_concentrations,
"{}_mix".format(ratios[0]): el1_mix_ratios,
"{}_mix".format(ratios[1]): el2_mix_ratios,
}
)
return results
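# Example usage (sketch; assumes a DataFrame 'df' laid out as in the docstring,
# with end-members 'A' and 'B' as the index and columns such as 'Sr_c' and 'Sr_r'):
# results = ratio_mixing(df, n_components=2, resolution=0.1)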
def isoassim(modeltype, rationum, r, D, cp, ca, ep, ea, *data):
"""
isoassim uses equation 15B from DePaolo (1981) in order to look at
the evolution of one or two isotopic ratios and their associated trace element
concentrations during combined assimilation and fractionation
Inputs:
modeltype == DePaolo15b; this is the only one currently and is by far the most useful
rationum = the number of isotopic systems you are interested in. List 'oneratio' or 'tworatios'
r = the r value, defined as the rate of fractionation to the rate of assimilation by mass
D = the bulk D value for the first isotopic system
cp = concentration of trace element in parental magma
ca = concentration of trace element in assimilant
ep = isotopic ratio of parental magma
ea = isotopic ratio of assimilant
*data = if you are interested in two ratios as opposed to one then you must input new values
for D through ea for the second isotopic system
"""
# array of fractions of component 1
f = np.linspace(0, 1, 11)
if modeltype == "DePaolo15b" and rationum == "oneratio":
# mix the trace elements
cm = cp * f + ca * (1 - f)
# get the effective distribution coefficient
z = (r + D - 1) / (r - 1)
# calculate the isotopic ratio of the daughter that has undergone assimilation
em = ((r / (r - 1)) * (ca / z) * (1 - f ** (-z)) * ea + cp * f ** (-z) * ep) / (
(r / (r - 1)) * (ca / z) * (1 - f ** (-z)) + (cp * f ** (-z))
)
return cm, em
elif modeltype == "DePaolo15b" and rationum == "tworatios":
# get mixes of both trace elements associated with the isotopic systems of interest
cm = cp * f + ca * (1 - f)
cm2 = data[1] * f + data[2] * (1 - f)
# get the effective distribution coefficents for both isotopic systems
z1 = (r + D - 1) / (r - 1)
z2 = (r + data[0] - 1) / (r - 1)
# calculate the isotopic ratios of the daughter for both systems
em = ((r / (r - 1)) * (ca / z1) * (1 - f ** (-z1)) * ea + cp * f ** (-z1) * ep) / (
(r / (r - 1)) * (ca / z1) * (1 - f ** (-z1)) + (cp * f ** (-z1))
)
em2 = (
(r / (r - 1)) * (data[2] / z2) * (1 - f ** (-z2)) * data[4]
+ data[1] * f ** (-z2) * data[3]
) / ((r / (r - 1)) * (data[2] / z2) * (1 - f ** (-z2)) + (data[1] * f ** (-z2)))
return cm, cm2, em, em2
else:
print(
"You must specify the modeltype as DePaolo15b, number of ratios as one or two, r, D, and/or D2, then your ratios"
)
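# Example usage (hypothetical values for a single isotopic system):
# cm, em = isoassim('DePaolo15b', 'oneratio', 0.3, 0.05, 500.0, 150.0, 0.7035, 0.7150)
# both outputs are arrays evaluated over f = 0 to 1 in steps of 0.1.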
# Equations by Aitcheson & Forrest (1994) used to estimate the degree of assimilation independent of
# Depaolo's (1981) variable r
# equations based on isotopic compositions
def crustfraciso(eq, systems, D, c0m, e0m, em, ea, *data):
"""
This model will give either equation 5 or 7 of the Aitcheson & Forrest (1994) equations that are used for estimating
the fraction of assimilated crust without the requirement of guessing at the r value required for the DePaolo (1981)
equations. The user should be familiar with what needs to be input - in short, this is estimated basement compositions
as the assimilant, measured compositions for the 'daughter', and a thoroughly educated guess at initial magma
composition. An example of this applicability can be seen in Kay et al. (2010).
Inputs:
eq: 'five' for equation five and 'seven' for equation 'seven'.
Equation five is independent of erupted magma composition and degree of melting F
Equation seven is independent of the trace element composition in the assimilant
systems: Up to four isotopic systems can be considered. These are designated by the input 'one', 'two', 'three', or
'four'. There is a caveat to putting in more than one isotopic system explained by the input parameter *data
seen below
D: The bulk partition coefficient of the element associated with the isotopic system of interest in the host magma
c0m: Estimated trace element composition of the element associated with the isotopic system of interest in the
original parent magma.
e0m: Estimated isotopic ratio for the system of interest in the original parent magma.
em: Measured isotopic ratio of the daughter magma that has undergone assimilation.
ea: Estimated isotopic ratio of the assimilant.
*data: If you wish to do more than one isotopic system, you must input values for D through ea in exactly the same
order as defined in the function above
Outputs:
crustfrac 1, 2, 3, or 4 depending on how many isotopic systems you are interested in. This is equivalent to the
value 'rho' in Aitcheson & Forrest (1994)
"""
r = np.linspace(0, 1, 11)
if eq == "five" and systems == "one":
wave = (e0m - em) / (em - ea)
ca = data[0]
gamma = ca / c0m
crustfrac = (r / (r - 1)) * (
(1 + ((wave * (r + D - 1)) / (r * gamma))) ** ((r - 1) / (r + D - 1)) - 1
)
return crustfrac
elif eq == "five" and systems == "two":
wave1 = (e0m - em) / (em - ea)
ca1 = data[0]
gamma1 = ca1 / c0m
crustfrac1 = (r / (r - 1)) * (
(1 + ((wave1 * (r + D - 1)) / (r * gamma1))) ** ((r - 1) / (r + D - 1)) - 1
)
wave2 = (data[3] - data[4]) / (data[4] - data[5])
ca2 = data[6]
gamma2 = ca2 / data[2]
crustfrac2 = (r / (r - 1)) * (
(1 + ((wave2 * (r + data[1] - 1)) / (r * gamma2)))
** ((r - 1) / (r + data[1] - 1))
- 1
)
return crustfrac1, crustfrac2
elif eq == "five" and systems == "three":
wave1 = (e0m - em) / (em - ea)
ca1 = data[0]
gamma1 = ca1 / c0m
crustfrac1 = (r / (r - 1)) * (
(1 + ((wave1 * (r + D - 1)) / (r * gamma1))) ** ((r - 1) / (r + D - 1)) - 1
)
wave2 = (data[3] - data[4]) / (data[4] - data[5])
ca2 = data[6]
gamma2 = ca2 / data[2]
crustfrac2 = (r / (r - 1)) * (
(1 + ((wave2 * (r + data[1] - 1)) / (r * gamma2)))
** ((r - 1) / (r + data[1] - 1))
- 1
)
wave3 = (data[9] - data[10]) / (data[10] - data[11])
ca3 = data[12]
gamma3 = ca3 / data[8]
crustfrac3 = (r / (r - 1)) * (
(1 + ((wave3 * (r + data[7] - 1)) / (r * gamma3)))
** ((r - 1) / (r + data[7] - 1))
- 1
)
return crustfrac1, crustfrac2, crustfrac3
elif eq == "five" and systems == "four":
wave1 = (e0m - em) / (em - ea)
ca1 = data[0]
gamma1 = ca1 / c0m
crustfrac1 = (r / (r - 1)) * (
(1 + ((wave1 * (r + D - 1)) / (r * gamma1))) ** ((r - 1) / (r + D - 1)) - 1
)
wave2 = (data[3] - data[4]) / (data[4] - data[5])
ca2 = data[6]
gamma2 = ca2 / data[2]
crustfrac2 = (r / (r - 1)) * (
(1 + ((wave2 * (r + data[1] - 1)) / (r * gamma2)))
** ((r - 1) / (r + data[1] - 1))
- 1
)
wave3 = (data[9] - data[10]) / (data[10] - data[11])
ca3 = data[12]
gamma3 = ca3 / data[8]
crustfrac3 = (r / (r - 1)) * (
(1 + ((wave3 * (r + data[7] - 1)) / (r * gamma3)))
** ((r - 1) / (r + data[7] - 1))
- 1
)
wave4 = (data[15] - data[16]) / (data[16] - data[17])
ca4 = data[18]
gamma4 = ca4 / data[14]
crustfrac4 = (r / (r - 1)) * (
(1 + ((wave4 * (r + data[13] - 1)) / (r * gamma4)))
** ((r - 1) / (r + data[13] - 1))
- 1
)
return crustfrac1, crustfrac2, crustfrac3, crustfrac4
elif eq == "seven" and systems == "one":
cm = data[0]
crustfrac = (r / (r - 1)) * (
((c0m / cm) * ((ea - e0m) / (ea - em))) ** ((r - 1) / (r + D - 1)) - 1
)
return crustfrac
elif eq == "seven" and systems == "two":
cm1 = data[0]
crustfrac1 = (r / (r - 1)) * (
((c0m / cm1) * ((ea - e0m) / (ea - em))) ** ((r - 1) / (r + D - 1)) - 1
)
cm2 = data[6]
crustfrac2 = (r / (r - 1)) * (
((data[2] / cm2) * ((data[5] - data[3]) / (data[5] - data[4])))
** ((r - 1) / (r + data[1] - 1))
- 1
)
return crustfrac1, crustfrac2
elif eq == "seven" and systems == "three":
cm1 = data[0]
crustfrac1 = (r / (r - 1)) * (
((c0m / cm1) * ((ea - e0m) / (ea - em))) ** ((r - 1) / (r + D - 1)) - 1
)
cm2 = data[6]
crustfrac2 = (r / (r - 1)) * (
((data[2] / cm2) * ((data[5] - data[3]) / (data[5] - data[4])))
** ((r - 1) / (r + data[1] - 1))
- 1
)
cm3 = data[12]
crustfrac3 = (r / (r - 1)) * (
((data[8] / cm3) * ((data[11] - data[9]) / (data[11] - data[10])))
** ((r - 1) / (r + data[7] - 1))
- 1
)
cm4 = data[18]
crustfrac4 = (r / (r - 1)) * (
((data[14] / cm4) * ((data[17] - data[15]) / (data[17] - data[16])))
** ((r - 1) / (r + data[13] - 1))
- 1
)
return crustfrac1, crustfrac2, crustfrac3, crustfrac4
else:
print("Check your input")
# equations independent of the isotopic composition
def crustfracele(systems, D, c0m, cm, ca, *data):
"""
This model gives equation 6 of the Aitcheson & Forrest (1994) equations that are used for estimating
the fraction of assimilated crust without the requirement of guessing at the r value required for the DePaolo (1981)
equations. The user should be familiar with what needs to be input - in short, this is estimated basement compositions
as the assimilant, measured compositions for the 'daughter', and a thoroughly educated guess at initial magma
composition. An example of this applicability can be seen in Kay et al. (2010). This particular equation uses trace
elements only and is independent of isotopic ratios. This equation is best used in combination with the function
crustfraciso.
Inputs:
systems: Up to four systems can be considered. These are designated by the input 'one', 'two', 'three', or
'four'. There is a caveat to putting in more than one isotopic system explained by the input parameter *data
seen below
D: The bulk partition coefficient of the element associated with the isotopic system of interest in the host magma
c0m: Estimated trace element composition of the element associated with the isotopic system of interest in the
original parent magma.
cm: Measured trace element composition of the 'daughter' magma that has undergone assimilation
ca: Estimated trace element composition of the assimilant
*data: If you wish to do more than one isotopic system, you must input values for D through ea in exactly the same
order as defined in the function above
Outputs:
crustfrac 1, 2, 3, or 4 depending on how many systems you are interested in. This is equivalent to the
value 'rho' in Aitcheson & Forrest (1994)
"""
r = np.linspace(0, 1, 11)
if systems == "one":
crustfrac = (r / (r - 1)) * (
((c0m * (r + D - 1) - r * ca) / (cm * (r + D - 1) - r * ca))
** ((r - 1) / (r + D - 1))
- 1
)
return crustfrac
elif systems == "two":
crustfrac1 = (r / (r - 1)) * (
((c0m * (r + D - 1) - r * ca) / (cm * (r + D - 1) - r * ca))
** ((r - 1) / (r + D - 1))
- 1
)
crustfrac2 = (r / (r - 1)) * (
(
(data[1] * (r + data[0] - 1) - r * data[3])
/ (data[2] * (r + data[0] - 1) - r * data[3])
)
** ((r - 1) / (r + data[0] - 1))
- 1
)
return crustfrac1, crustfrac2
elif systems == "three":
crustfrac1 = (r / (r - 1)) * (
((c0m * (r + D - 1) - r * ca) / (cm * (r + D - 1) - r * ca))
** ((r - 1) / (r + D - 1))
- 1
)
crustfrac2 = (r / (r - 1)) * (
(
(data[1] * (r + data[0] - 1) - r * data[3])
/ (data[2] * (r + data[0] - 1) - r * data[3])
)
** ((r - 1) / (r + data[0] - 1))
- 1
)
crustfrac3 = (r / (r - 1)) * (
(
(data[5] * (r + data[4] - 1) - r * data[7])
/ (data[6] * (r + data[4] - 1) - r * data[7])
)
** ((r - 1) / (r + data[4] - 1))
- 1
)
return crustfrac1, crustfrac2, crustfrac3
elif systems == "four":
crustfrac1 = (r / (r - 1)) * (
((c0m * (r + D - 1) - r * ca) / (cm * (r + D - 1) - r * ca))
** ((r - 1) / (r + D - 1))
- 1
)
crustfrac2 = (r / (r - 1)) * (
(
(data[1] * (r + data[0] - 1) - r * data[3])
/ (data[2] * (r + data[0] - 1) - r * data[3])
)
** ((r - 1) / (r + data[0] - 1))
- 1
)
crustfrac3 = (r / (r - 1)) * (
(
(data[5] * (r + data[4] - 1) - r * data[7])
/ (data[6] * (r + data[4] - 1) - r * data[7])
)
** ((r - 1) / (r + data[4] - 1))
- 1
)
crustfrac4 = (r / (r - 1)) * (
(
(data[9] * (r + data[8] - 1) - r * data[11])
/ (data[10] * (r + data[8] - 1) - r * data[11])
)
** ((r - 1) / (r + data[8] - 1))
- 1
)
return crustfrac1, crustfrac2, crustfrac3, crustfrac4
else:
print("Check your input")
#%% Thermometry related functions
def plag_kd_calc(element, An, temp, method):
"""
calculates the partition coefficient for a given element in plagioclase based on its anorthite
content according to the Arrhenius relationship as originally defined by Blundy and Wood (1991)
This function gives the user an option of three experimental papers to choose from when calculating
partition coefficient:
Bindeman et al., 1998 = ['Li','Be','B','F','Na','Mg','Al','Si','P','Cl','K','Ca','Sc',
'Ti','Cr','Fe','Co','Rb','Sr','Zr','Ba','Y','La','Ce','Pr','Nd','Sm','Eu','Pb']
Nielsen et al., 2017 = ['Mg','Ti','Sr','Y','Zr','Ba','La','Ce','Pr','Nd','Pb']
Tepley et al., 2010 = ['Sr','Rb','Ba','Pb','La','Nd','Sm','Zr','Th','Ti']
Inputs:
-------
element : string
The element you are trying to calculate the partition coefficient for. See Bindeman 1998 for supported
elements
An : array-like
Anorthite content (between 0 and 1) of the plagioclase. This can be a scalar value or Numpy array
temp: scalar
Temperature in Kelvin to calculate the partition coefficient at
method : string
choice of 'Bindeman', 'Nielsen', 'Tepley'. This uses then uses the Arrhenius parameters from
Bindeman et al., 1998, Nielsen et al., 2017, or Tepley et al., 2010, respectively.
Returns:
--------
kd_mean : array-like
the mean partition coefficient for the inputs listed
kd_std : array-like
standard deviation of the partition coefficient calculated via
Monte Carlo simulation of 1000 normally distributed random A and B
parameters based on their mean and uncertainties
"""
if method == "Bindeman":
# Table 4 from Bindeman et al 1998
elements = [
"Li",
"Be",
"B",
"F",
"Na",
"Mg",
"Al",
"Si",
"P",
"Cl",
"K",
"Ca",
"Sc",
"Ti",
"Cr",
"Fe",
"Co",
"Rb",
"Sr",
"Zr",
"Ba",
"Y",
"La",
"Ce",
"Pr",
"Nd",
"Sm",
"Eu",
"Pb",
]
a = (
np.array(
[
-6.9,
28.2,
-0.61,
-37.8,
-9.4,
-26.1,
-0.3,
-2,
-30.7,
-24.5,
-25.5,
-15.2,
-94.2,
-28.9,
-44,
-35.2,
-59.9,
-40,
-30.4,
-90.4,
-55,
-48.1,
-10.8,
-17.5,
-22.5,
-19.9,
-25.7,
-15.1,
-60.5,
]
)
* 1e3
)
a_unc = (
np.array(
[
1.9,
6.1,
0.5,
11.5,
1,
1.1,
0.8,
0.2,
4.6,
9.5,
1.2,
0.6,
28.3,
1.5,
6.3,
1.9,
10.8,
6.7,
1.1,
5.5,
2.4,
3.7,
2.6,
2.3,
4.1,
3.6,
6.3,
16.1,
11.8,
]
)
* 1e3
)
b = (
np.array(
[
-12.1,
-29.5,
9.9,
23.6,
2.1,
-25.7,
5.7,
-0.04,
-12.1,
11,
-10.2,
17.9,
37.4,
-15.4,
-9.3,
4.5,
12.2,
-15.1,
28.5,
-15.3,
19.1,
-3.4,
-12.4,
-12.4,
-9.3,
-9.4,
-7.7,
-14.2,
25.3,
]
)
* 1e3
)
b_unc = (
np.array(
[
1,
4.1,
3.8,
7.1,
0.5,
0.7,
0.4,
0.08,
2.9,
5.3,
0.7,
0.3,
18.4,
1,
4.1,
1.1,
7,
3.8,
0.7,
3.6,
1.3,
1.9,
1.8,
1.4,
2.7,
2.0,
3.9,
11.3,
7.8,
]
)
* 1e3
)
plag_kd_params = pd.DataFrame(
[a, a_unc, b, b_unc], columns=elements, index=["a", "a_unc", "b", "b_unc"]
)
R = 8.314
elif method == "Nielsen":
elements = ["Mg", "Ti", "Sr", "Y", "Zr", "Ba", "La", "Ce", "Pr", "Nd", "Pb"]
a = (
np.array([-10, -32.5, -25, -65.7, -25, -35.1, -32, -33.6, -29, -31, -50])
* 1e3
)
a_unc = np.array([3.3, 1.5, 1.1, 3.7, 5.5, 4.5, 2.9, 2.3, 4.1, 3.6, 11.8]) * 1e3
b = np.array([-35, -15.1, 25.5, 2.2, -50, 10, -5, -6.8, 8.7, -8.9, 22.3]) * 1e3
b_unc = np.array([2.1, 1, 0.7, 1.9, 3.6, 2.4, 2.3, 1.4, 2.7, 2.0, 7.8]) * 1e3
plag_kd_params = pd.DataFrame(
[a, a_unc, b, b_unc], columns=elements, index=["a", "a_unc", "b", "b_unc"]
)
R = 8.314
elif method == "Tepley":
elements = ["Sr", "Rb", "Ba", "Pb", "La", "Nd", "Sm", "Zr", "Th", "Ti"]
a = (
np.array(
[-50.18, -35.7, -78.6, -13.2, -93.7, -84.3, -108.0, -70.9, -58.1, -30.9]
)
* 1e3
)
a_unc = (
np.array([6.88, 13.8, 16.1, 44.4, 12.2, 8.1, 17.54, 58.2, 35.5, 8.6]) * 1e3
)
b = np.array(
# a comma is assumed between 35372 and -7042 below (it appears to have been dropped)
[44453, -20871, 41618, -15761, 37900, 24365, 35372, -7042, -60465, -14204]
)
# the trailing uncertainties appear to have lost their separating commas; the split
# below is an assumption chosen so that b_unc has one entry per supported element
b_unc = np.array([1303, 2437, 2964, 5484, 2319, 1492, 3106, 1018, 8607, 3493])
plag_kd_params = pd.DataFrame(
[a, a_unc, b, b_unc], columns=elements, index=["a", "a_unc", "b", "b_unc"]
)
if np.percentile(An, q=50) < 0.6:
warnings.warn(
"Over half your An values are significantly below the calibration range in Tepley et al., (2010)"
"and most likely will produce partition coefficient values that are significantly overestimated",
stacklevel=2,
)
R = 8.314
if element in elements:
a = np.random.normal(
plag_kd_params[element].a, plag_kd_params[element].a_unc, 1000
)
b = np.random.normal(
plag_kd_params[element].b, plag_kd_params[element].b_unc, 1000
)
kds = np.exp((a[:, np.newaxis] * An + b[:, np.newaxis]) / (R * temp))
kd_mean = np.mean(kds, axis=0)
kd_std = np.std(kds, axis=0)
else:
raise Exception(
"The element you have selected is not supported by this function. Please choose another one"
)
return kd_mean, kd_std
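# Example usage (hypothetical An contents and temperature):
# An = np.array([0.45, 0.55, 0.65])
# kd_mean, kd_std = plag_kd_calc('Sr', An, temp=1100.0 + 273.15, method='Bindeman')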
def amp_kd_calc(amph_sites_ff, element):
"""
amp_kd_calc calculates the partition coefficient for a specified trace element
that is in equilibrium with a given amphibole composition according to
Humphreys et al., 2019.
supported elements = ['Rb','Sr','Pb','Zr','Nb','La','Ce','Nd','Sm',
'Eu','Gd','Dy','Ho','Yb','Lu','Y']
Parameters
----------
amph_sites_ff : pandas DataFrame
Amphibole site allocations that incorporate ferric ferrous iron.
This should be the output from the get_amp_sites_ferric_ferrous function
element : string
The element you want to calculate the partition coefficient for
Raises
------
Exception
If you do not choose a supported element from Humphreys et al., 2019
an error will be thrown prompting you to choose a supported element
Returns
-------
aem_kd : array-like
partition coefficient between amphibole and its equilibrium melt
aem_kd_se : scalar
the one sigma uncertainty on your partition coefficient taken from
table 2 in Humphreys et al., 2019
"""
# Building table 2 from Humphreys et al 2019
elements = [
"Rb",
"Sr",
"Pb",
"Zr",
"Nb",
"La",
"Ce",
"Nd",
"Sm",
"Eu",
"Gd",
"Dy",
"Ho",
"Yb",
"Lu",
"Y",
]
constants = np.array(
[
9.1868,
3.41585,
-4.2533,
-25.6167,
-22.27,
-20.0493,
-21.1078,
-20.3082,
-11.3625,
-35.6604,
-19.0583,
-16.0687,
-20.4148,
-15.8659,
-19.3462,
-36.2514,
]
)
si = np.array(
[
-1.3898,
-0.75281,
0,
2.6183,
2.3241,
2.0732,
2.4749,
2.5162,
1.6002,
4.1452,
2.4417,
2.3858,
2.3654,
2.281,
2.1142,
3.6078,
]
)
al = np.array([0, 0, 2.715, 2.6867, 0, 0, 0, 0, 0, 2.6886, 0, 0, 0, 0, 0, 3.78])
ti = np.array(
[
-3.6797,
0,
1.69,
4.838,
3.7633,
2.5498,
2.4717,
2.5863,
0,
6.4057,
1.9786,
1.8255,
2.484,
1.5905,
2.8478,
7.513,
]
)
fe3 = np.array(
[
-1.5769,
0,
0.7065,
2.6591,
2.9786,
1.5317,
1.5722,
1.9459,
1.2898,
3.8508,
1.8765,
1.9741,
3.2601,
2.1534,
2.7011,
4.8366,
]
)
fe2 = np.array(
[
-0.6938,
0.36529,
0,
0.6536,
1.44,
1.117,
0.952,
0.9566,
1.2376,
0.7255,
0.9943,
0.6922,
1.2922,
0.7867,
1.0402,
0.814,
]
)
ca = np.array(
[
0,
0,
0,
2.5248,
1.8719,
2.2771,
1.5311,
1.2763,
0,
3.0679,
1.3577,
0,
3.1762,
0,
2.9625,
4.60,
]
)
naa = np.array(
[0, 0, -1.0433, 0, 0, -1.4576, 0, 0, 0, 0, 0, 0, -4.9224, 0, -3.2356, 0]
)
se = np.array(
[
0.29,
0.19,
0.23,
0.49,
0.45,
0.34,
0.32,
0.36,
0.43,
0.37,
0.4,
0.33,
0.4,
0.43,
0.39,
0.32,
]
)
columns = [
"element",
"constant",
"Si",
"Al_vi",
"Ti",
"Fe3",
"Fe2",
"Ca",
"Na_A",
"se",
]
aem_params = pd.DataFrame(
dict(
constant=constants,
Si=si,
Al_vi=al,
Ti=ti,
Fe3=fe3,
Fe2=fe2,
Ca=ca,
Na_a=naa,
SE=se,
),
index=elements,
)
if element in elements:
aem_kd = np.exp(
aem_params.loc[element].constant
+ (aem_params.loc[element].Si * amph_sites_ff["Si_T"])
+ (aem_params.loc[element].Al_vi * amph_sites_ff["Al_D"])
+ (aem_params.loc[element].Ti * amph_sites_ff["Ti_D"])
+ (aem_params.loc[element].Fe3 * amph_sites_ff["Fe3_D"])
+ (
aem_params.loc[element].Fe2
* (amph_sites_ff["Fe2_C"] + amph_sites_ff["Fe2_B"])
)
+ (aem_params.loc[element].Ca * amph_sites_ff["Ca_B"])
+ (aem_params.loc[element].Na_a * amph_sites_ff["Na_A"])
)
aem_kd_se = aem_params.loc[element].SE
else:
raise Exception(
"The element you have selected is not supported by this function. Please choose another one"
)
return aem_kd, aem_kd_se
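# Example usage (sketch; assumes 'amph_sites_ff' is the DataFrame returned by the
# get_amp_sites_ferric_ferrous function referenced in the docstring):
# rb_kd, rb_kd_se = amp_kd_calc(amph_sites_ff, 'Rb')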
# function to calculate zr saturation temperature
def t_zr_sat(M, zrmelt, model):
"""
t_zr_sat calculates the zircon saturation temperature using
the relationships found in both Watson and Harrison 1983 as
well as Boehnke et al., 2013
Inputs:
M = (Na + K + 2Ca)/(Al*Si) in normalized cation fraction
zrmelt = concentration of Zr in the melt
model = 'watson' or 'boehnke'. This will govern the equation used
to calculate zircon saturation temperature based on the equations
from watson and harrison 1983 or boehnke et al., 2013, respectively
Returns:
t = zircon saturation temperature for the chosen model
BOTH TEMPERATURES ARE IN DEGREES CELSIUS
"""
if model == "watson":
t = 12900 / (2.95 + 0.85 * M + np.log(496000 / zrmelt)) - 273.15
elif model == "boehnke":
t = 10108 / (0.32 + 1.16 * M + np.log(496000 / zrmelt)) - 273.15
return t
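# Example usage (hypothetical melt values, M = 1.3 and Zr = 100 ppm):
# t_zr_sat(1.3, 100.0, model='watson')   # roughly 750 C
# t_zr_sat(1.3, 100.0, model='boehnke')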
# titanium in quartz thermometry
def titaniq(Ti, P):
"""
titaniq calculates the quartz crystallization temperature based on a known
concentration of titanium in quartz and pressure of crystallization. This is
based on the work of Huang and Audétat (2012).
Inputs:
Ti = array-like concentration of Ti in quartz (ppm)
P = array-like pressure of crystallization (kbar)
Returns:
temp = array-like temperature of quartz crystallization (C)
"""
temp = ((-2794.3 - (660.53 * P ** 0.35)) / (np.log10(Ti) - 5.6459)) - 273.15
return temp
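# Example usage (hypothetical Ti concentration and pressure):
# titaniq(Ti=np.array([50.0]), P=np.array([2.0]))   # roughly 650 C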
#%% melting and crystallization related functions
# partition coefficient
def kd(Cs, Cl):
"""
kd calculates a partition coefficient for a given set of measurements. For
igneous petrology, this is commonly the concentration of a trace element in
the mineral divided by the concentration of the same trace element in the
melt (e.g. Rollinson 1993 Eq. 4.3)
Inputs:
Cs = concentration in the mineral
Cl = concentration in the melt
Returns:
kd = partition coefficient for the given input parameters
"""
kd = Cs / Cl
return kd
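# Example usage: 10 ppm in the mineral and 50 ppm in the melt (hypothetical values):
# kd(Cs=10.0, Cl=50.0)  # -> 0.2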
# Distribution coefficient
def bulk_kd(kds, f_s):
"""
bulk_kd generates a distribution coefficient that is the weighted sum of
partition coefficients for an element in a given mineral assemblage.
Based off Rollinson 1993 Eq. 4.5
Parameters
----------
kds : array-like
the individual partition coefficients of the mineral assemblage
f_s : array-like
the individual fractions of each mineral in the overall assemblage
between 0 and 1
Returns
-------
bulk_kd : the bulk partition coefficient for a given trace element for
the mineral assemblage
"""
D = np.sum(kds * f_s)
return D
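# Example usage (hypothetical two-mineral assemblage, 50:50 by weight):
# bulk_kd(np.array([2.0, 0.1]), np.array([0.5, 0.5]))  # -> 1.05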
# melting equations
def non_modal_batch_melt(Co, Do, F, P):
"""
non_modal_batch_melt calculates the concentration of a given trace element in a melt produced from non modal
batch melting of a source rock as described by Shaw (1970) equation 15.
Inputs:
Co = Concentration of trace element in the original solid
Do = Bulk distribution coefficient for element when F = 0
F = Fraction of original solid melted (fraction of melt)
P = Bulk distribution coefficient of the melting mineral assemblage
Returns:
Cl = concentration in the newly formed liquid
Note: if Do and P are the same, then you effectively have modal batch melting
"""
Cl = Co * (1 / (F * (1 - P) + Do))
return Cl
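# Example usage (hypothetical values): 10% batch melting of a source with
# Co = 10, Do = 0.1, P = 0.1 gives an enriched liquid:
# non_modal_batch_melt(Co=10.0, Do=0.1, F=0.1, P=0.1)  # -> ~52.6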
def non_modal_frac_melt(Co, Do, F, P):
"""
non_modal_frac_melt calculates the composition of a trace element in a melt produced from non modal
fractional melting of a source rock as described by Rollinson 1993 Eq. 4.13 and 4.14.
Inputs:
Co = Concentration of trace element in the original solid
Do = Bulk distribution coefficient for element when F = 0
F = Fraction of original solid melted (fraction of melt)
P = Bulk distribution coefficient of melting mineral assemblage
Returns:
Cl = concentration in the extracted liquid. This is different from the
concentration of the instantaneous liquid.
Note: if Do and P are the same, then you effectively have modal fractional melting
"""
Cl = (Co / F) * (1 - (1 - F * (P / Do)) ** (1 / P))
return Cl
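# Example usage (hypothetical values, same source as the batch example above):
# non_modal_frac_melt(Co=10.0, Do=0.1, F=0.1, P=0.1)  # -> ~65.1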
# dynamic melting
def non_modal_dynamic_melt(Co, Do, F, P, phi):
"""
non_modal_dynamic_melt calculates the concentration of a liquid extracted via
dynamic melting as described in McKenzie (1985) and Zou (2007) Eq. 3.18. This is
applicable for a situation in which melt is in equilibrium when the melt fraction
is below a critical value and then fractional when it is above that value.
Parameters
----------
Co : array-like
Concentration of trace element in original solid
Do : array-like
Bulk distribution coefficient for element when F = 0
F : array-like
fraction of original solid melted (fraction of melt)
P : array-like
Bulk distribution coefficient of melting mineral assemblage
phi : array-like
critical mass porosity of residue
Returns
-------
Cl : array-like
Concentration of trace element in the liquid
"""
X = (F - phi) / (1 - phi)
Cl = (Co / X) * (
1
- (1 - ((X * (P + phi * (1 - P))) / (Do + phi * (1 - P))))
** (1 / (phi + (1 - phi) * P))
)
return Cl
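# Example usage (hypothetical values; phi is the critical mass porosity of the residue):
# non_modal_dynamic_melt(Co=10.0, Do=0.1, F=0.1, P=0.1, phi=0.01)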
# crystallization equations
def eq_xtl(
Cl, D, F,
):
"""
eq_xtl calculates the composition of a trace element in the remaining liquid after a certain amount of
crystallization has occurred from a source melt when the crystal remains in equilibrium with the melt
as described by White (2013) Chapter 7 eq. 7.81.
Inputs:
Cl = concentration of trace element in original liquid
D = bulk distribution coefficient for trace element of crystallizing assemblage
F = fraction of melt remaining
Returns:
Cl_new = concentration of trace element in the remaining liquid
"""
Cl_new = Cl / (D + F * (1 - D))
return Cl_new
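# Example usage: 50% equilibrium crystallization of an incompatible element (hypothetical D = 0.1):
# eq_xtl(Cl=100.0, D=0.1, F=0.5)  # -> ~181.8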
# fractional crystallization
def frac_xtl(
Cl, D, F,
):
"""
frac_xtl calculates the composition of a trace element in the remaining liquid after a certain amount of
crystallization has occurred from a source melt when the crystal is removed from being in equilibrium with
the melt as described by White (2013) Chapter 7 eq. 7.82. It also calculates the
concentration of the trace element in the mean cumulate assemblage as described by Rollinson 1993 Eq. 4.20
Inputs:
Cl = concentration of trace element in original liquid
D = bulk distribution coefficient for trace element of crystallizing assemblage
F = fraction of melt remaining
Returns:
Cl_new = concentration of trace element in the remaining liquid
Cr = concentration in the cumulate
"""
Cl_new = Cl * (F) ** (D - 1)
Cr = Cl * ((1 - F ** D) / (1 - F))
return Cl_new, Cr
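# Example usage: 50% fractional crystallization of an incompatible element (hypothetical D = 0.1):
# Cl_new, Cr = frac_xtl(Cl=100.0, D=0.1, F=0.5)  # -> ~186.6 in the liquid, ~13.4 in the cumulate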
# in situ crystallization
def insitu_xtl(Cl, D, F, f, fa):
"""
insitu_xtl calculates the concentration of the remaining melt as described
in Langmuir (1989) and Rollinson 1993 Eq. 4.21 whereby crystallization
predominantly takes place at the sidewalls of a magma reservoir. Rather than
crystals being extracted from the liquid, liquid is extracted from a sidewall
'mush' in situ. The solidification zone progressively moves through the magma
chamber until crystallization is complete. In general this results in less
enrichment of incompatible elements and less depletion of compatible elements
than fractional crystallization
Parameters
----------
Cl : array-like
concentration of trace element in original liquid
D : array-like
bulk partition coefficient of the crystallizing assemblage
F : array-like
fraction of melt remaining (between >0 and 1). If 0 is in this array,
error message will be thrown because python does not do division by 0
f : array-like
the fraction of interstitial liquid remaining after crystallization
within the solidification zone. It is assumed that some of this is
trapped in the cumulate (ft) and some is returned to the magma (fa).
therefore f = ft + fa
fa : fraction of interstitial liquid that returns to the magma.f = fa would
be an example where there is no interstital liquid in the crystallization
front
Returns
-------
Cl_new : array like
concentration of extracted liquid from crystallization front
"""
E = 1.0 / (D * (1.0 - f) + f)
Cl_new = Cl * (F ** ((fa * (E - 1)) / (fa - 1)))
return Cl_new
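# Example usage (hypothetical values; keep F above 0 to avoid division by zero):
# F = np.linspace(0.99, 0.5, 50)
# Cl_new = insitu_xtl(Cl=100.0, D=0.1, F=F, f=0.3, fa=0.2)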
def fraclin_xtl(Cl, a, b, F):
"""
fraclin_xtl calculates the composition of the liquid remaining after it
has experienced fractional crystallization where the distribution coefficient
varies linearly with melt fraction. This was originally described by
Greenland 1970.
Parameters
----------
Cl : array-like
concentration of the trace element in the original liquid
a : array-like
intercept of the relationship describing the linear change in D with melt
fraction
b : array-like
slope of the relationship describing the linear change in D with
melt fraction
F : array-like
fraction of melt remaining (between 0 and 1).
Returns
-------
Cl_new : array-like
concentration of the trace element in the remaining liquid
"""
Cl_new = Cl * np.exp((a - 1) * np.log(F) + b * (F - 1))
return Cl_new
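# Example usage (hypothetical linear D-F relationship with intercept a and slope b):
# fraclin_xtl(Cl=100.0, a=0.5, b=0.2, F=0.5)  # -> ~128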
#%% General mineral recalculation.
def mineral_formula_calc(df, n_oxygens, mineral, normalized,index):
"""
mineral_formula_calc is a function that calculates the stoichiometry for a mineral based on a set of major
element oxide analyses as described by Deer et al., 1966 Appendix 1
Inputs:
df : pandas dataframe object of major element analyses. Column headers must have the the element somewhere in the name
** if a column containing 'Total' in the name exists, it will be removed so that only the individual analyses are
present
** your dataframe should have a column that pertains to sample, analysis number, etc. This will be set as the index
of the dataframe so that chemical formulas can be accessed easily upon calculation
EXAMPLE OF INPUT DATAFRAME:
|sample|SiO2|TiO2|Al2O3|Cr2O3|FeO|BaO|SrO|MnO|CaO|Na2O|K2O|NiO|Total| <---- currently supported elements
n_oxygens : number of ideal oxygens in the chemical formula (e.g., for feldspars this would be 8)
mineral : 'feldspar','olivine','pyroxene'
if 'pyroxene' is chosen, the function will calculate the proportions of Fe2+ and Fe3+ based off stoichiometry and charge
balance as described by Droop 1987. If 'feldspar', all Fe is assumed to be Fe3+. If 'olivine', all Fe is assumed to be 2+
normalized: boolean
if True, will normalize your geochemical analyses. If false, mineral formulas will be calculated using
raw geochemical data
index: string
column denoting which column to be used as the index for the dataframe. Suggested that this is a column that
denotes sample name or spot name or something similar
Returns:
norm_cations: pandas dataframe object that contains the calculated number of cations in the chemical formula
normalized to the amount of ideal oxygens specified by 'n_oxygens'.
"""
data = df.copy()
data.set_index(index,inplace = True)
data.fillna(0, inplace=True)
# if index is not None:
# data.set_index(index,inplace = True)
# else:
# data.index = np
# Removes the 'total column' from the list
columns = [column for column in data.columns if "Total" not in column]
elements = []
# can make this a delimiter variable for the user to choose from
# dropping anything after the underscore
for column in columns:
if "Si" in column:
elements.append(column.split("_")[0])
if "Ti" in column:
elements.append(column.split("_")[0])
if "Al" in column:
elements.append(column.split("_")[0])
if "Cr" in column:
elements.append(column.split("_")[0])
if "Fe" in column:
elements.append(column.split("_")[0])
if "Ba" in column:
elements.append(column.split("_")[0])
if "Sr" in column:
elements.append(column.split("_")[0])
if "Mn" in column:
elements.append(column.split("_")[0])
if "Mg" in column:
elements.append(column.split("_")[0])
if "Na" in column:
elements.append(column.split("_")[0])
if "K" in column:
elements.append(column.split("_")[0])
if "Ca" in column:
elements.append(column.split("_")[0])
if "Ni" in column:
elements.append(column.split("_")[0])
if "Cl" in column:
elements.append(column.split("_")[0])
if "P2O5" in column:
elements.append(column.split("_")[0])
# create new dataframe that is just the analyses without the total
oxides = data.loc[:, columns]
oxides.columns = elements
if normalized == True:
# normalize the wt%
oxides_normalized = 100 * (oxides.div(oxides.sum(axis="columns"), axis="rows"))
elif normalized == False:
oxides_normalized = oxides.copy()
# create an array filled with zeros such that it is the same shape of our input
# data
mol_cations = np.zeros(oxides_normalized.shape)
# these loops are saying that: for each element in my list of elements (e.g., columns)
# check to see if the given string (e.g., Si) is in it. If it is, then populate that column
# of the array with the appropriate math
# Here we call on the mendeleev package module 'element' to get the mass from a given element
# e.g.(el(element).mass)
for i, element in zip(range(len(elements)), elements):
if "Si" in element:
mol_cations[:, i] = oxides_normalized[element] / (28.09 + (16 * 2))
elif "Ti" in element:
mol_cations[:, i] = oxides_normalized[element] / (47.87 + (16 * 2))
elif "Al" in element:
mol_cations[:, i] = (2 * oxides_normalized[element]) / (
(26.98 * 2) + (16 * 3)
)
elif "Cr" in element:
mol_cations[:, i] = (2 * oxides_normalized[element]) / ((52 * 2) + (16 * 3))
elif "Fe" in element:
mol_cations[:, i] = oxides_normalized[element] / (55.85 + 16)
elif "Ba" in element:
mol_cations[:, i] = oxides_normalized[element] / (137.33 + 16)
elif "Sr" in element:
mol_cations[:, i] = oxides_normalized[element] / (87.62 + 16)
elif "Mn" in element:
mol_cations[:, i] = oxides_normalized[element] / (54.94 + 16)
elif "Mg" in element:
mol_cations[:, i] = oxides_normalized[element] / (24.31 + 16)
elif "Ca" in element:
mol_cations[:, i] = oxides_normalized[element] / (40.08 + 16)
elif "Na" in element:
mol_cations[:, i] = (2 * oxides_normalized[element]) / ((23 * 2) + 16)
elif "K" in element:
mol_cations[:, i] = (2 * oxides_normalized[element]) / ((39.1 * 2) + 16)
elif "Ni" in element:
mol_cations[:, i] = oxides_normalized[element] / (58.69 + 16)
mol_cations = pd.DataFrame(mol_cations, columns=elements)
# Calculating the number of oxygens per cation in the formula
mol_oxygens = np.zeros(mol_cations.shape)
for i, element in zip(range(len(elements)), elements):
if "Si" in element:
mol_oxygens[:, i] = mol_cations[element] * 2
elif "Ti" in element:
mol_oxygens[:, i] = mol_cations[element] * 2
elif "Al" in element:
mol_oxygens[:, i] = mol_cations[element] * (3 / 2)
elif "Cr" in element:
mol_oxygens[:, i] = mol_cations[element] * (3 / 2)
elif "Fe" in element:
mol_oxygens[:, i] = mol_cations[element] * 1
elif "Ba" in element:
mol_oxygens[:, i] = mol_cations[element] * 1
elif "Sr" in element:
mol_oxygens[:, i] = mol_cations[element] * 1
elif "Mn" in element:
mol_oxygens[:, i] = mol_cations[element] * 1
elif "Mg" in element:
mol_oxygens[:, i] = mol_cations[element] * 1
elif "Ca" in element:
mol_oxygens[:, i] = mol_cations[element] * 1
elif "Na" in element:
mol_oxygens[:, i] = mol_cations[element] * (1 / 2)
elif "K" in element:
mol_oxygens[:, i] = mol_cations[element] * (1 / 2)
elif "Ni" in element:
mol_oxygens[:, i] = mol_cations[element] * 1
mol_oxygens = pd.DataFrame(mol_oxygens, columns=elements)
# number of oxygens per cation, normalized to the ideal number of oxygens specified above
norm_oxygens = (mol_oxygens * n_oxygens).div(
mol_oxygens.sum(axis="columns"), axis="rows"
)
# calculate the mole cations of each oxide normalized to the number of ideal oxygens
norm_cations = np.zeros(norm_oxygens.shape)
for i, element in zip(range(len(elements)), elements):
if "Si" in element:
norm_cations[:, i] = norm_oxygens[element] / 2
elif "Ti" in element:
norm_cations[:, i] = norm_oxygens[element] / 2
elif "Al" in element:
norm_cations[:, i] = norm_oxygens[element] / (3 / 2)
elif "Cr" in element:
norm_cations[:, i] = norm_oxygens[element] / (3 / 2)
elif "Fe" in element:
norm_cations[:, i] = norm_oxygens[element]
elif "Ba" in element:
norm_cations[:, i] = norm_oxygens[element]
elif "Sr" in element:
norm_cations[:, i] = norm_oxygens[element]
elif "Mn" in element:
norm_cations[:, i] = norm_oxygens[element]
elif "Mg" in element:
norm_cations[:, i] = norm_oxygens[element]
elif "Ca" in element:
norm_cations[:, i] = norm_oxygens[element]
elif "Na" in element:
norm_cations[:, i] = norm_oxygens[element] / (1 / 2)
elif "K" in element:
norm_cations[:, i] = norm_oxygens[element] / (1 / 2)
elif "Ni" in element:
norm_cations[:, i] = norm_oxygens[element]
cations = []
# Get the cations by taking the first two characters
[cations.append(element[:2]) for element in elements]
# since some elements are only one letter (e.g., K) this
# strips the number from it
r = re.compile("([a-zA-Z]+)([0-9]+)")
for i in range(len(cations)):
m = r.match(cations[i])
if m != None:
cations[i] = m.group(1)
norm_cations = pd.DataFrame(norm_cations,columns = cations)
norm_cations['Total_cations'] = norm_cations.sum(axis = 'columns')
norm_cations[data.index.name] = data.index.tolist()
if mineral == "pyroxene":
# ideal cations
T = 4
# calculated cations based on oxide measurements
# S = norm_cations['Total_cations']
# step 2 and 3 from Droop 1987
norm_cations.loc[norm_cations["Total_cations"] > T, "Fe_3"] = (
2 * n_oxygens * (1 - (T / norm_cations["Total_cations"]))
)
norm_cations.loc[norm_cations["Total_cations"] <= T, "Fe_3"] = 0
# step 4 from Droop 1987
norm_cations.set_index(data.index.name, inplace=True)
ts = T / norm_cations["Total_cations"].to_numpy()
norm_cations = norm_cations * ts[:, np.newaxis]
norm_cations["Fe_2"] = norm_cations["Fe"] - norm_cations["Fe_3"]
else:
norm_cations.set_index(data.index.name, inplace=True)
return norm_cations
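# Example usage (sketch; assumes 'feldspar_df' holds oxide wt% analyses with a 'sample'
# column, formatted as shown in the docstring):
# cations = mineral_formula_calc(feldspar_df, n_oxygens=8, mineral='feldspar',
#                                normalized=True, index='sample')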
def hb_plag_amph_temp(plag_cations, amp_sites_fe, P, thermometer):
"""
hb_plag_amph_temp uses the Holland and Blundy (1994) equations to calculate
temperatures of formation for plagioclase - amphibole pairs.
Thermometer A: for use in assemblages where plagiocalse and amphibole are
co crystallizing with quartz
Thermometer B: for use in assemblages where plagioclase and amphibole are
crystallizing without quartz
Inputs:
plag_cations : pandas DataFrame
a dataframe consisting of plagioclase cation values.
amp_sites_fe : pandas DataFrame
a dataframe consisting of ideal site assignments that includes ferric and ferrous
iron. This is the output from the "get_amp_sites_ferric_ferrous" function and does
not need any tweaking
P : scalar
Pressure of formation in kbar
thermometer : string
Which thermometer you would like to use: Either "A" or "B"
Returns:
T_df: pandas DataFrame
dataframe of temperature calculation results for each grain in the input dataframes.
Where there are multiple analyses per phase per grain, every possible temperature
will be calculated (e.g., 4 amphibole analyses and 3 plag analyses per grain/sample
would yield 12 temperatures)
"""
# thermodynamic parameters
R = 0.0083144  # gas constant, kJ/(mol K)
Ab = (
plag_cations["Na"]
/ (plag_cations["Na"] + plag_cations["Ca"] + plag_cations["K"])
).to_numpy()
An = (
plag_cations["Ca"]
/ (plag_cations["Na"] + plag_cations["Ca"] + plag_cations["K"])
).to_numpy()
plag_cations["An"] = An
plag_cations["Ab"] = Ab
# Calculating Yab for Thermometer A
Y = np.empty(An.shape)
# Yab-an parameters for each thermometer
for i in range(len(An)):
# Calculating Yab for Thermometer A
if thermometer == "A":
if Ab[i] > 0.5:
Y[i] = 0
else:
Y[i] = 12.01 * (1 - Ab[i]) ** 2 - 3
elif thermometer == "B":
# Calculating Yab-an for Thermometer B
if Ab[i] > 0.5:
Y[i] = 3
else:
Y[i] = 12.0 * (2 * Ab[i] - 1) + 3
else:
raise Exception(
'This alphabet is only two letters long. Please choose "A" or "B" for your thermometer'
)
plag_cations["Y"] = Y
# cummingtonite substitution
cm = (
amp_sites_fe["Si_T"]
+ (amp_sites_fe["Al_T"] + amp_sites_fe["Al_D"])
+ amp_sites_fe["Ti_D"]
+ amp_sites_fe["Fe3_D"]
+ (amp_sites_fe["Fe2_C"] + amp_sites_fe["Fe2_B"])
+ (amp_sites_fe["Mg_D"] + amp_sites_fe["Mg_C"])
+ amp_sites_fe["Mn_C"]
+ amp_sites_fe["Mn_B"]
- 13
)
# site terms for the thermometer
Si_T1 = (amp_sites_fe["Si_T"] - 4) / 4
Al_T1 = (8 - amp_sites_fe["Si_T"]) / 4
Al_M2 = (amp_sites_fe["Al_T"] + amp_sites_fe["Al_D"] + amp_sites_fe["Si_T"] - 8) / 2
K_A = amp_sites_fe["K_A"]
box_A = (
3
- amp_sites_fe["Ca_B"]
- (amp_sites_fe["Na_B"] + amp_sites_fe["Na_A"])
- amp_sites_fe["K_A"]
- cm
)
Na_A = amp_sites_fe["Ca_B"] + amp_sites_fe["Na_B"] + amp_sites_fe["Na_A"] + cm - 2
Na_M4 = (2 - amp_sites_fe["Ca_B"] - cm) / 2
Ca_M4 = amp_sites_fe["Ca_B"] / 2
hbl_plag_params = pd.DataFrame(
{
"Si_T1": Si_T1,
"Al_T1": Al_T1,
"Al_M2": Al_M2,
"K_A": K_A,
"box_A": box_A,
"Na_A": Na_A,
"Na_M4": Na_M4,
"Ca_M4": Ca_M4,
}
)
# put the index back in for the sample labels
hbl_plag_params.index = amp_sites_fe.index
# checks for the same unique index names in your plag and amphibole dataframes
sameset = set(hbl_plag_params.index.unique().to_list())
samegrains = list(sameset.intersection(plag_cations.index.unique().to_list()))
# empty list to fill with individual temperature dataframes
T_df_list = []
for grain in samegrains:
# this extracts each individual grain from the respective dataframes
# for plag and amphibole data
amp = hbl_plag_params.loc[grain, :]
plag = plag_cations.loc[grain, :]
# This conditional checks how many plag analyses there are for a given grain.
# if there is more than one it will follow the first option and use array broadcasting
# to calculate every possible temperature for a given amphibole - plagioclase pair.
# e.g. if you have two plag analyses and 4 amphibole analyses you will get 8 total temperatures
if len(amp.shape) == 2:
if len(plag.shape) == 2:
if thermometer == "A":
# numerator for thermometer A
top = (
-76.95
+ (0.79 * P)
+ plag["Y"].to_numpy()[:, np.newaxis]
+ 39.4 * amp["Na_A"].to_numpy()
+ 22.4 * amp["K_A"].to_numpy()
+ (41.5 - 2.89 * P) * amp["Al_M2"].to_numpy()
)
# denominator for thermometer A
bottom = -0.0650 - R * np.log(
(
27
* amp["box_A"].to_numpy()
* amp["Si_T1"].to_numpy()
* plag["Ab"].to_numpy()[:, np.newaxis]
)
/ (256 * amp["Na_A"].to_numpy() * amp["Al_T1"].to_numpy())
)
# final thermometer A
T = (top / bottom) - 273.15
elif thermometer == "B":
# thermometer B whole thing
T = (
(
78.44
+ plag["Y"].to_numpy()[:, np.newaxis]
- 33.6 * amp["Na_M4"].to_numpy()
- (66.8 - 2.92 * P) * amp["Al_M2"].to_numpy()
+ 78.5 * amp["Al_T1"].to_numpy()
+ 9.4 * amp["Na_A"].to_numpy()
)
/ (
0.0721
- R
* np.log(
(
27
* amp["Na_M4"].to_numpy()
* amp["Si_T1"].to_numpy()
* plag["An"].to_numpy()[:, np.newaxis]
)
/ (
64
* amp["Ca_M4"].to_numpy()
* amp["Al_T1"].to_numpy()
* plag["Ab"].to_numpy()[:, np.newaxis]
)
)
)
) - 273.15
# making the temperatures for a given grain dataframe for ease of use later on
T_df_list.append(
pd.DataFrame({"grain": grain, "T": np.concatenate(T, axis=None),})
)
# This is triggered if there is only one plag analysis per amphibole. In this case
# we don't need array broadcasting because the plag An/Ab variables are scalars. All the
# equations are the same as above
else:
if thermometer == "A":
top = (
-76.95
+ (0.79 * P)
+ plag["Y"]
+ 39.4 * amp["Na_A"].to_numpy()
+ 22.4 * amp["K_A"].to_numpy()
+ (41.5 - 2.89 * P) * amp["Al_M2"].to_numpy()
)
bottom = -0.0650 - R * np.log(
(
27
* amp["box_A"].to_numpy()
* amp["Si_T1"].to_numpy()
* plag["Ab"]
)
/ (256 * amp["Na_A"].to_numpy() * amp["Al_T1"].to_numpy())
)
T = (top / bottom) - 273.15
elif thermometer == "B":
T = (
(
78.44
+ plag["Y"]
- 33.6 * amp["Na_M4"].to_numpy()
- (66.8 - 2.92 * P) * amp["Al_M2"].to_numpy()
+ 78.5 * amp["Al_T1"].to_numpy()
+ 9.4 * amp["Na_A"].to_numpy()
)
/ (
0.0721
- R
* np.log(
(
27
* amp["Na_M4"].to_numpy()
* amp["Si_T1"].to_numpy()
* plag["An"]
)
/ (
64
* amp["Ca_M4"].to_numpy()
* amp["Al_T1"].to_numpy()
* plag["Ab"]
)
)
)
) - 273.15
T_df_list.append(
pd.DataFrame({"grain": grain, "T": np.concatenate(T, axis=None),})
)
# This is triggered if there is only one amphibole analysis per plag. In this case
# we don't need array broadcasting or the .to_numpy() function because the amphibole
# values are already scalars
else:
if len(plag.shape) == 2:
if thermometer == "A":
# numerator for thermometer A
top = (
-76.95
+ (0.79 * P)
+ plag["Y"][:, np.newaxis]
+ 39.4 * amp["Na_A"]
+ 22.4 * amp["K_A"]
+ (41.5 - 2.89 * P) * amp["Al_M2"]
)
# denominator for thermometer A
bottom = -0.0650 - R * np.log(
(27 * amp["box_A"] * amp["Si_T1"] * plag["Ab"][:, np.newaxis])
/ (256 * amp["Na_A"].to_numpy() * amp["Al_T1"])
)
# final thermometer A
T = (top / bottom) - 273.15
elif thermometer == "B":
# thermometer B whole thing
T = (
(
78.44
+ plag["Y"][:, np.newaxis]
- 33.6 * amp["Na_M4"]
- (66.8 - 2.92 * P) * amp["Al_M2"]
+ 78.5 * amp["Al_T1"]
+ 9.4 * amp["Na_A"]
)
/ (
0.0721
- R
* np.log(
(
27
* amp["Na_M4"]
* amp["Si_T1"]
* plag["An"][:, np.newaxis]
)
/ (
64
* amp["Ca_M4"]
* amp["Al_T1"]
* plag["Ab"][:, np.newaxis]
)
)
)
) - 273.15
# making the temperatures for a given grain dataframe for ease of use later on
T_df_list.append(
pd.DataFrame({"grain": grain, "T": np.concatenate(T, axis=None),})
)
# This is triggered if there is only one plag analysis per amphibole. In this case
# we don't need array broadcasting because the plag An/Ab variables are scalars. All the
# equations are the same as above
else:
if thermometer == "A":
top = (
-76.95
+ (0.79 * P)
+ plag["Y"]
+ 39.4 * amp["Na_A"]
+ 22.4 * amp["K_A"]
+ (41.5 - 2.89 * P) * amp["Al_M2"]
)
bottom = -0.0650 - R * np.log(
(27 * amp["box_A"] * amp["Si_T1"] * plag["Ab"])
/ (256 * amp["Na_A"] * amp["Al_T1"])
)
T = (top / bottom) - 273.15
elif thermometer == "B":
T = (
(
78.44
+ plag["Y"]
- 33.6 * amp["Na_M4"]
- (66.8 - 2.92 * P) * amp["Al_M2"]
+ 78.5 * amp["Al_T1"]
+ 9.4 * amp["Na_A"]
)
/ (
0.0721
- R
* np.log(
(27 * amp["Na_M4"] * amp["Si_T1"] * plag["An"])
/ (64 * amp["Ca_M4"] * amp["Al_T1"] * plag["Ab"])
)
)
) - 273.15
T_df_list.append(pd.DataFrame({"grain": grain, "T_calc": [T],},))
# overall temperature dataframe for every grain
T_df = pd.concat(T_df_list)
return T_df
#%% Cation fraction calculation for barometry based on Putirka 2008
``` |
{
"source": "jlubcke/django-pycharm-breakpoint",
"score": 2
} |
#### File: django-pycharm-breakpoint/django_pycharm_breakpoint/django_app.py
```python
import sys
import threading
from django.apps import AppConfig
from django.conf import settings
from django.core.handlers import exception
class DjangoPycharmBreakpointConfig(AppConfig):
name = 'django_pycharm_breakpoint'
verbose_name = 'Django PyCharm breakpoint'
def ready(self):
if not settings.DEBUG:
return
original_response_for_exception = exception.response_for_exception
def monkey_patched_response_for_exception(request, exc):
breakpoint_on_exception()
return original_response_for_exception(request, exc)
exception.response_for_exception = monkey_patched_response_for_exception
try:
import rest_framework
from rest_framework.views import APIView
except ImportError:
pass
else:
APIView.original_handle_exception = APIView.handle_exception
def monkey_patched_handle_exception(self, exc):
breakpoint_on_exception()
return self.original_handle_exception(exc)
APIView.handle_exception = monkey_patched_handle_exception
def breakpoint_on_exception():
try:
import pydevd
from pydevd import pydevd_tracing
except ImportError:
pass
else:
exctype, value, traceback = sys.exc_info()
frames = []
while traceback:
frames.append(traceback.tb_frame)
traceback = traceback.tb_next
thread = threading.current_thread()
frames_by_id = dict([(id(frame), frame) for frame in frames])
frame = frames[-1]
if hasattr(thread, "additional_info"):
thread.additional_info.pydev_message = "Uncaught exception"
try:
debugger = pydevd.debugger
except AttributeError:
debugger = pydevd.get_global_debugger()
pydevd_tracing.SetTrace(None) # no tracing from here
debugger.stop_on_unhandled_exception(thread, frame, frames_by_id, (exctype, value, traceback))
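# Usage sketch (assumption, not shown in this file): the app is activated by
# adding the AppConfig name defined above to INSTALLED_APPS in a project's
# settings module, e.g.
#     INSTALLED_APPS = [
#         # ... other apps ...
#         'django_pycharm_breakpoint',
#     ]
# With settings.DEBUG = True and a PyCharm debug session attached, unhandled
# view exceptions then stop the debugger at the raising frame.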
``` |
{
"source": "jlubken/cfgenvy",
"score": 3
} |
#### File: cfgenvy/test/test_cfgenvy.py
```python
from io import StringIO
from typing import Optional
from pytest import mark
from cfgenvy import Parser, yaml_dumps, yaml_type
class Service(Parser):
"""Service."""
YAML = "!test"
@classmethod
def _yaml_init(cls, loader, node):
"""Yaml init."""
return cls(**loader.construct_mapping(node, deep=True))
@classmethod
def _yaml_repr(cls, dumper, self, *, tag: str):
"""Yaml repr."""
return dumper.represent_mapping(tag, self.as_yaml())
@classmethod
def as_yaml_type(cls, tag: Optional[str] = None):
"""As yaml type."""
yaml_type(
cls,
tag or cls.YAML,
init=cls._yaml_init,
repr=cls._yaml_repr,
)
@classmethod
def yaml_types(cls):
"""Yaml types."""
cls.as_yaml_type()
def __init__(self, password, username):
"""__init__."""
self.password = password
self.username = username
def as_yaml(self):
"""As yaml."""
return {
"password": <PASSWORD>,
"username": self.username,
}
CONFIG_FILE = "./test/test.yaml"
ENV_FILE = "./test/test.env"
CONFIGS = """
!test
password: ${PASSWORD}
username: ${USERNAME}
""".strip()
ENVS = """
PASSWORD=password
USERNAME=username
""".strip()
EXPECTED = """
!test
password: password
username: username
""".strip()
def build(expected=EXPECTED):
"""Build."""
Service.as_yaml_type()
return (
Service,
{
"password": "password",
"username": "username",
},
expected,
)
def deserialize_args(
config_file=CONFIG_FILE,
env_file=ENV_FILE,
expected=EXPECTED,
):
"""Deserialize override env."""
return (
Service.parse,
{
"argv": ["-c", config_file, "-e", env_file],
"env": {
"PASSWORD": "nope",
"USERNAME": "nope",
},
},
expected,
)
def deserialize_env(
config_file=CONFIG_FILE,
expected=EXPECTED,
):
"""Deserialize env."""
return (
Service.parse,
{
"env": {
"CONFIG": config_file,
"PASSWORD": "password",
"USERNAME": "username",
}
},
expected,
)
def deserialize_file(
config_file=CONFIG_FILE,
env_file=ENV_FILE,
expected=EXPECTED,
):
"""Deserialize file."""
return (
Service.load,
{
"config_file": config_file,
"env_file": env_file,
},
expected,
)
def deserialize_streams(
configs=CONFIGS,
envs=ENVS,
expected=EXPECTED,
):
"""Deserialize string."""
return (
Service.loads,
{
"configs": StringIO(configs),
"envs": StringIO(envs),
},
expected,
)
@mark.parametrize(
"cls,kwargs,expected",
(
build(),
deserialize_streams(),
deserialize_file(),
deserialize_env(),
deserialize_args(),
),
)
def test_product(cls, kwargs, expected):
"""Test product."""
product = cls(**kwargs)
actual = yaml_dumps(product).strip()
assert actual == expected
``` |
{
"source": "jlubken/dsdk",
"score": 2
} |
#### File: src/dsdk/asset.py
```python
from __future__ import annotations
from argparse import Namespace
from logging import getLogger
from os import listdir
from os.path import isdir
from os.path import join as joinpath
from os.path import splitext
from typing import Any, Dict, Optional
from cfgenvy import yaml_type
logger = getLogger(__name__)
class Asset(Namespace):
"""Asset."""
YAML = "!asset"
@classmethod
def as_yaml_type(cls, tag: Optional[str] = None):
"""As yaml type."""
yaml_type(
cls,
tag or cls.YAML,
init=cls._yaml_init,
repr=cls._yaml_repr,
)
@classmethod
def build(cls, *, path: str, ext: str):
"""Build."""
kwargs = {}
for name in listdir(path):
if name[0] == ".":
continue
child = joinpath(path, name)
if isdir(child):
kwargs[name] = cls.build(path=child, ext=ext)
continue
s_name, s_ext = splitext(name)
if s_ext != ext:
continue
with open(child, encoding="utf-8") as fin:
kwargs[s_name] = fin.read()
return cls(path=path, ext=ext, **kwargs)
@classmethod
def _yaml_init(cls, loader, node):
"""Yaml init."""
return cls.build(**loader.construct_mapping(node, deep=True))
@classmethod
def _yaml_repr(cls, dumper, self, *, tag: str):
"""Yaml repr."""
return dumper.represent_mapping(tag, self.as_yaml())
def __init__(
self,
*,
path: str,
ext: str,
**kwargs: Asset,
):
"""__init__."""
self._path = path
self._ext = ext
super().__init__(**kwargs)
def as_yaml(self) -> Dict[str, Any]:
"""As yaml."""
return {
"ext": self._ext,
"path": self._path,
}
def __call__(self, *args):
"""__call__.
Yield (path, values).
"""
for key, value in vars(self).items():
if key.startswith("_"):
continue
if value.__class__ == Asset:
yield from value(*args, key)
continue
yield ".".join((*args, key)), value
```
#### File: src/dsdk/profile.py
```python
from contextlib import contextmanager
from logging import getLogger
from time import perf_counter_ns
from typing import Any, Dict, Generator, Optional
from cfgenvy import yaml_type
logger = getLogger(__name__)
class Profile:
"""Profile."""
YAML = "!profile"
@classmethod
def as_yaml_type(cls, tag: Optional[str] = None):
"""As yaml type."""
yaml_type(
cls,
tag or cls.YAML,
init=cls._yaml_init,
repr=cls._yaml_repr,
)
@classmethod
def _yaml_init(cls, loader, node):
"""Yaml init."""
return cls(**loader.construct_mapping(node, deep=True))
@classmethod
def _yaml_repr(cls, dumper, self, *, tag: str):
"""Yaml repr."""
return dumper.represent_mapping(tag, self.as_yaml())
def __init__(self, on: int, end: Optional[int] = None):
"""__init__."""
self.end = end
self.on = on
def as_yaml(self) -> Dict[str, Any]:
"""As yaml."""
return {"end": self.end, "on": self.on}
def __repr__(self):
"""__repr__."""
return f"Profile(end={self.end}, on={self.on})"
def __str__(self):
"""__str__."""
return str(
{
"end": self.end,
"on": self.on,
}
)
@contextmanager
def profile(key: str) -> Generator[Profile, None, None]:
"""Profile."""
# Replace return type with ContextManager[Profile] when mypy is fixed.
i = Profile(perf_counter_ns())
logger.info('{"key": "%s.on", "ns": "%s"}', key, i.on)
try:
yield i
finally:
i.end = perf_counter_ns()
logger.info(
'{"key": "%s.end", "ns": "%s", "elapsed": "%s"}',
key,
i.end,
i.end - i.on,
)
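# Usage sketch (illustrative key and workload): wrap any block of work to log
# its start, end, and elapsed time in nanoseconds via the yielded Profile.
#
#     with profile("example.step") as p:
#         sum(range(10_000))  # stand-in workload
#     # p.on and p.end now hold the perf_counter_ns() readings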
```
#### File: src/dsdk/service.py
```python
from __future__ import annotations
import pickle
from collections import OrderedDict
from contextlib import contextmanager
from datetime import date, datetime, tzinfo
from json import dumps
from logging import getLogger
from typing import (
Any,
Callable,
Dict,
Generator,
Mapping,
Optional,
Sequence,
)
from cfgenvy import Parser, YamlMapping
from numpy import allclose
from pandas import DataFrame
from pkg_resources import DistributionNotFound, get_distribution
from .asset import Asset
from .interval import Interval
from .utils import configure_logger, get_tzinfo, now_utc_datetime
try:
__version__ = get_distribution("dsdk").version
except DistributionNotFound:
# package is not installed
pass
logger = getLogger(__name__)
class Delegate:
"""Delegate."""
def __init__(self, parent: Any):
"""__init__."""
self.parent = parent
@property
def duration(self) -> Interval:
"""Return duration."""
return self.parent.duration
@duration.setter
def duration(self, value: Interval):
"""Duration setter."""
self.parent.duration = value
@property
def as_of_local_datetime(self) -> datetime:
"""Return as of local datetime."""
return self.parent.as_of_local_datetime
@property
def as_of(self) -> datetime:
"""Return as of utc datetime."""
return self.parent.as_of
@as_of.setter
def as_of(self, value: datetime):
"""Set as_of."""
self.parent.as_of = value
@property
def as_of_local_date(self) -> date:
"""Return as of local date."""
return self.parent.as_of_local_date
@property
def evidence(self) -> Evidence:
"""Return evidence."""
return self.parent.evidence
@property
def id(self) -> int:
"""Return id."""
return self.parent.id
@id.setter
def id(self, value: int):
"""Set id."""
self.parent.id = value
@property
def predictions(self) -> DataFrame:
"""Return predictions."""
return self.parent.predictions
@predictions.setter
def predictions(self, value: DataFrame) -> None:
"""Predictions setter."""
self.parent.predictions = value
@property
def time_zone(self) -> str:
"""Return time zone."""
return self.parent.time_zone
@time_zone.setter
def time_zone(self, value: str):
"""Time zone setter."""
self.parent.time_zone = value
@property
def tz_info(self) -> tzinfo:
"""Return tzinfo."""
return self.parent.tz_info
def as_insert_sql(self) -> Dict[str, Any]:
"""As insert sql."""
return self.parent.as_insert_sql()
class Batch:
"""Batch."""
def __init__( # pylint: disable=too-many-arguments
self,
as_of: Optional[datetime],
duration: Optional[Interval],
time_zone: Optional[str],
microservice_version: str,
) -> None:
"""__init__."""
self.id: Optional[int] = None
self.as_of = as_of
self.duration = duration
self.evidence = Evidence()
self.time_zone = time_zone
self.microservice_version = microservice_version
self.predictions: Optional[DataFrame] = None
@property
def as_of_local_datetime(self) -> datetime:
"""Return as of local datetime."""
assert self.as_of is not None
assert self.tz_info is not None
return self.as_of.astimezone(self.tz_info)
@property
def as_of_local_date(self) -> date:
"""Return as of local date."""
return self.as_of_local_datetime.date()
@property
def tz_info(self) -> tzinfo:
"""Return tz_info."""
assert self.time_zone is not None
return get_tzinfo(self.time_zone)
@property
def parent(self) -> Any:
"""Return parent."""
raise ValueError()
def as_insert_sql(self) -> Dict[str, Any]:
"""As insert sql."""
# duration comes from the database clock.
return {
"as_of": self.as_of,
"microservice_version": self.microservice_version,
"time_zone": self.time_zone,
}
class Evidence(OrderedDict):
"""Evidence."""
def __setitem__(self, key, value):
"""__setitem__."""
if key in self:
raise KeyError(f"{key} has already been set")
super().__setitem__(key, value)
class Service( # pylint: disable=too-many-instance-attributes
Parser,
YamlMapping,
):
"""Service."""
ON = dumps({"key": "%s.on"})
END = dumps({"key": "%s.end"})
BATCH_OPEN = dumps({"key": "batch.open", "as_of": "%s", "time_zone": "%s"})
BATCH_CLOSE = dumps({"key": "batch.close"})
TASK_ON = dumps({"key": "task.on", "task": "%s"})
TASK_END = dumps({"key": "task.end", "task": "%s"})
PIPELINE_ON = dumps({"key": "pipeline.on", "pipeline": "%s"})
PIPELINE_END = dumps({"key": "pipeline.end", "pipeline": "%s"})
COUNT = dumps(
{"key": "validate.count", "scores": "%s", "test": "%s", "status": "%s"}
)
MATCH = dumps({"key": "validate.match", "status": "%s"})
YAML = "!baseservice"
VERSION = __version__
@classmethod
def yaml_types(cls):
"""Yaml types."""
Asset.as_yaml_type()
Interval.as_yaml_type()
cls.as_yaml_type()
@classmethod
@contextmanager
def context(
cls,
key: str,
argv: Optional[Sequence[str]] = None,
env: Optional[Mapping[str, str]] = None,
):
"""Context."""
configure_logger("dsdk")
logger.info(cls.ON, key)
try:
yield cls.parse(argv=argv, env=env)
except BaseException as e:
logger.error(e)
raise
logger.info(cls.END, key)
@classmethod
def create_gold(cls):
"""Create gold."""
with cls.context("create_gold") as service:
service.on_create_gold()
@classmethod
def main(cls):
"""Main."""
with cls.context("main") as service:
service()
@classmethod
def validate_gold(cls):
"""Validate gold."""
with cls.context("validate_gold") as service:
service.on_validate_gold()
def __init__(
self,
*,
pipeline: Sequence[Task],
as_of: Optional[datetime] = None,
gold: Optional[str] = None,
time_zone: Optional[str] = None,
batch_cls: Callable = Batch,
) -> None:
"""__init__."""
self.gold = gold
self.pipeline = pipeline
self.duration: Optional[Interval] = None
self.as_of = as_of
self.time_zone = time_zone
self.batch_cls = batch_cls
def __call__(self) -> Batch:
"""Run."""
with self.open_batch() as batch:
# if one of the mixins didn't set these properties...
if batch.as_of is None:
batch.as_of = now_utc_datetime()
if batch.time_zone is None:
batch.time_zone = "America/New_York"
if batch.duration is None:
batch.duration = Interval(
on=batch.as_of,
end=None,
)
logger.info(self.PIPELINE_ON, self.__class__.__name__)
for task in self.pipeline:
logger.info(self.TASK_ON, task.__class__.__name__)
task(batch, self)
logger.info(self.TASK_END, task.__class__.__name__)
logger.info(self.PIPELINE_END, self.__class__.__name__)
# if one of the mixins did not set this property...
if batch.duration.end is None:
batch.duration.end = now_utc_datetime()
return batch
@property
def tz_info(self) -> tzinfo:
"""Return tz_info."""
assert self.time_zone is not None
return get_tzinfo(self.time_zone)
def dependency(self, key, cls, kwargs):
"""Dependency."""
dependency = getattr(self, key)
if dependency is not None:
return
logger.debug(
"Injecting dependency: %s, %s, %s",
key,
cls.__name__,
kwargs.keys(),
)
dependency = cls(**kwargs)
setattr(self, key, dependency)
def as_yaml(self) -> Dict[str, Any]:
"""As yaml."""
return {
"as_of": self.as_of,
"duration": self.duration,
"gold": self.gold,
"time_zone": self.time_zone,
}
def on_create_gold(self) -> Batch:
"""On create gold."""
path = self.gold
assert path is not None
run = self()
scores = self.scores(run.id)
n_scores = scores.shape[0]
logger.info("Write %s scores to %s", n_scores, path)
# pylint: disable=no-member
model_version = self.model.version # type: ignore[attr-defined]
with open(path, "wb") as fout:
pickle.dump(
{
"as_of": run.as_of,
"microservice_version": self.VERSION,
"model_version": model_version,
"scores": scores,
"time_zone": run.time_zone,
},
fout,
)
return run
def on_validate_gold(self) -> Batch:
"""On validate gold."""
path = self.gold
assert path is not None
with open(path, "rb") as fin:
gold = pickle.load(fin)
# just set as_of and time_zone from gold
# do not trust that config parameters match
self.as_of = gold["as_of"]
self.time_zone = gold["time_zone"]
scores = gold["scores"]
n_scores = scores.shape[0]
run = self()
test = self.scores(run.id)
n_test = test.shape[0]
n_tests = 0
n_passes = 0
n_tests += 1
try:
assert n_scores == n_test
logger.info(self.COUNT, n_scores, n_test, "pass")
n_passes += 1
except AssertionError:
logger.error(self.COUNT, n_scores, n_test, "FAIL")
n_tests += 1
try:
assert allclose(scores, test)
logger.info(self.MATCH, "pass")
n_passes += 1
except AssertionError:
logger.error(self.MATCH, "FAIL")
assert n_tests == n_passes
return run
@contextmanager
def open_batch(self) -> Generator[Any, None, None]:
"""Open batch."""
logger.info(
self.BATCH_OPEN,
self.as_of,
self.time_zone,
)
yield self.batch_cls(
as_of=self.as_of,
duration=self.duration,
microservice_version=self.VERSION,
time_zone=self.time_zone,
)
logger.info(self.BATCH_CLOSE)
def scores(self, run_id):
"""Get scores."""
raise NotImplementedError()
class Task: # pylint: disable=too-few-public-methods
"""Task."""
def __call__(self, batch: Batch, service: Service) -> None:
"""__call__."""
raise NotImplementedError()
```
#### File: dsdk/test/test_time_conversions.py
```python
from dsdk.utils import (
epoch_ms_from_utc_datetime,
now_utc_datetime,
utc_datetime_from_epoch_ms,
)
def test_conversions():
"""Test conversions."""
expected = now_utc_datetime()
epoch_ms = epoch_ms_from_utc_datetime(expected)
actual = utc_datetime_from_epoch_ms(epoch_ms)
assert expected == actual
``` |
{
"source": "jlubo/memory-consolidation-stc",
"score": 2
} |
#### File: memory-consolidation-stc/analysis/assemblyAvalancheStatistics.py
```python
import numpy as np
import os
import time
import sys
from pathlib import Path
from shutil import copy2
from overlapParadigms import *
from utilityFunctions import *
# main properties (most can be adjusted via command-line parameters; see the end of the script)
paradigm = "NOOVERLAP" # paradigm of overlaps between assemblies
period_duration = 0.01 # binning period (in units of seconds)
n_thresh = 10 # number of spikes in a binning period to consider them an avalanche
new_plots = True # defines if new spike raster plots shall be created using gnuplot
exc_pop_size = 2500 # number of neurons in the excitatory population
core_size = 600 # total size of one cell assembly
# cell assemblies
coreA, coreB, coreC = coreDefinitions(paradigm, core_size)
# control population
mask_coreA = np.in1d(np.arange(exc_pop_size), coreA)
mask_coreB = np.in1d(np.arange(exc_pop_size), coreB)
mask_coreC = np.in1d(np.arange(exc_pop_size), coreC)
ctrl = np.arange(exc_pop_size)[np.logical_not(np.logical_or(mask_coreA, np.logical_or(mask_coreB, mask_coreC)))] # control neurons (neurons that are not within a cell assembly)
ctrl_size = len(ctrl)
####################################
# timeSeries
# Computes the time series of avalanche occurrence and saves it to a file
# timestamp: the timestamp of the simulation data
# spike_raster_file: the name of the spike raster file
# output_dir: relative path to the output directory
# return: the time series as a list of characters
def timeSeries(timestamp, spike_raster_file, output_dir):
t0 = time.time()
# read the last line and compute number of periods
with open(spike_raster_file, 'rb') as f:
f.seek(-2, os.SEEK_END)
while f.read(1) != b'\n': # seek last line
f.seek(-2, os.SEEK_CUR)
last_line = f.readline().decode()
num_periods_tot = int(float(last_line.split('\t\t')[0]) / period_duration) + 1
# count lines
with open(spike_raster_file) as f:
num_rows = sum(1 for _ in f)
print("num_rows =", num_rows)
# counters per period for the different cell assemblies
counterA = np.zeros(num_periods_tot, dtype=int)
counterB = np.zeros(num_periods_tot, dtype=int)
counterC = np.zeros(num_periods_tot, dtype=int)
counterOverall = np.zeros(num_periods_tot, dtype=int)
counterCtrl = np.zeros(num_periods_tot, dtype=int)
series = ["-" for i in range(num_periods_tot)]
# read all data
f = open(spike_raster_file)
for line in f:
row = line.split('\t\t')
t = float(row[0])
n = int(row[1])
current_period = int(np.floor(t / period_duration))
if n < exc_pop_size:
counterOverall[current_period] += 1
if n in coreA:
counterA[current_period] += 1
if n in coreB:
counterB[current_period] += 1
if n in coreC:
counterC[current_period] += 1
if n in ctrl:
counterCtrl[current_period] += 1
f.close()
# determine active CAs for each period and write data to file
fout = open(os.path.join(output_dir, timestamp + '_CA_time_series.txt'), 'w')
for i in range(num_periods_tot):
fout.write(str(round((i+0.5)*period_duration,4)) + "\t\t") # write time at 1/2 of a period
if counterOverall[i] > n_thresh:
series[i] = "o"
if counterC[i] > n_thresh:
series[i] = "C" + series[i]
if counterB[i] > n_thresh:
series[i] = "B" + series[i]
if counterA[i] > n_thresh:
series[i] = "A" + series[i]
fout.write(series[i] + "\t\t" + str(counterA[i]) + "\t\t" + str(counterB[i]) + "\t\t" + str(counterC[i]) + "\t\t" + str(counterOverall[i]) + "\n")
fout.close()
time_el = round(time.time()-t0) # elapsed time in seconds
time_el_str = "Elapsed time: "
if time_el < 60:
time_el_str += str(time_el) + " s"
else:
time_el_str += str(time_el // 60) + " m " + str(time_el % 60) + " s"
print(time_el_str)
# write firing rates to file
fout = open(os.path.join(output_dir, timestamp + '_firing_rates.txt'), 'w')
fout.write("nu(A) = " + str(np.sum(counterA) / (num_periods_tot*period_duration) / core_size) + "\n")
fout.write("nu(B) = " + str(np.sum(counterB) / (num_periods_tot*period_duration) / core_size) + "\n")
fout.write("nu(C) = " + str(np.sum(counterC) / (num_periods_tot*period_duration) / core_size) + "\n")
fout.write("nu(ctrl) = " + str(np.sum(counterCtrl) / (num_periods_tot*period_duration) / ctrl_size) + "\n")
fout.write("core_size = " + str(core_size) + "\n")
fout.write("ctrl_size = " + str(ctrl_size) + "\n")
fout.close()
return series
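# Usage sketch (hypothetical timestamp and paths): for a single spike raster
# file, the per-period series of active assemblies can be computed as, e.g.,
#     series = timeSeries("some_timestamp", "./some_timestamp_spike_raster.txt", "./output")
# Each entry is "-" (no avalanche) or a combination such as "o", "Ao", "ABo",
# naming the assemblies whose spike count exceeded n_thresh in that period.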
####################################
# transitionProbabilities
# Computes the likelihood of avalanche occurrence for each assembly, as well as the likelihoods of "transitions"/triggering
# of assemblies, and saves the results to a file
# timestamp: the timestamp of the simulation data
# series: the time series as provided by timeSeries(...)
# output_dir: relative path to the output directory
def transitionProbabilities(timestamp, series, output_dir):
np.seterr(divide='ignore', invalid='ignore')
num_periods_tot = len(series)
nA, nB, nC, nO, nN = 0, 0, 0, 0, 0 # frequencies of avalanches in different assemblies/overall
nAA, nBB, nCC, nAB, nBA, nAC, nCA, nBC, nCB = 0, 0, 0, 0, 0, 0, 0, 0, 0 # frequencies of transitions
nOO, nOA, nOB, nOC, nAO, nBO, nCO = 0, 0, 0, 0, 0, 0, 0 # frequencies of transitions involving "overall"
nAN, nBN, nCN, nON, nNA, nNB, nNC, nNO, nNN = 0, 0, 0, 0, 0, 0, 0, 0, 0 # frequencies of transitions into/from void
for i in range(num_periods_tot-1):
if series[i] == "-": # there is no avalanche in this period
nN += 1
if series[i+1] == "-":
nNN += 1
else:
nNO += 1
if "A" in series[i+1]:
nNA += 1
if "B" in series[i+1]:
nNB += 1
if "C" in series[i+1]:
nNC += 1
else: # there is an avalanche in this period
nO += 1
if series[i+1] == "-":
nON += 1
else:
nOO += 1
if "A" in series[i+1]:
nOA += 1
if "B" in series[i+1]:
nOB += 1
if "C" in series[i+1]:
nOC += 1
if "A" in series[i]: # there is an avalanche in A
nA += 1
if series[i+1] == "-":
nAN += 1
else:
nAO += 1
if "A" in series[i+1]:
nAA += 1
if "B" in series[i+1]:
nAB += 1
if "C" in series[i+1]:
nAC += 1
if "B" in series[i]: # there is an avalanche in B
nB += 1
if series[i+1] == "-":
nBN += 1
else:
nBO += 1
if "A" in series[i+1]:
nBA += 1
if "B" in series[i+1]:
nBB += 1
if "C" in series[i+1]:
nBC += 1
if "C" in series[i]: # there is an avalanche in C
nC += 1
if series[i+1] == "-":
nCN += 1
else:
nCO += 1
if "A" in series[i+1]:
nCA += 1
if "B" in series[i+1]:
nCB += 1
if "C" in series[i+1]:
nCC += 1
# human-readable output
fout = open(os.path.join(output_dir, timestamp + '_CA_probabilities.txt'), 'w')
fout.write("Timestamp: " + timestamp)
fout.write("\nTotal number of periods: " + str(num_periods_tot))
fout.write("\n\nTotal probabilities:")
fout.write("\np(A) = " + str(nA / num_periods_tot)) # likelihood of avalanche in A
fout.write("\np(B) = " + str(nB / num_periods_tot)) # likelihood of avalanche in B
fout.write("\np(C) = " + str(nC / num_periods_tot)) # likelihood of avalanche in C
fout.write("\np(overall) = " + str(nO / num_periods_tot)) # probability of avalanche in overall population
fout.write("\np(-) = " + str(nN / num_periods_tot)) # probability of no avalanche
fout.write("\n\nTrigger probabilities:")
fout.write("\np_A(A) = " + str(np.divide(nAA, num_periods_tot))) # likelihood of triggering A
fout.write("\np_A(B) = " + str(np.divide(nAB, num_periods_tot))) # likelihood of triggering B
fout.write("\np_A(C) = " + str(np.divide(nAC, num_periods_tot))) # likelihood of triggering C
fout.write("\np_A(overall) = " + str(np.divide(nAO, nAO+nAN))) # probability of triggering something
fout.write("\np_A(-) = " + str(np.divide(nAN, nAO+nAN))) # probability of triggering nothing
fout.write("\np_B(A) = " + str(np.divide(nBA, num_periods_tot))) # likelihood of triggering A
fout.write("\np_B(B) = " + str(np.divide(nBB, num_periods_tot))) # likelihood of triggering B
fout.write("\np_B(C) = " + str(np.divide(nBC, num_periods_tot))) # likelihood of triggering C
fout.write("\np_B(overall) = " + str(np.divide(nBO, nBO+nBN))) # probability of triggering something
fout.write("\np_B(-) = " + str(np.divide(nBN, nBO+nBN))) # probability of triggering nothing
fout.write("\np_C(A) = " + str(np.divide(nCA, num_periods_tot))) # likelihood of triggering A
fout.write("\np_C(B) = " + str(np.divide(nCB, num_periods_tot))) # likelihood of triggering B
fout.write("\np_C(C) = " + str(np.divide(nCC, num_periods_tot))) # likelihood of triggering C
fout.write("\np_C(overall) = " + str(np.divide(nCO, nCO+nCN))) # probability of triggering something
fout.write("\np_C(-) = " + str(np.divide(nCN, nCO+nCN))) # probability of triggering nothing
fout.write("\np_overall(A) = " + str(np.divide(nOA, num_periods_tot))) # likelihood of triggering A
fout.write("\np_overall(B) = " + str(np.divide(nOB, num_periods_tot))) # likelihood of triggering B
fout.write("\np_overall(C) = " + str(np.divide(nOC, num_periods_tot))) # likelihood of triggering C
fout.write("\np_overall(overall) = " + str(np.divide(nOO, nOO+nON))) # probability of triggering something
fout.write("\np_overall(-) = " + str(np.divide(nON, nOO+nON))) # probability of triggering nothing
fout.write("\np_-(A) = " + str(np.divide(nNA, num_periods_tot))) # likelihood of triggering A
fout.write("\np_-(B) = " + str(np.divide(nNB, num_periods_tot))) # likelihood of triggering B
fout.write("\np_-(C) = " + str(np.divide(nNC, num_periods_tot))) # likelihood of triggering C
fout.write("\np_-(overall) = " + str(np.divide(nNO, nNO+nNN))) # probability of triggering something
fout.write("\np_-(-) = " + str(np.divide(nNN, nNO+nNN))) # probability of triggering nothing
fout.close()
# output for facilitated machine readability
fout = open(os.path.join(output_dir, timestamp + '_CA_probabilities_raw.txt'), 'w')
fout.write(timestamp + "\n\n\n")
fout.write(str(nA / num_periods_tot) + "\n")
fout.write(str(nB / num_periods_tot) + "\n")
fout.write(str(nC / num_periods_tot) + "\n")
fout.write(str(nO / num_periods_tot) + "\n")
fout.write(str(nN / num_periods_tot) + "\n")
fout.close()
n_transitions = sum((nOO, nON, nNO, nNN))
if n_transitions == num_periods_tot-1:
print("Normalization check suceeeded.")
else:
print("Normalization check failed:", n_transitions, "triggerings found, as compared to", num_periods_tot-1, "expected.")
####################################
# spikeRasterPlot
# Creates two spike raster plots in the data directory and copies them to the output directory
# timestamp: the timestamp of the data
# data_dir: the directory containing the simulation data
# output_dir: relative path to the output directory
# new_plots: specifies if new plots shall be created
def spikeRasterPlot(timestamp, data_dir, output_dir, new_plots):
plot_file1 = timestamp + "_spike_raster.png"
plot_file2 = timestamp + "_spike_raster2.png"
work_dir = os.getcwd() # get the current working directory
if data_dir == "":
os.chdir(".")
else:
os.chdir(data_dir) # change to data directory
if new_plots:
fout = open("spike_raster.gpl", "w")
fout.write("set term png enhanced font Sans 20 size 1280,960 lw 2.5\n")
fout.write("set output '" + plot_file1 + "'\n\n")
fout.write("Ne = 2500\n")
fout.write("Ni = 625\n")
fout.write("set xlabel 'Time (s)'\n")
fout.write("unset ylabel\n")
fout.write("set yrange [0:1]\n")
fout.write("set ytics out ('#0' 0.05, '#625' 0.23, '#1250' 0.41, '#1875' 0.59, '#2500' 0.77)\n")
fout.write("plot [x=100:120] '" + timestamp + "_spike_raster.txt' using 1:($2 < Ne ? (0.9*$2/(Ne+Ni) + 0.05) : 1/0) notitle with dots lc 'blue', \\\n")
fout.write(" '" + timestamp + "_spike_raster.txt' using 1:($2 >= Ne ? (0.9*$2/(Ne+Ni) + 0.05) : 1/0) notitle with dots lc 'red'\n\n")
fout.write("###########################################\n")
fout.write("set output '" + plot_file2 + "'\n")
fout.write("plot [x=100:180] '" + timestamp + "_spike_raster.txt' using 1:($2 < Ne ? (0.9*$2/(Ne+Ni) + 0.05) : 1/0) notitle with dots lc 'blue', \\\n")
fout.write(" '" + timestamp + "_spike_raster.txt' using 1:($2 >= Ne ? (0.9*$2/(Ne+Ni) + 0.05) : 1/0) notitle with dots lc 'red'\n")
fout.close()
os.system("gnuplot spike_raster.gpl")
if os.path.exists(plot_file1) and os.path.exists(plot_file2):
copy2(plot_file1, os.path.join(work_dir, output_dir)) # copy spike raster plot #1 to output directory
copy2(plot_file2, os.path.join(work_dir, output_dir)) # copy spike raster plot #2 to output directory
else:
print("Warning: " + data_dir + ": plot files not found.")
os.chdir(work_dir) # change back to previous working directory
######################################
# dirRecursion
# Walks recursively through a directory looking for spike raster data;
# if data are found, computes time series and avalanche likelihoods (and, if specified, creates spike raster plots)
# directory: the directory to consider
# output_dir: relative path to the output directory
# new_plots: specifies if new plots shall be created
def dirRecursion(directory, output_dir, new_plots):
rawpaths = Path(directory)
print("Reading directory " + directory)
rawpaths = Path(directory)
for x in sorted(rawpaths.iterdir()):
dest_file = ""
full_path = str(x)
hpath = os.path.split(full_path)[0] # take head
tpath = os.path.split(full_path)[1] # take tail
if not x.is_dir():
if "_spike_raster.txt" in tpath:
timestamp = tpath.split("_spike_raster.txt")[0]
series = timeSeries(timestamp, full_path, output_dir)
transitionProbabilities(timestamp, series, output_dir)
spikeRasterPlot(timestamp, hpath, output_dir, new_plots)
params_file = os.path.join(hpath, timestamp + "_PARAMS.txt")
if os.path.exists(params_file):
copy2(params_file, output_dir)
else:
print("Warning: " + hpath + ": no parameter file found.")
else:
if hasTimestamp(tpath):
dirRecursion(directory + os.sep + tpath, output_dir, new_plots)
###############################################
# main:
### example call from shell: python3 assemblyAvalancheStatistics.py "OVERLAP10 no AC, no ABC" 0.01 10 False
if len(sys.argv) > 1: # if there is at least one additional commandline arguments
paradigm = sys.argv[1]
try:
coreA, coreB, coreC = coreDefinitions(paradigm, core_size) # re-define cell assemblies
except:
raise
if len(sys.argv) > 2: # if there are at least 2 additional commandline arguments
period_duration = float(sys.argv[2])
if len(sys.argv) > 3: # if there are at least 3 additional commandline arguments
n_thresh = int(sys.argv[3])
if len(sys.argv) > 4: # if there are at least 4 additional commandline arguments
if sys.argv[4] == "0" or sys.argv[4] == "False":
new_plots = False
print("Creation of new plots switched off")
else:
new_plots = True
output_dir = "./avalanche_statistics_" + str(period_duration) + "_" + str(n_thresh) # output directory for analysis results
if not os.path.exists(output_dir):
os.mkdir(output_dir)
print("Output directory:", output_dir)
print("Paradigm:", paradigm)
print("Bin size:", str(period_duration), "s")
print("Detection threshold:", str(n_thresh))
dirRecursion('.', output_dir, new_plots) # walk through directories and analyze data
mergeRawData(output_dir, "_CA_probabilities_raw.txt", "all_trials_raw.txt", remove_raw=True) # merge machine-readable output
```
#### File: memory-consolidation-stc/analysis/calculateMIa.py
```python
from utilityFunctions import *
from valueDistributions import *
import numpy as np
from pathlib import Path
np.set_printoptions(threshold=1e10, linewidth=200) # extend console print range for numpy arrays
# calculateMIa
# Calculates mutual information of the activity distribution of the network at two timesteps and returns it along with the self-information of
# the reference distribution
# nppath: path to the network_plots directory to read the data from
# timestamp: a string containing date and time (to access correct paths)
# Nl_exc: the number of excitatory neurons in one line of a quadratic grid
# time_for_activity: the time that at which the activites shall be read out (some time during recall)
# time_ref: the reference time (for getting the activity distribution during learning)
# core: the neurons in the cell assembly (for stipulation; only required if no activity distribution during learning is available)
def calculateMIa(nppath, timestamp, Nl_exc, time_for_activity, time_ref = "11.0", core = np.array([])):
if time_ref: # use reference firing rate distribution from data (for learned cell assembly)
times_for_readout_list = [time_ref, time_for_activity] # the simulation times at which the activities shall be read out
print("Using reference distribution at " + time_ref + "...")
else: # use model firing rate distribution (for stipulated cell assembly)
times_for_readout_list = [time_for_activity]
v_model = np.zeros(Nl_exc**2)
v_model[core] = 1 # entropy/MI of this distribution for Nl_exc=40: 0.4689956
v_model = np.reshape(v_model, (Nl_exc,Nl_exc))
print("Using stipulated reference distribution...")
connections = [np.zeros((Nl_exc**2,Nl_exc**2)) for x in times_for_readout_list]
h = [np.zeros((Nl_exc**2,Nl_exc**2)) for x in times_for_readout_list]
z = [np.zeros((Nl_exc**2,Nl_exc**2)) for x in times_for_readout_list]
v = [np.zeros((Nl_exc,Nl_exc)) for x in times_for_readout_list]
v_array = np.zeros(Nl_exc*Nl_exc) # data array
rawpaths = Path(nppath)
for i in range(len(times_for_readout_list)):
time_for_readout = times_for_readout_list[i]
path = ""
# look for data file [timestamp]_net_[time_for_readout].txt
for x in rawpaths.iterdir():
tmppath = str(x)
if (timestamp + "_net_" + time_for_readout + ".txt") in tmppath:
path = tmppath
if path == "":
raise FileNotFoundError('"' + timestamp + '_net_' + time_for_readout + '.txt" was not found')
try:
connections[i], h[i], z[i], v[i] = readWeightMatrixData(path, Nl_exc)
except ValueError:
raise
except OSError:
raise
### Activity ###
if time_ref: # use reference firing rate distribution from data (for learned cell assembly)
margEntropyActL = marginalEntropy(v[0])
print("margEntropyActL = " + str(margEntropyActL))
margEntropyActR = marginalEntropy(v[1])
print("margEntropyActR = " + str(margEntropyActR))
jointEntropyAct = jointEntropy(v[0],v[1])
print("jointEntropyAct = " + str(jointEntropyAct))
else: # use model firing rate distribution (for stipulated cell assembly)
margEntropyActL = marginalEntropy(v_model)
print("margEntropyActL = " + str(margEntropyActL))
margEntropyActR = marginalEntropy(v[0])
print("margEntropyActR = " + str(margEntropyActR))
jointEntropyAct = jointEntropy(v_model,v[0])
print("jointEntropyAct = " + str(jointEntropyAct))
### Results and Output ###
MIa = mutualInformation(margEntropyActL, margEntropyActR, jointEntropyAct)
print("MIa = " + str(MIa))
return MIa, margEntropyActL
# marginalEntropy
# Computes the marginal entropy of an array
# a: array of outcomes (e.g., array of activities of all neurons in a network or array of weights of all synapses in a network)
# return: the Shannon entropy of a
def marginalEntropy(a):
val, p = marginalProbDist(a)
p = p[p > 0]
entropy = - np.sum(p * np.log2(p))
return entropy
# jointEntropy
# Computes the joint entropy of two arrays
# a1: first array of outcomes (e.g., array of activities of all neurons in a network or array of weights of all synapses in a network)
# a2: second array of outcomes
# return: the joint Shannon entropy of (a1, a2)
def jointEntropy(a1, a2):
val, p = jointProbDist(a1, a2)
p = p[p > 0]
entropy = - np.sum(p * np.log2(p))
return entropy
# mutualInformation
# Computes the mutual information of two arrays
# margEntropy1: marginal entropy of the first array of outcomes
# margEntropy2: marginal entropy of the second array of outcomes
# jEntropy: joint entropy of both arrays of outcomes
# return: the mutual information of (a1, a2)
def mutualInformation(margEntropy1, margEntropy2, jEntropy):
#margEntropy1 = marginalEntropy(a1)
#margEntropy2 = marginalEntropy(a2)
#jEntropy = jointEntropy(a1, a2)
MI = margEntropy1 + margEntropy2 - jEntropy
return MI
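# Minimal sketch (toy arrays, not simulation data) of how the helpers above
# combine; the identity used is MI = H(a1) + H(a2) - H(a1, a2).
#     a1 = np.array([[0., 0.], [1., 1.]])
#     a2 = np.array([[0., 1.], [0., 1.]])
#     mi = mutualInformation(marginalEntropy(a1), marginalEntropy(a2), jointEntropy(a1, a2))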
```
#### File: memory-consolidation-stc/analysis/overlapParadigms.py
```python
import numpy as np
# coreDefinitions
# Returns the neuron numbers belonging to each of three assemblies in a given paradigm
# paradigm: name of the paradigm to consider
# core_size: the size of one assembly
# return: the three assemblies
def coreDefinitions(paradigm, core_size = 600):
# full_overlap
# Returns the neuron numbers belonging to each of three assemblies with equal overlaps
# overlap: overlap between each two cell assemblies (0.1 equals "OVERLAP10" and so on)
# return: the three assemblies
def full_overlap(overlap):
tot_wo_overlap = 1-overlap
half_overlap = overlap / 2
tot2_wo_overlap_oh = 2-overlap-half_overlap
core1 = np.arange(core_size)
core2 = np.arange(int(np.round(tot_wo_overlap*core_size)), int(np.round(tot_wo_overlap*core_size))+core_size)
core3 = np.concatenate(( np.arange(int(np.round(tot2_wo_overlap_oh*core_size)), int(np.round(tot2_wo_overlap_oh*core_size))+int(np.round(tot_wo_overlap*core_size))), \
np.arange(int(np.round(half_overlap*core_size))), \
np.arange(int(np.round(tot_wo_overlap*core_size)), int(np.round(tot_wo_overlap*core_size))+int(np.round(half_overlap*core_size))) ))
return (core1, core2, core3)
# no_ABC_overlap
# Returns the neuron numbers belonging to each of three assemblies with equal overlaps but no common overlap
# overlap: overlap between each two cell assemblies (0.1 equals "OVERLAP10 no ABC" and so on)
# return: the three assemblies
def no_ABC_overlap(overlap):
tot_wo_overlap = 1-overlap
tot2_wo_overlap_oh = 2 - 2*overlap
core1 = np.arange(core_size)
core2 = np.arange(int(np.round(tot_wo_overlap*core_size)), int(np.round(tot_wo_overlap*core_size))+core_size)
core3 = np.concatenate(( np.arange(int(np.round(tot2_wo_overlap_oh*core_size)), int(np.round(tot2_wo_overlap_oh*core_size))+int(np.round(tot_wo_overlap*core_size))), \
np.arange(int(np.round(overlap*core_size))) ))
return (core1, core2, core3)
# no_AC_no_ABC_overlap
# Returns the neuron numbers belonging to each of three assemblies with "no AC, no ABC" overlap
# overlap: overlap between each two cell assemblies (0.1 equals "OVERLAP10 no AC, no ABC" and so on)
# return: the three assemblies
def no_AC_no_ABC_overlap(overlap):
tot_wo_overlap = 1-overlap
tot2_wo_overlap_oh = 2 - 2*overlap
core1 = np.arange(core_size)
core2 = np.arange(int(np.round(tot_wo_overlap*core_size)), int(np.round(tot_wo_overlap*core_size))+core_size)
core3 = np.arange(int(np.round(tot2_wo_overlap_oh*core_size)), int(np.round(tot2_wo_overlap_oh*core_size))+core_size)
return (core1, core2, core3)
# no_BC_no_ABC_overlap
# Returns the neuron numbers belonging to each of three assemblies with "no BC, no ABC" overlap
# overlap: overlap between each two cell assemblies (0.1 equals "OVERLAP10 no BC, no ABC" and so on)
# return: the three assemblies
def no_BC_no_ABC_overlap(overlap):
tot_wo_overlap = 1-overlap
core1 = np.arange(core_size)
core2 = np.arange(int(np.round(tot_wo_overlap*core_size)), int(np.round(tot_wo_overlap*core_size))+core_size)
core3 = np.concatenate(( np.arange(int(np.round(tot_wo_overlap*core_size))+core_size, 2*int(np.round(tot_wo_overlap*core_size))+core_size), \
np.arange(int(np.round(overlap*core_size))) ))
return (core1, core2, core3)
# handling the different overlap paradigms:
if paradigm == "NOOVERLAP":
core1 = np.arange(core_size)
core2 = np.arange(core_size, 2*core_size)
core3 = np.arange(2*core_size, 3*core_size)
elif paradigm == "OVERLAP10":
core1, core2, core3 = full_overlap(0.1)
elif paradigm == "OVERLAP10 no ABC":
core1, core2, core3 = no_ABC_overlap(0.1)
elif paradigm == "OVERLAP10 no AC, no ABC":
core1, core2, core3 = no_AC_no_ABC_overlap(0.1)
elif paradigm == "OVERLAP10 no BC, no ABC":
core1, core2, core3 = no_BC_no_ABC_overlap(0.1)
elif paradigm == "OVERLAP15":
core1, core2, core3 = full_overlap(0.15)
elif paradigm == "OVERLAP15 no ABC":
core1, core2, core3 = no_ABC_overlap(0.15)
elif paradigm == "OVERLAP15 no AC, no ABC":
core1, core2, core3 = no_AC_no_ABC_overlap(0.15)
elif paradigm == "OVERLAP15 no BC, no ABC":
core1, core2, core3 = no_BC_no_ABC_overlap(0.15)
elif paradigm == "OVERLAP20":
core1, core2, core3 = full_overlap(0.2)
elif paradigm == "OVERLAP20 no ABC":
core1, core2, core3 = no_ABC_overlap(0.2)
elif paradigm == "OVERLAP20 no AC, no ABC":
core1, core2, core3 = no_AC_no_ABC_overlap(0.2)
elif paradigm == "OVERLAP20 no BC, no ABC":
core1, core2, core3 = no_BC_no_ABC_overlap(0.2)
else:
raise ValueError("Unknown paradigm: " + paradigm)
return (core1, core2, core3)
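# Usage sketch: the three assemblies for a given paradigm are obtained as, e.g.,
#     coreA, coreB, coreC = coreDefinitions("OVERLAP10 no ABC", core_size=600)
# Each returned array contains the indices of the excitatory neurons belonging
# to that assembly.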
```
#### File: run_binary_paper2/priming_and_activation/mergeRawData.py
```python
from pathlib import Path
import os
######################################
# mergeRawData
# Looks in a specified directory for files with a certain substring in the filename and merges them
# (merging the content of the lines) into a single file
# rootpath: relative path to the output directory
# substr: string that the filename of files to be merged has to contain
# output_file: name of the output file
# remove_raw [optional]: removes the raw data files
# sep_str [optional]: the character or string by which to separate the lines in the output file
def mergeRawData(rootpath, substr, output_file, remove_raw=False, sep_str='\t\t'):
path = Path(rootpath)
num_rows = -1
all_data = []
for x in sorted(path.iterdir()): # loop through files in the output directory
x_str = str(x)
if not x.is_dir() and substr in x_str:
f = open(x_str)
single_trial_data = f.read()
f.close()
single_trial_data = single_trial_data.split('\n')
if single_trial_data[-1] == "":
del single_trial_data[-1] # delete empty line
if len(single_trial_data) != num_rows:
if num_rows == -1:
num_rows = len(single_trial_data)
all_data = single_trial_data
else:
raise Exception("Wrong number of rows encountered in: " + x_str)
else:
for i in range(num_rows):
all_data[i] += sep_str + single_trial_data[i]
if remove_raw:
os.remove(x_str)
fout = open(os.path.join(rootpath, output_file), "w")
for i in range(num_rows):
fout.write(all_data[i] + '\n')
fout.close()
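# Usage sketch (hypothetical file names): merge all per-trial files whose names
# contain a given substring, line by line, into one tab-separated file and
# delete the originals.
#     mergeRawData("./output", "_probabilities_raw.txt", "all_trials.txt", remove_raw=True)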
``` |
{
"source": "jlucangelio/adventofcode-2017",
"score": 3
} |
#### File: jlucangelio/adventofcode-2017/day20_2.py
```python
from collections import namedtuple
Vector = namedtuple("Vector", "x y z")
Particle = namedtuple("Particle", "pos v a")
def vector_from_s(s):
cs = s.split('=')[1][1:-1]
x, y, z = [int(c) for c in cs.split(',')]
return Vector(x, y, z)
def move_once(part):
new_v = Vector(part.v.x + part.a.x, part.v.y + part.a.y, part.v.z + part.a.z)
new_pos = Vector(part.pos.x + new_v.x, part.pos.y + new_v.y, part.pos.z + new_v.z)
return part._replace(pos=new_pos, v=new_v)
def distance(part):
return sum([int(abs(c)) for c in part.pos])
particles = {}
with open("day20.input") as f:
for i, line in enumerate(f.readlines()):
# p=<2366,784,-597>, v=<-12,-41,50>, a=<-5,1,-2>
sp, sv, sa = line.strip().split(", ")
pos = vector_from_s(sp)
v = vector_from_s(sv)
a = vector_from_s(sa)
particles[i] = Particle(pos, v, a)
for i in range(1000):
collisions = {}
for j in particles:
# print i, j
part = move_once(particles[j])
particles[j] = part
if part.pos not in collisions:
collisions[part.pos] = set()
collisions[part.pos].add(j)
for c, indexes in collisions.iteritems():
if len(indexes) > 1:
for index in indexes:
del particles[index]
print len(particles)
```
#### File: jlucangelio/adventofcode-2017/day23_2.py
```python
import time
INSTRUCTIONS = []
with open("day23.input") as f:
for line in f.readlines():
tokens = line.strip().split()
op = tokens[0]
dst = tokens[1]
src = None
if len(tokens) > 2:
src = tokens[2]
INSTRUCTIONS.append((op, dst, src))
# print INSTRUCTIONS
regs = {
'a': 1,
'b': 0,
'c': 0,
'd': 0,
'e': 0,
'f': 0,
'g': 0,
'h': 0,
}
def get_operand(operand):
if operand in regs:
return regs[operand]
else:
return int(operand)
pc = 0
mul_count = 0
its = 0
while pc >= 0 and pc < len(INSTRUCTIONS):
its += 1
if its % 1000000 == 0:
print pc
print regs
time.sleep(2)
op, dst, src = INSTRUCTIONS[pc]
if op == "set":
regs[dst] = get_operand(src)
if op == "inc":
regs[dst] += 1
elif op == "sub":
regs[dst] -= get_operand(src)
if dst == 'h':
print regs
elif op == "mul":
regs[dst] = regs[dst] * get_operand(src)
# mul_count += 1
elif op == "jnz":
if get_operand(dst) != 0:
pc += get_operand(src)
else:
pc += 1
if op != "jnz":
pc += 1
print pc
print regs
time.sleep(2)
# print mul_count
print regs['h']
```
#### File: jlucangelio/adventofcode-2017/day7.py
```python
def calculate_weights(node, programs):
weight, programs_above = programs[node]
child_weights = []
stack_weights = []
total_weights = []
for child in programs_above:
child_weight, stack_weight = calculate_weights(child, programs)
child_weights.append(child_weight)
stack_weights.append(stack_weight)
total_weights.append(child_weight + stack_weight)
if len(programs_above) > 0:
first_total_weight = total_weights[0]
if any([tweight != first_total_weight for tweight in total_weights]):
print child_weights
print stack_weights
print total_weights
for i, wi in enumerate(total_weights):
if all([wi != wj for j, wj in enumerate(total_weights) if j != i]):
# wi is different from all other weights
wj = total_weights[(i + 1) % 2]
print child_weights[i] + wj - wi
print
return weight, sum(total_weights)
programs = {}
programs_above_others = set()
with open("day7.input") as f:
for line in f.readlines():
tokens = line.strip().split()
name = tokens[0]
weight = int(tokens[1][1:-1])
programs_above = []
if len(tokens) > 3:
for token in tokens[3:]:
if token[-1] == ',':
programs_above.append(token[:-1])
else:
programs_above.append(token)
programs_above_others.update(programs_above)
programs[name] = (weight, programs_above)
root = (set(programs.keys()) - programs_above_others).pop()
print root
print calculate_weights(root, programs)
``` |
{
"source": "jlucangelio/adventofcode-2018",
"score": 3
} |
#### File: jlucangelio/adventofcode-2018/day08.py
```python
from collections import namedtuple
Node = namedtuple("Node", "children metadata value")
def parse_tree(numbers, index):
nchildren = numbers[index]
nmetadata = numbers[index + 1]
cur_index = index + 2
children = []
metadata = []
metadata_sum = 0
value = 0
for _ in range(nchildren):
(tree, new_index, partial_metadata_sum) = parse_tree(numbers, cur_index)
cur_index = new_index
children.append(tree)
metadata_sum += partial_metadata_sum
for mindex in range(nmetadata):
datum = numbers[cur_index + mindex]
metadata.append(datum)
metadata_sum += sum(metadata)
if len(children) == 0:
value = metadata_sum
else:
for datum in metadata:
if datum <= len(children):
value += children[datum - 1].value
return (Node(children, metadata, value), cur_index + nmetadata, metadata_sum)
# with open("day08.small.input") as f:
with open("day08.input") as f:
numbers = [int(n) for n in f.read().split()]
print parse_tree(numbers, 0)
```
#### File: jlucangelio/adventofcode-2018/day16.py
```python
import copy
import re
functions = {}
def addr(a, b, c, regs):
regs[c] = regs[a] + regs[b]
def addi(a, b, c, regs):
regs[c] = regs[a] + b
functions[0] = addr
functions[1] = addi
def mulr(a, b, c, regs):
regs[c] = regs[a] * regs[b]
def muli(a, b, c, regs):
regs[c] = regs[a] * b
functions[2] = mulr
functions[3] = muli
def banr(a, b, c, regs):
regs[c] = regs[a] & regs[b]
def bani(a, b, c, regs):
regs[c] = regs[a] & b
functions[4] = banr
functions[5] = bani
def borr(a, b, c, regs):
regs[c] = regs[a] | regs[b]
def bori(a, b, c, regs):
regs[c] = regs[a] | b
functions[6] = borr
functions[7] = bori
def setr(a, b, c, regs):
regs[c] = regs[a]
def seti(a, b, c, regs):
regs[c] = a
functions[8] = setr
functions[9] = seti
def gt(a, b):
if a > b:
return 1
else:
return 0
def gtir(a, b, c, regs):
regs[c] = gt(a, regs[b])
def gtri(a, b, c, regs):
regs[c] = gt(regs[a], b)
def gtrr(a, b, c, regs):
regs[c] = gt(regs[a], regs[b])
functions[10] = gtir
functions[11] = gtri
functions[12] = gtrr
def eq(a, b):
if a == b:
return 1
else:
return 0
def eqir(a, b, c, regs):
regs[c] = eq(a, regs[b])
def eqri(a, b, c, regs):
regs[c] = eq(regs[a], b)
def eqrr(a, b, c, regs):
regs[c] = eq(regs[a], regs[b])
functions[13] = eqir
functions[14] = eqri
functions[15] = eqrr
count = 0
possible_functions = dict([(i, set(range(16))) for i in range(16)])
with open("day16.input") as f:
# with open("day16.small.input") as f:
lines = f.readlines()
for i in range(0, len(lines), 4):
# Before: [0, 2, 0, 2]
# 6 0 1 1
# After: [0, 1, 0, 2]
before = lines[i].strip()
m = re.match("Before: \[([0-9]), ([0-9]), ([0-9]), ([0-9])\]", before)
regs_before = {}
for j in range(4):
regs_before[j] = int(m.groups()[j])
operation = [int(t) for t in lines[i + 1].strip().split()]
after = lines[i + 2].strip()
m = re.match("After: \[([0-9]), ([0-9]), ([0-9]), ([0-9])\]", after)
regs_after = {}
for j in range(4):
regs_after[j] = int(m.groups()[j])
cur_possible_functions = set([])
for fnum, function in functions.iteritems():
cur_regs = copy.copy(regs_before)
function(operation[1], operation[2], operation[3], cur_regs)
if cur_regs == regs_after:
cur_possible_functions.add(fnum)
if len(cur_possible_functions) >= 3:
count += 1
possible_functions[operation[0]].intersection_update(cur_possible_functions)
print count
while sum([len(s) for s in possible_functions.values()]) > 16:
for op1, s in possible_functions.iteritems():
if len(s) == 1:
for op2, t in possible_functions.iteritems():
if op1 != op2:
e = s.pop()
t.discard(e)
s.add(e)
opcodes = {}
for op, s in possible_functions.iteritems():
opcodes[op] = functions[s.pop()]
print opcodes
regs = dict([(i, 0) for i in range(4)])
with open("day16.2.input") as f:
for line in f:
operation = [int(t) for t in line.strip().split()]
opcodes[operation[0]](operation[1], operation[2], operation[3], regs)
print regs[0]
```
#### File: jlucangelio/adventofcode-2018/day19.py
```python
INPUT = """#ip 3
addi 3 16 3
seti 1 0 4
seti 1 0 1
mulr 4 1 5
eqrr 5 2 5
addr 5 3 3
addi 3 1 3
addr 4 0 0
addi 1 1 1
gtrr 1 2 5
addr 3 5 3
seti 2 9 3
addi 4 1 4
gtrr 4 2 5
addr 5 3 3
seti 1 2 3
mulr 3 3 3
addi 2 2 2
mulr 2 2 2
mulr 3 2 2
muli 2 11 2
addi 5 4 5
mulr 5 3 5
addi 5 16 5
addr 2 5 2
addr 3 0 3
seti 0 8 3
setr 3 2 5
mulr 5 3 5
addr 3 5 5
mulr 3 5 5
muli 5 14 5
mulr 5 3 5
addr 2 5 2
seti 0 0 0
seti 0 0 3
""".splitlines()
MNEMONICS = """goto 17
r4 = 1
r1 = 1
r5 = r4 * r1
if r5 == r2 goto 07
<nop>
goto 8
r0 = r4 + r0
r1 = r1 + 1
if r1 > r2 goto 12
<nop>
goto 3
r4 = r4 + 1
if r4 > r2 goto 07
<nop>
goto 2
r3 = r3 * r3
r2 = r2 + 2
r2 = r2 * r2
r2 = r3 * r2
r2 = r2 * 11
r5 = r5 + 4
r5 = r5 * r3
r5 = r5 + 16
r2 = r2 + r5
r3 = r3 + r0
goto 1
r5 = r3
r5 = r5 * r3
r5 = r3 + r5
r5 = r3 * r5
r5 = r5 * 14
r5 = r5 * r3
r2 = r2 + r5
r0 = 0
goto 1
""".splitlines()
functions = {}
def addr(a, b, c, regs):
regs[c] = regs[a] + regs[b]
def addi(a, b, c, regs):
regs[c] = regs[a] + b
functions["addr"] = addr
functions["addi"] = addi
def mulr(a, b, c, regs):
regs[c] = regs[a] * regs[b]
def muli(a, b, c, regs):
regs[c] = regs[a] * b
functions["mulr"] = mulr
functions["muli"] = muli
def banr(a, b, c, regs):
regs[c] = regs[a] & regs[b]
def bani(a, b, c, regs):
regs[c] = regs[a] & b
functions["banr"] = banr
functions["bani"] = bani
def borr(a, b, c, regs):
regs[c] = regs[a] | regs[b]
def bori(a, b, c, regs):
regs[c] = regs[a] | b
functions["borr"] = borr
functions["bori"] = bori
def setr(a, b, c, regs):
regs[c] = regs[a]
def seti(a, b, c, regs):
regs[c] = a
functions["setr"] = setr
functions["seti"] = seti
def gt(a, b):
if a > b:
return 1
else:
return 0
def gtir(a, b, c, regs):
regs[c] = gt(a, regs[b])
def gtri(a, b, c, regs):
regs[c] = gt(regs[a], b)
def gtrr(a, b, c, regs):
regs[c] = gt(regs[a], regs[b])
functions["gtir"] = gtir
functions["gtri"] = gtri
functions["gtrr"] = gtrr
def eq(a, b):
if a == b:
return 1
else:
return 0
def eqir(a, b, c, regs):
regs[c] = eq(a, regs[b])
def eqri(a, b, c, regs):
regs[c] = eq(regs[a], b)
def eqrr(a, b, c, regs):
regs[c] = eq(regs[a], regs[b])
functions["eqir"] = eqir
functions["eqri"] = eqri
functions["eqrr"] = eqrr
instructions = []
for line in INPUT[1:]:
opcode, a, b, c = line.strip().split()
instructions.append((opcode, int(a), int(b), int(c)))
regs = dict([(i, 0) for i in range(6)])
# regs[0] = 1
ip = 0
ip_binding = int(INPUT[0].split()[1])
cycle = 0
while ip < len(instructions):
if cycle % 1000000 == 0:
print cycle
regs[ip_binding] = ip
opcode, a, b, c = instructions[ip]
# s = "%2d %s" % (ip, MNEMONICS[ip])
# if "r3" in s and s.rfind("r3") > s.find("="):
# l, r = s.split("=")
# s = l + "=" + r.replace("r3", str(ip))
# s += " " * (40 - len(s)) + str(regs)
# print s
functions[opcode](a, b, c, regs)
ip = regs[ip_binding]
ip +=1
cycle += 1
print regs[0]
```
#### File: jlucangelio/adventofcode-2018/day20.py
```python
from collections import namedtuple, deque
INPUT = "WSSEESWWWNW(S|NENNEEEENN(ESSSSW(NWSW|SSEN)|WSWWN(E|WWS(E|SS))))"
INPUT = "ESSWWN(E|NNENN(EESS(WNSE|)SSS|WWWSSSSE(SW|NNNE)))"
# INPUT = "ENNWSWW(NEWS|)SSSEEN(WNSE|)EE(SWEN|)NNN"
# INPUT = "ENWWW(NEEE|SSE(EE|N))"
class Pos(namedtuple("Pos", "x y")):
__slots__ = ()
def __add__(self, other):
return Pos(self.x + other.x, self.y + other.y)
N = Pos(0, -1)
S = Pos(0, 1)
W = Pos(-1, 0)
E = Pos(1, 0)
DIRS = {}
DIRS["N"] = N
DIRS["S"] = S
DIRS["E"] = E
DIRS["W"] = W
def parse(regex, begin, end):
# print "parse", regex[begin:end]
if begin == end:
return 0
if regex.find("(", begin, end) == -1 and regex.find(")", begin, end):
branches = regex[begin:end].split("|")
return max([len(b) for b in branches])
first_branch_begin = regex.find("(", begin, end)
first_branch_end = -1
stack = []
cur_branch_begin = -1
branches = []
for i in range(first_branch_begin, end):
c = regex[i]
# print c
if c == "(":
# print i, "open paren"
stack.append(i)
if len(stack) == 1:
cur_branch_begin = i + 1
elif c == ")":
# print i, "close paren"
stack.pop()
if len(stack) == 0:
first_branch_end = i
# print "begin", cur_branch_begin, "end", i
branches.append(parse(regex, cur_branch_begin, i))
break
elif c == "|":
if len(stack) > 1:
continue
else:
# print "begin", cur_branch_begin, "end", i
branches.append(parse(regex, cur_branch_begin, i))
cur_branch_begin = i + 1
return len(regex[begin:first_branch_begin]) + max(branches) + parse(regex, first_branch_end + 1, end)
def walk(regex, begin, end):
if begin == end:
return []
if regex.find("(", begin, end) == -1 and regex.find(")", begin, end):
branches = regex[begin:end].split("|")
# print "branches", branches
return branches
first_branch_begin = regex.find("(", begin, end)
first_branch_end = -1
stack = []
cur_branch_begin = -1
branches = []
for i in range(first_branch_begin, end):
c = regex[i]
# print c
if c == "(":
# print i, "open paren"
stack.append(i)
if len(stack) == 1:
cur_branch_begin = i + 1
elif c == ")":
# print i, "close paren"
stack.pop()
if len(stack) == 0:
first_branch_end = i
# print "begin", cur_branch_begin, "end", i
branches.extend(walk(regex, cur_branch_begin, i))
break
elif c == "|":
if len(stack) > 1:
continue
else:
# print "begin", cur_branch_begin, "end", i
branches.extend(walk(regex, cur_branch_begin, i))
cur_branch_begin = i + 1
res = []
prefix = regex[begin:first_branch_begin]
suffixes = walk(regex, first_branch_end + 1, end)
# print "prefix", prefix
for b in branches:
# print "b", b
if len(suffixes) > 0:
for c in suffixes:
# print "c", c
res.append(prefix + b + c)
else:
res.append(prefix + b)
return res
# print parse(INPUT, 0, len(INPUT))
# for w in walk(INPUT, 0, len(INPUT)):
# print w
# directions = walk(INPUT, 0, len(INPUT))
with open("day20.input") as f:
regex = f.read()
directions = walk(regex, 1, len(regex) - 1)
maze = {}
neighbors = {}
for route in directions:
cur = Pos(0, 0)
for direction in route:
if cur not in neighbors:
neighbors[cur] = set()
n = cur + DIRS[direction]
neighbors[cur].add(n)
cur = n
# print neighbors
# BFS out from 0,0
visiting = deque()
visiting.append((Pos(0, 0), 0))
visited = set()
distances = {}
while len(visiting) > 0:
pos, dist = visiting.popleft()
if pos in visited:
continue
if pos in neighbors:
for n in neighbors[pos]:
visiting.append((n, dist + 1))
visited.add(pos)
distances[pos] = dist
# print distances
# print distances[Pos(0,0)]
print max(distances.values())
print sum([1 for d in distances.values() if d >= 1000])
```
#### File: jlucangelio/adventofcode-2018/day24.py
```python
import copy
import re
IMMUNE = 0
INFECTION = 1
SIDE = {
"immune": IMMUNE,
"infection": INFECTION
}
SIDE_NAME = {
IMMUNE: "immune",
INFECTION: "infection"
}
FIRE = 0
BLUDGEONING = 1
COLD = 2
RADIATION = 3
SLASHING = 4
ATTACK_TYPE = {
"fire": FIRE,
"bludgeoning": BLUDGEONING,
"cold": COLD,
"radiation": RADIATION,
"slashing": SLASHING
}
class Group(object):
def __init__(self, side, index, nunits, unit_hp, attack_damage, attack_type, initiative, immunities, weaknesses):
self.side = side
self.index = index
self.nunits = nunits
self.unit_hp = unit_hp
self.attack_damage = attack_damage
self.attack_type = attack_type
self.initiative = initiative
self.immunities = immunities
self.weaknesses = weaknesses
self.target = None
def ep(self):
return self.nunits * self.attack_damage
def alive(self):
return self.nunits > 0
def select_target(self, target):
self.target = target
def tentative_damage(self, target):
if self.attack_type in target.immunities:
return 0
elif self.attack_type in target.weaknesses:
return self.ep() * 2
else:
return self.ep()
def attack(self):
if not self.target:
return False
damage = self.tentative_damage(self.target)
if damage == 0:
return False
units_killed = min(damage // self.target.unit_hp, self.target.nunits)
self.target.nunits -= units_killed
# template = "%s group %d attacks defending group %d with damage %d killing %d units"
# print template % (SIDE_NAME[self.side], self.index, self.target.index, damage, units_killed)
return True
def __str__(self):
return "Group %d contains %d units, ep %d" % (self.index, self.nunits, self.ep())
def battle(infection, immune, boost=0):
# infection = copy.deepcopy(orig_infection)
# immune = copy.deepcopy(orig_immune)
for g in immune:
g.attack_damage += boost
while len(immune) > 0 and len(infection) > 0:
sides = {}
sides[IMMUNE] = immune
sides[INFECTION] = infection
targets = {}
targets[IMMUNE] = set()
targets[INFECTION] = set()
infection_sorted = sorted(infection, key=lambda g: (g.ep(), g.initiative), reverse=True)
immune_sorted = sorted(immune, key=lambda g: (g.ep(), g.initiative), reverse=True)
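        # Target selection: within each army, groups choose in decreasing order of
        # effective power (ties broken by initiative); each picks the enemy group it
        # would deal the most damage to that has not already been targeted this round.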
for team in [infection_sorted, immune_sorted]:
for g in team:
if not g.alive():
continue
g.select_target(None)
possible_targets = sorted(sides[1 - g.side], key=lambda t: (g.tentative_damage(t), t.ep(), t.initiative), reverse=True)
chosen_target = None
for pt in possible_targets:
                    if g.tentative_damage(pt) > 0 and pt.alive() and pt not in targets[g.side]:
# print "%s group %d would deal defending group %d %d damage" % (SIDE_NAME[g.side], g.index, pt.index, g.tentative_damage(pt))
chosen_target = pt
break
if chosen_target:
g.select_target(chosen_target)
targets[g.side].add(chosen_target)
attack_happened = False
groups = immune + infection
attack_order = sorted(groups, key=lambda g: g.initiative, reverse=True)
for g in attack_order:
if g.alive():
did_attack = g.attack()
attack_happened = attack_happened or did_attack
if not attack_happened:
break
# print sum([g.nunits for g in immune if g.alive()])
# print sum([g.nunits for g in infection if g.alive()])
# print "infection"
# for ing in sorted(infection, key=lambda g: (g.ep(), g.initiative), reverse=True):
# if ing.alive():
# print ing
# print "immune"
# for img in sorted(immune, key=lambda g: (g.ep(), g.initiative), reverse=True):
# if img.alive():
# print img
immune = [g for g in immune if g.alive()]
infection = [g for g in infection if g.alive()]
return infection, immune
groups = {}
groups["immune"] = []
groups["infection"] = []
for filename in ["day24.immune.input", "day24.infection.input"]:
# for filename in ["day24.immune.small.input", "day24.infection.small.input"]:
with open(filename) as file:
side = filename.split(".")[1]
for i, line in enumerate(file):
# 3020 units each with 3290 hit points with an attack that does 10 radiation damage at initiative 16
# 1906 units each with 37289 hit points (immune to radiation; weak to fire) with an attack that does 28 radiation damage at initiative 3
m = re.match("(?P<nunits>[0-9]+) units each with (?P<unit_hp>[0-9]+) hit points(?: \((?P<modifier>[a-z ,;]+)\))? with an attack that does (?P<attack_damage>[0-9]+) (?P<attack_type>[a-z]+) damage at initiative (?P<initiative>[0-9]+)", line.strip())
modifiers = {}
modifiers["immune"] = []
modifiers["weak"] = []
if m.group("modifier"):
for section in m.group("modifier").split("; "):
modifier_type = section.split()[0]
types = [ATTACK_TYPE[t] for t in section.split(" ", 2)[2].split(", ")]
modifiers[modifier_type] = set(types)
nunits = int(m.group("nunits"))
unit_hp = int(m.group("unit_hp"))
attack_damage = int(m.group("attack_damage"))
attack_type = ATTACK_TYPE[m.group("attack_type")]
initiative = int(m.group("initiative"))
groups[side].append(Group(SIDE[side], i + 1, nunits, unit_hp, attack_damage, attack_type, initiative, modifiers["immune"], modifiers["weak"]))
immune = copy.deepcopy(groups["immune"])
infection = copy.deepcopy(groups["infection"])
infection, immune = battle(infection, immune)
print sum([g.nunits for g in immune if g.alive()])
print sum([g.nunits for g in infection if g.alive()])
print
for boost in xrange(1, 1000000):
# print boost
infection, immune = battle(copy.deepcopy(groups["infection"]), copy.deepcopy(groups["immune"]), boost)
remaining_infection = sum([g.nunits for g in infection if g.alive()])
remaining_immune = sum([g.nunits for g in immune if g.alive()])
print remaining_infection, remaining_immune
if remaining_immune > 0 and remaining_infection == 0:
# print boost
break
``` |
{
"source": "jlucangelio/adventofcode-2019",
"score": 3
} |
#### File: jlucangelio/adventofcode-2019/day02.py
```python
import copy
import sys
INPUT = "1,0,0,3,1,1,2,3,1,3,4,3,1,5,0,3,2,1,10,19,1,6,19,23,1,10,23,27,2,27,13,31,1,31,6,35,2,6,35,39,1,39,5,43,1,6,43,47,2,6,47,51,1,51,5,55,2,55,9,59,1,6,59,63,1,9,63,67,1,67,10,71,2,9,71,75,1,6,75,79,1,5,79,83,2,83,10,87,1,87,5,91,1,91,9,95,1,6,95,99,2,99,10,103,1,103,5,107,2,107,6,111,1,111,5,115,1,9,115,119,2,119,10,123,1,6,123,127,2,13,127,131,1,131,6,135,1,135,10,139,1,13,139,143,1,143,13,147,1,5,147,151,1,151,2,155,1,155,5,0,99,2,0,14,0"
def execute(program, noun, verb):
memory = copy.copy(program)
memory[1] = noun
memory[2] = verb
pc = 0
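    # Minimal Intcode interpreter: opcode 1 adds, opcode 2 multiplies, 99 halts;
    # all operands are positional (they index into memory).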
while (memory[pc] != 99):
opcode = memory[pc]
op1 = memory[pc+1]
op2 = memory[pc+2]
dest = memory[pc+3]
# print pc, op1, op2, dest
if opcode == 1:
memory[dest] = memory[op1] + memory[op2]
elif opcode == 2:
memory[dest] = memory[op1] * memory[op2]
else:
print "invalid opcode ", opcode
break
pc += 4
return memory[0]
program = [int(t) for t in INPUT.split(',')]
print execute(program, 12, 2)
for noun in range(0, 100):
print noun
for verb in range(0, 100):
if execute(program, noun, verb) == 19690720:
print noun, verb, 100*noun + verb
sys.exit(0)
```
#### File: jlucangelio/adventofcode-2019/day12.py
```python
import copy
from collections import namedtuple
from fractions import gcd
Position = namedtuple("Position", "x y z")
Velocity = namedtuple("Velocity", "x y z")
X = 0
Y = 1
Z = 2
POS = 1
VEL = 2
x_states = set([])
y_states = set([])
z_states = set([])
states = [x_states, y_states, z_states]
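# The three axes evolve independently, so the period of the whole system is the
# least common multiple of the per-axis cycle lengths; each axis cycle is detected
# by remembering every (position, velocity) state previously seen on that axis.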
def lcm(a, b, c):
    # lcm via pairwise reduction: lcm(a, b, c) = lcm(lcm(a, b), c)
    lcm_ab = (a * b) // gcd(a, b)
    return (lcm_ab * c) // gcd(lcm_ab, c)
def input_to_position(s):
x, y, z = s[1:-1].split(",")
_, x = x.split("=")
_, y = y.split("=")
_, z = z.split("=")
return Position(int(x), int(y), int(z))
def move(moon):
new_pos = []
for c in range(3):
new_pos.append(moon[POS][c] + moon[VEL][c])
moon[POS] = Position(new_pos[X], new_pos[Y], new_pos[Z])
def print_moons(step, moons):
print "After step", step + 1
for m in moons:
print m
print
def moons_to_tuple(moons, coord):
return tuple([(m[POS][coord], m[VEL][coord]) for m in moons])
def add_state(states, moons):
for i in range(len(states)):
states[i].add(moons_to_tuple(moons, i))
INPUT = """<x=-15, y=1, z=4>
<x=1, y=-10, z=-8>
<x=-5, y=4, z=9>
<x=4, y=6, z=-2>""".splitlines()
moons = []
moons.append(["Io", input_to_position(INPUT[0]), Velocity(0, 0, 0)])
moons.append(["Europa", input_to_position(INPUT[1]), Velocity(0, 0, 0)])
moons.append(["Ganymede", input_to_position(INPUT[2]), Velocity(0, 0, 0)])
moons.append(["Callisto", input_to_position(INPUT[3]), Velocity(0, 0, 0)])
print moons
add_state(states, moons)
step = 1
x_cycle = None
y_cycle = None
z_cycle = None
while x_cycle is None or y_cycle is None or z_cycle is None:
for idx in range(len(moons)):
for jdx in range(len(moons)):
if idx < jdx:
pos_i = moons[idx][POS]
pos_j = moons[jdx][POS]
vel_i = moons[idx][VEL]
vel_j = moons[jdx][VEL]
new_veli = []
new_velj = []
for coord in range(3):
if pos_i[coord] < pos_j[coord]:
ic = 1
jc = -1
elif pos_i[coord] > pos_j[coord]:
ic = -1
jc = 1
else:
ic = 0
jc = 0
new_veli.append(vel_i[coord] + ic)
new_velj.append(vel_j[coord] + jc)
moons[idx][VEL] = Velocity(new_veli[X], new_veli[Y], new_veli[Z])
moons[jdx][VEL] = Velocity(new_velj[X], new_velj[Y], new_velj[Z])
for idx in range(len(moons)):
m = moons[idx]
move(m)
# check for cycles
# x
if x_cycle is None and moons_to_tuple(moons, X) in states[X]:
x_cycle = step
print "x", x_cycle
# y
if y_cycle is None and moons_to_tuple(moons, Y) in states[Y]:
y_cycle = step
print "y", y_cycle
# z
if z_cycle is None and moons_to_tuple(moons, Z) in states[Z]:
z_cycle = step
print "z", z_cycle
add_state(states, moons)
# print x_cycle, y_cycle, z_cycle
step += 1
print x_cycle, y_cycle, z_cycle
print lcm(x_cycle, y_cycle, z_cycle)
# total = 0
# for m in moons:
# p = 0
# k = 0
# for i in range(3):
# p += abs(m[1][i])
# k += abs(m[2][i])
# total += p * k
# print total
```
#### File: jlucangelio/adventofcode-2019/intcode.py
```python
import copy
POS = 0
IMM = 1
REL = 2
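# Parameter modes: POS treats a parameter as an address, IMM as a literal value,
# and REL as an address offset from the current relative base.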
def calculate_addr(param, mode, base):
offset = param
if mode == REL:
offset += base
return offset
def read_param(memory, param, mode, base):
if mode == IMM:
return param
else:
return memory[calculate_addr(param, mode, base)]
def execute(program, input_values):
memory = copy.copy(program)
cur_input_index = 0
rel_base = 0
outputs = []
pc = 0
while (memory[pc] != 99):
v = str(memory[pc])
opcode = int(v[-2:])
opmode_1 = POS
opmode_2 = POS
opmode_3 = POS
if len(v) > 2:
opmode_1 = int(v[-3])
if len(v) > 3:
opmode_2 = int(v[-4])
if len(v) > 4:
opmode_3 = int(v[-5])
param1 = memory[pc+1]
if opcode == 3:
if opmode_1 == IMM:
print "error, input instruction opmode IMM"
op1 = calculate_addr(param1, opmode_1, rel_base)
else:
op1 = read_param(memory, param1, opmode_1, rel_base)
if opcode == 1 or opcode == 2 or opcode == 5 or opcode == 6 or opcode == 7 or opcode == 8:
param2 = memory[pc+2]
op2 = read_param(memory, param2, opmode_2, rel_base)
if opcode == 1 or opcode == 2 or opcode == 7 or opcode == 8:
# Third operand is the destination operand, always in position/relative mode.
if opmode_3 == IMM:
print "error, destination operand specified IMM"
param3 = memory[pc+3]
dest = calculate_addr(param3, opmode_3, rel_base)
# Instructions
if opcode == 1:
memory[dest] = op1 + op2
pc += 4
elif opcode == 2:
memory[dest] = op1 * op2
pc += 4
elif opcode == 3:
# input
memory[op1] = input_values[cur_input_index]
print "[", pc, "]", "<-", input_values[cur_input_index]
cur_input_index += 1
pc += 2
elif opcode == 4:
# output
output = op1
print "[", pc, "]", output, "->"
outputs.append(output)
pc += 2
elif opcode == 5:
# jump-if-true
if op1 != 0:
pc = op2
else:
pc += 3
elif opcode == 6:
# jump-if-false
if op1 == 0:
pc = op2
else:
pc += 3
elif opcode == 7:
if op1 < op2:
memory[dest] = 1
else:
memory[dest] = 0
pc += 4
elif opcode == 8:
if op1 == op2:
memory[dest] = 1
else:
memory[dest] = 0
pc += 4
elif opcode == 9:
rel_base += op1
pc += 2
else:
print "invalid opcode", opcode
break
print "halt"
return outputs
def execute_until_output(name, program, starting_pc, input_values, starting_rel_base):
memory = copy.copy(program)
cur_input_index = 0
rel_base = starting_rel_base
outputs = []
pc = starting_pc
while (memory[pc] != 99):
v = str(memory[pc])
opcode = int(v[-2:])
opmode_1 = POS
opmode_2 = POS
opmode_3 = POS
if len(v) > 2:
opmode_1 = int(v[-3])
if len(v) > 3:
opmode_2 = int(v[-4])
if len(v) > 4:
opmode_3 = int(v[-5])
param1 = memory[pc+1]
if opcode == 3:
if opmode_1 == IMM:
print "error, input instruction opmode IMM"
op1 = calculate_addr(param1, opmode_1, rel_base)
else:
op1 = read_param(memory, param1, opmode_1, rel_base)
if opcode == 1 or opcode == 2 or opcode == 5 or opcode == 6 or opcode == 7 or opcode == 8:
param2 = memory[pc+2]
op2 = read_param(memory, param2, opmode_2, rel_base)
if opcode == 1 or opcode == 2 or opcode == 7 or opcode == 8:
# Third operand is the destination operand, always in position/relative mode.
if opmode_3 == IMM:
print "error, destination operand specified IMM"
param3 = memory[pc+3]
dest = calculate_addr(param3, opmode_3, rel_base)
# Instructions
if opcode == 1:
memory[dest] = op1 + op2
pc += 4
elif opcode == 2:
memory[dest] = op1 * op2
pc += 4
elif opcode == 3:
# input
memory[op1] = input_values[cur_input_index]
print "[", pc, "]", "<-", input_values[cur_input_index]
cur_input_index += 1
pc += 2
elif opcode == 4:
# output
output = op1
print "[", pc, "]", output, "->"
return output, memory, pc + 2, rel_base
elif opcode == 5:
# jump-if-true
if op1 != 0:
pc = op2
else:
pc += 3
elif opcode == 6:
# jump-if-false
if op1 == 0:
pc = op2
else:
pc += 3
elif opcode == 7:
if op1 < op2:
memory[dest] = 1
else:
memory[dest] = 0
pc += 4
elif opcode == 8:
if op1 == op2:
memory[dest] = 1
else:
memory[dest] = 0
pc += 4
elif opcode == 9:
rel_base += op1
pc += 2
else:
print "[", pc, "]", "invalid opcode", opcode
break
print "halt"
return None, memory, pc, rel_base
``` |
{
"source": "jlucangelio/adventofcode-2020",
"score": 3
} |
#### File: jlucangelio/adventofcode-2020/day05.py
```python
import math
# BOARDING_PASSES = ["FBFBBFFRLR"]
BOARDING_PASSES = [line.strip() for line in open("day05.in").readlines()]
NROWS = 128
NCOLS = 8
def binary_search(floor_inc, ceil_exc, directions):
temp_f = floor_inc
temp_c = ceil_exc
for i in range(len(directions)):
pivot = (temp_f + temp_c) // 2
if directions[i] == "F" or directions[i] == "L":
temp_c = pivot
elif directions[i] == "B" or directions[i] == "R":
temp_f = pivot
# print(temp_f, temp_c, directions[i])
if temp_f == temp_c - 1:
return temp_f
else:
return None
def seat_id(boarding_pass):
row = binary_search(0, NROWS, boarding_pass[0:7])
col = binary_search(0, NCOLS, boarding_pass[7:])
return row * 8 + col
seat_ids = [seat_id(bp) for bp in BOARDING_PASSES]
print(max(seat_ids))
prev = None
for seat_id in sorted(seat_ids):
if prev != None:
if seat_id == prev + 2:
print(prev + 1)
break
prev = seat_id
```
#### File: jlucangelio/adventofcode-2020/day12.py
```python
from collections import namedtuple
with open("day12.in") as f:
INSTRUCTIONS = [line.strip() for line in f.readlines()]
Pos = namedtuple("Pos", "x y")
Dir = namedtuple("Dir", "x y")
N = Dir(0, 1)
S = Dir(0, -1)
E = Dir(1, 0)
W = Dir(-1, 0)
directions = {"N": N, "S": S, "E": E, "W": W}
rotations = {"L90": Dir(0, 1),
"L180": Dir(-1, 0),
"L270": Dir(0, -1),
"R90": Dir(0, -1),
"R180": Dir(-1, 0),
"R270": Dir(0, 1)}
def scale(k, d):
return Dir(k * d.x, k * d.y)
def move(p, d):
return Pos(p.x + d.x, p.y + d.y)
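# rotate() treats a direction as the complex number x + yi and a rotation as a unit
# complex number, so rotating is just complex multiplication.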
def rotate(d, r):
a = d.x
b = d.y
c = r.x
d = r.y
return Dir(a*c - b*d, a*d + b*c)
pos = Pos(0, 0)
direction = E
for ins in INSTRUCTIONS:
a = ins[0]
val = int(ins[1:])
if a == "N" or a == "S" or a == "E" or a == "W" or a == "F":
if a == "F":
d = direction
else:
d = directions[a]
pos = move(pos, scale(val, d))
elif a == "L" or a == "R":
direction = rotate(direction, rotations[ins])
print(abs(pos.x) + abs(pos.y))
pos = Pos(0, 0)
waypoint = Dir(10, 1)
for ins in INSTRUCTIONS:
a = ins[0]
val = int(ins[1:])
if a == "N" or a == "S" or a == "E" or a == "W":
d = directions[a]
waypoint = move(waypoint, scale(val, d))
elif a == "F":
pos = move(pos, scale(val, waypoint))
elif a == "L" or a == "R":
waypoint = rotate(waypoint, rotations[ins])
print(abs(pos.x) + abs(pos.y))
```
#### File: jlucangelio/adventofcode-2020/day13.py
```python
DEPART = 1000677
SCHEDULE = "29,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,41,x,x,x,x,x,x,x,x,x,661,x,x,x,x,x,x,x,x,x,x,x,x,13,17,x,x,x,x,x,x,x,x,23,x,x,x,x,x,x,x,521,x,x,x,x,x,37,x,x,x,x,x,x,x,x,x,x,x,x,19"
buses = [int(t) for t in SCHEDULE.split(",") if t != "x"]
waits = [(((DEPART // b) + 1) * b - DEPART, b) for b in buses]
earliest = min(waits, key=lambda v: v[0])
print(earliest[0] * earliest[1])
buses = SCHEDULE.split(",")
a_s = []
n_s = []
for i, b in enumerate(buses):
if b != "x":
b = int(b)
if i == 0:
print("x = %d mod %d" % (i, b))
a_s.append(i)
n_s.append(b)
else:
if b - i < 0:
r = -i % b
else:
r = b - i
print("x = %d mod %d" % (r, b))
a_s.append(r)
n_s.append(b)
from functools import reduce
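# Part 2 is a system of congruences t = a_i (mod n_i), one per bus, solved with the
# Chinese Remainder Theorem below; mul_inv computes the modular inverse via the
# extended Euclidean algorithm.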
def chinese_remainder(n, a):
sum = 0
prod = reduce(lambda a, b: a*b, n)
for n_i, a_i in zip(n, a):
p = prod // n_i
sum += a_i * mul_inv(p, n_i) * p
return sum % prod
def mul_inv(a, b):
b0 = b
x0, x1 = 0, 1
if b == 1: return 1
while a > 1:
q = a // b
a, b = b, a%b
x0, x1 = x1 - q * x0, x0
if x1 < 0: x1 += b0
return x1
print(chinese_remainder(n_s, a_s))
```
#### File: jlucangelio/adventofcode-2020/day19.py
```python
import re
rules = {}
with open("day19.in") as f:
LINES = f.read().splitlines()
for i, l in enumerate(LINES):
if l == "":
break
# 58: 127 99 | 105 36
number, r = l.split(": ")
if "\"" in r:
rule = r[1]
else:
rule = {tuple([int(r) for r in opt.split()]) for opt in r.split(" | ")}
rules[int(number)] = rule
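# build_regexp() turns the grammar into one big regular expression; for part 2 the
# self-referencing rules 8 and 11 are approximated with bounded repetition (up to 9
# nested repeats of rule 11).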
def build_regexp(n, rules, part2=False):
rule = rules[n]
if type(rule) == str:
return rule
if part2:
#8: 42 | 42 8
#11: 42 31 | 42 11 31
r_42 = build_regexp(42, rules)
if n == 8:
return "(" + r_42 + "+" + ")"
if n == 11:
r_31 = build_regexp(31, rules)
#42{1}31{1}|42{2}31{2}|...
return "(" + "|".join([r_42 + "{%d}" % i + r_31 + "{%d}" % i for i in range(1, 10)]) + ")"
return "(" + "|".join(["".join([build_regexp(c, rules, part2) for c in opt]) for opt in rule]) + ")"
r = re.compile(build_regexp(0, rules))
r_part2 = re.compile(build_regexp(0, rules, part2=True))
count = 0
count_part2 = 0
for l in LINES[i+1:]:
if r.fullmatch(l) is not None:
count += 1
if r_part2.fullmatch(l) is not None:
count_part2 += 1
print(count)
print(count_part2)
``` |
{
"source": "jlucangelio/adventofcode-2021",
"score": 3
} |
#### File: jlucangelio/adventofcode-2021/day15.py
```python
import heapq
with open("day15.in") as f:
risk_level = [[int(l) for l in row] for row in f.read().splitlines()]
# print(len(risk_level), len(risk_level[0]))
# SMALL_INPUT = """1163751742
# 1381373672
# 2136511328
# 3694931569
# 7463417111
# 1319128137
# 1359912421
# 3125421639
# 1293138521
# 2311944581""".splitlines()
# risk_level = [[int(l) for l in row] for row in SMALL_INPUT]
class Pos(object):
def __init__(self, x, y, max_x, max_y):
self.x = x
self.y = y
self.max_x = max_x
self.max_y = max_y
def neighbors(self):
if self.x > 0:
yield Pos(self.x - 1, self.y, self.max_x, self.max_y)
if self.y > 0:
yield Pos(self.x, self.y - 1, self.max_x, self.max_y)
if self.x < self.max_x:
yield Pos(self.x + 1, self.y, self.max_x, self.max_y)
if self.y < self.max_y:
yield Pos(self.x, self.y + 1, self.max_x, self.max_y)
def as_tuple(self):
return (self.x, self.y)
def __str__(self):
return "(%d, %d)" % (self.x, self.y)
def shortest_path(m, source):
q = []
# q = set()
dist = {}
prev = {}
l = len(m)
for i in range(l):
for j in range(l):
p = (i, j)
# q.add(p)
dist[p] = 9 * 2 * l + 1
prev[p] = None
heapq.heappush(q, (0, source))
# dist[source] = 0
while len(q) > 0:
pri, u = heapq.heappop(q)
# u = min(q, key=lambda x: dist[x])
# q.remove(u)
if pri <= dist[u]:
dist[u] = pri
for v in Pos(u[0], u[1], l - 1, l - 1).neighbors():
vt = v.as_tuple()
# if vt in q:
alt = dist[u] + m[v.x][v.y]
if alt < dist[vt]:
dist[vt] = alt
prev[vt] = u
heapq.heappush(q, (dist[vt], vt))
return dist, prev
def increase_and_wrap(rl, i, j, l):
shift = i // l + j // l
# print(i, j, shift)
if rl + shift > 9:
return (rl + shift) % 10 + 1
else:
return rl + shift
dist, prev = shortest_path(risk_level, (0, 0))
print(dist[(99, 99)])
new_m = []
l = len(risk_level)
for i in range(5*l):
row = [0 for _ in range(5*l)]
for j in range(5*l):
row[j] = increase_and_wrap(risk_level[i % l][j % l], i, j, l)
new_m.append(row)
dist, prev = shortest_path(new_m, (0, 0))
print(dist[(l * 5 - 1, l * 5 - 1)])
```
#### File: jlucangelio/adventofcode-2021/day22.py
```python
from copy import copy
from collections import defaultdict, namedtuple
with open("day22.in") as f:
# with open("day22.small.in") as f:
lines = f.read().splitlines()
reactor = defaultdict(bool)
for line in lines[:20]:
# on x=0..45,y=-21..27,z=-28..20
# print(line)
switch, coords = line.split()
x, y, z = coords.split(",")
xmin, xmax = [int(v) for v in x.split("=")[1].split("..")]
ymin, ymax = [int(v) for v in y.split("=")[1].split("..")]
zmin, zmax = [int(v) for v in z.split("=")[1].split("..")]
for x in range(xmin, xmax + 1):
for y in range(ymin, ymax + 1):
for z in range(zmin, zmax + 1):
reactor[(x, y, z)] = True if switch == "on" else False
print(sum([1 if v else 0 for v in reactor.values()]))
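# Part 2: instead of enumerating cells, keep every input cuboid and record, inside
# each cuboid, its overlaps with all later cuboids as "exclusions"; exclusive_volume()
# subtracts overlap volumes recursively, so every cell is counted once, for the last
# instruction that touches it.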
class Range(object):
def __init__(self, rmin, rmax):
self.rmin = rmin
self.rmax = rmax
def span(self):
return abs(self.rmax - self.rmin + 1)
def contains(self, block):
return self.rmin <= block <= self.rmax
def overlaps(self, other):
return self.contains(other.rmin) or self.contains(other.rmax)
def includes(self, other):
return self.contains(other.rmin) and self.contains(other.rmax)
def intersection(self, other):
if self.includes(other):
return other
if other.includes(self):
return self
if not self.overlaps(other):
return None
if self.contains(other.rmin) and self.contains(other.rmax):
return Range(other.rmin, other.rmax)
elif self.contains(other.rmin):
return Range(other.rmin, self.rmax)
elif self.contains(other.rmax):
return Range(self.rmin, other.rmax)
def __str__(self):
return "%d..%d" % (self.rmin, self.rmax)
class Cuboid(object):
def __init__(self, xrange, yrange, zrange, on):
self.xrange = xrange
self.yrange = yrange
self.zrange = zrange
self.on = on
self.exclusions = []
def volume(self):
return self.xrange.span() * self.yrange.span() * self.zrange.span()
def exclusive_volume(self):
return (self.xrange.span() * self.yrange.span() * self.zrange.span() -
sum([e.exclusive_volume() for e in self.exclusions]))
def contains(self, other):
return all([self.xrange.includes(other.xrange),
self.yrange.includes(other.yrange),
self.zrange.includes(other.zrange)])
def intersect(self, other):
xint = self.xrange.intersection(other.xrange)
yint = self.yrange.intersection(other.yrange)
zint = self.zrange.intersection(other.zrange)
if all([intersection is not None for intersection in [xint, yint, zint]]):
overlap = Cuboid(xint, yint, zint, other.on)
return overlap
else:
return None
def exclude(self, other):
overlap = self.intersect(other)
if not overlap:
return
for e in self.exclusions:
e.exclude(overlap)
self.exclusions.append(overlap)
return overlap
def num_on(self):
return self.exclusive_volume() if self.on else 0
def __str__(self):
return "Cuboid(x=%s,y=%s,z=%s,%s)" % (self.xrange, self.yrange, self.zrange, self.on)
original_cubes = []
for line in lines:
# on x=0..45,y=-21..27,z=-28..20
# print(line)
switch, coords = line.split()
x, y, z = coords.split(",")
xmin, xmax = [int(v) for v in x.split("=")[1].split("..")]
ymin, ymax = [int(v) for v in y.split("=")[1].split("..")]
zmin, zmax = [int(v) for v in z.split("=")[1].split("..")]
c = Cuboid(Range(xmin, xmax), Range(ymin, ymax), Range(zmin, zmax), switch == "on")
original_cubes.append(c)
for i, ic in enumerate(original_cubes):
for j, jc in enumerate(original_cubes):
if i < j:
original_cubes[i].exclude(original_cubes[j])
print(sum([c.num_on() for c in original_cubes]))
``` |
{
"source": "jlucangelio/adventofcode",
"score": 3
} |
#### File: jlucangelio/adventofcode/day12.py
```python
import sys
def print_regs(regs):
print "A:%d B:%d C:%d D:%d" % (regs["a"],regs["b"],regs["c"],regs["d"])
regs = {
"a": 0,
"b": 0,
"c": 1,
"d": 0
}
pc = 0
instructions = []
steps = 0
with open(sys.argv[1]) as f:
lines = f.readlines()
instructions = [line.strip().split() for line in lines]
while pc < len(instructions):
tokens = instructions[pc]
op = tokens[0]
# print_regs(regs)
# print pc, " ".join(tokens)
if op == "cpy":
src = tokens[1]
dst = tokens[2]
if src in regs:
regs[dst] = regs[src]
else:
regs[dst] = int(src)
offset = 1
elif op == "inc":
regs[tokens[1]] += 1
offset = 1
elif op == "dec":
regs[tokens[1]] -= 1
offset = 1
elif op == "jnz":
val = tokens[1]
offset = int(tokens[2])
if val in regs:
if regs[val] == 0:
offset = 1
elif int(val) == 0:
offset = 1
pc += offset
steps += 1
print regs["a"]
```
#### File: jlucangelio/adventofcode/day14.py
```python
import hashlib
SEED = "yjdafjpo"
# SEED = "abc"
# hashlib.sha224("Nobody inspects the spammish repetition").hexdigest()
def all_equal(str):
return all([c == str[0] for c in str])
def repeat_hash(s):
h = hashlib.md5(s).hexdigest()
for _ in range(2016):
h = hashlib.md5(h).hexdigest()
return h
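# Key search: index i is a key if its stretched hash (MD5 applied 1 + 2016 times)
# contains three of some character in a row and one of the next 1000 stretched
# hashes contains that same character five times in a row; stop after 64 keys.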
hashes = {}
index = 0
triplets = {}
quintets = {}
keys = {}
while len(keys) < 64:
if index % 1000 == 0:
print "at index %d" % index
if index in hashes:
h = hashes[index]
else:
message = SEED + str(index)
h = repeat_hash(message)
for i, char in enumerate(h):
if i < 2:
continue
possible_run = h[i-2:i+1]
if all_equal(possible_run):
first_triplet = possible_run
if first_triplet in quintets:
for qindex in quintets[first_triplet]:
if qindex > index and qindex - index <= 1000:
print index, first_triplet, qindex
keys[index] = first_triplet
break
if index not in keys:
for qindex in range(index + 1, index + 1001):
if qindex in hashes:
newh = hashes[qindex]
else:
newmessage = SEED + str(qindex)
newh = repeat_hash(newmessage)
for i, char in enumerate(newh):
if i < 4:
continue
possible_run = newh[i-4:i+1]
# if all_equal(possible_run):
# prefix = possible_run[:3]
# if prefix in quintets:
# quintets[prefix].add(qindex)
# else:
# quintets[prefix] = set([qindex])
if possible_run[0] != first_triplet[0]:
continue
if all_equal(possible_run):
quintet = possible_run
print index, first_triplet, qindex, quintet
keys[index] = first_triplet
if first_triplet in quintets:
quintets[first_triplet].add(qindex)
else:
quintets[first_triplet] = set([qindex])
break
# only consider first triplet
break
index += 1
print "final index", index - 1
```
#### File: jlucangelio/adventofcode/day15.py
```python
import sys
# Disc #1 has 13 positions; at time=0, it is at position 1.
# Disc #2 has 19 positions; at time=0, it is at position 10.
# Disc #3 has 3 positions; at time=0, it is at position 2.
# Disc #4 has 7 positions; at time=0, it is at position 1.
# Disc #5 has 5 positions; at time=0, it is at position 3.
# Disc #6 has 17 positions; at time=0, it is at position 5.
def chinese_remainder(n, a):
s = 0
prod = reduce(lambda a, b: a*b, n)
for n_i, a_i in zip(n, a):
p = prod / n_i
s += a_i * mul_inv(p, n_i) * p
return s % prod
def mul_inv(a, b):
b0 = b
x0, x1 = 0, 1
if b == 1: return 1
while a > 1:
q = a / b
a, b = b, a%b
x0, x1 = x1 - q * x0, x0
if x1 < 0: x1 += b0
return x1
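# Each disc i must be at position 0 when the capsule reaches it at time t + i,
# i.e. t = -(start_i + i) (mod positions_i); the answer is computed both by brute
# force and with the Chinese Remainder Theorem above.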
discs = {}
with open(sys.argv[1]) as f:
lines = f.readlines()
for line in lines:
tokens = line.strip().split()
discnum = int(tokens[1][1:])
npositions = int(tokens[3])
starting_pos = int(tokens[11][:-1])
print discnum, "npositions", npositions, "starting_pos", starting_pos
discs[discnum] = (npositions, starting_pos)
print
delays = []
for i in range(1, len(discs) + 1):
npos, start = discs[i]
delay = (npos - (start + i)) % npos
if delay == 0:
delay = npos
print i, "delay", delay
delays.append(delay)
delay = 0
while True:
if all([delay % discs[i][0] == delays[i-1] for i in range(1, len(discs) + 1)]):
print delay
break
delay += 1
if delay % 100000 == 0:
print delay
print chinese_remainder([discs[i][0] for i in range(1, len(discs) + 1)], delays)
```
#### File: jlucangelio/adventofcode/day3.py
```python
count = 0
def is_triangle(a, b, c):
return a + b > c and a + c > b and b + c > a
with open("day3.input") as f:
lines = f.readlines()
# for line in lines:
# a, b, c = line.strip().split()
# a = int(a)
# b = int(b)
# c = int(c)
for i in range(len(lines) / 3):
line1 = lines[3*i].strip().split()
line2 = lines[3*i+1].strip().split()
line3 = lines[3*i+2].strip().split()
line1a = int(line1[0])
line1b = int(line1[1])
line1c = int(line1[2])
line2a = int(line2[0])
line2b = int(line2[1])
line2c = int(line2[2])
line3a = int(line3[0])
line3b = int(line3[1])
line3c = int(line3[2])
if is_triangle(line1a, line2a, line3a):
count += 1
if is_triangle(line1b, line2b, line3b):
count += 1
if is_triangle(line1c, line2c, line3c):
count += 1
print count
```
#### File: jlucangelio/adventofcode/day4.py
```python
def is_real(name, checksum):
frequencies = {}
for letter in name:
if letter not in frequencies:
frequencies[letter] = 1
continue
frequencies[letter] += 1
sorted_letters = sorted(frequencies.keys(), key=lambda k: (frequencies[k], ord("z") - ord(k)), reverse=True)
return checksum in "".join(sorted_letters)
def rotate(name, sector_id):
decrypted_name = ""
for token in name.split("-"):
for letter in token:
decrypted_name += chr((ord(letter) - ord("a") + int(sector_id)) % 26 + ord("a"))
decrypted_name += " "
print decrypted_name, sector_id
s = 0
with open("day4.input") as f:
lines = f.readlines()
for line in lines:
tokens = line.strip().split("-")
name = "".join(tokens[:-1])
sector_id, checksum = tokens[-1].split("[")
checksum = checksum[:-1]
if is_real(name, checksum):
s += int(sector_id)
rotate(name, sector_id)
# print name, sector_id, checksum
print s
```
#### File: jlucangelio/adventofcode/day9p2.py
```python
import sys
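# Computes the version-2 decompressed length without materialising the string: each
# (AxB) marker's repeated section is expanded recursively and only lengths are summed.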
def decompress_line(line):
if "(" not in line:
# print "not in", line
return len(line), 1
# There's at least one (marker).
length = 0
in_marker = False
collecting_rep = False
marker = ""
rep = ""
nchars = 0
nreps = 0
for char in line:
if collecting_rep:
rep += char
nchars -= 1
if nchars == 0:
collecting_rep = False
# print "rep", rep
sub_nchars, sub_nreps = decompress_line(rep)
length += sub_nchars * sub_nreps * nreps
nreps = 0
rep = ""
continue
if in_marker:
# Markers don't count for length.
if char == ")":
in_marker = False
nchars, nreps = marker.split("x")
nchars = int(nchars)
nreps = int(nreps)
collecting_rep = True
else:
marker += char
elif not in_marker:
if char == "(":
in_marker = True
marker = ""
else:
length += 1
assert(nchars == 0)
return length, 1
length = 0
with open(sys.argv[1]) as f:
lines = f.readlines()
count = 0
for line in lines:
line = line.strip()
# print line
l, reps = decompress_line(line)
print l * reps
print
``` |
{
"source": "jlucangelio/matasano-crypto-challenges",
"score": 2
} |
#### File: jlucangelio/matasano-crypto-challenges/ecb_cut-n-paste.py
```python
import utils
from collections import OrderedDict
KEY = utils.ByteArray.random(16)
def parse(s):
res = OrderedDict()
fields = s.split("&")
for field in fields:
k, v = field.split("=")
res[k] = v
return res
def unparse(d):
res = []
for k in d:
v = d[k]
if type(v) == str and ("=" in v or "&" in v):
continue
res.append("%s=%s" % (k, v))
return "&".join(res)
def profile_for(email):
return unparse(OrderedDict([("email", email), ("uid", 10), ("role", "user")]))
def encrypt_profile(email):
ptext = utils.ByteArray.fromString(profile_for(email))
ptext.pkcs7pad(utils.AES_BLOCKSIZE_BYTES)
return utils.aes_ecb_encrypt(ptext, KEY)
def decrypt_profile(p):
ptext = utils.aes_ecb_decrypt(p, KEY).asString()
last_byte = ord(ptext[-1])
if last_byte >= 1 and last_byte <= 15:
ptext = ptext[:-last_byte]
return parse(ptext)
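# ECB cut-and-paste: craft an email so that "admin" plus PKCS#7 padding fills a
# ciphertext block of its own, then splice that block in place of the final block of
# a normally generated profile so the decrypted role becomes "admin".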
if __name__ == "__main__":
s = "foo=bar&baz=qux&zap=zazzle"
print parse(s)
print s == unparse(parse(s))
print "profile_for", profile_for("<EMAIL>") == "email=<EMAIL>&uid=10&role=user"
print decrypt_profile(encrypt_profile("<EMAIL>"))
# len("email=<EMAIL>") == 16
first_block = "<EMAIL>"
role = "admin"
nbytes = 16 - len(role)
ptext = role + (chr(nbytes) * nbytes)
print repr(ptext), len(ptext)
admin_block = encrypt_profile(first_block + ptext).blocksAsHexStrings(16)[1]
# "email=<EMAIL>&uid=10&role=" + "admin" + padding
ptext = "<EMAIL>&uid=10&role="
s = "A" * (16 - (len(ptext) % 16))
print s, len(s + ptext)
blocks = encrypt_profile(s + "@bar.com").blocksAsHexStrings(16)
p = blocks[:-1] + [admin_block]
print decrypt_profile(utils.ByteArray.fromHexString("".join(p)))
```
#### File: jlucangelio/matasano-crypto-challenges/ecb_decryption.py
```python
import utils
import ecb_cbc_detection
UNKNOWN_STRING = """<KEY>"""
KEY = utils.ByteArray.random(16)
def oracle(input_data):
input_ba = utils.ByteArray.fromString(input_data)
input_ba.extend(utils.ByteArray.fromBase64(UNKNOWN_STRING))
input_ba.pkcs7pad(utils.AES_BLOCKSIZE_BYTES)
ret = utils.aes_ecb_encrypt(input_ba, KEY)
return ret
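# Byte-at-a-time ECB decryption: detect the block size and padding, confirm ECB mode,
# then recover the unknown suffix one byte at a time by comparing crafted blocks
# against a dictionary of all 256 possible last bytes.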
if __name__ == "__main__":
bs = None
padding = None
l = len(oracle(""))
for i in range(64):
ctext = oracle("A" * (i+1))
if len(ctext) != l:
bs = len(ctext) - l
padding = i
break
print "block size", bs
print "padding", padding
if ecb_cbc_detection.detect_ecb_cbc(oracle) != "ECB":
print "Not ECB"
unknown_string = ""
blocks = {}
for unknown_block in range(l / bs):
print unknown_block
for i in range(bs):
blocks.clear()
for c in range(256):
attempt = "A" * (bs - i - 1) + unknown_string + chr(c)
ctext = oracle(attempt)
block = ctext.block(bs, unknown_block).asHexString()
blocks[block] = attempt
input_data = "A" * (bs - i - 1)
ctext = oracle(input_data).block(bs, unknown_block).asHexString()
block = blocks[ctext]
unknown_string += block[-1]
if len(unknown_string) == l - padding:
break
print unknown_string
``` |
{
"source": "jlucartc/MetodosNumericos20182",
"score": 4
} |
#### File: MetodosNumericos20182/etapa 1/metodo_newton_raphson.py
```python
from sympy import*
import math
import time
#import matplotlib.pyplot as plt #plot
x = symbols('x')
print("\n\n-- Método de Newton-Raphson --\n")
fx = sympify((str)(input("Digite f(x): "))) # recebe a função
dfx = diff(fx,x) # calcula a derivada da função recebida
x0 = (float)(input("Digite x0: ")) # recebe o ponto inicial
def phix(a, b, c): # define função phi(x)
return a - (b.subs(x,x0)/c.subs(x,x0))
e = (float)(input("Digite o valor da precisão: ")) # recebe o valor da precisão
c = 0 # inicia contador de iterações
start = time.time()
end = 0
str_k = 'k'
str_x = 'x'
str_fx = 'f(x)'
print("\n{0:^2}".format(str_k) + "|{0:^14}".format(str_x) + "|{0:^14}".format(str_fx))
while (c < 30) :
print("{0:^2}".format(c) + "|{0:^14.6e}".format(x0) + "|{0:^14.6e}".format(fx.subs(x,x0)))
phi = phix(x0,fx,dfx) # calcula phi(x0)
if(abs(fx.subs(x,x0)) < e): # checa se a função em x0 é menor ou igual à precisão desejada
end = time.time() # calcula tempo final
print("-> Raiz = "+str(float(phi))) # imprime a raiz encontrada
print("-> iterações: "+str(c)) # imprime o número de iterações
break # sai do laço
x0 = phi # caso f(x) não seja perto de 0 o suficiente, x0 recebe o valor de phi(x0) e segue no laço
if(c == 29):
end = time.time() # calcula tempo final
print("-> Número máximo de iterações atingido")
c+=1
print("-> Tempo de execução: "+str(end - start)+" segundos\n\n") # imprime o tempo de execução na tela
```
#### File: MetodosNumericos20182/etapa 1/secant_method.py
```python
import sys
import math
from timeit import default_timer as timer
"""
" Para executar este método no caso de teste no qual f(x) = x³ - 9x + 3 e a
" precisão vale 0.0005 (exemplo apresentado nos slides), deve-se definir os
" dois pontos do intervalo (0 e 1) e o número de interações (50) no ato do
" chamamento da função, bem como escrever a seguinte passagem no terminal:
" python .\secant_method.py "x**3 - 9*x + 3" 0.0005
"""
f = lambda x:eval(sys.argv[1]) # recebe a função
e = float(sys.argv[2]) # recebe a precisão
def q(x0, x1): # define a função phi(x)
return (x0 * f(x1) - x1 * f(x0)) / (f(x1) - f(x0))
start = timer()
def secant_method(x0, x1, max_interactions): # função que realiza as iterações
fx0 = f(x0)
fx1 = f(x1)
if abs(fx0) <= e:
return x0
if abs(fx1) <= e:
return x1
else:
k = 1
str_k = 'k'
str_x = 'x'
str_fx = 'f(x)'
print("{0:^2}".format(str_k) + "|{0:^14}".format(str_x) + "|{0:^14}".format(str_fx))
while k <= max_interactions:
x2 = q(x0, x1)
fx2 = f(x2)
print("{0:^2}".format(k) + "|{0:^14.6e}".format(x2) + "|{0:^14.6e}".format(fx2))
if abs(fx2) <= e:
return x2
x0 = x1
x1 = x2
k += 1
return x2
end = timer()
root = secant_method(0,1, 50) # definindo os pontos iniciais e o número máximo de iterações
print("Root: %.6e." % root)
print("Runtime: %.6e seconds." % (end - start))
```
#### File: MetodosNumericos20182/etapa 2/gaussJacobi.py
```python
import numpy as np
from sympy import *
from math import *
from timeit import default_timer as timer
start = None
end = None
def maxXi(Xn,X):
n = None
d = None
for i in range(Xn.shape[0]):
if(np.copy(Xn[i,0]) != 0):
nk = abs(np.copy(Xn[i,0]) - np.copy(X[i,0]))/abs(np.copy(Xn[i,0]))
dk = abs(np.copy(Xn[i,0]))
if n == None or nk > n:
n = nk
if d == None or dk > d:
d = dk
return n/d
A = np.matrix(eval(input("Digite uma matriz : ")))
A = A.astype(float)
X = np.matrix(eval(input("Digite X : ")))
e = float(input("Digite a precisão: "))
B = np.copy(A[:,A.shape[1]-1])
A = np.delete(np.copy(A),A.shape[1]-1,1)
C = np.asmatrix(np.zeros([A.shape[0],A.shape[1]]))
C = C.astype(float)
G = np.copy(B)
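# Build the Gauss-Jacobi iteration: C[i,j] = -A[i,j]/A[i,i] with a zero diagonal and
# G[i] = B[i]/A[i,i], then repeat X(k+1) = C @ X(k) + G until the change between
# successive iterates (as measured by maxXi) drops below the requested precision.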
for i in range(C.shape[0]):
for j in range(C.shape[1]):
if i != j:
C[i,j] = (np.copy(A[i,j])/np.copy(A[i,i]))*(-1)
G[i,0] = (np.copy(G[i,0]))/(np.copy(A[i,i]))
C[i,i] = 0
Xn = None
z = True
print("Matriz C:\n",C)
print("Matriz G:\n",G)
start = timer()
while(z):
Xn = (np.copy(C) @ np.copy(X)) + np.copy(G)
d = maxXi(np.copy(Xn),np.copy(X))
if(d < e):
z = False
else:
X = np.copy(Xn)
end = timer()
print("Resposta de Gauss-Jacobi: ")
print(Xn)
print("Tempo de execucao total: %e segundos" % (end - start))
``` |
{
"source": "jlucartc/MetodosNumericosTrabalhoExtra20182",
"score": 4
} |
#### File: jlucartc/MetodosNumericosTrabalhoExtra20182/1_3_simpson.py
```python
import numpy as np
from sympy import *
from math import *
def regra_1_3_Simpson(fx,a,h,x):
return ((h)*(fx.subs(x,a) + 4*fx.subs(x,a+h) + fx.subs(x,a+2*h)))/3
x = symbols('x')
fx = sympify(str(input("\n\n Digite a função f(x): ")))
a = float(input(" Digite o começo do intervalo de integração: "))
b = float(input(" Digite o fim do intervalo de integração: "))
t = float(input(" Digite o modo de integração: (0 - sem repetição, 1 - com repetição): "))
if(t == 0):
Ir = integrate(fx,(x,a,b))
Ia = regra_1_3_Simpson(fx,a,abs(b-a)/2,x)
print(" Integral aproximada: "+str(Ia)+"\n\n")
elif(t == 1):
m = int(input(" Digite a quantidade m de intervalos: "))
h = float(abs(b-a)/m)
Et = -(h**5/90)*diff(diff(diff(diff(fx,x),x),x),x)
Es = Et.subs(x,a)
if(m%2 == 0 and m*h == (b-a)):
Ia = 0
for i in range(0,m-1,2):
Es += Et.subs(x,a+h)
Ia += regra_1_3_Simpson(fx,a,h,x)
a += 2*h
print(" Integral aproximada: "+str(Ia)+"\n\n")
else:
print(" Erro: m não é múltiplo de 2\n\n")
```
#### File: jlucartc/MetodosNumericosTrabalhoExtra20182/interpolacao_sistema_linear.py
```python
import numpy as np
from sympy import *
from math import *
from timeit import default_timer as timer
def resolveTriangular(A):
R = np.asmatrix([0]*A.shape[0]) # matriz de valores das icógnitas
R = R.astype(float)
for i in range(A.shape[0]-1,-1,-1):
R[0,i] = np.copy(R[0,i]) + np.copy(A[i,A.shape[0]])
for j in range(A.shape[0]-1,i,-1):
R[0,i] = np.copy(R[0,i]) - np.copy(A[i,j])
R[0,i] = (np.copy(R[0,i])/np.copy(A[i,i]))
A[:,i] = np.copy(A[:,i])*np.copy(R[0,i])
return [A,R]
def pivotacaoParcial(A):
p = 0
m = None
for i in range(0,A.shape[0]):
for j in range(i+1,A.shape[0]):
if A[i,i] < A[j,i]:
p += 1
Temp = np.copy(A[i])
A[i] = np.copy(A[j])
A[j] = np.copy(Temp)
for k in range(i+1,A.shape[0]):
d = np.copy(A[k,i])
d = d/np.copy(A[i,i])
A[k,:] = np.copy(A[k,:]) - (np.copy(A[i,:])*d)
return [A,p]
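# Polynomial interpolation via the Vandermonde system: build V with V[:,i] = x**i,
# append y as the right-hand side, triangularise with partial pivoting and solve by
# back substitution to obtain the polynomial coefficients.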
start = None
end = None
x = np.matrix(eval(input("\n\n Digite o vetor de pontos x0;x1;x1;{...};xn : ")))
x = x.astype(float)
y = np.matrix(eval(input(" Digite o vetor de pontos f(x0);f(x1);{...};f(xn) :")))
x = x.astype(float)
n = x.shape[0] # quantidade de linhas de x = quantidade de pontos
v = np.zeros([n,n])
v[:,0] = 1
for i in range(1,n):
v[:,i] = np.transpose(np.power(np.copy(x),i))
v = np.hstack((np.copy(v),np.copy(y)))
start = timer()
r = resolveTriangular(pivotacaoParcial(v)[0])[1]
end = timer()
st = ""
for j in range(0,r.shape[1]):
if(j == 0): # se for o termo independente
if(r[0,j] > 0):
st += " + "+str(r[0,j])
elif(r[0,j] < 0):
st += " "+str(r[0,j])
elif(j != r.shape[0]-1):
if(r[0,j] > 0):
st += " + "+str(r[0,j])+"*x**"+str(j)
elif(r[0,j] < 0):
st += " "+str(r[0,j])+"*x**"+str(j)
x = symbols('x')
st = sympify(str(st))
x0 = float(input(" Digite um valor de x para ser testado no polinômio da interpolação: "))
print(" y(x) = "+str(st))
print(" y("+str(x0)+"): "+str(st.subs(x,x0)))
print(" Tempo de execucao total: %e segundos\n\n" % (end - start))
```
#### File: jlucartc/MetodosNumericosTrabalhoExtra20182/regra_dos_trapezios.py
```python
import numpy as np
from sympy import *
from math import *
def regraDosTrapezios(fx,a,b,x):
return ((b-a)*(fx.subs(x,a) + fx.subs(x,b)))/2
x = symbols('x')
fx = sympify(str(input("\n\n Digite a função f(x): ")))
a = float(input(" Digite o começo do intervalo de integração: "))
b = float(input(" Digite o fim do intervalo de integração: "))
t = float(input(" Digite o modo de integração: (0 - sem repetição, 1 - com repetição): "))
if(t == 0):
Ia = regraDosTrapezios(fx,a,b,x)
print(" Integral aproximada: "+str(Ia)+"\n\n")
elif(t == 1):
m = int(input(" Digite a quantidade m de divisões: "))
h = abs(b-a)/m
Et = (-h**3/12)*diff(diff(fx,x),x).subs(x,a)
if(m*h < (b-a)):
hEx = (b-a) - h*m
Ia = 0
xk = a
for i in range(0,m+1):
if(i == m):
Et += -(h**3/12)*diff(diff(fx,x),x).subs(x,a+hEx)
Ia += regraDosTrapezios(fx,a,(a+hEx),x)
a += hEx
else:
Et += -(h**3/12)*diff(diff(fx,x),x).subs(x,a+h)
Ia += regraDosTrapezios(fx,a,(a+h),x)
a += h
print(" Integral aproximada: "+str(Ia)+"\n\n")
else:
Ia = 0
xk = a
for i in range(0,m):
Et += -(h**3/12)*(diff(diff(fx,x),x).subs(x,a+h))
Ia += regraDosTrapezios(fx,a,(a+h),x)
a += h
print(" Integral aproximada: "+str(Ia)+"\n\n")
``` |
{
"source": "jlucas-esri/Geospatial-Center-Code",
"score": 3
} |
#### File: misc/dataFrameToDatabase/dataFrameToDatabase.py
```python
import logging
import time
import pandas as pd
from pandas.errors import EmptyDataError
import sqlalchemy
from typing import Union, List
class DataFrameToDatabase:
def __init__(self, df:Union[pd.DataFrame, pd.io.parsers.TextFileReader],
dbTableName:str,
driver:str,
username:str=None,
password:str=<PASSWORD>,
address:str=None,
dbName:str=None,
port:Union[int, str]=None,
query:dict={},
dbEcho:bool=True,
if_exists:str='fail',
index:bool=True,
index_label:str=None,
chunksize:int=None,
dtype:dict=None,
):
#private
self._logger = logging.getLogger('DataFrameToDatabase')
self._logger.setLevel(logging.INFO)
#default value updated in self._validateDataFrame
self._isIterable = False
#pd.DataFrame.to_sql variables
self._index = index
self._index_label = index_label
self._chunksize = chunksize
self._dtype = dtype
self._dbTableName = dbTableName
if if_exists not in ['fail', 'append', 'replace']:
raise ValueError('if_exists must be set to "fails", "replace", or "append"')
elif if_exists == 'replace':
self._logger.warning(f'Table "{dbTableName}" will be overwritten.')
self._if_exists = if_exists
#validating and categorizing it as iterable or not
self._logger.info('Validating DataFrame...')
if self._validateDataFrame(df):
self._df = df
self._logger.info('Valid DataFrame')
#validating db params
self._logger.info('Validating database parameters...')
if self._validateDbParameters(driver, username, password, address, port, dbName, query):
#sqlalchemy.create_engine parameters
self._dbEcho = dbEcho
self._driver = driver
self._username = username
self._password = password
self._address = address
self._port = port
self._dbName = dbName
self._query = query
self._logger.info('Valid database parameters')
# self._logger.info('Inserting data...')
# self.insertData()
def _validateDataFrame(self, df):
"""
Validates that the df isn't empty and categorizes it as iterable (TextFileReader) or not iterable (DataFrame)
"""
#if the df is a standard DataFrame
if type(df) == pd.DataFrame:
self._logger.info('Using regular dataframe')
if df.empty:
self._logger.error('Empty dataframe')
raise EmptyDataError('DataFrame is empty')
self.colsAndTypes = {name: df.dtypes[name] for name in list(df.columns)}
self._isIterable = False
#if the df is a large file read in through chunks
elif type(df) == pd.io.parsers.TextFileReader:
self._logger.info('Using large dataframe')
for chunk in df:
self.colsAndTypes = {name: chunk.dtypes[name] for name in list(chunk.columns)}
if chunk.empty:
self._logger.error('Empty dataframe')
raise EmptyDataError('DataFrame is empty')
break
self._isIterable = True
else:
raise TypeError(f'Invalid df type. Type "{type(df)}" is not a DataFrame or TextFileReader')
return True
def _validateDbParameters(self, driver, username, password, address, port, dbName, query):
"""
Validates database parameters by passing it into create_engine. If it succeeds, the parameters are valid
"""
try:
# if driver:
# driver = '+' + driver
# if port:
# port = ':' + str(port)
# if password:
# password = ':' + password
# if address:
# address = '@' + address
dbUrl = sqlalchemy.engine.URL.create(drivername=driver,
username=username,
password=password,
host=address,
port=port,
database=dbName,
query=query)
self._engine = sqlalchemy.create_engine(dbUrl, echo=self._dbEcho)
except Exception as e:
self._logger.exception(e)
raise e
else:
return True
def insertData(self):
"""
Inserts data into the database depending on the type of DataFrame given
"""
if self._isIterable:
#boolean tracking if function DataFrame.to_sql has been run for any chunk
updated = False
for chunk in self._df:
start = time.time()
if not updated:
chunk.to_sql(name=self._dbTableName,
con=self._engine,
if_exists=self._if_exists,
index=self._index,
index_label=self._index_label,
chunksize=self._chunksize,
dtype=self._dtype)
updated = True
elif updated:
chunk.to_sql(name=self._dbTableName,
con=self._engine,
if_exists='append',
index=self._index,
index_label=self._index_label,
chunksize=self._chunksize,
dtype=self._dtype)
end = time.time()
self._logger.info(f'Chunk inserted in {end-start:.3f} seconds')
elif not self._isIterable:
start = time.time()
self._df.to_sql(name=self._dbTableName,
con=self._engine,
if_exists=self._if_exists,
index=self._index,
index_label=self._index_label,
chunksize=self._chunksize,
dtype=self._dtype)
end = time.time()
self._logger.info(f'DataFrame inserted in {end-start:.3f} seconds')
def main(self):
self._logger.info('Inserting data...')
self.insertData()
``` |
{
"source": "jlucas-esri/GrantsWebScraper",
"score": 2
} |
#### File: src/scraper/htmlRetriever.py
```python
import requests
class HtmlRetriever:
def __init__(self, driver):
self.content = driver.page_source
@staticmethod
def main(driver):
retriever = HtmlRetriever(driver)
return retriever.content
```
#### File: src/scraper/iteratePages.py
```python
import time
import logging
from selenium.common.exceptions import NoSuchElementException
class IteratePages:
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s', '%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
_logger.addHandler(handler)
def __init__(self, driver):
self.driver = driver
self.driver.switch_to.default_content()
self.driver.switch_to_frame('embeddedIframe')
# @staticmethod
# def run():
# pass
def __iter__(self):
self.iteratedOnce = False
return self
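    # Each iteration yields the Selenium driver on the current results page: the first
    # call returns the page as-is, later calls click the "Click Next Page" control
    # inside the embedded iframe (waiting for it to load) until the control disappears.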
def __next__(self):
try:
if not self.iteratedOnce:
self.iteratedOnce = True
return self.driver
nextButton = self.driver.find_element_by_css_selector('a[title=\"Click Next Page\"]')
nextButton.click()
#leaving time for the page to load
time.sleep(3)
self.driver.switch_to.default_content()
self.driver.switch_to_frame('embeddedIframe')
return self.driver
except NoSuchElementException as e:
self._logger.debug('No more pages left')
raise StopIteration
def _iterate(self):
pass
``` |
{
"source": "JlucasS777/Aprendendo-Python",
"score": 3
} |
#### File: Aprendendo Python/cursopythonudamy/aula_29_funcao_4.py
```python
variavel = 'valor'
def func():
print(variavel)
func()
def func2():
global variavel
variavel = 'Outro valor'
print(variavel)
func()
func2()
print(variavel)
```
#### File: uteis/numeros/__init__.py
```python
def fatorial(n):
f=1
for c in range (1,n+1):
f*=c
return f
def dobro(n):
return n*2
def triplo(n):
return n*3
``` |
{
"source": "jlucete/ParallelWaveGAN-AutoVC",
"score": 2
} |
#### File: autovc/bin/train.py
```python
import argparse
import logging
import os
import sys
from collections import defaultdict
import matplotlib
import numpy as np
import soundfile as sf
import torch
import yaml
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from tqdm import tqdm
import autovc
from autovc.datasets import AudioMelDataset
from autovc.datasets import AudioMelSCPDataset
from parallel_wavegan.utils import read_hdf5
import autovc.models
import autovc.optimizers
# set to avoid matplotlib error in CLI environment
matplotlib.use("Agg")
class Trainer(object):
"""Customized trainer module for AutoVC training."""
def __init__(self,
steps,
epochs,
data_loader,
sampler,
model,
criterion,
optimizer,
scheduler,
config,
device=torch.device("cpu"),
):
"""Initialize trainer.
Args:
steps (int): Initial global steps.
epochs (int): Initial global epochs.
            data_loader (dict): Dict of data loaders. It must contain "train" and "dev" loaders.
            model (dict): Dict of models. It must contain the "generator" model.
            criterion (dict): Dict of criterions. It must contain "mse" and "l1" criterions.
            optimizer (dict): Dict of optimizers. It must contain the "generator" optimizer.
            scheduler (dict): Dict of schedulers. It must contain the "generator" scheduler.
config (dict): Config dict loaded from yaml format configuration file.
            device (torch.device): Pytorch device instance.
"""
self.steps = steps
self.epochs = epochs
self.data_loader = data_loader
self.sampler = sampler
self.model = model
self.criterion = criterion
self.optimizer = optimizer
self.scheduler = scheduler
self.config = config
self.device = device
self.writer = SummaryWriter(config["outdir"])
self.finish_train = False
self.total_train_loss = defaultdict(float)
self.total_eval_loss = defaultdict(float)
def run(self):
"""Run training."""
self.tqdm = tqdm(initial=self.steps,
total=self.config["train_max_steps"],
desc="[train]")
while True:
# train one epoch
self._train_epoch()
# check whether training is finished
if self.finish_train:
break
self.tqdm.close()
logging.info("Finished training.")
def save_checkpoint(self, checkpoint_path):
"""Save checkpoint.
Args:
checkpoint_path (str): Checkpoint path to be saved.
"""
state_dict = {
"optimizer": {
"generator": self.optimizer["generator"].state_dict(),
},
"scheduler": {
"generator": self.scheduler["generator"].state_dict(),
},
"steps": self.steps,
"epochs": self.epochs,
}
if self.config["distributed"]:
state_dict["model"] = {
"generator": self.model["generator"].module.state_dict(),
}
else:
state_dict["model"] = {
"generator": self.model["generator"].state_dict(),
}
if not os.path.exists(os.path.dirname(checkpoint_path)):
os.makedirs(os.path.dirname(checkpoint_path))
torch.save(state_dict, checkpoint_path)
def load_checkpoint(self, checkpoint_path, load_only_params=False):
"""Load checkpoint.
Args:
checkpoint_path (str): Checkpoint path to be loaded.
load_only_params (bool): Whether to load only model parameters.
"""
state_dict = torch.load(checkpoint_path, map_location="cpu")
if self.config["distributed"]:
self.model["generator"].module.load_state_dict(state_dict["model"]["generator"])
else:
self.model["generator"].load_state_dict(state_dict["model"]["generator"])
if not load_only_params:
self.steps = state_dict["steps"]
self.epochs = state_dict["epochs"]
self.optimizer["generator"].load_state_dict(state_dict["optimizer"]["generator"])
self.scheduler["generator"].load_state_dict(state_dict["scheduler"]["generator"])
def _train_step(self, batch):
"""Train model one step."""
# parse batch
x, y = batch
x = tuple([x_.to(self.device) for x_ in x])
y = y.to(self.device)
x_real = x[-2]
emb_org = x[-1]
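        # AutoVC training objective: mel reconstruction loss before and after the
        # postnet, plus a content-code consistency loss (L1 between the codes of the
        # input and of its reconstruction), weighted by lambda_cd.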
#######################
# Generator #
#######################
x_real = x_real.transpose(1,2)
# Identity mapping loss
x_identic, x_identic_psnt, code_real = self.model["generator"](x_real, emb_org, emb_org)
g_loss_id = self.criterion["mse"](x_real, x_identic)
g_loss_id_psnt = self.criterion["mse"](x_real, x_identic_psnt)
self.total_train_loss["train/identity_mapping_loss"] += g_loss_id.item()
self.total_train_loss["train/identity_mapping_loss_psnt"] += g_loss_id_psnt.item()
# Code Semantic loss
code_reconst = self.model["generator"](x_identic_psnt, emb_org, None)
g_loss_cd = self.criterion["l1"](code_real, code_reconst)
self.total_train_loss["train/code_semantic_loss"] += g_loss_cd.item()
# Total generator loss
g_loss = g_loss_id + g_loss_id_psnt + self.config["lambda_cd"] * g_loss_cd
self.total_train_loss["train/generator_loss"] += g_loss.item()
# update generator
self.optimizer["generator"].zero_grad()
g_loss.backward()
if self.config["generator_grad_norm"] > 0:
torch.nn.utils.clip_grad_norm_(
self.model["generator"].parameters(),
self.config["generator_grad_norm"])
self.optimizer["generator"].step()
self.scheduler["generator"].step()
# update counts
self.steps += 1
self.tqdm.update(1)
self._check_train_finish()
def _train_epoch(self):
"""Train model one epoch."""
for train_steps_per_epoch, batch in enumerate(self.data_loader["train"], 1):
# train one step
self._train_step(batch)
# check interval
if self.config["rank"] == 0:
self._check_log_interval()
self._check_eval_interval()
self._check_save_interval()
# check whether training is finished
if self.finish_train:
return
# update
self.epochs += 1
self.train_steps_per_epoch = train_steps_per_epoch
logging.info(f"(Steps: {self.steps}) Finished {self.epochs} epoch training "
f"({self.train_steps_per_epoch} steps per epoch).")
# needed for shuffle in distributed training
if self.config["distributed"]:
self.sampler["train"].set_epoch(self.epochs)
@torch.no_grad()
def _eval_step(self, batch):
"""Evaluate model one step."""
# parse batch
x, y = batch
x = tuple([x_.to(self.device) for x_ in x])
y = y.to(self.device)
x_real = x[-2]
emb_org = x[-1]
#######################
# Generator #
#######################
x_real = x_real.transpose(1,2)
x_identic, x_identic_psnt, code_real = self.model["generator"](x_real, emb_org, emb_org)
# Identity mapping loss
g_loss_id = self.criterion["mse"](x_real, x_identic)
g_loss_id_psnt = self.criterion["mse"](x_real, x_identic_psnt)
self.total_eval_loss["eval/identity_mapping_loss"] += g_loss_id.item()
self.total_eval_loss["eval/identity_mapping_loss_psnt"] += g_loss_id_psnt.item()
# Code Semantic loss
code_reconst = self.model["generator"](x_identic_psnt, emb_org, None)
g_loss_cd = self.criterion["l1"](code_real, code_reconst)
self.total_eval_loss["eval/code_semantic_loss"] += g_loss_cd.item()
# Total generator loss
g_loss = g_loss_id + g_loss_id_psnt + self.config["lambda_cd"] * g_loss_cd
self.total_eval_loss["eval/generator_loss"] += g_loss.item()
def _eval_epoch(self):
"""Evaluate model one epoch."""
logging.info(f"(Steps: {self.steps}) Start evaluation.")
# change mode
for key in self.model.keys():
self.model[key].eval()
# calculate loss for each batch
for eval_steps_per_epoch, batch in enumerate(tqdm(self.data_loader["dev"], desc="[eval]"), 1):
# eval one step
self._eval_step(batch)
# save intermediate result
if eval_steps_per_epoch == 1:
                self._generate_and_save_intermediate_result(batch)
logging.info(f"(Steps: {self.steps}) Finished evaluation "
f"({eval_steps_per_epoch} steps per epoch).")
# average loss
for key in self.total_eval_loss.keys():
self.total_eval_loss[key] /= eval_steps_per_epoch
logging.info(f"(Steps: {self.steps}) {key} = {self.total_eval_loss[key]:.4f}.")
# record
self._write_to_tensorboard(self.total_eval_loss)
# reset
self.total_eval_loss = defaultdict(float)
# restore mode
for key in self.model.keys():
self.model[key].train()
@torch.no_grad()
    def _generate_and_save_intermediate_result(self, batch):
"""Generate and save intermediate result."""
# FIXME(Jaegeon): implement ParallelWaveGAN-AutoVC end-to-end intermediate result.
'''
# delayed import to avoid error related backend error
import matplotlib.pyplot as plt
# generate
x_batch, y_batch = batch
x_batch = tuple([x.to(self.device) for x in x_batch])
y_batch = y_batch.to(self.device)
y_batch_ = self.model["generator"](*x_batch)
# check directory
dirname = os.path.join(self.config["outdir"], f"predictions/{self.steps}steps")
if not os.path.exists(dirname):
os.makedirs(dirname)
for idx, (y, y_) in enumerate(zip(y_batch, y_batch_), 1):
# convert to ndarray
y, y_ = y.view(-1).cpu().numpy(), y_.view(-1).cpu().numpy()
# plot figure and save it
figname = os.path.join(dirname, f"{idx}.png")
plt.subplot(2, 1, 1)
plt.plot(y)
plt.title("groundtruth speech")
plt.subplot(2, 1, 2)
plt.plot(y_)
plt.title(f"generated speech @ {self.steps} steps")
plt.tight_layout()
plt.savefig(figname)
plt.close()
# save as wavfile
y = np.clip(y, -1, 1)
y_ = np.clip(y_, -1, 1)
sf.write(figname.replace(".png", "_ref.wav"), y,
self.config["sampling_rate"], "PCM_16")
sf.write(figname.replace(".png", "_gen.wav"), y_,
self.config["sampling_rate"], "PCM_16")
if idx >= self.config["num_save_intermediate_results"]:
break
'''
def _write_to_tensorboard(self, loss):
"""Write to tensorboard."""
for key, value in loss.items():
self.writer.add_scalar(key, value, self.steps)
def _check_save_interval(self):
if self.steps % self.config["save_interval_steps"] == 0:
self.save_checkpoint(
os.path.join(self.config["outdir"], f"checkpoint-{self.steps}steps.pkl"))
logging.info(f"Successfully saved checkpoint @ {self.steps} steps.")
def _check_eval_interval(self):
if self.steps % self.config["eval_interval_steps"] == 0:
self._eval_epoch()
def _check_log_interval(self):
if self.steps % self.config["log_interval_steps"] == 0:
for key in self.total_train_loss.keys():
self.total_train_loss[key] /= self.config["log_interval_steps"]
logging.info(f"(Steps: {self.steps}) {key} = {self.total_train_loss[key]:.4f}.")
self._write_to_tensorboard(self.total_train_loss)
# reset
self.total_train_loss = defaultdict(float)
def _check_train_finish(self):
if self.steps >= self.config["train_max_steps"]:
self.finish_train = True
class Collater(object):
"""Customized collater for Pytorch DataLoader in training."""
def __init__(self,
batch_max_steps=20480,
hop_size=256,
aux_context_window=0,
use_noise_input=False,
len_crop=128,
):
"""Initialize customized collater for PyTorch DataLoader.
Args:
batch_max_steps (int): The maximum length of input signal in batch.
hop_size (int): Hop size of auxiliary features.
aux_context_window (int): Context window size for auxiliary feature conv.
use_noise_input (bool): Whether to use noise input.
len_crop (int): Length of single crop.
"""
if batch_max_steps % hop_size != 0:
batch_max_steps += -(batch_max_steps % hop_size)
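            # e.g. batch_max_steps=20500 with hop_size=256 becomes 20500 - (20500 % 256) = 20480,
            # i.e. the largest multiple of hop_size not exceeding the requested value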
assert batch_max_steps % hop_size == 0
self.batch_max_steps = batch_max_steps
self.batch_max_frames = batch_max_steps // hop_size
self.hop_size = hop_size
self.aux_context_window = aux_context_window
self.use_noise_input = use_noise_input
self.len_crop = len_crop
# set useful values in random cutting
self.start_offset = 0
self.end_offset = -(self.batch_max_frames)
self.mel_threshold = self.batch_max_frames
def __call__(self, batch):
"""Convert into batch tensors.
Args:
batch (list): list of tuple of the pair of audio, features and speaker embedding.
Returns:
Tensor: Gaussian noise batch (B, 1, T).
Tensor: Auxiliary feature batch (B, C, T'), where
T = (T' - 2 * aux_context_window) * hop_size.
Tensor: Speaker Embedding (B, spk_emb_dim, 1)
Tensor: Target signal batch (B, 1, T).
"""
# check length
batch = [self._adjust_length(*b) for b in batch if len(b[1]) > self.mel_threshold]
xs, cs, es = [b[0] for b in batch], [b[1] for b in batch], [b[2] for b in batch]
# make batch with random cut
c_lengths = [len(c) for c in cs]
start_frames = np.array([np.random.randint(
self.start_offset, cl + self.end_offset) for cl in c_lengths])
x_starts = start_frames * self.hop_size
x_ends = x_starts + self.batch_max_steps
y_batch = [x[start: end] for x, start, end in zip(xs, x_starts, x_ends)]
c_batch = []
for tmp in cs:
tmp = np.array(tmp)
if tmp.shape[0] < self.len_crop:
len_pad = self.len_crop - tmp.shape[0]
c_batch.append(np.pad(tmp, ((0,len_pad),(0,0)), 'constant'))
elif tmp.shape[0] > self.len_crop:
left = np.random.randint(tmp.shape[0]-self.len_crop)
c_batch.append(tmp[left:left+self.len_crop, :])
else:
c_batch.append(tmp)
e_batch = es
        # convert each batch to tensor, assume that each item in batch has the same length
y_batch = torch.tensor(y_batch, dtype=torch.float).unsqueeze(1) # (B, 1, T)
c_batch = torch.tensor(c_batch, dtype=torch.float).transpose(2, 1) # (B, C, T')
e_batch = torch.tensor(e_batch, dtype=torch.float) # (B, E, 1)
# make input noise signal batch tensor
if self.use_noise_input:
z_batch = torch.randn(y_batch.size()) # (B, 1, T)
return (z_batch, c_batch, e_batch), y_batch
else:
return (c_batch, e_batch,), y_batch
def _adjust_length(self, x, c, e):
"""Adjust the audio and feature lengths.
Note:
Basically we assume that the length of x and c are adjusted
through preprocessing stage, but if we use other library processed
features, this process will be needed.
"""
if len(x) < len(c) * self.hop_size:
x = np.pad(x, (0, len(c) * self.hop_size - len(x)), mode="edge")
elif len(x) > len(c) * self.hop_size:
x = x[:-(len(x)-len(c)*self.hop_size)]
        # check the length is valid
if len(x) != len(c) * self.hop_size:
logging.warning(f"len x : {len(x)}, len c : {len(c)}")
#assert len(x) == len(c) * self.hop_size
return x, c, e
def main():
"""Run training process."""
parser = argparse.ArgumentParser(
description="Train Parallel WaveGAN (See detail in parallel_wavegan/bin/train.py).")
parser.add_argument("--train-wav-scp", default=None, type=str,
help="kaldi-style wav.scp file for training. "
"you need to specify either train-*-scp or train-dumpdir.")
parser.add_argument("--train-feats-scp", default=None, type=str,
help="kaldi-style feats.scp file for training. "
"you need to specify either train-*-scp or train-dumpdir.")
parser.add_argument("--train-spkemb-scp", default=None, type=str,
help="kaldi-style spkemb.scp file for training. "
"you need to specify either train-*-scp or train-dumpdir.")
parser.add_argument("--train-segments", default=None, type=str,
help="kaldi-style segments file for training.")
parser.add_argument("--train-dumpdir", default=None, type=str,
help="directory including training data. "
"you need to specify either train-*-scp or train-dumpdir.")
parser.add_argument("--dev-wav-scp", default=None, type=str,
help="kaldi-style wav.scp file for validation. "
"you need to specify either dev-*-scp or dev-dumpdir.")
parser.add_argument("--dev-feats-scp", default=None, type=str,
help="kaldi-style feats.scp file for vaidation. "
"you need to specify either dev-*-scp or dev-dumpdir.")
parser.add_argument("--dev-spkemb-scp", default=None, type=str,
help="kaldi-style spkemb.scp file for vaidation. "
"you need to specify either dev-*-scp or dev-dumpdir.")
parser.add_argument("--dev-segments", default=None, type=str,
help="kaldi-style segments file for validation.")
parser.add_argument("--dev-dumpdir", default=None, type=str,
help="directory including development data. "
"you need to specify either dev-*-scp or dev-dumpdir.")
parser.add_argument("--outdir", type=str, required=True,
help="directory to save checkpoints.")
parser.add_argument("--config", type=str, required=True,
help="yaml format configuration file.")
parser.add_argument("--pretrain", default="", type=str, nargs="?",
help="checkpoint file path to load pretrained params. (default=\"\")")
parser.add_argument("--resume", default="", type=str, nargs="?",
help="checkpoint file path to resume training. (default=\"\")")
parser.add_argument("--verbose", type=int, default=1,
help="logging level. higher is more logging. (default=1)")
parser.add_argument("--rank", "--local_rank", default=0, type=int,
help="rank for distributed training. no need to explictly specify.")
args = parser.parse_args()
args.distributed = False
if not torch.cuda.is_available():
device = torch.device("cpu")
else:
device = torch.device("cuda")
# effective when using fixed size inputs
# see https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936
torch.backends.cudnn.benchmark = True
torch.cuda.set_device(args.rank)
# setup for distributed training
# see example: https://github.com/NVIDIA/apex/tree/master/examples/simple/distributed
if "WORLD_SIZE" in os.environ:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1
if args.distributed:
torch.distributed.init_process_group(backend="nccl", init_method="env://")
# suppress logging for distributed training
if args.rank != 0:
sys.stdout = open(os.devnull, "w")
# set logger
if args.verbose > 1:
logging.basicConfig(
level=logging.DEBUG, stream=sys.stdout,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
elif args.verbose > 0:
logging.basicConfig(
level=logging.INFO, stream=sys.stdout,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
else:
logging.basicConfig(
level=logging.WARN, stream=sys.stdout,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
logging.warning("Skip DEBUG/INFO messages")
# check directory existence
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
# check arguments
if (args.train_feats_scp is not None and args.train_dumpdir is not None) or \
(args.train_feats_scp is None and args.train_dumpdir is None):
raise ValueError("Please specify either --train-dumpdir or --train-*-scp.")
if (args.dev_feats_scp is not None and args.dev_dumpdir is not None) or \
(args.dev_feats_scp is None and args.dev_dumpdir is None):
raise ValueError("Please specify either --dev-dumpdir or --dev-*-scp.")
if (args.train_spkemb_scp is not None and args.train_dumpdir is not None) or \
(args.train_spkemb_scp is None and args.train_dumpdir is None):
raise ValueError("Please specify either --train-dumpdir or --train-*-scp.")
if (args.dev_spkemb_scp is not None and args.dev_dumpdir is not None) or \
(args.dev_spkemb_scp is None and args.dev_dumpdir is None):
raise ValueError("Please specify either --dev-dumpdir or --dev-*-scp.")
# load and save config
with open(args.config) as f:
config = yaml.load(f, Loader=yaml.Loader)
config.update(vars(args))
config["version"] = autovc.__version__ # add version info
with open(os.path.join(args.outdir, "config.yml"), "w") as f:
yaml.dump(config, f, Dumper=yaml.Dumper)
for key, value in config.items():
logging.info(f"{key} = {value}")
# get dataset
if config["remove_short_samples"]:
#FIXME(<NAME>): This is not for AutoVC
mel_length_threshold = config["batch_max_steps"] // config["hop_size"] + \
2 * config["autovc_generator_params"].get("aux_context_window", 0)
else:
mel_length_threshold = None
if args.train_wav_scp is None or args.dev_wav_scp is None:
if config["format"] == "hdf5":
audio_query, mel_query, spkemb_query = "*.h5", "*.h5", "*.h5"
audio_load_fn = lambda x: read_hdf5(x, "wave") # NOQA
mel_load_fn = lambda x: read_hdf5(x, "feats") # NOQA
spkemb_load_fn = lambda x: read_hdf5(x, "spkemb")
elif config["format"] == "npy":
audio_query, mel_query, spkemb_query = "*-wave.npy", "*-feats.npy", "*-spkemb.npy"
audio_load_fn = np.load
mel_load_fn = np.load
spkemb_load_fn = np.load
else:
raise ValueError("support only hdf5 or npy format.")
if args.train_dumpdir is not None:
train_dataset = AudioMelDataset(
root_dir=args.train_dumpdir,
audio_query=audio_query,
mel_query=mel_query,
spkemb_query=spkemb_query,
audio_load_fn=audio_load_fn,
mel_load_fn=mel_load_fn,
spkemb_load_fn=spkemb_load_fn,
mel_length_threshold=mel_length_threshold,
allow_cache=config.get("allow_cache", False), # keep compatibility
)
else:
train_dataset = AudioMelSCPDataset(
wav_scp=args.train_wav_scp,
feats_scp=args.train_feats_scp,
spkemb_scp=args.train_spkemb_scp,
segments=args.train_segments,
mel_length_threshold=mel_length_threshold,
allow_cache=config.get("allow_cache", False), # keep compatibility
)
logging.info(f"The number of training files = {len(train_dataset)}.")
if args.dev_dumpdir is not None:
dev_dataset = AudioMelDataset(
root_dir=args.dev_dumpdir,
audio_query=audio_query,
mel_query=mel_query,
spkemb_query=spkemb_query,
audio_load_fn=audio_load_fn,
mel_load_fn=mel_load_fn,
spkemb_load_fn=spkemb_load_fn,
mel_length_threshold=mel_length_threshold,
allow_cache=config.get("allow_cache", False), # keep compatibility
)
else:
dev_dataset = AudioMelSCPDataset(
wav_scp=args.dev_wav_scp,
feats_scp=args.dev_feats_scp,
spkemb_scp=args.dev_spkemb_scp,
segments=args.dev_segments,
mel_length_threshold=mel_length_threshold,
allow_cache=config.get("allow_cache", False), # keep compatibility
)
logging.info(f"The number of development files = {len(dev_dataset)}.")
dataset = {
"train": train_dataset,
"dev": dev_dataset,
}
# get data loader
collater = Collater(
batch_max_steps=config["batch_max_steps"],
hop_size=config["hop_size"],
# keep compatibility
aux_context_window=config["autovc_generator_params"].get("aux_context_window", 0),
# keep compatibility
use_noise_input=config.get(
"generator_type", "ParallelWaveGANGenerator") != "MelGANGenerator",
len_crop=config["len_crop"],
)
sampler = {"train": None, "dev": None}
if args.distributed:
# setup sampler for distributed training
from torch.utils.data.distributed import DistributedSampler
sampler["train"] = DistributedSampler(
dataset=dataset["train"],
num_replicas=args.world_size,
rank=args.rank,
shuffle=True,
)
sampler["dev"] = DistributedSampler(
dataset=dataset["dev"],
num_replicas=args.world_size,
rank=args.rank,
shuffle=False,
)
data_loader = {
"train": DataLoader(
dataset=dataset["train"],
shuffle=False if args.distributed else True,
collate_fn=collater,
batch_size=config["batch_size"],
num_workers=config["num_workers"],
sampler=sampler["train"],
pin_memory=config["pin_memory"],
),
"dev": DataLoader(
dataset=dataset["dev"],
shuffle=False if args.distributed else True,
collate_fn=collater,
batch_size=config["batch_size"],
num_workers=config["num_workers"],
sampler=sampler["dev"],
pin_memory=config["pin_memory"],
),
}
# define models and optimizers
generator_class = getattr(
autovc.models,
# keep compatibility
config.get("generator_type", "AutoVCGenerator"),
)
setattr(generator_class, 'num_mels', config["num_mels"])
model = {
"generator": generator_class(
**config["autovc_generator_params"]).to(device),
}
criterion = {
"l1": torch.nn.L1Loss().to(device),
"mse": torch.nn.MSELoss().to(device),
}
generator_optimizer_class = getattr(
autovc.optimizers,
# keep compatibility
config.get("generator_optimizer_type", "RAdam"),
)
optimizer = {
"generator": generator_optimizer_class(
model["generator"].parameters(),
**config["generator_optimizer_params"],
),
}
generator_scheduler_class = getattr(
torch.optim.lr_scheduler,
# keep compatibility
config.get("generator_scheduler_type", "StepLR"),
)
scheduler = {
"generator": generator_scheduler_class(
optimizer=optimizer["generator"],
**config["generator_scheduler_params"],
),
}
if args.distributed:
# wrap model for distributed training
try:
from apex.parallel import DistributedDataParallel
except ImportError:
raise ImportError("apex is not installed. please check https://github.com/NVIDIA/apex.")
model["generator"] = DistributedDataParallel(model["generator"])
logging.info(model["generator"])
# define trainer
trainer = Trainer(
steps=0,
epochs=0,
data_loader=data_loader,
sampler=sampler,
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
config=config,
device=device,
)
# load pretrained parameters from checkpoint
if len(args.pretrain) != 0:
trainer.load_checkpoint(args.pretrain, load_only_params=True)
logging.info(f"Successfully load parameters from {args.pretrain}.")
# resume from checkpoint
if len(args.resume) != 0:
trainer.load_checkpoint(args.resume)
logging.info(f"Successfully resumed from {args.resume}.")
# run training loop
try:
trainer.run()
except KeyboardInterrupt:
trainer.save_checkpoint(
os.path.join(config["outdir"], f"checkpoint-{trainer.steps}steps.pkl"))
logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
if __name__ == "__main__":
main()
``` |
{
"source": "jlucier/6.S062-car-swarm",
"score": 3
} |
#### File: 6.S062-car-swarm/test/tests.py
```python
import sys
import time
sys.path.append('../')
from car import Car
from driver import Preset
def test_collisions():
car = Car()
car._vicon_client.start()
time.sleep(2)
print "Running..."
try:
car._detect_collisions()
except KeyboardInterrupt:
car._kill = True
time.sleep(.5)
print "Done"
def test_system():
straight = raw_input("Drive straight? ")
path = Preset.STRAIGHT if straight == 'y' else None
if not path:
path = Preset.COUNTER_CLOCKWISE_CIRCLE if raw_input("Counter clockwise? ") == 'y' else path
car = Car(path=path)
car.start()
try:
raw_input("Running... press enter to start driving\n")
car._driver.go()
raw_input('Press enter to stop...')
except KeyboardInterrupt:
pass
car.stop()
print "Done"
def test_bystander():
# no collision detection
car = Car()
car._vicon_client.start()
car._talker.start()
car._message_worker.start()
car._main_worker.start()
try:
raw_input("Running... press enter to stop\n")
except KeyboardInterrupt:
pass
car._driver.stop()
car._driver.destroy()
car._kill = True
car._vicon_client.stop()
car._talker.stop()
car._message_worker.join()
car._main_worker.join()
print "Done"
def main():
if len(sys.argv) < 2:
test_system()
elif sys.argv[1] == 'collisions':
        test_collisions()
elif sys.argv[1] == 'bystander':
test_bystander()
else:
print "Usage: collisions / cp / mp"
if __name__ == '__main__':
main()
``` |
{
"source": "jluckenbaugh2/Deepcut",
"score": 3
} |
#### File: Deepcut/src/SendFinal.py
```python
import socket
import numpy as np
# also I had to open the port on the rpi and on my computer
# for opening ports on linux (rpi) see https://raspberrypi.stackexchange.com/questions/69123/how-to-open-a-port
# for opening ports on windows see https://www.tomshardware.com/news/how-to-open-firewall-ports-in-windows-10,36451.html
# for opening ports on macos see https://www.macworld.co.uk/how-to/how-open-specific-ports-in-os-x-1010-firewall-3616405/
def sendRPI(filename):
s = socket.socket()
host = socket.gethostbyname("192.168.1.7") # ip address of rpi
port = 3333
s.connect((host, port))
print("Connected")
filename_s = filename.split('_')
for i in range(len(filename_s)):
if 'bpm' in filename_s[i]:
temp = filename_s[i].split('=')
bpm = int(temp[-1])
bpmB = bpm.to_bytes(64, 'big')
s.send(bpmB)
## Send file after connecting
numB = 1024
# filename = "TTSRap.mp4"
filenameA = "Accepted.txt"
file = open(filename, 'rb')
file_data = file.read()
lengthB = len(file_data)
numkB = int(np.ceil(lengthB/numB))
numkB_inB = numkB.to_bytes(64, 'big')
s.send(numkB_inB)
for k in range(numkB):
if (k+1)*numB < lengthB:
data = file_data[k*numB:((k+1)*numB)]
else:
data = file_data[k*numB:lengthB]
s.send(data)
print("%d/%d Sent" % (k, numkB))
fileA = open(filenameA, 'wb')
file_dataA = s.recv(1024)
fileA.write(file_dataA)
fileA.close()
print("%d/%d Accepted" % (k, numkB))
print("File has been sent")
``` |
{
"source": "Jluct/PyMassMailer",
"score": 3
} |
#### File: PyMassMailer/PyMassMailer/ConfigData.py
```python
from configparser import ConfigParser
import os
import sys
class ConfigData:
conf = ''
parser = ''
def __init__(self, conf='conf.ini'):
if os.path.exists(conf):
self.conf = conf
else:
print("Config not exist for path: " + conf)
sys.exit(1)
self.parser = ConfigParser()
self.parser.read(self.conf)
def __del__(self):
pass
def get(self, section, field):
if not self.parser.has_section(section):
return False
return self.parser.get(section, field)
def get_section(self, section):
section_data = {}
options = self.parser.options(section)
for option in options:
if self.parser.get(section, option):
section_data[option] = self.parser.get(section, option)
else:
section_data[option] = None
return section_data
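# --- Illustrative usage sketch; the section and option names below are assumptions.
# Assuming a conf.ini along the lines of:
#   [smtp]
#   host = smtp.example.com
#   port = 587
# the class can be used like this:
#   config = ConfigData('conf.ini')
#   host = config.get('smtp', 'host')   # 'smtp.example.com', or False if the section is missing
#   smtp = config.get_section('smtp')   # {'host': 'smtp.example.com', 'port': '587'}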
``` |
{
"source": "jludvice/django-exchange",
"score": 2
} |
#### File: django-exchange/exchange/managers.py
```python
from django.db import models
class ExchangeRateManager(models.Manager):
def get_query_set(self):
return super(ExchangeRateManager, self).get_query_set()\
.select_related('source', 'target')
def get_rate(self, source_currency, target_currency):
return self.get(source__code=source_currency,
target__code=target_currency).rate
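# --- Illustrative usage sketch, assuming an ExchangeRate model whose `source`/`target`
# relations expose a `code` field and which defines a `rate` field:
#   rate = ExchangeRate.objects.get_rate('USD', 'EUR')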
``` |
{
"source": "jludwiczak/rossmann-toolbox",
"score": 3
} |
#### File: rossmann_toolbox/utils/dssp.py
```python
from Bio.PDB.DSSP import _make_dssp_dict
import pandas as pd
import gzip, os
def run_dssp(pdb_path, dssp_bin=None):
    dssp_path = f'{os.path.splitext(pdb_path)[0]}.dssp'
os.system(f'{dssp_bin} {pdb_path} > {dssp_path}')
dssp_data = parse_dssp_output(dssp_path)
return dssp_data
def parse_dssp_output(dssp_fn, use_gzip=False):
'''
    extracts secondary structure labels from a DSSP output file
'''
if use_gzip:
f = gzip.open(dssp_fn, 'rt')
else:
f = open(dssp_fn, 'r')
lines = [line.rstrip() for line in f.readlines()[28:]]
f.close()
dssp = {int(line[0:5].strip()): {'pdb_num': line[5:11].strip(), 'pdb_chain': line[11:12].strip(),
'pdb_resn': line[13].strip(), 'pdb_ss': line[16:17]} for line in lines}
dssp = pd.DataFrame.from_dict(dssp, orient='index')
return dssp
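# --- Illustrative usage sketch; the binary name and PDB path are assumptions:
#   df = run_dssp('structures/1abc.pdb', dssp_bin='mkdssp')
#   # df is indexed by the DSSP residue number with columns
#   # 'pdb_num', 'pdb_chain', 'pdb_resn' and 'pdb_ss'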
```
#### File: rossmann_toolbox/utils/embedder.py
```python
import pandas as pd
import h5py  # used by encode() when an out_fn is given to store embeddings in HDF5
class Embedder:
def __init__(self, cuda_device=-1, tokens_per_batch=16000):
"""
Wrapper for efficient embedding of protein sequences with various embedding methods
:param cuda_device: Index of the CUDA device to use when embedding (-1 if CPU)
        :param tokens_per_batch: Number of tokens (amino acids) per encoded sequence batch - depends on available GPU VRAM
"""
self.cuda_device = cuda_device
self.tokens_per_batch = tokens_per_batch
@staticmethod
def _validate_input(data):
"""
Validates input pd.DataFrame with sequences that are to embedded
:param data: input pd.DataFrame
:return:
"""
# Validate input DataFrame
if not isinstance(data, pd.DataFrame):
raise TypeError('Data must be a pandas DataFrame!')
if 'sequence' not in data.columns:
raise KeyError('DataFrame must contain sequence column!')
def _batch_df(self, data):
"""
Mark the input DataFrame so that each batch contains not more than 'self.tokens_per_batch' amino acids.
:param data: input DataFrame
:return: copy of the input DataFrame with additional 'batch' column
"""
b_df = data.copy()
b_df['seq_len'] = b_df['sequence'].apply(len) # Calculate length of each sequence in DataFrame
b_df = b_df.sort_values(by='seq_len') # Sort sequences by length
b_df['cum_seq_len'] = b_df['seq_len'].cumsum() # Calculate cumulative sequence lengths to split into batches
b_df['batch'] = b_df['cum_seq_len'] // self.tokens_per_batch
return b_df
def _encode_batch_api(self, sequences):
raise NotImplementedError('Fetching embedding via API is not available for the selected model!')
def encode(self, data, out_fn=None, api=False):
if out_fn is not None:
f = h5py.File(out_fn, 'w')
self._validate_input(data)
df = self._batch_df(data)
results = {}
for batch in df['batch'].unique():
b_df = df[df['batch'] == batch]
sequences = b_df['sequence'].tolist()
embs = self._encode_batch_api(sequences) if api else self._encode_batch(sequences)
for emb, idx in zip(embs, b_df.index.values):
if out_fn is not None:
f.create_dataset(idx, data=emb)
else:
results[idx] = emb
if out_fn is not None:
f.close()
else:
return results
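# --- Illustrative sketch: encode() delegates the actual embedding to `_encode_batch`,
# which concrete subclasses are expected to provide. The dummy subclass below is an
# assumption for illustration only, showing the contract of one per-sequence array
# returned per input sequence:
#   import numpy as np
#   class DummyEmbedder(Embedder):
#       def _encode_batch(self, sequences):
#           return [np.random.rand(len(seq), 16) for seq in sequences]
#   df = pd.DataFrame({'sequence': ['MKV', 'ACDE']}, index=['a', 'b'])
#   embs = DummyEmbedder(cuda_device=-1).encode(df)   # {'a': (3, 16) array, 'b': (4, 16) array}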
```
#### File: rossmann_toolbox/utils/graph_loader_opt.py
```python
import copy
import random
from packaging import version
import torch
import dgl
import numpy as np
import pandas as pd
import networkx as nx
from .bio_params import LABEL_DICT, ACIDS_MAP_DEF, SS_MAP_EXT, CM_THRESHOLD
def collate(samples):
(graphs, labels) = map(list, zip(*samples))
batched_graph = dgl.batch(graphs)
return batched_graph, torch.tensor(labels)
class GraphECMLoaderBalanced(torch.utils.data.Dataset):
needed_columns = {'seq', 'secondary'}
threshold = CM_THRESHOLD
FLOAT_DTYPE = np.float32
counter = 0
res_info = {}; graphs = {}; edge_features = {}; node_features = {}; labels_encoded = {}
def __init__(self, frame, contact_maps, edge_features, foldx_info, balance_classes=False, **kw_args):
        '''
        params:
            frame (pd.DataFrame) with columns: seq, secondary and (optionally) simplified_cofactor
            contact_maps (dict) mapping frame indices to contact maps used to define the graph structure
            edge_features (dict) mapping frame indices to distance-based edge feature arrays
            foldx_info (dict) mapping frame indices to dicts with 'edge_data' and 'node_data' FoldX features
            balance_classes (bool) if True, subsamples the NAD class each epoch to balance the cofactor classes
        '''
columns = frame.columns.tolist()
assert not (self.needed_columns - set(columns)), f'no column(s) {self.needed_columns - set(columns)}'
assert isinstance(frame, pd.DataFrame), 'frame should be DataFrame'
assert isinstance(contact_maps, dict)
assert isinstance(edge_features, dict)
assert isinstance(foldx_info, dict)
assert frame.shape[0] != 0, 'given empty frame'
available_indices = list(foldx_info.keys())
if 'simplified_cofactor' not in frame.columns.tolist():
frame['simplified_cofactor'] = 'NAD'
self.balance_classes = balance_classes
self.indices = []
frame = frame[frame.index.isin(available_indices)]
for i, (idx, row) in enumerate(frame.iterrows()):
seq_emb = np.asarray([ACIDS_MAP_DEF[s] for s in row['seq']], dtype=np.int64)
sec_emb = np.asarray([SS_MAP_EXT[s] for s in row['secondary']], dtype=np.int64)
self.res_info[idx] = (seq_emb, sec_emb)
self.graphs[idx] = contact_maps[idx][0] < self.threshold
edge_dist_based = np.nan_to_num(edge_features[idx])
edge_foldx_based = foldx_info[idx]['edge_data']
self.edge_features[idx] = np.concatenate([edge_dist_based, edge_foldx_based], axis=1).astype(self.FLOAT_DTYPE)
self.node_features[idx] = foldx_info[idx]['node_data'].astype(self.FLOAT_DTYPE)
#print(self.node_features[idx], foldx_info)
self.labels_encoded[idx] = LABEL_DICT[row['simplified_cofactor']]
self.indices.append(idx)
self.NAD_indices, self.non_NAD_indices = [], []
for idx, cof in self.labels_encoded.items():
if cof == 0:
self.NAD_indices.append(idx)
else:
self.non_NAD_indices.append(idx)
#self._validate_dicts()
self._map_new_()
#self._fix_samples_balancing_()
self.num_samples_per_epoch = len(self.indices)
if self.num_samples_per_epoch == 0:
print(len(self.labels_encoded))
print(len(self.NAD_indices), len(self.non_NAD_indices))
raise ValueError('zero length loader')
def __len__(self):
return self.num_samples_per_epoch
def __getitem__(self, idx):
idx_m = self.index_map[idx]
features_edge = self.edge_features[idx_m]
features_node = self.node_features[idx_m]
g = dgl.from_networkx(nx.Graph(self.graphs[idx_m]))
seq_res_nb, sec_res_nb = self.res_info[idx_m]
if self.balance_classes:
if self.counter == self.num_samples_per_epoch:
self._fix_samples_balancing_()
else:
self.counter += 1
g.ndata['residues'] = torch.from_numpy(seq_res_nb)
g.ndata['secondary'] = torch.from_numpy(sec_res_nb)
g.edata['features'] = torch.from_numpy(features_edge)
g.ndata['features'] = torch.from_numpy(features_node)
return g, self.labels_encoded[idx_m]
def _fix_samples_balancing_(self):
if self.balance_classes:
NAD_subsample = len(self.NAD_indices)
NAD_subsample = int(self.SUBSAMPLE*len(self.NAD_indices))
random.shuffle(self.NAD_indices)
NAD_subsamples = self.NAD_indices[:NAD_subsample]
self.indices = NAD_subsamples + self.non_NAD_indices
random.shuffle(self.indices)
self.counter = 0
else:
self.indices = list(self.res_info.keys())
self._map_new_()
def _map_new_(self):
self.index_map = {num : idx for num, idx in enumerate(self.indices)}
def _validate_dicts(self):
pass
'''
for idx in self.adjecency_matrix.keys():
if self.embeddings[idx].shape[0] != self.res_info[idx][0].size:
raise ValueError(f'shape mismatch for idx {idx} between emb and res_info')
'''
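# --- Illustrative usage sketch; variable names below are placeholders. The dataset
# is meant to be consumed through a DataLoader together with the `collate` helper
# defined at the top of this module, and the keys of the three dicts must match the
# DataFrame index:
#   dataset = GraphECMLoaderBalanced(frame=df, contact_maps=cmaps,
#                                    edge_features=edge_feats, foldx_info=foldx)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=64, collate_fn=collate)
#   for batched_graph, labels in loader:
#       ...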
```
#### File: rossmann_toolbox/utils/struct_loader.py
```python
import os
import sys
import torch
import dgl
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from ..models import GatLit
from .graph_loader_opt import GraphECMLoaderBalanced
def collate(samples):
'''
dgl batch to gpu
https://discuss.dgl.ai/t/best-way-to-send-batched-graphs-to-gpu/171/9
'''
(graphs, labels) = map(list, zip(*samples))
batched_graph = dgl.batch(graphs)
return batched_graph, torch.tensor(labels)
def sigmoid(x):
return 1/(1 + np.exp(-x))
class Deepligand3D:
NUM_WORKERS = 4
LABEL_DICT = {'NAD' : 0, 'NADP' : 1, 'SAM' : 2, 'FAD' : 3}
LABEL_DICT_R = {0 :'NAD', 1 :'NADP', 2 :'SAM', 3:'FAD' }
COFACTORS = list(LABEL_DICT.keys())
CM_THRESHOLD = None # assigned when data loader is initialized
def __init__(self, weights_dir, device='cpu', **kw_args):
"""
Monte carlo Deepligand version
params:
path_model (str) path to model weights
device (str) 'cpu' or 'cuda'
"""
assert device in ['cuda', 'cpu']
self.device_type = device
self.COFACTORS_STD = [f'{cof}_std' for cof in self.COFACTORS]
self.CLASSES = len(self.COFACTORS)
self.list_of_model_paths = [f'{weights_dir}/struct_ensemble/model{i}.ckpt' for i in range(1, 5)]
        self.list_of_hp_paths = [f'{weights_dir}/struct_ensemble/hp{i}.ckpt' for i in range(1, 5)]
self.device = torch.device(device)
self.model_list = torch.nn.ModuleList()
for path, hp_path in zip(self.list_of_model_paths, self.list_of_hp_paths):
conf = torch.load(path)
hps = torch.load(hp_path)
model = GatLit(hps)
model.load_state_dict(conf)
self.model_list.append(model.eval().to(self.device))
def predict(self, dataframe, contact_maps, edge_feats, foldx_info, raw_scores = False, verbose=False):
"""
params:
dataframe (pd.DataFrame) with 'seq' and 'secondary' columns
            contact_maps (dict) mapping dataframe indices to contact maps
            edge_feats (dict) mapping dataframe indices to edge feature arrays
            foldx_info (dict) mapping dataframe indices to FoldX-derived node and edge features
raw_scores (bool) if True probabilities are replaced with raw scores
verbose (bool) default False
returns:
pd.DataFrame
"""
available_sequences = foldx_info.keys() & contact_maps.keys() & edge_feats.keys()
self.sequences = dataframe['seq'].tolist()
indices = dataframe.index.tolist()
num_sequences = len(self.sequences)
available_sequences = available_sequences & set(indices)
if verbose and (len(available_sequences) == 0):
raise ValueError('mismatched keys')
else:
pass
#print('seq to process:', len(available_sequences), num_sequences)
indices_with_embeddings = [i for i, seq in enumerate(indices) if seq in available_sequences]
mask_with_embeddings = [True if idx in indices_with_embeddings else False for idx in indices]
sequences_without_embeddings = set(indices) - available_sequences
if verbose and (len(sequences_without_embeddings) > 0):
print(f'found {len(sequences_without_embeddings)} sequences without embeddings/contact maps')
dataframe_no_missings = dataframe[~dataframe.index.isin(sequences_without_embeddings)].copy()
else:
dataframe_no_missings = dataframe.copy()
loader = self._prepare_samples(dataframe_no_missings, contact_maps, edge_feats, foldx_info)
single_preds = []
for x, _ in loader:
if self.device_type == 'cuda':
x = x.to(self.device)
storage = [model(x, scores=raw_scores).detach().unsqueeze(0) for model in self.model_list]
storage = torch.cat(storage, axis=0)
single_preds.append(storage)
# (num_models, loader_len, 4)
del loader
# (num_rounds, num_models, loader_len, 4)
results = torch.cat(single_preds, axis=1)
cofactors_means = []; cofactors_std = []
means_with_nans = np.empty((num_sequences, 4))
means_with_nans[:] = np.nan
stds_with_nans = means_with_nans.copy()
mean = results.view(-1, num_sequences, 4).mean(0)
std = results.view(-1, num_sequences, 4).std(0)
mean, std = mean.cpu().numpy(), std.cpu().numpy()
means_with_nans[indices_with_embeddings, :] = mean
stds_with_nans[indices_with_embeddings, :] = std
df_results = pd.DataFrame(means_with_nans, columns=self.COFACTORS)
df_results_std = pd.DataFrame(stds_with_nans, columns=self.COFACTORS_STD)
df_results['seq'] = self.sequences
df_output = pd.concat([df_results, df_results_std], axis=1).round(8)
del df_results, df_results_std
return df_output
def _prepare_samples(self, dataframe, contact_maps, edge_feats, foldx_info, BATCH_SIZE=64):
self.CM_THRESHOLD = GraphECMLoaderBalanced.threshold
dataset = GraphECMLoaderBalanced(frame=dataframe,
contact_maps=contact_maps,
edge_features=edge_feats,
foldx_info=foldx_info)
if len(dataset) == 0:
raise ValueError('dataset is empty')
kw_args = {'batch_size' : BATCH_SIZE,
'shuffle' : False,
'num_workers' : self.NUM_WORKERS,
'drop_last' : False
}
dataset_loaded = []
        # pin dataset to class instance for further use
self.dataset = dataset
loader = DataLoader(dataset, collate_fn=collate, **kw_args)
return loader
def generate_embeddings(self, dataframe, contact_maps, edge_feats, foldx_info, verbose=False,
as_array=False, **kw_args):
"""
params:
dataframe (pd.DataFrame) with 'seq' and 'secondary' columns
contact_maps (dict) similar as above
edge_feats (dict) similar as above
raw_scores (bool) if True probabilities are replaced with raw scores
verbose (bool) default False
as_array (bool) if True returns numpy array
returns:
np.ndarray/dict
"""
available_sequences = foldx_info.keys() & contact_maps.keys() & edge_feats.keys()
self.sequences = dataframe['seq'].tolist()
indices = dataframe.index.tolist()
num_sequences = len(self.sequences)
available_sequences = available_sequences & set(indices)
if verbose and (len(available_sequences) == 0):
raise ValueError('mismatched keys')
else:
pass
#print('seq to process:', len(available_sequences), num_sequences)
indices_with_embeddings = [i for i, seq in enumerate(indices) if seq in available_sequences]
mask_with_embeddings = [True if idx in indices_with_embeddings else False for idx in indices]
sequences_without_embeddings = set(indices) - available_sequences
if verbose and (len(sequences_without_embeddings) > 0):
print(f'found {len(sequences_without_embeddings)} sequences without embeddings/contact maps')
dataframe_no_missings = dataframe[~dataframe.index.isin(sequences_without_embeddings)].copy()
else:
dataframe_no_missings = dataframe.copy()
loader = self._prepare_samples(dataframe_no_missings, contact_maps, edge_feats, foldx_info, BATCH_SIZE=1)
single_preds = []
with torch.no_grad():
for x, _ in loader:
if self.device_type == 'cuda':
x = x.to(self.device)
storage = [self.forward_pass(x, model).unsqueeze(0) for model in self.model_list]
storage = torch.cat(storage, axis=0).cpu().numpy()
if not as_array:
storage = {cof_name : storage[:, :, i].mean(0)[:, np.newaxis] for i, cof_name in enumerate(self.COFACTORS)}
single_preds.append(storage)
del loader
return single_preds
def forward_pass(self, g, model):
"""
execute custom DL forward pass to extract node scores
"""
features = model(g, nodes=True)
g.ndata['sum'] = features
h = dgl.sum_nodes(g, 'sum')
######### attention block ###############
attn = h.clone()
attn = torch.nn.functional.relu(attn)
attn = model.affine(attn)
attn = torch.sigmoid(attn)
features = features*attn
feats = features.cpu().detach()
feats = feats.reshape(-1, 4)
return feats
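    # --- Illustrative usage sketch; variable names and the weights directory are placeholders:
    #   dl = Deepligand3D(weights_dir='weights', device='cpu')
    #   out = dl.predict(frame, contact_maps, edge_feats, foldx_info)
    #   # out has one row per sequence with the NAD/NADP/SAM/FAD mean predictions,
    #   # the corresponding *_std columns and the 'seq' column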
class ShowNxGraph:
#default cmap
'''
base class for graph plots
'''
def __init__(self, cmap=plt.cm.GnBu, figsize=(10, 10)):
'''
optional params:
cmap - plt.cm object
figsize - tuple(int, int) size of a plot
'''
self.cmap = cmap
self.figsize = figsize
def color_nodes(self, nodes):
colored = []
for node in nodes:
colored.append(self.cmap(node))
return colored
def draw(self, g, residue_scores, node_labels, node_positions = 'default', node_size=150, ax = None):
'''
draw
params:
g (nx.graph)
            residue_scores (np.ndarray, dict) - if an array, colors nodes with the default cmap; if a dict
                (in the form {node_nb: (R, G, B)}), uses the given values
            node_labels (list) node names
            node_positions (str or nx.layout) position of residues
            node_size (int) node size
            ax (None or matplotlib axes) if None, creates a new plt.subplots figure; otherwise draws onto the given axes
return:
fig, ax
'''
assert isinstance(residue_scores, (dict, np.ndarray)), 'wrong type of residue_scores'
assert node_labels is None or isinstance(node_labels, (list, str)), 'wrong arg type'
assert isinstance(node_positions, (str, dict)), 'wrong arg type'
assert isinstance(node_size, int), 'wrong arg type'
if node_labels is not None:
sec_labels = {i: s for i,s in enumerate(node_labels)}
#define topology
#if len(set(secondary) - {'C1','C2', 'C3', 'C4', 'E1', 'E2', 'H1', 'H2'}) > 0:
# secondary = reduce_ss_alphabet(secondary, True)
#else:
# secondary = {i : s for i,s in enumerate(secondary)}
if isinstance(node_positions, str):
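            # NOTE: this 'default' branch relies on a `secondary` mapping and a Positions
            # helper that are not defined in this file (see the commented-out block above),
            # so callers may need to pass explicit node_positions instead.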
p = Positions(list(secondary.values()))
p._find_changes()
positions = p.get_positions(p._new_sec_as_dict())
else:
positions = node_positions
#define colors
if isinstance(residue_scores, np.ndarray):
node_colors = self.color_nodes(residue_scores)
elif isinstance(residue_scores, dict):
node_colors = list(residue_scores.values())
if ax is None:
fig, ax = plt.subplots(1,1,figsize=self.figsize)
else:
fig = None
nx.draw_networkx_nodes(g, positions, node_color=node_colors, ax=ax, alpha=0.9, node_size=node_size)
nx.draw_networkx_edges(g, positions, ax=ax, alpha=0.4)
if node_labels is not None:
nx.draw_networkx_labels(g, positions, labels = sec_labels,ax=ax)
return fig, ax
```
#### File: rossmann-toolbox/tests/test_hhsearch.py
```python
import pytest
import os
import numpy as np
from Bio import SeqIO
from rossmann_toolbox import RossmannToolbox
class TestHHSearch:
@pytest.mark.hhsearch
def test_core_detection_evaluation_hhsearch(self):
data = {str(entry.id): str(entry.seq) for entry in SeqIO.parse('test-data/fasta/full_length.fas', 'fasta')}
rtb = RossmannToolbox(use_gpu=False, hhsearch_loc=os.environ['HHSEARCH'])
preds = rtb.predict(data, mode='seq', core_detect_mode='hhsearch', importance=False)
assert preds['3m6i_A']['sequence'] == 'VLICGAGPIGLITMLCAKAAGACPLVITDIDE'
pr_arr = np.asarray(list(preds['3m6i_A'].values())[0:-1])
ref_arr = np.load('test-data/ref/seq_full_length_detect_eval_hhsearch.npy')
assert np.square(pr_arr.flatten() - ref_arr.flatten()).mean(axis=0) < 10e-5
``` |
{
"source": "jludwig79/dota-stats",
"score": 3
} |
#### File: dota_stats/server/server.py
```python
import json
import os
import plotly
import plotly.graph_objs as go
from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
from dota_stats import win_rate_pick_rate, fetch_summary
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DOTA_DB_URI']
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
def get_health_chart(days, timezone, hour=True):
"""Fetches a statistics health summary table and formats into a plotly
chart."""
df_summary, rows = fetch_summary.get_health_summary(days, timezone, hour)
# For plot
fig = go.Figure(data=[
go.Bar(name='Normal',
x=df_summary.index.values,
y=df_summary['normal']),
go.Bar(name='High',
x=df_summary.index.values,
y=df_summary['high']),
go.Bar(name='Very High',
x=df_summary.index.values,
y=df_summary['very_high']),
])
fig.update_layout(barmode='stack')
record_count_plot = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
return record_count_plot, rows
@app.route('/')
def status():
"""Index page for server, currently contains everything on the site"""
# Win rate / pick rate by skill level
df_sql = win_rate_pick_rate.get_current_win_rate_table(3)
radiant_vs_dire = []
pick_vs_win = {}
time_range = list(set(df_sql['time_range']))[0]
for skill in list(set(df_sql['skill'])):
df_sub = df_sql[df_sql['skill'] == skill]
radiant_vs_dire.append(
100 * (df_sub.sum()['radiant_win'] /
(df_sub.sum()['radiant_total'])))
pick_vs_win[skill] = go.Figure(
go.Scatter(
x=df_sub['total'].values,
y=df_sub['win_pct'].values,
text=df_sub['hero'].values,
mode='markers+text',
textposition='top center'))
pick_vs_win[skill].update_layout(
title="Skill {0}: {1}".format(skill, time_range),
margin=dict(l=20, r=0, t=50, b=20),
height=550,
width=550)
pick_vs_win[skill].update_xaxes({'title': 'Number of Games'})
pick_vs_win[skill].update_yaxes({'title': 'Win %'})
win_rate_1 = json.dumps(pick_vs_win[1], cls=plotly.utils.PlotlyJSONEncoder)
win_rate_2 = json.dumps(pick_vs_win[2], cls=plotly.utils.PlotlyJSONEncoder)
win_rate_3 = json.dumps(pick_vs_win[3], cls=plotly.utils.PlotlyJSONEncoder)
# ---------------------------------------------------------------
# Health metrics
# ---------------------------------------------------------------
rec_plot30, _ = get_health_chart(30, 'US/Eastern', hour=False)
rec_plot3, rec_count_table = get_health_chart(3, 'US/Eastern')
return render_template("index.html",
radiant_vs_dire=radiant_vs_dire,
win_rate_1=win_rate_1,
win_rate_2=win_rate_2,
win_rate_3=win_rate_3,
rec_count_table=rec_count_table,
rec_plot3=rec_plot3,
rec_plot30=rec_plot30, )
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
``` |
{
"source": "jluech/PGAcloud_Client",
"score": 3
} |
#### File: client/provider_drivers/abstract_driver.py
```python
from abc import ABC, abstractmethod
class AbstractDriver(ABC):
name = ""
@abstractmethod
def __init__(self, configuration, logger):
self._configuration = configuration
self._logger = logger
@abstractmethod
def setup_cloud_environment(self):
pass
```
#### File: PGAcloud_Client/utilities/docker_utils.py
```python
import os
import docker
__docker_client = None
__management_network = None
def get_docker_client(cert_path, host_addr, host_port):
global __docker_client
if __docker_client:
return __docker_client
tls_config = docker.tls.TLSConfig(
ca_cert=os.path.join(cert_path, "ca.pem"),
client_cert=(
os.path.join(cert_path, "cert.pem"),
os.path.join(cert_path, "key.pem")
),
verify=True
)
__docker_client = docker.DockerClient(
base_url="tcp://{addr_}:{port_}".format(
addr_=host_addr,
port_=host_port
),
tls=tls_config,
version="auto",
)
return __docker_client
def get_management_network():
global __management_network
if __management_network:
return __management_network
if not __docker_client:
raise Exception("Create a docker client first, before creating a network...")
# Creates a new docker network to bridge the manager to the runners.
__management_network = __docker_client.networks.create(
name="pga-management",
driver="overlay",
check_duplicate=True,
attachable=True,
scope="swarm",
labels={"PGAcloud": "PGA-Management"},
)
return __management_network
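# --- Illustrative usage sketch; the certificate directory and host address are assumptions.
# Assuming docker-machine style certificates (ca.pem, cert.pem, key.pem) in ~/.docker and
# a manager node exposing the TLS-secured Docker API on port 2376:
#   client = get_docker_client(os.path.expanduser("~/.docker"), "192.168.99.100", 2376)
#   network = get_management_network()   # requires get_docker_client() to have been called first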
``` |
{
"source": "jluech/PGAcloud_Crossover",
"score": 3
} |
#### File: PGAcloud_Crossover/crossover/crossers.py
```python
import logging
import random
from abc import ABC, abstractmethod
from enum import Enum
from population.individual import Individual
class Crossers(Enum):
OnePoint = "one_point",
MultiPoint = "multi_point",
Uniform = "uniform",
class AbstractCrossover(ABC):
    @abstractmethod
def perform_crossover(self, individual1, individual2, crossover_rate):
# Perform the crossover on two Individual's.
# Returns a Pair of two Individual's.
pass
class OnePointCrossover(AbstractCrossover):
def perform_crossover(self, individual1, individual2, crossover_rate):
crossover_chance = random.randint(0, 100) / 100
# logging.info("Crossover chance = " + str(crossover_chance))
crossover_occurred = (crossover_rate >= crossover_chance)
if crossover_occurred:
solution_length = individual1.solution.__len__()
if solution_length != individual2.solution.__len__():
raise Exception("Crossover aborted: Individuals' solution strings are of different length!")
logging.info("Crossover occurred between individuals {ind1_} and {ind2_}.".format(
ind1_=individual1,
ind2_=individual2,
))
crossover_point = random.randint(1, solution_length-1)
crossed1 = Individual("{ind1_}{ind2_}".format(
ind1_=individual1.solution[:crossover_point], # first part of 1
ind2_=individual2.solution[crossover_point:], # second part of 2
))
crossed2 = Individual("{ind2_}{ind1_}".format(
ind2_=individual2.solution[:crossover_point], # first part of 2
ind1_=individual1.solution[crossover_point:], # second part of 1
))
return [crossed1, crossed2]
else:
logging.info("Crossover did not occur between individuals {ind1_} and {ind2_}.".format(
ind1_=individual1,
ind2_=individual2,
))
return [individual1, individual2]
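    # --- Illustrative worked example; the solution strings below are invented for illustration.
    # With solutions "110010" and "001101" and a drawn crossover_point of 2:
    #   crossed1 = "11" + "1101" -> "111101"
    #   crossed2 = "00" + "0010" -> "000010"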
class MultiPointCrossover(AbstractCrossover):
def perform_crossover(self, individual1, individual2, crossover_rate):
pass
class UniformCrossover(AbstractCrossover):
def perform_crossover(self, individual1, individual2, crossover_rate):
pass
```
#### File: PGAcloud_Crossover/crossover/crossover.py
```python
import logging
from crossover.crossers import Crossers, OnePointCrossover, MultiPointCrossover, UniformCrossover
from utilities.utils import forward_crosser, get_property
def apply_crossover(individual1, individual2):
# Applies the chosen crossover operator on the two individuals and returns a pair {p1: x, p2: y}
logging.info("Performing crossover on individuals {ind1_} and {ind2_}".format(
ind1_=individual1,
ind2_=individual2,
))
crosser = get_crosser()
crossover_rate = get_crossover_rate()
return crosser.perform_crossover(individual1, individual2, crossover_rate)
def get_crosser():
crosser = forward_crosser()
if crosser == Crossers.OnePoint:
return OnePointCrossover()
elif crosser == Crossers.MultiPoint:
__crosser = MultiPointCrossover()
raise Exception("MultiPointCrossover not implemented yet!")
elif crosser == Crossers.Uniform:
__crosser = UniformCrossover()
raise Exception("UniformCrossover not implemented yet!")
else:
raise Exception("No valid Crosser defined!")
def get_crossover_rate():
rate = float(get_property("CROSSOVER_RATE"))
logging.info("CROSSOVER_RATE={_rate} retrieved.".format(_rate=rate))
return rate
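# --- Illustrative usage sketch; the solution strings are placeholders. apply_crossover()
# reads the configured crosser and CROSSOVER_RATE from the shared properties via
# utilities.utils, so both must be set beforehand (Individual comes from population.individual):
#   child1, child2 = apply_crossover(Individual("110010"), Individual("001101"))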
``` |
{
"source": "jluech/PGAcloud_Fitness_Agent",
"score": 2
} |
#### File: PGAcloud_Fitness_Agent/agent/operator.py
```python
import json
import logging
import os
from population.individual import Individual, IndividualEncoder
from utilities import utils
def call_operator(individual):
# Write individual to file in input_path.
# https://docs.python-guide.org/scenarios/serialization/#json-file-nested-data
input_path = utils.get_custom_setting("input_path")
with open(input_path, "x") as f:
json.dump(individual, f, sort_keys=True, cls=IndividualEncoder)
# Call operator with provided command.
command_str = utils.get_custom_setting("command")
logging.info("Retrieved command string: {cmdstr_}".format(cmdstr_=command_str))
fixed_command = ""
marker_open = 0
marker_close = -1
idx = 0
for char in command_str:
if char == "{":
marker_open = idx
elif char == "}":
prev_marker = marker_close+1 # initially 0
fixed_command += command_str[prev_marker:marker_open]
marker_close = idx
key = command_str[marker_open+1:marker_close]
value = utils.get_property(key)
if type(value) is list:
param_string = ",".join(value)
else:
param_string = str(value)
fixed_command += param_string
        idx += 1
    # append any literal text remaining after the last closing brace
    fixed_command += command_str[marker_close + 1:]
logging.info("Calling operator with: {cmd_}".format(cmd_=fixed_command))
logs, error = utils.execute_command(
command=fixed_command,
working_directory=None,
environment_variables=None,
executor="AGENT",
livestream=True,
)
logging.info(logs)
# Retrieve individual from file in output_path.
# https://docs.python-guide.org/scenarios/serialization/#json-file-nested-data
output_path = utils.get_custom_setting("output_path")
with open(output_path, "r") as f:
ind_dict = json.load(f)
resulting_individual = Individual(ind_dict["solution"], ind_dict["fitness"])
logging.info("Reading from output file: {}".format(resulting_individual))
# Delete the files that were created to ensure fresh start.
os.remove(input_path)
os.remove(output_path)
return resulting_individual
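# --- Illustrative example of the placeholder substitution above: every {KEY} in the
# configured command is replaced by utils.get_property(KEY), with list-valued properties
# joined by commas. The command, property names and values below are invented for
# illustration only:
#   command:    "python /app/custom_fitness.py --weights {FITNESS_WEIGHTS} --rounds {ROUNDS}"
#   properties: FITNESS_WEIGHTS = ["0.2", "0.5", "0.3"], ROUNDS = 10
#   result:     "python /app/custom_fitness.py --weights 0.2,0.5,0.3 --rounds 10"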
``` |
{
"source": "jluech/PGAcloud_Initializer",
"score": 2
} |
#### File: PGAcloud_Initializer/message_handler/rabbit_message_queue.py
```python
import json
import logging
import pika
from initializer.initialization import apply_initialization
from message_handler.message_handler import MessageHandler
from population.individual import IndividualEncoder
from utilities import utils
def receive_initialization_callback(channel, method, properties, body):
queue_name = utils.get_messaging_source()
payload_dict = json.loads(body)
amount = int(payload_dict.get("amount"))
individual_id = int(payload_dict.get("id"))
logging.info("rMQ:{queue_}: Received initialization request for {amount_} individual(s).".format(
queue_=queue_name,
amount_=amount,
))
generated_individuals = apply_initialization(amount, individual_id)
for individual in generated_individuals:
send_message_to_queue(
channel=channel,
payload=individual
)
def send_message_to_queue(channel, payload):
# Route the message to the next queue in the model.
next_recipient = utils.get_messaging_target()
channel.queue_declare(queue=next_recipient, auto_delete=True, durable=True)
# Send message to given recipient.
logging.info("rMQ: Sending {ind_} to {dest_}.".format(
ind_=payload,
dest_=next_recipient,
))
channel.basic_publish(
exchange="",
routing_key=next_recipient,
body=json.dumps(payload, cls=IndividualEncoder),
# Delivery mode 2 makes the broker save the message to disk.
# This will ensure that the message be restored on reboot even
# if RabbitMQ crashes before having forwarded the message.
properties=pika.BasicProperties(
delivery_mode=2,
),
)
class RabbitMessageQueue(MessageHandler):
def __init__(self, pga_id):
# Establish connection to rabbitMQ.
self.connection = pika.BlockingConnection(pika.ConnectionParameters(
host="rabbitMQ--{id_}".format(id_=pga_id),
socket_timeout=30,
))
def receive_messages(self):
# Define communication channel.
channel = self.connection.channel()
# Create queue for initialization.
queue_name = utils.get_messaging_source()
channel.queue_declare(queue=queue_name, auto_delete=True, durable=True)
# Actively listen for messages in queue and perform callback on receive.
channel.basic_consume(
queue=queue_name,
on_message_callback=receive_initialization_callback,
)
logging.info("rMQ:{queue_}: Waiting for initialization requests.".format(
queue_=queue_name
))
channel.start_consuming()
def send_message(self, individuals):
# Define communication channel.
channel = self.connection.channel()
send_message_to_queue(
channel=channel,
payload=individuals
)
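# --- Illustrative note: an initialization request consumed by
# receive_initialization_callback() is a JSON body of the form
# {"amount": <int>, "id": <int>}, e.g. {"amount": 10, "id": 0}; every generated
# Individual is then serialized with IndividualEncoder and published to the queue
# returned by utils.get_messaging_target(). The example values are assumptions.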
```
#### File: PGAcloud_Initializer/population/pair.py
```python
class Pair(object):
def __init__(self, individual1, individual2):
self.ind1 = individual1
self.ind2 = individual2
``` |
{
"source": "jluech/PGAcloud_Manager",
"score": 2
} |
#### File: PGAcloud_Manager/manager/__main__.py
```python
import logging
import os
from flask import Flask, jsonify, request
from werkzeug.utils import secure_filename
from orchestrator.docker_orchestrator import DockerOrchestrator
from utilities import utils
logging.basicConfig(level=logging.INFO)
# App initialization.
mgr = Flask(__name__)
# Create a directory in a known location to save files to.
utils.__set_files_dir(mgr.instance_path)
@mgr.route("/status", methods=["GET"])
def status():
return "OK"
@mgr.route("/files/<int:pga_id>", methods=["GET"])
def get_files(pga_id):
"""
Get all uploaded YAML files as a dictionary.
:return: dict of uploaded YAML files as JSON
"""
files_dict = utils.get_uploaded_files_dict(pga_id)
return jsonify(files_dict)
@mgr.route("/pga", methods=["POST"])
def create_pga():
"""
Creates a new Parallel Genetic Algorithm in the cloud.
:arg master_host: the ip address or hostname of the master node.
:type master_host: str
:arg orchestrator: the chosen cloud orchestrator.
:type orchestrator: str
:return (dict): id [int] and model [str] of new pga
"""
# Recognizes the correct orchestrator.
master_host = request.args.get("master_host")
orchestrator_name = request.args.get("orchestrator")
if not orchestrator_name:
raise Exception("No cloud orchestrator provided! Aborting deployment.")
orchestrator = get_orchestrator(orchestrator_name, master_host)
pga_id = orchestrator.pga_id
logging.info("Creating new PGA: {}.".format(pga_id))
# Saves all the files that were uploaded with the request.
file_keys = [*request.files]
utils.create_pga_subdir(pga_id)
files_dir = utils.get_uploaded_files_path(pga_id)
file_names = []
if "config" not in file_keys:
raise Exception("No PGA configuration provided! Aborting deployment.")
for file_key in file_keys:
file = request.files[file_key]
if file_key == "config":
file_name = secure_filename("config.yml")
elif file_key == "population":
file_name = secure_filename("population.yml")
else:
file_name = secure_filename(file.filename)
file_names.append(file_name)
file.save(os.path.join(files_dir, file_name))
# Retrieves the configuration and appends the current PGAs id.
config_path = os.path.join(files_dir, "config.yml")
config_file = open(config_path, mode="a")
config_file.write("\npga_id: {id_}\n".format(id_=pga_id))
config_file.close()
configuration = utils.parse_yaml(config_path)
# Determines the model to deploy.
model = configuration.get("model")
if not model:
raise Exception("No PGA model provided! Aborting deployment.")
if model == "Master-Slave":
# Retrieves the configuration details.
services = {}
services_config = configuration.get("services")
for service_key in [*services_config]:
service = services_config.get(service_key)
services[service.get("name")] = service
setups = {}
images_config = configuration.get("setups")
for service_key in [*images_config]:
service = images_config.get(service_key)
setups[service.get("name")] = service
operators = {}
operators_config = configuration.get("operators")
for service_key in [*operators_config]:
service = operators_config.get(service_key)
operators[service.get("name")] = service
population = {}
population_config = configuration.get("population")
for population_key in [*population_config]:
population[population_key] = population_config.get(population_key)
properties = {}
properties_config = configuration.get("properties")
for property_key in [*properties_config]:
properties[property_key] = properties_config.get(property_key)
# Creates the new PGA.
all_services = utils.merge_dict(services, utils.merge_dict(setups, utils.merge_dict(
operators, utils.merge_dict(population, properties))))
model_dict = construct_model_dict(model, all_services)
orchestrator.setup_pga(model_dict=model_dict, services=services, setups=setups, operators=operators,
population=population, properties=properties, file_names=file_names)
logging.info("Distribute properties:")
orchestrator.distribute_properties(properties=properties)
logging.info("Initialize properties:")
orchestrator.initialize_population(population=population)
elif model == "Island":
raise Exception("Island model not implemented yet. Aborting deployment.") # TODO 204: implement island model
else:
raise Exception("Custom model detected.") # TODO 205: implement for custom models
return jsonify({
"id": orchestrator.pga_id,
"model": model,
"status": "created"
})
@mgr.route("/pga/<int:pga_id>/start", methods=["PUT"])
def start_pga(pga_id):
"""
Starts the PGA identified by the pga_id route param.
:param pga_id: the PGA id of the PGA to be started.
:type pga_id: int
:arg orchestrator: the chosen cloud orchestrator.
:type orchestrator: str
"""
# Recognizes the correct orchestrator.
master_host = request.args.get("master_host")
orchestrator_name = request.args.get("orchestrator")
if not orchestrator_name:
raise Exception("No cloud orchestrator provided! Aborting deployment.")
orchestrator = get_orchestrator(orchestrator_name, master_host, pga_id)
# Starts the chosen PGA.
logging.info("Starting PGA {}.".format(orchestrator.pga_id))
response = orchestrator.start_pga() # Makes a blocking call to Runner.
result_json = response.json()
fittest_dict = result_json["fittest"]
return jsonify({
"id": orchestrator.pga_id,
"status": "finished",
"fittest": fittest_dict,
})
@mgr.route("/pga/<int:pga_id>/stop", methods=["PUT"])
def stop_pga(pga_id):
# Recognizes the correct orchestrator.
master_host = request.args.get("master_host")
orchestrator_name = request.args.get("orchestrator")
if not orchestrator_name:
raise Exception("No cloud orchestrator provided! Aborting deployment.")
orchestrator = get_orchestrator(orchestrator_name, master_host, pga_id)
# Stops the chosen PGA.
logging.info("Terminating PGA {}.".format(orchestrator.pga_id))
exit_code = orchestrator.stop_pga()
if exit_code != 202:
logging.error("Terminating PGA {id_} finished with unexpected exit code: {code_}".format(
id_=orchestrator.pga_id,
code_=exit_code,
))
status_code = "error_{}".format(exit_code)
else:
status_code = "removed"
# Removes the PGA components.
logging.info("Removing components of PGA {}.".format(orchestrator.pga_id))
orchestrator.remove_pga()
return jsonify({
"id": orchestrator.pga_id,
"status": status_code
})
def get_orchestrator(orchestrator_name, master_host, pga_id=None):
if orchestrator_name == "docker":
return DockerOrchestrator(master_host, pga_id)
elif orchestrator_name == "kubernetes":
logging.error("Kubernetes orchestrator not yet implemented! Falling back to docker orchestrator.")
return DockerOrchestrator(master_host, pga_id) # TODO 202: implement kubernetes orchestrator
else:
raise Exception("Unknown orchestrator requested!")
def construct_model_dict(model, all_services):
if model == "Master-Slave":
# init = RUN/(INIT/)FE/RUN
# model = RUN/SEL/CO/MUT/FE/RUN
model_dict = {
"runner": {
"source": "generation",
"init_gen": "initializer",
"init_eval": "fitness",
"pga": "selection"
},
"initializer": {
"source": "initializer",
"target": "fitness"
},
"selection": {
"source": "selection",
"target": "crossover"
},
"crossover": {
"source": "crossover",
"target": "mutation"
},
"mutation": {
"source": "mutation",
"target": "fitness"
},
"fitness": {
"source": "fitness",
"target": "generation"
}
}
elif model == "Island":
model_dict = {}
raise Exception("Island model not implemented yet!")
else:
model_dict = {}
raise Exception("Custom models not implemented yet!")
return model_dict
if __name__ == "__main__":
mgr.run(host="0.0.0.0")
```
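For reference, here is a minimal client sketch (not part of the repository) that exercises the start and stop routes above. It assumes the manager runs on Flask's default port 5000 and that a PGA with id 1 has already been created; the host address and query parameters are placeholders.
```python
import requests

BASE_URL = "http://localhost:5000"  # assumption: Flask default port, manager on localhost
PARAMS = {"orchestrator": "docker", "master_host": "192.168.99.100"}  # placeholder swarm manager IP

# Start PGA 1; this call blocks until the Runner reports the fittest individual.
start = requests.put(f"{BASE_URL}/pga/1/start", params=PARAMS)
print(start.json())  # e.g. {"id": 1, "status": "finished", "fittest": {...}}

# Stop PGA 1 and remove its services, configs and network.
stop = requests.put(f"{BASE_URL}/pga/1/stop", params=PARAMS)
print(stop.json())  # e.g. {"id": 1, "status": "removed"}
```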
#### File: PGAcloud_Manager/orchestrator/docker_orchestrator.py
```python
import json
import logging
import os
import time
import traceback
import warnings
import docker
from orchestrator.orchestrator import Orchestrator
from utilities import utils
WAIT_FOR_CONFIRMATION_DURATION = 45.0
WAIT_FOR_CONFIRMATION_EXCEEDING = 15.0
WAIT_FOR_CONFIRMATION_TROUBLED = 30.0
WAIT_FOR_CONFIRMATION_SLEEP = 2 # seconds
class DockerOrchestrator(Orchestrator):
def __init__(self, master_host, pga_id):
super().__init__(pga_id)
self.host = master_host
self.docker_master_client = self.__create_docker_client(
host_ip=master_host,
host_port=2376
# default docker port; Note above https://docs.docker.com/engine/security/https/#secure-by-default
)
# Common orchestrator functionality.
def setup_pga(self, model_dict, services, setups, operators, population, properties, file_names):
self.__create_network()
configs = self.__create_configs(file_names)
deploy_init = (not population.get("use_initial_population") or properties.get("USE_INIT"))
self.__deploy_stack(services=services, setups=setups, operators=operators,
configs=configs, model_dict=model_dict, deploy_initializer=deploy_init)
def scale_component(self, service_name, scaling):
if service_name.__contains__(Orchestrator.name_separator):
effective_name = service_name.split(Orchestrator.name_separator)[0]
else:
effective_name = service_name
if effective_name in ("runner", "manager"):
warnings.warn("Scaling aborted: Scaling of runner or manager services not permitted!")
else:
found_services = self.docker_master_client.services.list(filters={"name": service_name})
if not found_services.__len__() > 0:
raise Exception("No service {name_} found for scaling!".format(name_=service_name))
service = found_services[0]
service.scale(replicas=scaling)
def remove_pga(self):
# Removes the docker services of this PGA.
pga_filter = {"label": "PGAcloud=PGA-{id_}".format(id_=self.pga_id)}
current_services = self.docker_master_client.services.list(filters=pga_filter)
if current_services.__len__() > 0:
for service in current_services:
service.remove()
duration = 0.0
start = time.perf_counter()
while current_services.__len__() > 0 and duration < WAIT_FOR_CONFIRMATION_DURATION:
current_services = self.docker_master_client.services.list(filters=pga_filter)
time.sleep(WAIT_FOR_CONFIRMATION_SLEEP) # avoid network overhead
duration = time.perf_counter() - start
if duration >= WAIT_FOR_CONFIRMATION_DURATION:
logging.info("Exceeded waiting time of {time_} seconds. It may have encountered an error. "
"Please verify or try again shortly.".format(time_=WAIT_FOR_CONFIRMATION_DURATION))
else:
logging.info("Successfully removed docker services for PGA {}.".format(self.pga_id))
else:
logging.info("No docker services of PGA {} running that could be removed.".format(self.pga_id))
# Removes the docker configs used for file sharing.
current_configs = self.docker_master_client.configs.list(filters=pga_filter)
if current_configs.__len__() > 0:
for conf in current_configs:
conf.remove()
timer = 0
start = time.perf_counter()
while current_configs.__len__() > 0 and timer < WAIT_FOR_CONFIRMATION_DURATION:
current_configs = self.docker_master_client.configs.list(filters=pga_filter)
time.sleep(WAIT_FOR_CONFIRMATION_SLEEP) # avoid network overhead
timer = time.perf_counter() - start
if timer >= WAIT_FOR_CONFIRMATION_DURATION:
logging.info("We seem to have encountered an error when removing the docker configs. "
"Please verify or try again shortly.")
else:
logging.info("Successfully removed docker configs for PGA {}.".format(self.pga_id))
else:
logging.info("No matching docker configs found that could be removed.")
# Removes the docker network.
pga_networks = self.docker_master_client.networks.list(filters=pga_filter)
if pga_networks.__len__() > 0:
for network in pga_networks:
network.remove()
timer = 0
start = time.perf_counter()
while pga_networks.__len__() > 0 and timer < WAIT_FOR_CONFIRMATION_DURATION:
pga_networks = self.docker_master_client.networks.list(filters=pga_filter)
time.sleep(WAIT_FOR_CONFIRMATION_SLEEP) # avoid network overhead
timer = time.perf_counter() - start
if timer >= WAIT_FOR_CONFIRMATION_DURATION:
logging.info("We seem to have encountered an error when removing the docker network. "
"Please verify or try again shortly.")
else:
logging.info("Successfully removed docker network for PGA {}.".format(self.pga_id))
else:
logging.info("No PGA docker network found that could be removed.")
# Commands to control the orchestrator.
def __deploy_stack(self, services, setups, operators, configs, model_dict, deploy_initializer):
# Creates a service for each component defined in the configuration.
# Deploy the support services (e.g., MSG and DB).
supports = {}
for support_key in [*services]:
support = services.get(support_key)
new_service = self.__create_docker_service(service_dict=support, network=self.pga_network)
self.__update_service_with_configs(configs=configs, service_name=new_service.name)
supports[support.get("name")] = new_service
# Ensure services are starting up in the background while waiting for them.
for support_key in [*supports]:
support = supports.get(support_key) # actual docker service
self.__wait_for_service(service_name=support.name)
# Deploy the setup services (e.g., RUN or INIT).
setup_services = {}
for setup_key in [*setups]:
setup = setups.get(setup_key)
setup_name = setup.get("name")
if setup_name == "runner":
# Creates the runner service with bridge network.
new_service = self.docker_master_client.services.create(
image=setup.get("image"),
name="runner{sep_}{id_}".format(
sep_=Orchestrator.name_separator,
id_=self.pga_id
),
hostname=setup.get("name"),
networks=[self.pga_network.name, "pga-management"],
labels={"PGAcloud": "PGA-{id_}".format(id_=self.pga_id)},
endpoint_spec={
"Mode": "dnsrr"
},
)
elif setup_name == "initializer":
if deploy_initializer:
new_service = self.__create_docker_service(service_dict=setup, network=self.pga_network)
else:
continue # no need to deploy initializer if initial population is provided.
else:
new_service = self.__create_docker_service(service_dict=setup, network=self.pga_network)
self.scale_component(service_name=new_service.name, scaling=setup.get("scaling"))
container_config_name = self.__create_container_config(new_service.name, setup_key, model_dict)
self.__update_service_with_configs(configs=configs, service_name=new_service.name,
container_config=container_config_name)
setup_services[setup_name] = new_service
# Deploy the genetic operator services.
for operator_key in [*operators]:
operator = operators.get(operator_key)
new_service = self.__create_docker_service(service_dict=operator, network=self.pga_network)
self.scale_component(service_name=new_service.name, scaling=operator.get("scaling"))
container_config_name = self.__create_container_config(new_service.name, operator_key, model_dict)
self.__update_service_with_configs(configs=configs, service_name=new_service.name,
container_config=container_config_name)
# Wait for setups before initiating properties or population.
if deploy_initializer:
initializer = setup_services.get("initializer")
self.__wait_for_service(service_name=initializer.name)
for setup_key in [*setup_services]:
if setup_key == "initializer":
continue # no need to wait for initializer if not deployed, or already waited for
setup = setup_services.get(setup_key) # actual docker service
self.__wait_for_service(service_name=setup.name)
# Commands for docker stuff.
def __create_docker_client(self, host_ip, host_port):
tls_config = docker.tls.TLSConfig(
ca_cert="/run/secrets/SSL_CA_PEM",
client_cert=(
"/run/secrets/SSL_CERT_PEM",
"/run/secrets/SSL_KEY_PEM"
),
verify=True
)
docker_client = docker.DockerClient(
base_url="tcp://{host_}:{port_}".format(
host_=host_ip,
port_=host_port
),
tls=tls_config,
)
return docker_client
def __create_network(self):
# Creates a new docker network.
self.pga_network = self.docker_master_client.networks.create(
name="pga-overlay-{id_}".format(id_=self.pga_id),
driver="overlay",
check_duplicate=True,
attachable=True,
scope="swarm",
labels={"PGAcloud": "PGA-{id_}".format(id_=self.pga_id)},
)
def __create_configs(self, file_names):
# Creates docker configs for file sharing.
configs = []
stored_files_path = utils.get_uploaded_files_path(self.pga_id)
for file_name in file_names:
try:
file_path = os.path.join(stored_files_path, file_name)
file = open(file_path, mode="rb")
file_content = file.read()
file.close()
config_name = "{id_}{sep_}{name_}".format(
id_=self.pga_id,
sep_=Orchestrator.name_separator,
name_=file_name
)
self.docker_master_client.configs.create(
name=config_name,
data=file_content,
labels={"PGAcloud": "PGA-{id_}".format(id_=self.pga_id)}
)
configs.append(config_name)
except Exception as e:
traceback.print_exc()
logging.error(traceback.format_exc())
return configs
def __create_container_config(self, service_name, service_key, model_dict):
effective_name = service_name.split(Orchestrator.name_separator)[0]
config_name = "{id_}{sep_}{name_}-config.yml".format(
id_=self.pga_id,
sep_=Orchestrator.name_separator,
name_=effective_name
)
config_content = model_dict[service_key]
config_content["pga_id"] = self.pga_id
self.docker_master_client.configs.create(
name=config_name,
data=json.dumps(config_content),
labels={"PGAcloud": "PGA-{id_}".format(id_=self.pga_id)}
)
return config_name
def __create_docker_service(self, service_dict, network):
return self.docker_master_client.services.create(
image=service_dict.get("image"),
name="{name_}{sep_}{id_}".format(
name_=service_dict.get("name"),
sep_=Orchestrator.name_separator,
id_=self.pga_id
),
hostname=service_dict.get("name"),
networks=[network.name],
labels={"PGAcloud": "PGA-{id_}".format(id_=self.pga_id)},
endpoint_spec={
"Mode": "dnsrr"
},
)
def __update_service_with_configs(self, configs, service_name, container_config=None):
# Updates the given service with the new configs.
logging.info("Updating {name_} with docker configs.".format(name_=service_name))
config_param = self.__prepare_array_as_script_param(configs, container_config)
script_path = os.path.join(os.getcwd(), "utilities/docker_service_update_configs.sh")
script_args = "--service {service_} --host {host_} --configs {confs_}"
utils.execute_command(
command=script_path + " " + script_args.format(
service_=service_name,
host_=self.host,
confs_=config_param,
),
working_directory=os.curdir,
environment_variables=None,
executor="StackDeploy",
)
def __wait_for_service(self, service_name):
# Waits until the given service has at least one instance which is running and ready.
logging.info("Waiting for {name_} service.".format(name_=service_name))
script_path = os.path.join(os.getcwd(), "utilities/docker_service_wait_until_running.sh")
script_args = "--service {service_} --host {host_}"
utils.execute_command(
command=script_path + " " + script_args.format(
service_=service_name,
host_=self.host,
),
working_directory=os.curdir,
environment_variables=None,
executor="StackDeploy",
)
# Auxiliary commands.
def __prepare_array_as_script_param(self, general_configs, container_config):
param = ""
for conf in general_configs:
param += "{} ".format(conf)
if container_config is None:
param += "--"
else:
param += container_config
param += " --"
return param
```
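As a side note, the cleanup in remove_pga() relies entirely on the PGAcloud=PGA-&lt;id&gt; label. The following standalone sketch (assuming a reachable Docker daemon and the docker SDK; the TLS client setup from above is omitted and the PGA id is hypothetical) shows how that single label lets services, configs and networks be enumerated uniformly.
```python
import docker

client = docker.from_env()                 # assumption: local daemon socket instead of the TLS TCP client above
pga_filter = {"label": "PGAcloud=PGA-42"}  # hypothetical PGA id 42

# Every resource created by the orchestrator carries the same label,
# so all three resource types can be listed with the same filter.
for service in client.services.list(filters=pga_filter):
    print("service:", service.name)
for config in client.configs.list(filters=pga_filter):
    print("config:", config.name)
for network in client.networks.list(filters=pga_filter):
    print("network:", network.name)
```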
#### File: PGAcloud_Manager/utilities/utils.py
```python
import logging
import os
import subprocess
import sys
import yaml
files_dir = ""
# --- General util commands ---
def execute_command(
command,
working_directory,
environment_variables,
executor,
logger=logging,
livestream=False
):
logger_prefix = ""
if executor:
logger_prefix = executor + ": "
process = subprocess.Popen(
command,
cwd=working_directory,
env=environment_variables,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
)
logger.debug(logger_prefix + "command: " + command)
stdout = ""
for line in iter(process.stdout.readline, b''):
line = str(line, "utf-8")
stdout += line
if livestream:
sys.stdout.write(line)
else:
logger.debug(logger_prefix + "command output: " + line.rstrip())
return_code = process.wait()
stdout = stdout.rstrip()
return stdout, return_code
def merge_dict(dict1, dict2):
res = {**dict1, **dict2}
return res
def parse_yaml(yaml_file_path):
with open(yaml_file_path, mode="r", encoding="utf-8") as yaml_file:
content = yaml.safe_load(yaml_file) or {}
return content
# --- File and path handling commands ---
def get_uploaded_files_path(pga_id):
return os.path.join(files_dir, str(pga_id))
def get_uploaded_files_dict(pga_id):
files_dict = {}
directory = get_uploaded_files_path(pga_id)
files = os.listdir(directory)
for filename in files:
name = filename.split(".")[0]
yaml_dict = parse_yaml(os.path.join(directory, filename))
yaml_dict["_filename"] = filename
files_dict[name] = yaml_dict
return files_dict
def get_filename_from_path(file_path):
if file_path.__contains__("\\"):
filename = file_path.split("\\")[-1].split(".")[0]
else:
filename = file_path.split("/")[-1].split(".")[0]
return filename
def create_pga_subdir(pga_id):
os.makedirs(os.path.join(files_dir, str(pga_id)))
def __set_files_dir(path):
global files_dir
files_dir = os.path.join(path, 'files')
os.makedirs(files_dir, exist_ok=True)
``` |
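A short usage sketch for execute_command(), assuming the module above is importable as utilities.utils; the command itself is just a placeholder.
```python
from utilities.utils import execute_command

stdout, return_code = execute_command(
    command="echo hello",        # placeholder command
    working_directory=".",
    environment_variables=None,
    executor="Example",
)
print(return_code, stdout)       # expected: 0 hello (on a POSIX shell)
```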
{
"source": "jluech/PGAcloud_Mutation",
"score": 2
} |
#### File: PGAcloud_Mutation/mutation/__main__.py
```python
import logging
import time
from database_handler.handlers import DatabaseHandlers
from database_handler.redis_handler import RedisHandler
from message_handler.handlers import MessageHandlers
from message_handler.rabbit_message_queue import RabbitMessageQueue
from mutation.mutators import Mutators
from utilities import utils
logging.basicConfig(level=logging.INFO)
DATABASE_HANDLER = DatabaseHandlers.Redis
MESSAGE_HANDLER = MessageHandlers.RabbitMQ
MUTATOR = Mutators.BitFlip
RELEVANT_PROPERTIES = ["MUTATION_RATE"]
def listen_for_mutation():
pga_id = utils.get_pga_id()
database_handler = get_database_handler(pga_id)
for prop in RELEVANT_PROPERTIES:
value = database_handler.retrieve(prop)
timer = 0
start = time.perf_counter()
while value is None and timer < 45:
time.sleep(1)
value = database_handler.retrieve(prop)
timer = time.perf_counter() - start
        if value is None:
raise Exception("Could not load property: {key_}".format(key_=prop))
utils.set_property(
property_key=prop,
property_value=value.decode("utf-8")
)
message_handler = get_message_handler(pga_id)
message_handler.receive_messages()
def get_database_handler(pga_id):
if DATABASE_HANDLER == DatabaseHandlers.Redis:
return RedisHandler(pga_id)
else:
raise Exception("No valid DatabaseHandler defined!")
def get_message_handler(pga_id):
if MESSAGE_HANDLER == MessageHandlers.RabbitMQ:
return RabbitMessageQueue(pga_id)
else:
raise Exception("No valid MessageHandler defined!")
if __name__ == "__main__":
utils.__set_mutator(MUTATOR)
listen_for_mutation()
``` |
{
"source": "jluech/PGAcloud_Runner",
"score": 3
} |
#### File: PGAcloud_Runner/database_handler/redis_handler.py
```python
import json
import logging
import redis
from database_handler.database_handler import DatabaseHandler
from population.individual import IndividualEncoder
class RedisHandler(DatabaseHandler):
def __init__(self, pga_id):
self.redis = redis.Redis(host="redis--{id_}".format(id_=pga_id))
def store_properties(self, properties_dict):
prop_keys = [*properties_dict]
for prop_key in prop_keys:
value = properties_dict[prop_key]
if not type(value) in [str, int, list]:
value = str(value)
logging.info("redis: Storing property '{prop_}'={val_}".format(
prop_=prop_key,
val_=value,
))
if type(value) is list:
for val in value:
self.redis.lpush(prop_key, val)
else:
self.redis.set(prop_key, value)
def store_population(self, population):
logging.info("redis: Storing population.")
self.redis.delete("population")
for individual in population:
serialized_individual = json.dumps(individual, cls=IndividualEncoder)
self.redis.lpush("population", serialized_individual)
def retrieve_item(self, property_name):
return self.redis.get(property_name)
def retrieve_list(self, property_name):
return self.redis.lrange(property_name, 0, -1)
```
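To illustrate the key layout used by this handler, here is a standalone sketch. It assumes a Redis server on localhost (the real handler connects to the swarm service redis--&lt;pga_id&gt;), and the JSON payloads are made-up placeholders for what IndividualEncoder would produce.
```python
import redis

r = redis.Redis(host="localhost")  # assumption: local Redis instead of "redis--<pga_id>"

# Scalar properties are stored with SET; list values and the population use LPUSH.
r.set("MUTATION_RATE", "0.05")
for individual_json in ('{"solution": "1010", "fitness": 2.0}',
                        '{"solution": "1111", "fitness": 4.0}'):
    r.lpush("population", individual_json)

print(r.get("MUTATION_RATE"))         # b'0.05' -- values come back as bytes
print(r.lrange("population", 0, -1))  # newest first, mirroring retrieve_list()
```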
#### File: PGAcloud_Runner/utilities/utils.py
```python
import logging
import operator
import os
from re import match
import yaml
PGA_NAME_SEPARATOR = "--"
__CONTAINER_CONF = None
__PROPERTIES = {}
__EVALUATED_INDIVIDUALS = []
# YAML command
def parse_yaml(yaml_file_path):
with open(yaml_file_path, mode="r", encoding="utf-8") as yaml_file:
content = yaml.safe_load(yaml_file) or {}
return content
# Commands for population and individuals
def collect_and_reset_received_individuals():
global __EVALUATED_INDIVIDUALS
received = sort_population_by_fitness(__EVALUATED_INDIVIDUALS)
__EVALUATED_INDIVIDUALS = []
return received
def save_received_individual(individual):
global __EVALUATED_INDIVIDUALS
__EVALUATED_INDIVIDUALS.append(individual)
current_length = __EVALUATED_INDIVIDUALS.__len__()
return current_length >= int(get_property("POPULATION_SIZE")), current_length
def sort_population_by_fitness(population):
# Sorts and returns population by fitness, in descending order (fittest first).
return sorted(population, key=operator.attrgetter("fitness"), reverse=True)
# Commands for properties
def get_messaging_source():
if not __CONTAINER_CONF:
__retrieve_container_config()
return __CONTAINER_CONF["source"]
def get_messaging_init_gen():
if not __CONTAINER_CONF:
__retrieve_container_config()
return __CONTAINER_CONF["init_gen"]
def get_messaging_init_eval():
if not __CONTAINER_CONF:
__retrieve_container_config()
return __CONTAINER_CONF["init_eval"]
def get_messaging_pga():
if not __CONTAINER_CONF:
__retrieve_container_config()
return __CONTAINER_CONF["pga"]
def get_pga_id():
if not __CONTAINER_CONF:
__retrieve_container_config()
return __CONTAINER_CONF["pga_id"]
def __retrieve_container_config():
# Retrieve locally saved config file.
files = [f for f in os.listdir("/") if match(r'[0-9]+--runner-config\.yml', f)]
# https://stackoverflow.com/questions/2225564/get-a-filtered-list-of-files-in-a-directory/2225927#2225927
# https://regex101.com/
if not files.__len__() > 0:
raise Exception("Error retrieving the container config: No matching config file found!")
config = parse_yaml("/{}".format(files[0]))
global __CONTAINER_CONF
__CONTAINER_CONF = {
"pga_id": config.get("pga_id"),
"source": config.get("source"),
"init_gen": config.get("init_gen"),
"init_eval": config.get("init_eval"),
"pga": config.get("pga")
}
logging.info("Container config retrieved: {conf_}".format(conf_=__CONTAINER_CONF))
def get_property(property_key):
return __PROPERTIES[property_key]
def set_property(property_key, property_value):
__PROPERTIES[property_key] = property_value
``` |
{
"source": "jluech/PGAcloud_Selection",
"score": 3
} |
#### File: PGAcloud_Selection/selection/selectors.py
```python
import logging
import math
import random
from abc import ABC, abstractmethod
from enum import Enum
class Selectors(Enum):
RouletteWheel = "roulette",
Tournament = "tournament",
Rank = "rank",
class AbstractSelection(ABC):
    @abstractmethod
    def perform_selection(self, population):
        # Performs selection on the population (a list of Individual objects)
        # and returns a list of parent pairs.
        pass
class RouletteWheelSelection(AbstractSelection):
def perform_selection(self, population):
size = population.__len__()
iterations = math.ceil(size / 2)
parents = []
fitness_sum = 0
for individual in population:
fitness_sum += individual.fitness
for i in range(iterations):
parent1 = self.__spin_the_wheel(population, fitness_sum)
parent2 = self.__spin_the_wheel(population, fitness_sum)
selected_parents = [parent1, parent2]
parents.append(selected_parents)
logging.info("Selected parents: {sel_}.".format(sel_=selected_parents))
return parents
@staticmethod
def __spin_the_wheel(population, fitness_sum):
partial_fitness_sum = 0.0
wheel_spin = random.uniform(0, fitness_sum) # select individual at random
selected = population[0] # select first individual if wheel_spin=0
for individual in population: # retrieve selected individual
if partial_fitness_sum >= wheel_spin: # consider fitness boundaries can also be selected
break
selected = individual
partial_fitness_sum += individual.fitness
return selected
class TournamentSelection(AbstractSelection):
def perform_selection(self, population):
pass
class RankSelection(AbstractSelection):
def perform_selection(self, population):
pass
class Pair(object):
def __init__(self, parent1, parent2):
self.p1 = parent1
self.p2 = parent2
``` |
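A quick demo of RouletteWheelSelection with a stand-in individual class; the real Individual is not shown in this excerpt, and only the fitness attribute matters for selection. The import path follows the file header above.
```python
import random
from dataclasses import dataclass
from selection.selectors import RouletteWheelSelection  # module path as in the file header above

@dataclass
class FakeIndividual:  # hypothetical stand-in for the real Individual class
    name: str
    fitness: float

population = [FakeIndividual("a", 1.0), FakeIndividual("b", 3.0),
              FakeIndividual("c", 5.0), FakeIndividual("d", 1.0)]

random.seed(42)  # make the wheel spins reproducible
pairs = RouletteWheelSelection().perform_selection(population)  # ceil(4 / 2) = 2 parent pairs
for parent1, parent2 in pairs:
    print(parent1.name, "x", parent2.name)
```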
{
"source": "jluethi/ForkStitcher",
"score": 3
} |
#### File: ForkStitcher/fork-stitcher/gui.py
```python
from pathlib import Path
import tkinter as tk
import tkinter.messagebox
import tkinter.filedialog
from tkinter.scrolledtext import ScrolledText
import _tkinter
import time
import logging
import threading
import queue
import tkinter.font as font
from stitch_MAPS_annotations import Stitcher
from sites_of_interest_parser import MapsXmlParser
# TODO: Figure out how to run pyimagej and tkinter at the same time on Macs, see suggestions here:
# https://github.com/imagej/pyimagej/issues/39
# import imagej
# ij = imagej.init('/Applications/Fiji.app')
class QueueHandler(logging.Handler):
"""Class that accepts logs and adds them to a queue
"""
# Based on: https://github.com/beenje/tkinter-logging-text-widget
def __init__(self, logging_queue):
super().__init__()
self.logging_queue = logging_queue
def emit(self, log_statement):
self.logging_queue.put(log_statement)
class LoggingWindow:
# Based on: https://github.com/beenje/tkinter-logging-text-widget
def __init__(self, master):
self.master = master
self.scrolled_text = ScrolledText(master=master, state='disabled', height=15)
self.scrolled_text.grid(row=0, column=0)
self.scrolled_text.configure(font='TkFixedFont')
self.scrolled_text.tag_config('INFO', foreground='black')
self.scrolled_text.tag_config('DEBUG', foreground='gray')
self.scrolled_text.tag_config('WARNING', foreground='orange')
self.scrolled_text.tag_config('ERROR', foreground='red')
# Get the logger
self.logger = logging.getLogger()
self.log_queue = queue.Queue()
self.queue_handler = QueueHandler(self.log_queue)
formatter = logging.Formatter('%(asctime)s : %(levelname)s : %(message)s')
self.queue_handler.setFormatter(formatter)
self.logger.addHandler(self.queue_handler)
# Start polling messages from the queue
self.master.after(100, self.poll_log_queue)
self.autoscroll = tk.BooleanVar()
tk.Checkbutton(master, text='Autoscroll Log', variable=self.autoscroll).\
grid(row=1, column=0, sticky=tk.W)
self.autoscroll.set(True)
def display(self, record):
msg = self.queue_handler.format(record)
self.scrolled_text.configure(state='normal')
self.scrolled_text.insert(tk.END, msg + '\n', record.levelname)
self.scrolled_text.configure(state='disabled')
# Autoscroll to the bottom
if self.autoscroll.get():
self.scrolled_text.yview(tk.END)
def poll_log_queue(self):
# Check every 100ms if there is a new message in the queue to display
while True:
try:
record = self.log_queue.get(block=False)
except queue.Empty:
break
else:
self.display(record)
self.master.after(100, self.poll_log_queue)
class Gui:
def __init__(self, master):
self.master = master
frame = tk.Frame(master)
self.font = font.Font()
# ***** Menu *****
menu = tk.Menu(master)
master.config(menu=menu)
file_menu = tk.Menu(menu)
edit_menu = tk.Menu(menu)
menu.add_cascade(label='File', menu=file_menu)
# file_menu.add_separator()
file_menu.add_command(label='Quit', command=frame.quit)
menu.add_cascade(label='Edit', menu=edit_menu)
edit_menu.add_command(label='Reset to default', command=self.reset_parameters)
# ***** User Inputs *****
file_picker_label = tk.Label(master, text='Project folder:')
self.project_path = tk.StringVar()
self.file_picker_entry = tk.Entry(master, textvariable=self.project_path, width=30)
file_picker_button = tk.Button(master, text='Choose Directory', command=self.ask_for_path)
file_picker_label.grid(row=0, column=0, sticky=tk.E, pady=(10, 0), padx=(25,5))
self.file_picker_entry.grid(row=0, column=1, sticky=tk.W, pady=(10, 0))
file_picker_button.grid(row=0, column=2, sticky=tk.W, pady=(10, 0))
self.classifier_input = tk.BooleanVar()
tk.Checkbutton(master, text='Load input from classifier', variable=self.classifier_input,
command=self.display_csv_picker).grid(row=1, column=1, pady=(6, 0), sticky=tk.W)
self.csv_picker_label = tk.Label(master, text='Classifier CSV file:')
self.csv_path = tk.StringVar()
self.csv_picker_entry = tk.Entry(master, textvariable=self.csv_path, width=30)
self.csv_picker_button = tk.Button(master, text='Choose CSV file', command=self.ask_for_file)
grid_pos = 4
tk.Label(master, text='Advanced Options', font=(self.font, 14, 'bold')).grid(row=grid_pos, column=1,
pady=(20, 0), sticky=tk.W)
# TODO: Find out how to hide advanced options by default
self.output_folder = tk.StringVar()
tk.Label(master, text='Output folder name images:').grid(row=grid_pos + 1, column=0, sticky=tk.E, padx=(25,5))
tk.Entry(master, textvariable=self.output_folder).grid(row=grid_pos + 1, column=1, sticky=tk.W)
self.csv_folder_name = tk.StringVar()
tk.Label(master, text='Output folder name CSVs:').grid(row=grid_pos + 2, column=0, sticky=tk.E, padx=(25,5))
tk.Entry(master, textvariable=self.csv_folder_name).grid(row=grid_pos + 2, column=1, sticky=tk.W)
self.max_processes = tk.IntVar()
tk.Label(master, text='Number of parallel processes:').grid(row=grid_pos + 3, column=0, sticky=tk.E, padx=(25,5))
tk.Entry(master, textvariable=self.max_processes).grid(row=grid_pos + 3, column=1, sticky=tk.W)
self.batch_size = tk.IntVar()
tk.Label(master, text='Batch size:').grid(row=grid_pos + 4, column=0, sticky=tk.E, padx=(25,5))
tk.Entry(master, textvariable=self.batch_size).grid(row=grid_pos + 4, column=1, sticky=tk.W)
self.highmag_layer = tk.StringVar()
tk.Label(master, text='MAPS high magnification layer:').grid(row=grid_pos + 5, column=0, sticky=tk.E, padx=(25,5))
tk.Entry(master, textvariable=self.highmag_layer).grid(row=grid_pos + 5, column=1, sticky=tk.W)
self.stitch_threshold = tk.IntVar()
tk.Label(master, text='Stitch threshold:').grid(row=grid_pos + 6, column=0, sticky=tk.E, padx=(25,5))
tk.Entry(master, textvariable=self.stitch_threshold).grid(row=grid_pos + 6, column=1, sticky=tk.W)
self.eight_bit = tk.BooleanVar()
tk.Checkbutton(master, text='8 bit output', variable=self.eight_bit).grid(row=grid_pos + 7, column=1, sticky=tk.W)
self.arrow_overlay = tk.BooleanVar()
tk.Checkbutton(master, text='Add an arrow overlay that points to the fork', variable=self.arrow_overlay). \
grid(row=grid_pos + 8, column=1, sticky=tk.W)
self.contrast_enhance = tk.BooleanVar()
tk.Checkbutton(master, text='Produce contrast enhanced images', variable=self.contrast_enhance). \
grid(row=grid_pos + 9, column=1, sticky=tk.W)
self.continue_processing = tk.BooleanVar()
tk.Checkbutton(master, text='Continue processing an experiment', variable=self.continue_processing).\
grid(row=grid_pos + 10, column=1, sticky=tk.W)
# Run button
self.run_button_text = tk.StringVar()
self.run_button = tk.Button(master, textvariable=self.run_button_text, width=10)
self.run_button_ready()
self.run_button.grid(row=15, column=2, sticky=tk.W, pady=10, padx=10)
# Reset button
self.reset_button = tk.Button(master, text='Reset Parameters', width=20, command=self.reset_parameters)
self.reset_button.grid(row=15, column=0, sticky=tk.E, pady=10, padx=10)
# Stop button (available during run)
self.reset_parameters()
def reset_parameters(self):
self.project_path.set('')
self.max_processes.set(5)
self.eight_bit.set(True)
self.batch_size.set(5)
self.output_folder.set('stitchedForks')
self.csv_folder_name.set('annotations')
self.highmag_layer.set('highmag')
self.stitch_threshold.set(1000)
self.arrow_overlay.set(True)
self.contrast_enhance.set(True)
self.continue_processing.set(False)
self.classifier_input.set(False)
self.csv_path.set('')
def run(self):
project_dir = Path(self.project_path.get())
base_path = project_dir.parent
project_name = project_dir.name
params_set = self.check_all_parameters_set()
if params_set and not self.continue_processing.get() and not self.classifier_input.get():
self.create_logging_window()
self.run_button_to_running()
log_file_path = str(Path(project_dir) / (project_name + '.log'))
logger = MapsXmlParser.create_logger(log_file_path)
logger.info('Process experiment {}'.format(project_name))
# thread = threading.Thread(target=self.dummy, args=(10, ))
# thread.daemon = True
# thread.start()
thread = threading.Thread(target=self.run_from_beginning, args=(base_path, project_name,))
thread.daemon = True
thread.start()
elif params_set and self.continue_processing.get():
self.create_logging_window()
self.run_button_to_running()
logging.info('Continuing to process experiment {}'.format(project_name))
thread = threading.Thread(target=self.continue_run, args=(base_path, project_name,))
thread.daemon = True
thread.start()
elif params_set and self.classifier_input.get():
self.create_logging_window()
self.run_button_to_running()
logging.info('Load classifier output for experiment {} from the csv file: {}'.format(project_name,
self.csv_path.get()))
thread = threading.Thread(target=self.classifier_input_run, args=(base_path, project_name,
self.csv_path.get(),))
thread.daemon = True
thread.start()
else:
tkinter.messagebox.showwarning(title='Warning: parameters missing',
message='You need to enter the correct kind of parameters in all the '
'required fields and then try again')
def run_button_to_running(self):
self.run_button_text.set('Running...')
self.run_button.config(height=2, fg='gray', command=self.nothing)
def run_button_ready(self):
self.run_button_text.set('Run')
self.run_button.config(height=2, fg='green', command=self.run, font=(self.font, 24, 'bold'))
def run_from_beginning(self, base_path, project_name):
# TODO: Catch issues when wrong path is provided or another error/warning occurs in the stitcher => catch my custom Exception, display it to the user
stitcher = Stitcher(base_path, project_name, self.csv_folder_name.get(), self.output_folder.get())
stitcher.parse_create_csv_batches(batch_size=self.batch_size.get(), highmag_layer=self.highmag_layer.get())
stitcher.manage_batches(self.stitch_threshold.get(), self.eight_bit.get(), show_arrow=self.arrow_overlay.get(),
max_processes=self.max_processes.get(), enhance_contrast=self.contrast_enhance.get())
stitcher.combine_csvs(delete_batches=True)
logging.info('Finished processing the experiment')
self.run_button_ready()
def continue_run(self, base_path, project_name):
stitcher = Stitcher(base_path, project_name, self.csv_folder_name.get(), self.output_folder.get())
stitcher.manage_batches(self.stitch_threshold.get(), self.eight_bit.get(), show_arrow=self.arrow_overlay.get(),
max_processes=self.max_processes.get(), enhance_contrast=self.contrast_enhance.get())
stitcher.combine_csvs(delete_batches=True)
logging.info('Finished processing the experiment')
self.run_button_ready()
def classifier_input_run(self, base_path, project_name, csv_path):
stitcher = Stitcher(base_path, project_name, self.csv_folder_name.get(), self.output_folder.get())
stitcher.parse_create_classifier_csv_batches(batch_size=self.batch_size.get(), classifier_csv_path=csv_path,
highmag_layer=self.highmag_layer.get())
stitcher.manage_batches(self.stitch_threshold.get(), self.eight_bit.get(), show_arrow=self.arrow_overlay.get(),
max_processes=self.max_processes.get(), enhance_contrast=self.contrast_enhance.get())
stitcher.combine_csvs(delete_batches=True)
logging.info('Finished processing the experiment')
self.run_button_ready()
def create_logging_window(self):
# TODO: Check if the window already exists. Only make a new window if it doesn't exist yet
log_window = tk.Toplevel(self.master)
log_window.title('Log')
LoggingWindow(log_window)
def dummy(self, iterations):
"""Dummy run function to test the interface, e.g. locally on my Mac
Function just does some logging so that the interface can be tested.
Args:
iterations (int): Number of log messages to be produced
"""
logger = logging.getLogger(__name__)
for i in range(iterations):
logger.info('Running Dummy')
time.sleep(1)
for i in range(iterations):
logger.info('Running Dummy 2! =D')
time.sleep(1)
self.run_button_ready()
@staticmethod
def nothing():
"""If the run button has already been pressed, just do nothing on future presses until the function finishes
"""
pass
def ask_for_path(self):
path = tkinter.filedialog.askdirectory(title='Select folder containing the MapsProject.xml file')
self.project_path.set(path)
def ask_for_file(self):
path = tkinter.filedialog.askopenfilename(title='Select the classifier output',
filetypes=(("csv files", "*.csv"), ("all files", "*.*")))
self.csv_path.set(path)
def display_csv_picker(self):
if self.classifier_input.get():
self.csv_picker_label.grid(row=2, column=0, sticky=tk.E)
self.csv_picker_entry.grid(row=2, column=1, sticky=tk.W)
self.csv_picker_button.grid(row=2, column=2, sticky=tk.W)
else:
self.csv_picker_label.grid_remove()
self.csv_picker_entry.grid_remove()
self.csv_picker_button.grid_remove()
def check_all_parameters_set(self):
try:
params_set = len(self.project_path.get()) > 0
params_set = params_set and type(self.max_processes.get()) == int
params_set = params_set and type(self.eight_bit.get()) == bool
params_set = params_set and type(self.batch_size.get()) == int
params_set = params_set and len(self.output_folder.get()) > 0
params_set = params_set and len(self.csv_folder_name.get()) > 0
params_set = params_set and len(self.highmag_layer.get()) > 0
params_set = params_set and type(self.stitch_threshold.get()) == int
if self.classifier_input.get():
params_set = params_set and len(self.csv_path.get()) > 0
params_set = params_set and self.csv_path.get().endswith('.csv')
except _tkinter.TclError:
params_set = False
return params_set
def shutdown(self):
# Helper function to shut down all stitching processes when the interface is quit
if tk.messagebox.askokcancel("Quit", "Do you want to stop processing the experiment?"):
self.master.destroy()
```
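The entry point that builds the window is not part of this excerpt; a minimal launch sketch, assuming the class above is importable as gui.Gui, could look like this.
```python
import tkinter as tk
from gui import Gui  # assumption: the module above

root = tk.Tk()
root.title('Fork Stitcher')
app = Gui(root)
# Route window closing through shutdown() so running stitching jobs are not dropped silently.
root.protocol("WM_DELETE_WINDOW", app.shutdown)
root.mainloop()
```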
#### File: ForkStitcher/fork-stitcher/multiprocessing_imagej.py
```python
import imagej
from multiprocessing import Pool
def f(x):
# ij = imagej.init('/Applications/Fiji.app')
print(x)
# ij = imagej.init('/Applications/Fiji.app')
with Pool(processes=2) as pool:
for i in range(10):
pool.apply_async(f, args=(i,))
pool.close()
pool.join()
``` |
{
"source": "jlugao/elephant-bot",
"score": 2
} |
#### File: handlers/bookmarks/handlers.py
```python
from telegram import ParseMode, Update
from telegram.ext import CallbackContext
from tgbot.models import User
from bookmarks.models import Bookmark
def return_bookmarks(update: Update, context: CallbackContext) -> None:
    """List the current user's bookmarks together with their tags"""
    user = User.get_user(update, context)
    bookmarks = Bookmark.objects.filter(user=user)
    reply = "Bookmarks:\n"
    for bookmark in bookmarks:
        tags = "[" + ", ".join(str(tag) for tag in bookmark.tags.all()) + "]"
        reply += f" - <a href='{bookmark.url}'>{bookmark.url}</a> {tags}\n"
    update.message.reply_text(
        reply,
        parse_mode=ParseMode.HTML,
    )
```
#### File: handlers/raw_text/handlers.py
```python
from datetime import timedelta
from django.utils.timezone import now
from telegram import ParseMode, Update
from telegram.ext import CallbackContext
from . import static_text
from tgbot.models import User
from bookmarks.models import Bookmark
import re
URL_REGEX = r"[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)"
TAGS_REGEX = r"\[(.*)\]"
def raw_message(update: Update, context: CallbackContext) -> None:
"""Show help info about all secret admins commands"""
user = User.get_user(update, context)
if url := re.search(URL_REGEX, update.message.text, re.IGNORECASE):
url = url.group(0)
tags = re.findall(TAGS_REGEX, update.message.text)
if tags:
tags = tags[0].split(",")
bookmark, created = Bookmark.objects.get_or_create(url=url, user=user)
existing_tags = bookmark.tags.all()
new_tags = set(tags) - set(existing_tags)
bookmark.tags.add(*list(new_tags))
if not created:
update.message.reply_text("Bookmark already existed")
else:
update.message.reply_text("bookmark created")
tags = "["
tags += ", ".join([str(tag) for tag in bookmark.tags.all()])
tags += "]"
update.message.reply_text(f"Bookmark: {bookmark.url} \n Tags: {tags}")
else:
update.message.reply_text(static_text.raw_message_reply)
update.message.reply_text(update.message.text)
``` |
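A standalone check (standard library only) of how the two patterns above pick apart an incoming message.
```python
import re

URL_REGEX = r"[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)"
TAGS_REGEX = r"\[(.*)\]"

message = "save example.com/page [news, reading]"
url = re.search(URL_REGEX, message, re.IGNORECASE).group(0)
tags = re.findall(TAGS_REGEX, message)[0].split(",")

print(url)   # example.com/page
print(tags)  # ['news', ' reading'] -- the leading space is kept, as in the handler above
```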
{
"source": "jlugomar/hypergraphlet-kernels",
"score": 3
} |
#### File: hypergraphlet_kernel_single_hypergraph/examples/runHypergraphletsRegressionSuite.py
```python
import os, sys, glob, copy, math, random
def runHypergraphletsRegressionSuite():
dataPATH = './data/'
outputPATH = './results/'
testCaseName = 'example'
node2nodeID = {'R':'0', 'A':'1', 'B':'2', 'C':'3'}
MAX_NODE_LABELS = 'NNNNN'
MAX_HYPEREDGE_LABELS = 'EEEEEEEEEEEEE'
HYPERGRAPHLETS_TYPES = 472
SAMPLES = 50
FLAG_rijk = '\t0\t1\t2\t3\t4'
FLAG_ijk = '\t1\t2\t3\t4'
SUBGRAPH_TEST = False
infilename = 'hypergraphlets_description.txt'
try:
#Open input file
infile = open(infilename, "r")
except IOError as e:
print ("<<ERROR>> Unable to open the file", infilename, "\nThis program will be quiting now.", e)
sys.exit()
hypergraph = {}
rijkHypergraphlets = []
ijkHypergraphlets = []
rijk_ijkHypergraphlets = []
#Iterate over file
for line in infile:
#Eliminate any unnecessary white spaces
line = line.strip()
hgType = int(line.split(':')[0])
hyperedges_desc = line.split(':')[1]
hyperedgesDesc = hyperedges_desc.split('|')
totalHyperedges = 0
temp = []
if hgType == 1:
totalNodes = 2
elif hgType > 1 and hgType < 11:
totalNodes = 3
else:
totalNodes = 4
for currHyperedge in hyperedgesDesc:
hyperedge = ''
for currNode in currHyperedge.split(','):
nodeID = node2nodeID[currNode]
hyperedge += '\t' + nodeID
temp.append(hyperedge)
totalHyperedges += 1
if currHyperedge == 'R,A,B,C' or currHyperedge == 'A,B,C':
totalNodes = 5
if currHyperedge == 'R,A,B,C':
temp.append(FLAG_rijk)
totalHyperedges += 1
rijkHypergraphlets.append(hgType)
else:
temp.append(FLAG_ijk)
totalHyperedges += 1
ijkHypergraphlets.append(hgType)
if (hgType in rijkHypergraphlets) and (hgType in ijkHypergraphlets):
rijk_ijkHypergraphlets.append(hgType)
hypergraph[hgType] = copy.copy(temp)
#Output test case for current hypergraphlet type
#Output hypergraph file
outfilenameHG = dataPATH + testCaseName + str(hgType) + '.hypergraph'
outfileHG = open(outfilenameHG, "w")
hyperedges = hypergraph[hgType]
random.shuffle(hyperedges)
for i in range(0, len(hyperedges)):
temp = hyperedges[i].strip().split('\t')
random.shuffle(temp)
permutedHyperedge = ''
for nodeID in temp:
permutedHyperedge += '\t' + nodeID
outlineHG = str(i) + permutedHyperedge + '\n'
outfileHG.writelines(outlineHG)
outfileHG.close()
#Output node labels file
outfilenameNL = dataPATH + testCaseName + str(hgType) + '.nlabels'
outfileNL = open(outfilenameNL, "w")
outlineNL = MAX_NODE_LABELS[0:totalNodes] + '\n'
outfileNL.writelines(outlineNL)
outfileNL.close()
#Output hyperedge labels file
outfilenameEL = dataPATH + testCaseName + str(hgType) + '.elabels'
outfileEL = open(outfilenameEL, "w")
outlineEL = MAX_HYPEREDGE_LABELS[0:totalHyperedges] + '\n'
outfileEL.writelines(outlineEL)
outfileEL.close()
#Close input file
infile.close()
print (len(rijkHypergraphlets), len(ijkHypergraphlets), len(rijk_ijkHypergraphlets))
casesFailed = 0
notFound = 0
for hgType in range(1, HYPERGRAPHLETS_TYPES):
failed = 0
for sampleRun in range(0, SAMPLES):
#Output hypergraph file
outfilenameHG = dataPATH + testCaseName + str(hgType) + '.hypergraph'
outfileHG = open(outfilenameHG, "w")
hyperedges = hypergraph[hgType]
random.shuffle(hyperedges)
for i in range(0, len(hyperedges)):
temp = hyperedges[i].strip().split('\t')
random.shuffle(temp)
permutedHyperedge = ''
for nodeID in temp:
permutedHyperedge += '\t' + nodeID
outlineHG = str(i) + permutedHyperedge + '\n'
outfileHG.writelines(outlineHG)
outfileHG.close()
#Run hyperkernel code
command = './run_hyperkernel -p examples.pos -n examples.neg -g ' + dataPATH + testCaseName + str(hgType) + ' -l ' + dataPATH + testCaseName + str(hgType) + ' -e ' + dataPATH + testCaseName + str(hgType) + ' -t 2 -z 0 -s ' + outputPATH + testCaseName + str(hgType) + '_shgk.svml '
os.system(command)
#Open SVML results file
resultFilename = outputPATH + testCaseName + str(hgType) + '_shgk.svml'
infileSVML = open(resultFilename, "r")
temp = infileSVML.readline().strip()
pairs = temp.split(' ')
typeFound = False
for i in range(1, (len(pairs) - 1)):
EXPECTED_COUNT = 1.0
countType = int(pairs[i].split(':')[0])
count = float(pairs[i].split(':')[1])
# print ("\t", sampleRun, hgType, countType, count)
if countType == hgType:
typeFound = True
if count != EXPECTED_COUNT and not SUBGRAPH_TEST:
failed += 1
print ('Type ', hgType, ' ... MISMATCH', pairs)
break
if (typeFound == False):
print ('Type ', hgType, ' ... NOT FOUND', countType, count)
failed += 1
notFound += 1
print (pairs)
return
infileSVML.close()
if failed > 0:
print ('Type ', hgType, ' ... FAILED on', (float(failed)/float(SAMPLES) * 100.0), '% of inputs.')
casesFailed += 1
else:
print ('Type ', hgType, ' ... PASSED on', SAMPLES, 'different inputs.')
print ("FAILED test cases ... ", casesFailed)
print ("Hypergraphlets NOT FOUND ... ", notFound)
return
if __name__ == '__main__':
runHypergraphletsRegressionSuite() #Alternate call, if you are using IDLE.
## runHypergraphletsRegressionSuite(sys.argv)
``` |
{
"source": "JLUiceman/djangoServer",
"score": 2
} |
#### File: webServer/api/views.py
```python
from .models import Article, Friend
from .serializers import ArticleSerializer, ArticleDetailSerializer, FriendSerializer
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import mixins
from rest_framework import generics
class ArticleList(generics.ListCreateAPIView):
lookup_field = 'article_type'
queryset = Article.objects.all()
serializer_class = ArticleSerializer
# def get_queryset(self):
# return Article.objects.get(article_type = self.request.GET.get('type', 'javascript'))
class ArticleDetail(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'id'
queryset = Article.objects.all()
serializer_class = ArticleDetailSerializer
class FriendList(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'fullname'
queryset = Friend.objects.all()
serializer_class = FriendSerializer
# class ArticleList(mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):
# queryset = Article.objects.all()
# serializer_class = ArticleSerializer
# def get(self, request, *args, **kwarg):
# return self.list(request, *args, **kwargs)
# def post(self, request, *args, **kwargs):
# return self.create(request, *args, **kwargs)
# class FriendList(APIView):
# """
# Retrieve, update or delete a snippet instance.
# """
# def get_object(self, pk):
# try:
# return Friend.objects.get(pk=pk)
# except Friend.DoesNotExist:
# raise Http404
# def get(self, request, pk, format=None):
# friend = self.get_object(pk)
# serializer = FriendSerializer(friend)
# return Response(serializer.data)
# @csrf_exempt
# def article_list(request, pk):
# """
# List all code snippets, or create a new snippet.
# """
# if request.method == 'GET':
# article = Article.objects.all()
# serializer = ArticleSerializer(article, many=True)
# return JsonResponse(serializer.data, safe=False)
# elif request.method == 'POST':
# data = JSONParser().parse(request)
# serializer = ArticleSerializer(data=data)
# if serializer.is_valid():
# serializer.save()
# return JsonResponse(serializer.data, status=201)
# return JsonResponse(serializer.errors, status=400)
# @csrf_exempt
# def article_detail(request, pk):
# """
# Retrieve, update or delete a code snippet.
# """
# try:
# article = Article.objects.get(pk=pk)
# except Article.DoesNotExist:
# return HttpResponse(status=404)
# if request.method == 'GET':
# serializer = ArticleSerializer(article)
# return JsonResponse(serializer.data)
# elif request.method == 'PUT':
# data = JSONParser().parse(request)
# serializer = ArticleSerializer(article, data=data)
# if serializer.is_valid():
# serializer.save()
# return JsonResponse(serializer.data)
# return JsonResponse(serializer.errors, status=400)
# elif request.method == 'DELETE':
# article.delete()
# return HttpResponse(status=204)
``` |
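The project's urls.py is not included in this excerpt; a hypothetical wiring for these views, with path converters matching the declared lookup_field values, might look like the sketch below.
```python
from django.urls import path
from . import views

urlpatterns = [
    path('articles/', views.ArticleList.as_view()),              # list / create
    path('article/<int:id>/', views.ArticleDetail.as_view()),    # lookup_field = 'id'
    path('friend/<str:fullname>/', views.FriendList.as_view()),  # lookup_field = 'fullname'
]
```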
{
"source": "jlu-ilr-hydro/IPCC-Repots-Focus-Overview",
"score": 4
} |
#### File: jlu-ilr-hydro/IPCC-Repots-Focus-Overview/count_temp_ipcc.py
```python
import numpy as np
import pandas as pd
import os
def create_temp_dict():
"""Creates a dictionary for all the single temperatures to count and returns it"""
temp_dict = {}
for i in np.arange(0.5,10.5, 0.5):
# Test if it is a float or not to format it right
if i == int(i):
            # Add an empty space at the beginning to make sure this is not counting e.g. 1.5°C as 5°C
key = " " + str(int(i)) + "°C"
else:
key = " " + str(i )+ "°C"
temp_dict[key] = 0
return temp_dict
def get_all_string(report):
with open(os.getcwd() + os.sep + "Raw IPCC Strings" + os.sep + report, 'r', encoding='utf-8') as f:
return f.read()
def count_temperatures(report):
"""counts all temperatures between 0.5°C and 10°C in 0.5°C steps"""
temp_dict = create_temp_dict()
report_df = pd.read_csv(os.getcwd() + os.sep + "Raw IPCC Strings" + os.sep + report, sep="\t", usecols=[0])
report_list = report_df[report_df.columns[0]].tolist()
report_str = " ".join([str(item) for item in report_list])
    # Count how often each temperature occurs
for temp in temp_dict.keys():
number_of_occurences = report_str.count(temp)
print("Found " + temp + " " + str(number_of_occurences) + " time(s)")
temp_dict[temp] += number_of_occurences
# Save the results for the single pdf
temp_counts_pdf = pd.DataFrame.from_dict(temp_dict, orient="index")
temp_counts_pdf.to_csv("Results" + os.sep + "temperatures" + os.sep + "counts_" + report[:-4] + ".csv", sep=";")
def count_all_reports():
"""iterates over all reports"""
reports = [file for file in os.listdir(os.getcwd() + os.sep + "Raw IPCC Strings") if file[-4:] == ".csv" ]
for report in reports:
print("Starting with " + report)
count_temperatures(report)
count_all_reports()
```
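A quick standalone sanity check of the substring counting used above; the leading space in each key is what keeps "1.5°C" from also inflating the " 5°C" count.
```python
sample = "Warming of 1.5°C versus 2°C, and again 1.5°C."
print(sample.count(" 1.5°C"))  # 2
print(sample.count(" 5°C"))    # 0 -- the ".5°C" in "1.5°C" is not preceded by a space
print(sample.count(" 2°C"))    # 1
```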
#### File: jlu-ilr-hydro/IPCC-Repots-Focus-Overview/read_prepare_data.py
```python
import os
import pandas as pd
import numpy as np
def read_ipcc_counts_temp():
"""reads all counts of temperatures for all reports and makes on df"""
files = os.listdir(os.getcwd()+os.sep+"Results"+ os.sep + "temperatures")
all_df = pd.DataFrame()
for file in files:
file_df = pd.read_csv("Results" + os.sep + "temperatures" + os.sep + file, sep=";", index_col=0)
file_df.columns = [file[:-4]]
all_df = pd.concat([all_df, file_df], axis=1)
return all_df.transpose()
def read_ipcc_counts_rfc():
"""reads all counts of reasons of concern for all reports and makes on df"""
files = os.listdir(os.getcwd()+os.sep+"Results"+ os.sep + "reasons_for_concern")
all_df = pd.DataFrame()
for file in files:
file_df = pd.read_csv("Results" + os.sep + "reasons_for_concern" + os.sep + file, sep=";", index_col=0)
file_df.columns = [file[:-4]]
all_df = pd.concat([all_df, file_df], axis=1)
return all_df.transpose()
def read_false_positive():
"""reads in all the counted false/true positive rates for the temperatres in the
IPCC and calculates a true positive rate for each entry"""
files = os.listdir(os.getcwd()+os.sep+"Results"+ os.sep + "false_positive_check_files")
all_df = pd.DataFrame()
for file in files:
# only read those files that contains the counting results
if "results" not in file:
continue
file_df = pd.read_csv("Results" + os.sep + "false_positive_check_files" + os.sep + file, sep=",", index_col=0)
# calculate the true positive rate
file_df["True Positive Rate [%]"] = (file_df["n true positive"]/(file_df["n true positive"]+file_df["n false positive"]))*100
# Arange the df for seaborn
file_df["Temperature [°C]"] = file_df.index
file_df.reset_index(inplace=True, drop=True)
all_df = pd.concat([all_df, file_df])
return all_df
def scale_counts(ipcc_counts):
"""scale the counts by overall sum"""
sums = ipcc_counts.sum(axis=1)
for col in ipcc_counts:
ipcc_counts[col] = ipcc_counts[col]/sums*100
return ipcc_counts
def read_meta():
"""reads in the meta data of the reports"""
meta = pd.read_csv("Reports" + os.sep + "meta_data_reports.tsv", sep="\t")
meta["Year"] = meta["Year"].astype("str")
return meta
def group_temps(ipcc_counts):
"""groups the temperatures into three categories"""
ipcc_counts["0.5°C - 2°C"] = ipcc_counts[" 0.5°C"] + ipcc_counts[" 1°C"] + ipcc_counts[" 1.5°C"] +ipcc_counts[" 2°C"]
ipcc_counts["2.5°C - 4°C"] = ipcc_counts[" 2.5°C"] + ipcc_counts[" 3°C"] + ipcc_counts[" 3.5°C"] +ipcc_counts[" 4°C"]
ipcc_counts["≥ 4.5°C"] = ipcc_counts[" 4.5°C"] + ipcc_counts[" 5°C"] + ipcc_counts[" 5.5°C"] +ipcc_counts[" 6°C"] +ipcc_counts[" 6.5°C"] + ipcc_counts[" 7°C"] + ipcc_counts[" 7.5°C"] +ipcc_counts[" 8°C"] + ipcc_counts[" 8.5°C"] + ipcc_counts[" 9°C"] + ipcc_counts[" 9.5°C"] +ipcc_counts[" 10°C"]
return ipcc_counts.iloc[:,20:]
def merge_counts_meta(ipcc_counts, meta):
"""merges the df with the counted temperatures/rfcs with the metadata"""
return pd.merge(meta, ipcc_counts, right_index=True, left_on="count_names")
def lookup_names():
""""Returns lookup dict for different files names to merge them"""
lookup_dict = {
"IPCC_AR6_WGI_Full_Report":"counts_IPCC_AR6_WGI_Full_Report_parsed",
"SROCC_FullReport_FINAL":"counts_SROCC_FullReport_FINAL_parsed",
"210714-IPCCJ7230-SRCCL-Complete-BOOK-HRES":"counts_210714-IPCCJ7230-SRCCL-Complete-BOOK-HRES_parsed",
"SR15_Full_Report_Low_Res":"counts_SR15_Full_Report_Low_Res_parsed",
"SYR_AR5_FINAL_full":"counts_SYR_AR5_FINAL_full_wcover_parsed",
"ipcc_wg3_ar5_full":"counts_ipcc_wg3_ar5_full_parsed",
"WGIIAR5-PartA_FINAL":"counts_WGIIAR5-PartA_FINAL_parsed",
"WGIIAR5-PartB_FINAL":"counts_WGIIAR5-PartB_FINAL_parsed",
"WG1AR5_all_final":"counts_WG1AR5_all_final_parsed",
"SREX_Full_Report-1":"counts_SREX_Full_Report-1_parsed",
"SRREN_Full_Report-1":"counts_SRREN_Full_Report-1_parsed",
"ar4_syr_full_report":"counts_ar4_syr_full_report_parsed",
"ar4_wg2_full_report":"counts_ar4_wg2_full_report_parsed",
"ar4_wg1_full_report-1":"counts_ar4_wg1_full_report-1_parsed",
"ar4_wg3_full_report-1":"counts_ar4_wg3_full_report-1_parsed",
"sroc_full-1":"counts_sroc_full-1_parsed",
"srccs_wholereport-1":"counts_srccs_wholereport-1_parsed",
"SYR_TAR_full_report":"counts_SYR_TAR_full_report_parsed",
"WGII_TAR_full_report-2":"counts_WGII_TAR_full_report-2_parsed",
"WGI_TAR_full_report":"counts_WGI_TAR_full_report_parsed",
"WGIII_TAR_full_report":"counts_WGIII_TAR_full_report_parsed",
"srl-en-1":"counts_srl-en-1_parsed",
"srtt-en-1":"counts_srtt-en-1_parsedd",
"emissions_scenarios-1":"counts_emissions_scenarios-1_parsed",
"av-en-1":"counts_av-en-1_parsed",
"The-Regional-Impact":"counts_The-Regional-Impact_parsed",
"2nd-assessment-en-1":"counts_2nd-assessment-en-1_parsed",
"ipcc_sar_wg_III_full_report":"counts_ipcc_sar_wg_III_full_report_parsed",
"ipcc_sar_wg_II_full_report":"counts_ipcc_sar_wg_II_full_report_parsed",
"ipcc_sar_wg_I_full_report":"counts_ipcc_sar_wg_I_full_report_parsed",
"climate_change_1994-2":"counts_climate_change_1994-2_parsed",
# "ipcc-technical-guidelines-1994n-1":"", # could not read in, but also contains no temp mentions
"ipcc_wg_I_1992_suppl_report_full_report":"counts_ipcc_wg_I_1992_suppl_report_full_report_parsed",
"ipcc_wg_II_1992_suppl_report_full_report":"counts_ipcc_wg_II_1992_suppl_report_full_report_parsed",
"ipcc_90_92_assessments_far_full_report":"counts_ipcc_90_92_assessments_far_full_report_parsed",
"ipcc_far_wg_III_full_report":"counts_ipcc_far_wg_III_full_report_parsed",
"ipcc_far_wg_II_full_report":"counts_ipcc_far_wg_II_full_report_parsed",
"ipcc_far_wg_I_full_report":"counts_ipcc_far_wg_I_full_report_parsed",
}
return lookup_dict
def create_temp_keys():
"""Creates a list of strings for all temperatures the paper looked at"""
temps = []
for i,temp in enumerate(np.arange(0.5,10.1,0.5)):
if i % 2 != 0:
temps.append(" "+str(int(temp))+"°C")
else:
temps.append(" "+str(temp)+"°C" )
return temps
def combine_all_raw_strings():
"""combines all raw strings into one big file to search through"""
reports = [file for file in os.listdir(os.getcwd() + os.sep + "Raw IPCC Strings") if file[-4:] == ".csv" ]
all_reports = " "
for report in reports:
print("Starting with " + report)
report_df = pd.read_csv(os.getcwd() + os.sep + "Raw IPCC Strings" + os.sep + report, sep="\t", usecols=[0])
report_list = report_df[report_df.columns[0]].tolist()
report_str = " ".join([str(item) for item in report_list])
all_reports += report_str
with open(os.getcwd() + os.sep + "Raw IPCC Strings" + os.sep + "all_ipcc_strings.csv", 'w', encoding='utf-8') as f:
# this file is not included in the repository, as it is too large for Github
f.write(all_reports)
if __name__ == "__main__":
combine_all_raw_strings()
``` |
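A miniature, made-up example of the scaling step in scale_counts(), written here with the equivalent vectorized div: every report's counts become percentages of that report's total mentions.
```python
import pandas as pd

counts = pd.DataFrame(
    {" 1.5°C": [3, 0], " 2°C": [6, 5], " 4.5°C": [1, 5]},
    index=["report_a", "report_b"],
)
scaled = counts.div(counts.sum(axis=1), axis=0) * 100
print(scaled)
# report_a: 30.0 / 60.0 / 10.0
# report_b:  0.0 / 50.0 / 50.0
```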
{
"source": "jlu-ilr-hydro/odmf",
"score": 2
} |
#### File: odmf/bin/interactive.py
```python
from odmf.config import conf
from odmf.tools import Path
from odmf import db
import pandas as pd
import os
import time
import numpy as np
import contextlib
import logging
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s | %(name)s | %(levelname)s | %(message)s'
)
@contextlib.contextmanager
def timeit(name='action'):
tstart = time.time()
yield
d = time.time() - tstart
print(f'------------ {name} took {d:0.3f} seconds')
session: db.orm.Session = db.Session()
q = session.query
person = db.base.ObjectGetter(db.Person, session)
ds = db.base.ObjectGetter(db.Dataset, session)
# Set default user
from odmf.webpage import auth
auth.users.load()
auth.users.set_default('philipp')
```
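The timeit() context manager defined above can wrap any block; a trivial usage sketch, assuming the definitions above are in scope of the interactive session:
```python
with timeit('building a list'):
    squares = [i * i for i in range(1_000_000)]
# prints something like: ------------ building a list took 0.054 seconds
```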
#### File: odmf/odmf/config.py
```python
import yaml
from pathlib import Path
import sys
import os
from logging import getLogger
from . import prefix, __version__
logger = getLogger(__name__)
class ConfigurationError(RuntimeError):
pass
def static_locations(*from_config):
paths = [Path(__file__).parent / 'static'] + [Path(p) for p in from_config]
filtered = []
[filtered.append(str(p)) for p in paths if p and p.exists() and p not in filtered]
return filtered
class Configuration:
"""
The configuration class. Change the configuration by providing a config.yml in the home directory
Mandatory fields are defined as (...), optional as None or with a default value
"""
datetime_default_timezone = 'Europe/Berlin'
database_type = 'postgres'
database_name = ''
database_username = ''
database_password = ''
database_host = '127.0.0.1'
static = [prefix]
media_image_path = 'webpage/media'
nav_background = '/media/gladbacherhof.jpg'
nav_left_logo = '/media/lfe-logo.png'
manual_measurements_pattern = '(.+\\/)*datafiles\\/lab\\/([a-zA-Z0-9]+\\/)*.*\\.(xls|xlsx)$'
map_default = {'lat': 50.5, 'lng': 8.55, 'type': 'hybrid', 'zoom': 15}
utm_zone = '32N'
upload_max_size = 25000000
server_port = 8080
google_maps_api_key = ''
woftester_receiver_mail = ['<EMAIL>']
woftester_sender_mail = '<EMAIL>'
cuahsi_wsdl_endpoint = 'http://fb09-pasig.umwelt.uni-giessen.de/wof/index.php/cuahsi_1_1.asmx?WSDL'
smtp_serverurl = 'mailout.uni-giessen.de'
root_url = '/'
datafiles = './datafiles'
preferences = './preferences'
description = 'A server for data-management for quantitative field research'
user = os.environ.get('USER') or os.environ.get('USERNAME')
def __bool__(self):
return ... not in vars(self).values()
def to_dict(self):
return {
k: v
for k, v in vars(self).items()
if (
not callable(v)
and not k.startswith('_')
and type(v) is not property
)
}
def update(self, conf_dict: dict):
unknown_keys = []
for k in conf_dict:
if hasattr(self, k):
setattr(self, k, conf_dict[k])
else:
unknown_keys.append(k)
if unknown_keys:
raise ConfigurationError(f'Your configuration contains unknown keys: {",".join(unknown_keys)}')
return self
def __init__(self, **kwargs):
vars(self).update({
k: v
for k, v in vars(type(self)).items()
if not k.startswith('_') and not callable(v)
})
self.update(kwargs)
self.home = str(Path(prefix).absolute())
self.static = static_locations(self.home, *self.static)
def abspath(self, relative_path: Path):
"""
Returns a pathlib.Path from the first fitting static location
:param relative_path: A relative path to a static resource
"""
for static_home in reversed(self.static):
p = Path(static_home) / relative_path
if p.exists():
return p.absolute()
raise FileNotFoundError(f'{relative_path} not found in the static resources')
def to_yaml(self, stream=sys.stdout):
"""
Exports the current configuration to a yaml file
:param stream: A stream to write to
"""
d = self.to_dict()
yaml.safe_dump(d, stream)
def google_maps_api(self, callback: str):
return f'https://maps.googleapis.com/maps/api/js?key={self.google_maps_api_key}&callback={callback}'
@property
def version(self):
return __version__
def load_config():
conf_file = Path(prefix) / 'config.yml'
logger.debug('Found config file:' + str(conf_file.absolute()))
if not conf_file.exists():
logger.warning(f'{conf_file.absolute().as_posix()} '
f'not found. Create a template with "odmf configure". Using incomplete configuration')
conf_dict = {}
else:
conf_dict = yaml.safe_load(conf_file.open()) or {}
logger.debug(f'loaded {conf_file.resolve()}')
conf = Configuration(**conf_dict)
if not conf:
logger.warning(', '.join(k for k, v in conf.to_dict().items() if v is ...) + ' are undefined')
return conf
def import_module_configuration(conf_module_filename):
"""
Migration utility to create a conf.yaml from the old ODMF 0.x conf.py module configuration
:param conf_module_filename: The conf.py configuration file
"""
code = compile(open(conf_module_filename).read(), 'conf.py', 'exec')
config = {}
exec(code, config)
def c(s: str):
return s.replace('CFG_', '').lower()
config = {
c(k): v
for k, v in config.items()
if k.upper() == k and k[0] != '_' and not callable(v)
}
config['database_type'] = config.pop('database', 'postgres')
conf = Configuration(**config)
return conf
conf = load_config()
```
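Below is a minimal, hypothetical usage sketch of the `Configuration` class above (added for illustration, not part of the odmf sources); the class and exception names come from the file, everything else is assumed:
```python
# Hypothetical sketch: class defaults survive unless overridden, and unknown keys
# in a config dict raise ConfigurationError.
from odmf.config import Configuration, ConfigurationError

c = Configuration(database_name='odmf_test')
assert c.database_host == '127.0.0.1'     # class default kept
try:
    c.update({'databse_name': 'typo'})    # deliberately misspelled key
except ConfigurationError as err:
    print(err)                            # names the unknown key
```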
#### File: odmf/dataimport/importlog.py
```python
import pandas as pd
from .. import db
import datetime
class LogImportError(RuntimeError):
pass
class LogImportStructError(LogImportError):
pass
class LogImportRowError(LogImportError):
"""
Error with additional information of the row in the log file
"""
def __init__(self, row, msg, is_valuetype_error=False):
RuntimeError.__init__(
self, "Could not import row %i:%s" % (row + 1, msg))
self.row = row
self.text = msg
self.is_valuetype_error = is_valuetype_error
def make_time_column_as_datetime(df: pd.DataFrame):
"""
Converts the time column to a datetime
Possible case:
1. The 'time' column contains 'datetime.time' objects -> Add to date column
2. A 'date' column exists and the 'time' column contains strings like '13:30' or a datetime like 2020-01-01 13:30 (wrong date)
-> convert and add to date column
3. No 'date' column, and the 'time' column contains already a datetime or a string representation of a datetime
-> convert to datetime
"""
def convert_time_column(c: pd.Series) -> pd.Series:
"""
Converts a column to_datetime and raises a LogImportStructError on failure
"""
try:
return pd.to_datetime(c, dayfirst=True)
except:
raise LogImportStructError(f'The column {c.name} is not convertible to a date')
if type(df.time[0]) is datetime.time:
df['date'] = convert_time_column(df['date'])
df['time'] = [pd.Timestamp(datetime.datetime.combine(d, t)) for d, t in zip(df['date'], df['time'])]
elif 'date' in df.columns:
df['date'] = convert_time_column(df['date'])
df['time'] = convert_time_column(df['time'])
df['time'] = df.date + (df.time - df.time.dt.normalize())
else:
df['time'] = convert_time_column(df['time'])
class LogbookImport:
"""
Imports from a defined xls file messages to the logbook and append values to datasets
Structure of the table (case insensitive):
[Date] | Time | Site | Dataset | Value | Message | [LogType] | [Sample]
"""
def __init__(self, filename, user, sheetname=0):
self.filename = filename
self.dataframe = df = pd.read_excel(filename, sheetname)
# Convert all column captions to lower case
self.dataframe.columns = [c.lower() for c in self.dataframe.columns]
# Check if all columns are present
if not all(c in df.columns for c in "time|site|dataset|value|logtype|message".split('|')):
raise LogImportStructError('The log excel sheet misses some of the following columns: '
'time|site|dataset|value|logtype|message')
make_time_column_as_datetime(df)
# Convert site and dataset to int, just to be sure
for c in ('site', 'dataset'):
df[c] = df[c].astype('Int64')
with db.session_scope() as session:
_user: db.Person = session.query(db.Person).get(user)
if not _user:
raise RuntimeError('%s is not a valid user' % user)
else:
self.user = _user.username
def __call__(self, commit=False):
logs = []
has_error = False
with db.session_scope() as session:
with session.no_autoflush:
for row, data in self.dataframe.iterrows():
try:
log = dict(row=row + 1,
error=False,
log=self.importrow(session, row + 1, data, commit)
)
except LogImportRowError as e:
has_error = True
log = dict(row=row + 1,
error=True,
log=e.text)
logs.append(log)
if commit:
session.commit()
else:
session.rollback()
return logs, not has_error
def recordexists(self, timeseries, time, timetolerance=30):
"""
Checks if a record at time exists in dataset
:param timeseries: A timeseries to be checked
:param time: The time for the record
:param timetolerance: the tolerance of the time in seconds
"""
td = datetime.timedelta(seconds=timetolerance)
return timeseries.records.filter(
db.sql.between(db.Record.time,
time - td,
time + td)).count() > 0
def logexists(self, session, site, time, timetolerance=30):
"""
Checks if a log at site and time exists in db
session: an open sqlalchemy session
site: A site
time: The time for the log
timetolerance: the tolerance of the time in seconds
"""
td = datetime.timedelta(seconds=timetolerance)
return session.query(db.Log)\
.filter(db.Log._site == site,
db.sql.between(db.Log.time,
time - td,
time + td)).count() > 0
def get_dataset(self, session, row, data) -> db.Timeseries:
"""Loads the dataset from a row and checks if it is manually measured and at the correct site"""
ds = session.query(db.Dataset).get(data.dataset)
if not ds:
raise LogImportRowError(row, f'Dataset {data.dataset} does not exist')
# check dataset is manual measurement
if ds.source is None or ds.source.sourcetype != 'manual':
raise LogImportRowError(row, f'{ds} is not a manually measured dataset, '
'if the dataset is correct please change '
'the type of the datasource to manual'
)
# check site
if ds.site.id != data.site:
raise LogImportRowError(row, f'Dataset ds:{ds.id} is not located at #{data.site}')
return ds
def row_to_record(self, session, row, data):
"""
Creates a new db.Record object from a row of the log format data file
"""
# load and check dataset
ds = self.get_dataset(session, row, data)
time = data.time.to_pydatetime()
value = data.value
# Check for duplicate
if self.recordexists(ds, time):
raise LogImportRowError(row, f'{ds} has already a record at {time}')
# Check if the value is in range
if not ds.valuetype.inrange(value):
raise LogImportRowError(row, f'{value:0.5g}{ds.valuetype.unit} is not accepted for {ds.valuetype}')
# Create Record
comment = data.message if pd.notna(data.message) else None
sample = data.get('sample')
record = db.Record(value=value, time=time, dataset=ds, comment=comment, sample=sample)
# Extend dataset timeframe if necessary
ds.start, ds.end = min(ds.start, time), max(ds.end, time)
return record, f'Add value {value:g} {ds.valuetype.unit} to {ds} ({time})'
def row_to_log(self, session, row, data):
"""
Creates a new db.Log object from a row without dataset
"""
time = data.time.to_pydatetime()
site = session.query(db.Site).get(data.site)
user = session.query(db.Person).get(self.user)
if not site:
raise LogImportRowError(row, f'Log: Site #{data.site} not found')
if pd.isnull(data.get('message')):
raise LogImportRowError(row, 'No message to log')
if self.logexists(session, data.site, time):
raise LogImportRowError(
row, f'Log for {time} at {site} exists already')
else:
log = db.Log(user=user,
time=time,
message=data.get('message'),
type=data.get('logtype'),
site=site)
return log, f'Log: {log}'
def importrow(self, session: db.Session, row, data, commit=False):
"""
Imports a row from the log-excelfile, either as record to a dataset
or as a log entry. The decision is made on the basis of the data given.
"""
# Get time from row
if pd.isnull(data.time):
raise LogImportRowError(row, 'Time not readable')
if pd.isnull(data.site):
raise LogImportRowError(row, 'Site is missing')
result = msg = None
if pd.notna(data.dataset):
if pd.isnull(data.value):
raise LogImportRowError(row, f'No value given to store in ds:{data.dataset}')
# Dataset given, import as record
result, msg = self.row_to_record(session, row, data)
elif pd.notna(data.value):
raise LogImportRowError(row, 'A value is given, but no dataset to store it')
elif pd.notna(data.message):
# No dataset but Message is given -> import as log
result, msg = self.row_to_log(session, row, data)
if result and commit:
session.add(result)
return msg
if __name__ == '__main__':
import os
os.chdir('instances/schwingbach')
from odmf.dataimport import importlog as il
li = il.LogbookImport('datafiles/test_log.xls', 'philipp')
res, is_ok = li()
print(len(res), 'OK' if is_ok else 'NOT OK!')
```
#### File: odmf/dataimport/pandas_import.py
```python
from .base import ImportDescription, ImportColumn
import typing
from .. import db
import pandas as pd
import re
import datetime
from ..config import conf
from odmf.tools import Path
from logging import getLogger
logger = getLogger(__name__)
class DataImportError(RuntimeError):
...
class ColumnDataset:
"""
A combination of an ImportColumn and a Dataset, used for import.
The datasets are created in the db if the column is not for appending.
The datasets start and end time is already adjusted
"""
id: int
dataset: db.Dataset
column: ImportColumn
idescr: ImportDescription
record_count: int = 0
def __init__(self, session, idescr: ImportDescription, col: ImportColumn,
id: int, user: db.Person, site: db.Site, inst: db.Datasource,
valuetypes: typing.Dict[int, db.ValueType], raw: db.Quality,
start: datetime.datetime, end: datetime.datetime,
filename: typing.Optional[str] = None
):
self.column = col
self.idescr = idescr
assert not col.ds_column, "Cannot create a ColumnDataset for a column with variable dataset target"
if col.append:
try:
self.dataset: db.Timeseries = session.query(db.Dataset).get(int(col.append))
assert self.dataset.type == 'timeseries'
self.dataset.start = min(start, self.dataset.start)
self.dataset.end = max(end, self.dataset.end)
self.record_count = self.dataset.maxrecordid()
except (TypeError, ValueError):
raise DataImportError(
f'{idescr.filename}:{col.name} wants to append data ds:{col.append}. This dataset does not exist')
else:
# New dataset with metadata from above
self.dataset = db.Timeseries(
id=id, measured_by=user,
valuetype=valuetypes[col.valuetype],
site=site, name=col.name,
filename=filename, comment=col.comment, source=inst, quality=raw,
start=start, end=end, level=col.level,
access=col.access if col.access is not None else 1,
# Get timezone from descriptor or, if not present from global conf
timezone=idescr.timezone or conf.datetime_default_timezone,
project=idescr.project)
session.add(self.dataset)
self.id = self.dataset.id
def to_record_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:
"""
Prepares and populates a dataframe with the layout of the db.Record-Table
df: The dataframe containing all data to import
"""
# Remove all values, where one of the columns is outside its minvalue / maxvalue range
values_ok = check_column_values(self.column, df)
col_df = pd.DataFrame(df.time)
col_df = col_df[values_ok]
col_df['dataset'] = self.id
col_df['id'] = df[values_ok].index + self.record_count + 1
col_df['value'] = df[self.column.name]
if 'sample' in df.columns:
col_df['sample'] = df['sample']
return col_df
def __repr__(self):
return f'ColumnDataset: {self.column} -> ds:{self.dataset.id}'
def columndatasets_from_description(
session, idescr: ImportDescription,
user: str, siteid: int, filepath: Path = None,
start: datetime.datetime = None, end: datetime.datetime = None
) -> typing.List[ColumnDataset]:
"""
Creates fitting ColumnDataset combinations for an ImportDescription
"""
with session.no_autoflush:
# Get instrument, user and site object from db
inst = session.query(db.Datasource).get(idescr.instrument)
user = session.query(db.Person).get(user)
site = session.query(db.Site).get(siteid)
# Get "raw" as data quality, to use as a default value
raw = session.query(db.Quality).get(0)
# Get all the relevant valuetypes (vt) from db as a dict for fast look up
valuetypes = {
vt.id: vt for vt in
session.query(db.ValueType).filter(
db.ValueType.id.in_([col.valuetype for col in idescr.columns])
)
}
newid = db.newid(db.Dataset, session)
newdatasets = []
appendatasets = []
for col in idescr.columns:
if col.append:
appendatasets.append(
ColumnDataset(
session, idescr, col, None,
user, site, inst, valuetypes, raw,
start, end, filepath.name
)
)
elif not col.ds_column:
newdatasets.append(
ColumnDataset(
session, idescr, col,
newid + len(newdatasets),
user, site, inst, valuetypes, raw,
start, end, filepath.name
)
)
return appendatasets + newdatasets
def _make_time_column_as_datetime(df: pd.DataFrame, fmt=None):
"""
Converts the time column to a datetime
Possible cases:
1. The 'time' column contains 'datetime.time' objects -> Add to date column
2. A 'date' column exists and the 'time' column contains strings like '13:30' or like 2020-01-01 13:30 (wrong date)
-> convert and add to date column
3. No 'date' column, and the 'time' column contains already a datetime or a string representation of a datetime
-> convert to datetime
"""
def convert_time_column(c: pd.Series) -> pd.Series:
"""
Converts a column to_datetime and raises a LogImportStructError on failure
"""
# First try the given format, second try with a "free" format. Needed eg. for a two column format
for timeformat in [fmt, None]:
try:
return pd.to_datetime(c, dayfirst=True, infer_datetime_format=True, format=timeformat)
except Exception as e: # difficult to get more specific, as Pandas Exception model is a bit strange
# Some sensors believe 24:00 is a valid time, pandas not
if any('24:' in a for a in e.args): # Checks if the error message contains 24
# Deal with 24:00 in a datetime string
problems = c.str.contains('24:00') # Mark the problems
# Make dates by replacing 24:00 with 00:00
changed = pd.to_datetime(c.str.replace('24:00', '00:00')) # Convert to datetime
# Change date from eg. 2.12.2020 24:00 -> 3.12.2020 00:00
changed[problems] += datetime.timedelta(days=1)
return changed
raise DataImportError(f'The column {c.name} is not convertible to a date')
if 'time' in df.columns:
if type(df['time'][0]) is datetime.time:
df['date'] = convert_time_column(df['date'])
df['time'] = [pd.Timestamp(datetime.datetime.combine(d, t)) for d, t in zip(df['date'], df['time'])]
else:
df['date'] = convert_time_column(df['date'])
df['time'] = convert_time_column(df['time'])
df['time'] = df['date'] + (df['time'] - df['time'].dt.normalize())
else:
df['time'] = convert_time_column(df['date'])
del df['date']
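# Illustrative note (added, not part of the original module) for the cases above:
#   date='02.01.2020', time=datetime.time(13, 30)  -> time=2020-01-02 13:30
#   date='02.01.2020', time='13:30'                -> time=2020-01-02 13:30
#   only date='02.01.2020 13:30', no time column   -> time=2020-01-02 13:30, date dropped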
def check_column_values(col: ImportColumn, df: pd.DataFrame):
try:
return df[col.name].between(col.minvalue, col.maxvalue)
except TypeError:
return pd.Series(False, index=df.index)
def _load_excel(idescr: ImportDescription, filepath: Path) -> pd.DataFrame:
"""
loads data from excel, called by load_dataframe
"""
columns, names = idescr.get_column_names()
try:
df = pd.read_excel(
filepath.absolute,
sheet_name=idescr.worksheet or 0,
header=None,
skiprows=idescr.skiplines, skipfooter=idescr.skipfooter,
na_values=idescr.nodata
)
df = df[columns]
df.columns = names
return df
except FileNotFoundError:
raise DataImportError(f'{filepath} does not exist')
except Exception as e:
raise DataImportError(f'{filepath} read error. Is this an excel file? Underlying message: {str(e)}')
def _load_csv(idescr: ImportDescription, filepath: Path) -> pd.DataFrame:
"""
Loads the data from a csv like file
called by load_dataframe
"""
encoding = idescr.encoding or 'utf-8'
columns, names = idescr.get_column_names()
try:
df = pd.read_csv(
filepath.absolute, header=None,
skiprows=idescr.skiplines, skipfooter=idescr.skipfooter or 0,
delimiter=idescr.delimiter, decimal=idescr.decimalpoint,
na_values=idescr.nodata, skipinitialspace=True,
encoding=encoding, engine='python', quotechar='"'
)
df = df[columns]
df.columns = names
return df
except FileNotFoundError:
raise DataImportError(f'{filepath} does not exist')
except UnicodeDecodeError:
raise DataImportError(
f'{filepath} could not be read as {encoding} encoding. Specify correct encoding (eg. windows-1252) in {idescr.filename}'
)
except Exception as e:
raise DataImportError(f'{filepath} read error. Is this a delimited text file? Underlying message: {str(e)}')
def load_dataframe(
idescr: ImportDescription,
filepath: typing.Union[Path, str]
) -> pd.DataFrame:
"""
Loads a pandas dataframe from a data file (csv or xls[x]) using an import description
"""
if type(filepath) is not Path:
filepath = Path(filepath)
if re.match(r'.*\.xls[xmb]?$', filepath.name):
df = _load_excel(idescr, filepath)
if df.empty:
raise DataImportError(
f'No data to import found in {filepath}. If this file was generated by a third party program '
f'(eg. logger software), open in excel and save as a new .xlsx - file')
else:
df = _load_csv(idescr, filepath)
_make_time_column_as_datetime(df, idescr.dateformat)
for col in idescr.columns:
# Apply the difference operation
if col.difference:
df[col.name] = df[col.name].diff()
try:
df[col.name] *= col.factor
except TypeError:
...
return df
def get_statistics(idescr: ImportDescription, df: pd.DataFrame) \
-> typing.Tuple[typing.Dict[str, typing.Dict[str, float]], datetime.datetime, datetime.datetime]:
"""
Creates some statistics for a dataframe
"""
res = {}
startdate = df['time'].min().to_pydatetime()
enddate = df['time'].max().to_pydatetime()
for col in idescr.columns:
s = df[col.name]
res[col.name] = {}
# If the column is not float some of the stats don't make sense, just skip them
try:
res[col.name]['start'] = startdate
res[col.name]['end'] = enddate
res[col.name]['n'] = s.size
res[col.name]['n_out_of_range'] = len(s) - check_column_values(col, df).sum()
res[col.name]['min'] = s.min()
res[col.name]['max'] = s.max()
res[col.name]['sum'] = s.sum()
res[col.name]['mean'] = s.mean()
except (TypeError, ValueError):
...
return res, startdate, enddate
def get_dataframe_for_ds_column(session, column: ImportColumn, data: pd.DataFrame):
"""
To be used for columns with ds_column:
Creates a dataframe in the layout of the record table and adjusts all start / end dates of the fitting datasets
------------- Untested -----------------
"""
assert column.ds_column, "no ds_column available"
missing_ds = []
ds_ids = data['dataset for ' + column.name]
newids = {}
def get_newid_range(ds: db.Timeseries):
start = ds.maxrecordid() + 1
end = start + (ds_ids == ds.id).sum()
return range(start, end)
for dsid in ds_ids.unique():
# type(dsid) --> np.int64
dsid = int(dsid)
# int conversion is necessary to prevent
# (psycopg2.ProgrammingError) can't adapt type 'numpy.int64'
ds = session.query(db.Dataset).get(dsid)
if ds:
# Filter data for the current ds
ds_data = data[ds_ids == dsid]
newids[dsid] = get_newid_range(ds)
ds.start = min(ds.start, ds_data.date.min().to_pydatetime())
ds.end = max(ds.end, ds_data.date.max().to_pydatetime())
else:
missing_ds.append(dsid)
if missing_ds:
raise DataImportError(f'{column.name} misses the following datasets {missing_ds!s}.')
col_df = pd.DataFrame(data.time)
col_df['dataset'] = data['dataset for ' + column.name]
col_df['id'] = data.index
col_df['value'] = data[column.name]
if 'sample' in data.columns:
col_df['sample'] = data['sample']
return col_df[~pd.isna(col_df['value'])]
def _get_recordframe(session: db.Session, idescr: ImportDescription,
datasets: typing.List[ColumnDataset], df: pd.DataFrame):
"""
Returns a single dataframe in the Layout of the record table including all records to export
"""
return pd.concat([
cds.to_record_dataframe(df)
for cds in datasets
] + [
get_dataframe_for_ds_column(session, col, df)
for col in idescr.columns
if col.ds_column
])
def submit(session: db.Session, idescr: ImportDescription, filepath: Path, user: str, siteid: int):
"""
Loads tabular data from a file, creates or loads necessary datasets and imports the data as records
"""
messages = []
df = load_dataframe(idescr, filepath)
logger.debug(f'loaded {filepath}, got {len(df)} rows with {len(df.columns)} columns')
if len(df) == 0:
raise DataImportError(f'No records to import from {filepath} with {idescr.filename}.')
# Load all datasets for appending and create new datasets
datasets = columndatasets_from_description(
session, idescr, user=user,
siteid=siteid, filepath=filepath,
start=df.time.min().to_pydatetime(),
end=df.time.max().to_pydatetime()
)
# make datasets available in the session
session.flush()
logger.debug(f'created or referenced {len(datasets)} datasets')
messages.extend(
f'ds{cds.id} : Import {cds.column} from file:{filepath}'
for cds in datasets
)
recordframe = _get_recordframe(session, idescr, datasets, df)
logger.info(f'insert {len(recordframe)} records into {len(recordframe.dataset.unique())} datasets')
conn = session.connection()
recordframe.to_sql('record', conn, if_exists='append', index=False, method='multi', chunksize=1000)
return messages
```
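A hedged driver sketch for the import pipeline above (hypothetical, not part of the odmf sources); the `ImportDescription` value is only indicated with `...` because its loaders live in `.base`, which is not shown here, and the file name is made up:
```python
# Hypothetical driver: load a data file, inspect it, then import it as records.
from odmf import db
from odmf.tools import Path
from odmf.dataimport.pandas_import import load_dataframe, get_statistics, submit

idescr = ...                                  # an ImportDescription from .base (not shown)
filepath = Path('lab/site7/logger_2021.csv')  # hypothetical file below conf.datafiles
df = load_dataframe(idescr, filepath)
stats, start, end = get_statistics(idescr, df)
with db.session_scope() as session:
    for msg in submit(session, idescr, filepath, user='philipp', siteid=7):
        print(msg)
```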
#### File: odmf/db/base.py
```python
import sqlalchemy as sql
import sqlalchemy.orm as orm
from sqlalchemy.ext.declarative import declarative_base
import os.path as op
from ..config import conf
from contextlib import contextmanager
from logging import info
from functools import total_ordering
def newid(cls, session):
"""Creates a new id for all mapped classes with an field called id, which is of integer type"""
max_id = session.query(sql.func.max(cls.id)).select_from(cls).scalar()
if max_id is not None:
return max_id + 1
else:
return 1
def connect():
info(f"Connecting with database {conf.database_name} at {conf.database_host} ..." )
import psycopg2
return psycopg2.connect(user=conf.database_username,
host=conf.database_host,
password=conf.database_password,
database=conf.database_name)
# FIXME: allow test suite to load sqlite
# TODO: allow test suite to load postgres and import all sql files (compliance test for sql)
if conf.database_type == 'postgres':
engine = sql.create_engine('postgresql://', creator=connect)
elif conf.database_type == 'postgres-local':
engine = sql.create_engine(f'postgresql:///{conf.database_name}')
elif conf.database_type == 'sqlite':
if op.exists(conf.sqlite_path):
engine = sql.create_engine('sqlite:///%s' % conf.sqlite_path)
else:
raise RuntimeError('Couldn\'t find offline database at \'%s\'.' % conf.sqlite_path)
Session = orm.sessionmaker(bind=engine)
@contextmanager
def session_scope() -> orm.Session:
"""Provide a transactional scope around a series of operations."""
session = Session()
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
def table(obj) -> sql.Table:
"""
Returns the sql.Table of a ORM object
"""
try:
return getattr(obj, '__table__')
except AttributeError:
raise TypeError(f'{obj!r} is not a mapper class')
@total_ordering
class Base(object):
"""Hooks into SQLAlchemy's magic to make :meth:`__repr__`s."""
def __repr__(self):
def reprs():
for col in table(self).c:
try:
yield col.name, str(getattr(self, col.name))
except Exception as e:
yield col.name, f'<unknown value: {type(e)}>'
def formats(seq):
for key, value in seq:
yield f'{key}={value}'
args = ', '.join(formats(reprs()))
classy = type(self).__name__
return f'{classy}({args})'
def __lt__(self, other):
if isinstance(other, type(self)) and hasattr(self, 'id'):
return self.id < other.id
else:
raise TypeError(
f'\'<\' not supported between instances of {self.__class__.__name__} and {other.__class__.__name__}')
def __eq__(self, other):
return hash(self) == hash(other)
def __hash__(self):
return hash(repr(self.__class__.__name__))
def session(self):
return Session.object_session(self)
@classmethod
def query(cls, session):
return session.query(cls)
@classmethod
def get(cls, session, id):
return session.query(cls).get(id)
Base = declarative_base(cls=Base)
metadata = Base.metadata
def primarykey():
return sql.Column(sql.Integer, primary_key=True)
def stringcol():
return sql.Column(sql.String)
class ObjectGetter:
"""
A helper class for interactive environments for simple access to orm-objects
Usage:
>>> ds = ObjectGetter(db.Dataset, session)
>>> print(ds[10])
>>> ds.q.filter_by(measured_by='philipp')
"""
def __init__(self, cls: type, session: orm.Session):
self.cls = cls
self.session = session
@property
def q(self) -> orm.Query:
return self.session.query(self.cls)
def __getitem__(self, item):
return self.q.get(item)
def __repr__(self):
return 'ObjectGetter(' + self.cls.__name__ + ')'
```
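For illustration, a hypothetical snippet (not part of the odmf sources) combining the helpers defined above:
```python
# Hypothetical sketch: transactional session, ObjectGetter shortcut and newid().
from odmf import db

with db.session_scope() as session:
    ds = db.base.ObjectGetter(db.Dataset, session)
    print(ds[1])                          # Dataset with id 1, or None
    print(db.newid(db.Dataset, session))  # next free integer id for the table
```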
#### File: odmf/odmf/odmf.py
```python
import click
import humanize
import sys
import os
from textwrap import dedent
import logging
logger = logging.getLogger(__name__)
@click.group()
def cli():
...
@cli.command()
@click.argument('workdir', default='.')
@click.option('--autoreload/--no-autoreload', '-a', default=False, show_default=True,
help='Switch autoreload on for an automatic restart of the server if the code changes')
def start(workdir, autoreload):
"""
Starts a cherrypy server, with WORKDIR as the working directory (local resources and configuration)
"""
os.chdir(workdir)
# coloredlogs.install(level='DEBUG', stream=sys.stdout)
logger.info(f"interpreter: {sys.executable}")
logger.info(f"workdir: {os.getcwd()}")
from .tools import server
server.prepare_workdir()
server.start(autoreload)
@cli.command()
@click.option('--dbname', help='Name of the database', prompt='database name')
@click.option('--dbuser', help='Name of the database user', prompt='database user', default='')
@click.option('--dbpass', help='Password for the user', prompt='database password', default='')
@click.option('--dbhost', default='',
help='IP-Adress or DNS-Hostname of the database host. Default: localhost', prompt='database hostname:')
@click.option('--port', default=8080, help='Port to run the standalone server', type=int, prompt='server port')
def configure(dbname, dbuser, dbpass, dbhost, port):
"""
Creates a new configuraton file (./config.yml) using the given database credentials.
"""
if dbuser and not dbhost:
dbhost = '127.0.0.1'
new_config = dict(database_name=dbname, database_username=dbuser, database_password=dbpass,
database_host=dbhost, server_port=port)
import yaml
from pathlib import Path
conf_file = Path('config.yml')
with conf_file.open('w') as f:
yaml.dump(new_config, stream=f)
from .config import conf
conf.to_yaml(conf_file.open('w'))
print('New config.yml written')
@cli.command()
def systemd_unit():
"""
Creates a systemd service file and a /etc/sudoers.d file to allow non-sudoers to start / restart / stop the service
"""
from .tools.systemctl import make_service
# Writes the service files and returns a text explaining how to install the systemd service
print(make_service())
@cli.command()
@click.option('--new_admin_pass', '-p', help='Password of the new admin',
prompt=True, hide_input=True, confirmation_prompt=True)
def make_db(new_admin_pass):
"""
Creates in the database: all tables, a user odmf.admin
and fills the data-quality table with some usable input
"""
from .tools import create_db as cdb
cdb.create_all_tables()
print('created tables')
cdb.add_admin(new_admin_pass)
print('created admin user odmf.admin')
cdb.add_quality_data(cdb.quality_data)
print('added quality levels')
@cli.command()
def apache2_conf():
"""
Creates an apache2 .conf file to run this odmf instance as a wsgi server.
Use as:
"""
from .config import conf
name = conf.root_url.replace('/', '')
txt = dedent(f'''
ProxyPass {conf.root_url} http://127.0.0.1:{conf.server_port}{conf.root_url}
ProxyPassReverse /{conf.root_url}/ http://127.0.0.1:{conf.server_port}{conf.root_url}
''')
with open(f'odmf-{name}.conf', 'w') as f:
f.write(txt)
@cli.command()
def test_config():
"""
Tests the configuration and prints it, if it works
"""
from .config import conf
conf.to_yaml()
@cli.command()
def test_db():
"""
Tests if the system can be connected to the database
"""
from . import db
import sqlalchemy.orm as orm
with db.session_scope() as session:
tables = [
(n, c)
for n, c in vars(db).items()
if (isinstance(c, type) and
issubclass(c, db.Base) and
c is not db.Base
)
]
for name, table in tables:
print(f'db.{name}: ', end='')
q: orm.Query = session.query(table)
print(f'{humanize.intword(q.count())} {name} objects in database', end=' - ')
print(repr(q.first()))
@cli.command()
def test_static():
from pathlib import Path
from .config import conf
candidates = Path(sys.prefix), Path(__file__).parents[2], Path(conf.static)
for c in candidates:
p = c / 'odmf.static'
if p.exists():
if all((p / d).exists() for d in ('templates', 'datafiles', 'media')):
logger.info(f'OK: Global static files found at: {p}\n')
break
else:
logger.warning(f'Incomplete static file directory found at: {p}, searching further\n')
else:
logger.warning(f'{p} - does not exist\n')
@cli.command()
@click.argument('filename')
def import_config(filename):
"""
Imports a configuration from a conf.py file
"""
from .config import import_module_configuration
conf = import_module_configuration(filename)
conf.to_yaml(open('config.yml', 'w'))
@cli.command()
@click.option('--only_navigatable/--any', '-n', default=False)
@click.option('--level', '-l', type=int, help='Admission level (0-4)', default=0)
def uri_tree(only_navigatable, level):
"""
Prints the tree of available resources of odmf
"""
import yaml
from .webpage import Root
from .webpage.lib import Resource
if not only_navigatable:
level = None
res = Resource(Root()).create_tree(navigatable_for=level, recursive=True)
for r in res.walk():
print(f'{r.uri}: {r.doc}')
@cli.command()
def interactive():
"""
Launches an IPython shell with odmf related symbols. Needs IPython
"""
from textwrap import dedent
from IPython import embed
from .config import conf
from . import db
import pandas as pd
import numpy as np
greeting = """
Imported modules
----------------
pd, np, conf, db
Defined symbols
---------------
session: a SQLAlchemy session to load Database objects
q: a shortcut for session.query
ds: An ObjectGetter for datasets
person: An ObjectGetter for persons
site: An ObjectGetter for sites
Usage of a ObjectGetters:
Get dataset with id=1
>>>ds_one = ds[1]
Query sites:
>>>site.q.filter(db.Site.lat > 50.5).count()
"""
with db.session_scope() as session:
q = session.query
ds = db.base.ObjectGetter(db.Dataset, session)
person = db.base.ObjectGetter(db.Person, session)
site = db.base.ObjectGetter(db.Site, session)
embed(colors='Neutral', header=dedent(greeting))
@cli.command()
@click.option('--verbose/--terse', '-v', default=False)
def version(verbose: bool):
"""
Prints the actual odmf version
"""
from . import __version__
print('odmf', __version__)
if verbose:
import sys
print('Python executable:', sys.executable)
print('Python version:', sys.version)
if __name__ == '__main__':
cli()
```
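Because the commands above are plain click commands, they can also be driven in-process; a hypothetical snippet (not part of the odmf sources) using click's test runner:
```python
# Hypothetical sketch: invoke the click CLI defined above without a shell.
from click.testing import CliRunner
from odmf.odmf import cli

runner = CliRunner()
result = runner.invoke(cli, ['version', '--verbose'])
print(result.exit_code, result.output)
```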
#### File: odmf/plot/draw_plotly.py
```python
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import io
from . import Plot, Line
from ..config import conf
def _draw_line(line: Line, start, end) -> go.Scatter:
data = line.load(start, end)
mode = ''
linestyle = None
marker = None
if line.linestyle:
mode = 'lines'
dash_dict = {'-': 'solid', ':': 'dot', '.': 'dot', '--': 'dash', '-.': 'dashdot'}
linestyle = {'color': line.color, 'dash': dash_dict[line.linestyle], 'width': line.linewidth}
if line.marker:
mode = 'lines+markers' if mode else 'markers'
symboldict = {'o': 'circle', 'x': 'x-thin', ',': 'line-ns', '+': 'cross-thin', '*': 'asterisk', '.': 'circle'}
if line.marker in symbols:
symbol = line.marker
else:
symbol = symboldict.get(line.marker, 'circle')
marker = {'color': line.color, 'symbol': symbol}
return go.Scatter(x=data.index, y=data, mode=mode, line=linestyle, marker=marker, name=line.name)
def _make_figure(plot: Plot) -> go.Figure:
rows = -(-len(plot.subplots) // plot.columns)
fig = make_subplots(rows, plot.columns, shared_xaxes=True)
subplot_positions = sum(([i] * len(sp.lines) for i, sp in enumerate(plot.subplots)), [])
rows = [1 + i // plot.columns for i in subplot_positions]
cols = [1 + i % plot.columns for i in subplot_positions]
for i, sp in enumerate(plot.subplots):
row, col = 1 + i // plot.columns, 1 + i % plot.columns
if sp.ylim:
fig.update_yaxes(range=list(sp.ylim), row=row, col=col)
fig.update_yaxes()
fig.add_traces(
[
_draw_line(l, plot.start, plot.end)
for l in plot.lines()
],
rows=rows,
cols=cols
)
fig.update_yaxes()
fig.update_layout(width=plot.size[0], height=plot.size[1], template='none')
return fig
def to_image(plot: Plot, format: str) -> bytes:
"""
Draws the plot and returns a byte string containing the image
"""
fig = _make_figure(plot)
return fig.to_image(format=format)
def to_html(plot: Plot)->bytes:
"""
Draws the plot to include into an html page, here as svg.
Alternative could be as an <img> element with base64 data
"""
fig = _make_figure(plot)
return fig.to_html(include_plotlyjs=conf.root_url + '/media/lib/plotly.min.js').encode('utf-8')
symbols = [
"circle", "circle-open", "circle-dot", "circle-open-dot",
"square", "square-open", "square-dot", "square-open-dot",
"diamond", "diamond-open", "diamond-dot", "diamond-open-dot",
"cross", "cross-open", "cross-dot", "cross-open-dot", "x",
"x-open", "x-dot", "x-open-dot", "triangle-up",
"triangle-up-open", "triangle-up-dot", "triangle-up-open-dot",
"triangle-down", "triangle-down-open", "triangle-down-dot",
"triangle-down-open-dot", "triangle-left", "triangle-left-open",
"triangle-left-dot", "triangle-left-open-dot", "triangle-right",
"triangle-right-open", "triangle-right-dot", "triangle-right-open-dot",
"triangle-ne", "triangle-ne-open", "triangle-ne-dot",
"triangle-ne-open-dot", "triangle-se", "triangle-se-open",
"triangle-se-dot", "triangle-se-open-dot", "triangle-sw", "triangle-sw-open", "triangle-sw-dot", "triangle-sw-open-dot" ,
"triangle-nw", "triangle-nw-open", "triangle-nw-dot" ,
"triangle-nw-open-dot", "pentagon", "pentagon-open", "pentagon-dot",
"pentagon-open-dot", "hexagon", "hexagon-open", "hexagon-dot", "hexagon-open-dot", "hexagon2", "hexagon2-open", "hexagon2-dot", "hexagon2-open-dot", "octagon", "octagon-open",
"octagon-dot", "octagon-open-dot", "star", "star-open",
"star-dot", "star-open-dot", "hexagram", "hexagram-open",
"hexagram-dot", "hexagram-open-dot", "star-triangle-up", "star-triangle-up-open", "star-triangle-up-dot", "star-triangle-up-open-dot" ,
"star-triangle-down", "star-triangle-down-open", "star-triangle-down-dot",
"star-triangle-down-open-dot", "star-square", "star-square-open", "star-square-dot", "star-square-open-dot", "star-diamond" ,
"star-diamond-open", "star-diamond-dot", "star-diamond-open-dot" ,
"diamond-tall", "diamond-tall-open", "diamond-tall-dot" ,
"diamond-tall-open-dot", "diamond-wide", "diamond-wide-open" ,
"diamond-wide-dot", "diamond-wide-open-dot", "hourglass" ,
"hourglass-open", "bowtie", "bowtie-open", "circle-cross" ,
"circle-cross-open", "circle-x", "circle-x-open", "square-cross" ,
"square-cross-open", "square-x", "square-x-open", "diamond-cross",
"diamond-cross-open", "diamond-x", "diamond-x-open",
"cross-thin", "cross-thin-open", "x-thin", "x-thin-open",
"asterisk", "asterisk-open", "hash", "hash-open",
"hash-dot", "hash-open-dot", "y-up", "y-up-open", "y-down",
"y-down-open", "y-left", "y-left-open", "y-right", "y-right-open", "line-ew", "line-ew-open", "line-ns", "line-ns-open", "line-ne", "line-ne-open", "line-nw" ,
"line-nw-open", "arrow-up", "arrow-up-open", "arrow-down" ,
"arrow-down-open", "arrow-left", "arrow-left-open", "arrow-right" ,
"arrow-right-open", "arrow-bar-up", "arrow-bar-up-open" ,
"arrow-bar-down", "arrow-bar-down-open", "arrow-bar-left" ,
"arrow-bar-left-open", "arrow-bar-right", "arrow-bar-right-open"
]
```
#### File: odmf/plot/plot.py
```python
from datetime import datetime, timedelta
from .. import db
def asdict(obj):
"""
Creates a dictionary representation from an object
"""
if hasattr(obj, '__jdict__'):
return obj.__jdict__()
elif (not type(obj) is dict) and hasattr(obj, '__iter__'):
return [asdict(o) for o in obj]
elif hasattr(obj, 'isoformat'):
return obj.isoformat()
else:
return obj
class Line:
"""
Represents a single line of a subplot
"""
def __init__(self, subplot, valuetype, site, instrument=None, level=None,
color='', marker='', linestyle='-', linewidth=1,
transformation=None, aggregatefunction='mean', name=None):
"""
Create a Line:
@param subplot: The Subplot to which this line belongs
@param valuetype: The valuetype id of the line
@param site: the site id of the line
@param instrument: the instrument of the line
@param color: the color of the line (k,b,g,r,y,m,c)
@param linestyle: the line style (-,--,:,-.)
@param marker: the marker of the line data points (o,x,+,|,. etc.)
@param transformation: Not used
"""
self.subplot = subplot
self.marker = marker
self.color = color
self.linewidth = linewidth
self.linestyle = linestyle
self.valuetypeid = valuetype
self.siteid = site
self.instrumentid = instrument
self.level = level
self.transformation = transformation
self.aggregatefunction = aggregatefunction
self.name = name or self.generate_name()
if not (linestyle or marker):
raise ValueError('Lines need either a linestyle or a marker for the creation')
def generate_name(self):
"""
Generates a name for the line from its meta data
"""
with db.session_scope() as session:
instrument = self.instrument(session)
valuetype = self.valuetype(session)
if instrument:
name = '%s at #%i%s using %s' % (valuetype, self.siteid,
'(%gm)' % self.level if self.level is not None else '',
instrument.name)
else:
name = '%s at #%i%s' % (valuetype, self.siteid,
'(%gm)' % self.level if self.level is not None else '')
if self.subplot.plot.aggregate:
name += ' (%s/%s)' % (self.aggregatefunction,
self.subplot.plot.aggregate)
return name
def getdatasets(self, session, userlevel=10):
"""
Loads the datasets for this line
"""
datasets = session.query(db.Dataset).filter(
db.Dataset._valuetype == self.valuetypeid,
db.Dataset._site == self.siteid,
db.Dataset.start <= self.subplot.plot.end,
db.Dataset.end >= self.subplot.plot.start,
db.Dataset.access <= userlevel
)
if self.instrumentid:
datasets = datasets.filter(db.Dataset._source == self.instrumentid)
if self.level is not None:
datasets = datasets.filter(db.Dataset.level == self.level)
return datasets.order_by(db.Dataset.start).all()
def load(self, start=None, end=None):
"""
Loads the records into an array
"""
with db.session_scope() as session:
start = start or self.subplot.plot.start
end = end or self.subplot.plot.end
datasets = self.getdatasets(session)
if not datasets:
raise ValueError("No data to compute")
group = db.DatasetGroup([ds.id for ds in datasets], start, end)
series = group.asseries(session, self.name)
if self.subplot.plot.aggregate:
sampler = series.resample(self.subplot.plot.aggregate)
series = sampler.aggregate(self.aggregatefunction or 'mean')
# There were problems with arrays from length 0
if series.empty:
raise ValueError("No data to compute")
series.name = str(self)
return series
def valuetype(self, session):
return session.query(db.ValueType).get(
int(self.valuetypeid)) if self.valuetypeid else None
def site(self, session):
return session.query(db.Site).get(int(self.siteid)) if self.siteid else None
def instrument(self, session):
return session.query(db.Datasource).get(
int(self.instrumentid)) if self.instrumentid else None
def export_csv(self, stream, start=None, end=None):
"""
Exports the line as csv file
"""
data = self.load(start, end)
data.to_csv(stream, encoding='utf-8-sig', index_label='time')
def __jdict__(self):
"""
Returns a dictionary of the line
"""
return dict(valuetype=self.valuetypeid or None,
site=self.siteid or None,
instrument=self.instrumentid or None,
level=self.level,
color=self.color, linestyle=self.linestyle, marker=self.marker, linewidth=self.linewidth,
transformation=self.transformation,
aggregatefunction=self.aggregatefunction, name=self.name)
def __str__(self):
"""
Returns a string representation
"""
return self.name
def __repr__(self):
return f"plot.Line({self.valuetypeid}@#{self.siteid},'{self.color}{self.linestyle}{self.marker}')"
class Subplot:
"""
Represents a subplot of the plot
"""
def __init__(self, plot, ylim=None, logsite: int=None, ylabel=None, lines=None):
"""
Create the subplot with Plot.addtimeplot
"""
self.plot = plot
self.lines = [
Line(self, **ldict)
for ldict in lines
]
self.ylim = ylim
self.logsite = logsite
self.ylabel = ylabel
def __iter__(self):
return iter(self.lines)
def get_logs(self):
with db.session_scope() as session:
# Get logbook entries for logsite during the plot-time
logs = session.query(db.Log).filter_by(_site=self.logsite).filter(
db.Log.time >= self.plot.start).filter(db.Log.time <= self.plot.end)
return [
(log.time, log.type, str(log))
for log in logs
]
def get_ylabel(self):
"""
Gets the label for the y axis of the subplot
"""
if self.ylabel:
return self.ylabel
elif self.lines:
with db.session_scope() as session:
l = self.lines[0]
valuetype = session.query(db.ValueType).get(l.valuetypeid)
return f'{valuetype.name} [{valuetype.unit}]'
else:
return 'unknown'
def __jdict__(self):
"""
Returns a dictionary with the properties of this plot
"""
return dict(lines=asdict(self.lines),
ylim=self.ylim, logsite=self.logsite)
class Plot:
"""
Represents a full plot (matplotlib figure)
"""
def __init__(self, height=None, width=None, columns=None, start=None, end=None, **kwargs):
"""
@param size: A tuple (width,height), the size of the plot in inches (with 100dpi)
@param columns: number of subplot columns
@param start: Date for the beginning x axis
@param end: Date of the end of the x axis
"""
self.start = start or datetime.today() - timedelta(days=365)
self.end = end or datetime.today()
self.size = (width or 640, height or 480)
self.columns = columns or 1
self.subplots = []
self.name = kwargs.pop('name', '')
self.aggregate = kwargs.pop('aggregate', '')
self.description = kwargs.pop('description', '')
self.subplots = [
Subplot(self, **spargs)
for i, spargs in enumerate(kwargs.pop('subplots', []))
]
self.args = kwargs
def lines(self):
return [line for sp in self.subplots for line in sp.lines]
def fontsize(self, em):
"""
Returns the fontsize relative to the figure height. 1 em equals 1/60 of the height
"""
return em * self.size[1] / 60
def __jdict__(self):
"""
Creates a dictionary with all properties of the plot, the subplots and their lines
"""
return dict(width=self.size[0], height=self.size[1], columns=self.columns,
start=self.start, end=self.end,
subplots=asdict(self.subplots),
aggregate=self.aggregate,
description=self.description)
```
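A hypothetical sketch (not part of the odmf sources) of how a `Plot` description might be built and serialized; the module path and the explicit line `name` (which avoids the database lookup in `generate_name`) are assumptions:
```python
# Hypothetical sketch: describe a plot and dump it back to a dict.
from odmf.plot.plot import Plot, asdict

plot = Plot(
    width=800, height=300, columns=1,
    subplots=[{
        'lines': [{
            'valuetype': 1, 'site': 7, 'color': 'b', 'linestyle': '-',
            'name': 'water level at #7',   # explicit name, so no db session is needed
        }],
    }],
)
print(asdict(plot))   # width, height, columns, start, end, subplots, ...
```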
#### File: odmf/tools/create_db.py
```python
import sys
sys.path.append('.')
from odmf import db
from getpass import getpass
from logging import warning
def create_all_tables():
"""
Creates all database table necessary for the database from the codebase
:return:
"""
db.Base.metadata.create_all(db.engine)
def add_admin(password=None):
"""
Add an odmf.admin role to the person table
:param password: The password of the admin. If missing you will be prompted
:return:
"""
from odmf.webpage.auth import hashpw
password = password or getpass("Enter admin password:")
with db.session_scope() as session:
if session.query(db.Person).get('odmf.admin'):
warning('odmf.admin exists already')
else:
user = db.Person(username='odmf.admin', firstname='odmf', surname='admin',
access_level=4)
user.password = hashpw(password)
session.add(user)
def add_quality_data(data):
"""
Adds the data quality items to the Quality Table
:param data: A list of dicts (quality_data below)
"""
with db.session_scope() as session:
for q in data:
session.add(db.Quality(**q))
quality_data = [
{
"comment": "Raw, unprocessed data",
"name": "raw",
"id": 0
},
{
"comment": "Raw data, but with adjustments to the data format (eg. date and timestamps corrected, NoData changed to Null)",
"name": "formal checked",
"id": 1
},
{
"comment": "Checked and recommended for further processing",
"name": "quality checked ok",
"id": 2
},
{
"comment": "Calculated value",
"name": "derived value",
"id": 10
},
{
"comment": "Dataset is calibrated against manual measurements",
"name": "calibrated",
"id": 3
}
]
if __name__ == '__main__':
create_all_tables()
add_admin()
add_quality_data(quality_data)
```
#### File: odmf/tools/__init__.py
```python
from __future__ import annotations
import os
import os.path as op
import typing
from ..config import conf
__all__ = ['mail', 'Path']
class Path(object):
def __init__(self, *path: str):
self.datapath = op.realpath(conf.datafiles)
if path:
if str(path[0]).startswith('/'):
self.absolute = op.realpath(op.join(*path))
else:
self.absolute = op.realpath(op.join(self.datapath, *path))
self.name = op.relpath(self.absolute, self.datapath).replace('\\', '/')
else:
self.absolute = self.datapath
self.name = '/'
@property
def basename(self)->str:
return op.basename(self.absolute)
@property
def href(self)->str:
return f'{conf.root_url}/download/{self.name}'
@property
def markdown(self)->str:
if self.islegal():
return 'file:' + str(self)
else:
return ''
@property
def raw_url(self)->str:
return f'{conf.root_url}/datafiles/{self.name}'
def __bool__(self):
return op.exists(self.absolute)
def __str__(self):
return self.name
def formatsize(self)->str:
size = op.getsize(self.absolute)
unit = 0
units = "B KB MB GB".split()
while size > 1024 and unit < 3:
size = size / 1024.
unit += 1
return "%5.4g %s" % (size, units[unit])
def islegal(self) -> bool:
return self.absolute.startswith(self.datapath)
def __lt__(self, other):
return ('%s' % self) < ('%s' % other)
def __eq__(self, other):
return ('%s' % self) == ('%s' % other)
def __gt__(self, other):
return ('%s' % self) > ('%s' % other)
def __add__(self, fn):
return Path(op.join(self.absolute, fn))
def make(self):
os.makedirs(self.absolute, mode=0o770)
def breadcrumbs(self) -> str:
res = [self]
p = op.dirname(self.absolute)
while self.datapath in p:
res.insert(0, Path(p))
p = op.dirname(p)
return res
def child(self, filename):
return Path(op.join(self.absolute, filename))
def isdir(self):
return op.isdir(self.absolute)
def isroot(self):
return self.absolute == self.datapath
def isfile(self):
return op.isfile(self.absolute)
def exists(self):
return op.exists(self.absolute)
def parent(self):
return Path(op.dirname(self.absolute))
def ishidden(self):
return self.basename.startswith('.') or self.basename == 'index.html'
def listdir(self) -> (typing.List[Path], typing.List[Path]):
"""
Lists all members of the path in
2 lists:
directories, files: The subdirectories and the files in path
"""
files = []
directories = []
if self.isdir() and self.islegal():
for fn in os.listdir(self.absolute):
if not fn.startswith('.'):
child = self.child(fn)
if child.isdir():
directories.append(child)
elif child.isfile():
files.append(child)
return directories, files
else:
return [], []
def isempty(self) -> bool:
"""
Returns True, if self isdir and has no entries
"""
dirs, files = self.listdir()
files = [f for f in files
if not f.ishidden()
]
return not bool(dirs or files)
def up(self) -> str:
return op.dirname(self.name)
def delete(self):
os.unlink(self.absolute)
```
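A hypothetical usage sketch for the `Path` helper above (not part of the odmf sources); the file name is made up:
```python
# Hypothetical sketch: Path maps names below conf.datafiles to absolute paths and URLs.
from odmf.tools import Path

p = Path('lab', '2021', 'results.xlsx')
print(p.name)       # 'lab/2021/results.xlsx', relative to conf.datafiles
print(p.href)       # conf.root_url + '/download/lab/2021/results.xlsx'
print(p.islegal())  # True: the path stays below the datafiles directory
```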
#### File: webpage/filemanager/filehandlers.py
```python
from ...tools import Path
from .. import lib as web
import re
from ...config import conf
from ..markdown import MarkDown
markdown = MarkDown()
class BaseFileHandler:
def __init__(self, pattern: str = ''):
self.pattern = re.compile(pattern, re.IGNORECASE)
def matches(self, path: Path):
"""
Checks if a path matches the file pattern
"""
return bool(self.pattern.search(path.absolute))
def to_html(self, path) -> str:
"""
Converts a string to a html text
Overwrite for different handles
"""
raise NotImplementedError
def __call__(self, path: Path):
return self.to_html(path)
class TextFileHandler(BaseFileHandler):
def __init__(self, pattern: str):
super().__init__(pattern)
def render(self, source) -> str:
return '\n<pre>\n' + source + '\n</pre>\n'
def to_html(self, path) -> str:
"""
Converts a string to a html text by creating surrounding pre tags.
Overwrite for different handles
"""
with open(path.absolute) as f:
source = f.read()
return web.render('textfile_editor.html', html=self.render(source), source=source, path=path).render()
class PlotFileHandler(BaseFileHandler):
def render(self, source) -> str:
return '\n<pre>\n' + source + '\n</pre>\n'
def to_html(self, path) -> str:
"""
redirects to plot using the plot file
"""
raise web.redirect(conf.root_url + '/plot', f=str(path))
class MarkDownFileHandler(TextFileHandler):
def render(self, source) -> str:
return markdown(source)
class ExcelFileHandler(BaseFileHandler):
def to_html(self, path: Path) -> str:
import pandas as pd
with open(path.absolute, 'rb') as f:
df = pd.read_excel(f)
html = df.to_html(classes=['table'])
return html
class CsvFileHandler(BaseFileHandler):
def to_html(self, path: Path) -> str:
import pandas as pd
try:
df = pd.read_csv(path.absolute, sep=None)
return df.to_html(classes=['table'])
except:
with open(path.absolute, 'r') as f:
return '\n<pre>\n' + f.read() + '\n</pre>\n'
class ImageFileHandler(BaseFileHandler):
def to_html(self, path: Path) -> str:
return f'''
<img class="handler-generated" src="{path.raw_url}" style="max-width: 100%"/>
'''
class PdfFileHandler(BaseFileHandler):
def to_html(self, path) -> str:
return f'''
<object id="pdf-iframe" data="{path.raw_url}" type="application/pdf"></iframe>
'''
class MultiHandler(BaseFileHandler):
handlers = [
MarkDownFileHandler(r'\.(md|wiki)$'),
PlotFileHandler(r'\.plot$'),
ExcelFileHandler(r'\.xls.?$'),
CsvFileHandler(r'\.csv$'),
PdfFileHandler(r'\.pdf$'),
ImageFileHandler(r'\.(jpg|jpeg|png|svg|gif)$'),
TextFileHandler(''),
]
def to_html(self, path: Path) -> str:
for h in self.handlers:
if h.matches(path):
try:
return h(path)
except web.HTTPRedirect:
raise
except UnicodeDecodeError as e:
pass
return None
``` |
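A hypothetical dispatch example for the handlers above (not part of the odmf sources); it assumes a markdown file exists below the datafiles directory and that the template layer is available:
```python
# Hypothetical sketch: the first handler whose pattern matches renders the file.
from odmf.tools import Path
from odmf.webpage.filemanager.filehandlers import MultiHandler

handler = MultiHandler()
html = handler.to_html(Path('documentation/readme.md'))  # MarkDownFileHandler matches '.md'
```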
{
"source": "jluini/habla-taller",
"score": 3
} |
#### File: habla-taller/01_acustica/ej1.py
```python
import numpy as np
# Define a simple sinusoidal function.
def ondasimple(t):
A = 1.0 # amplitude
f = 500.0 # frequency
Phi = 0.0 # phase
return A * np.sin(2 * np.pi * f * t + Phi)
# Generate 16000 samples at 16 kHz.
ts = np.arange(16000.0) / 16000.0
# Build a discretized sine wave.
mionda = []
for t in ts:
mionda.append(ondasimple(t))
mionda = np.array(mionda)
# Plot the wave.
import matplotlib.pyplot as pyplot
pyplot.clf()
pyplot.plot(ts[0:100], mionda[0:100])
pyplot.savefig('mionda.png')
# Save it as a wav file.
import scipy.io.wavfile
# WRONG: wavdata = np.array(mionda, dtype=np.int16) * 10000
wavdata = np.array(mionda * 10000.0, dtype=np.int16)
scipy.io.wavfile.write('mionda.wav', 16000, wavdata)
# Exercises:
#
# 1. Generate a wav file for each musical note Do, Re, Mi,
# Fa, Sol, La, Si. Look up the frequencies at
# http://www.phy.mtu.edu/~suits/notefreqs.html
# Use La (A) = 440 Hz as the reference.
#
# 2. Find the highest and the lowest frequency you can perceive.
#
# 3. Relative perception. Listen to the difference between two low tones
# separated by 100 Hz (e.g. 200 and 300 Hz) and two high tones also
# separated by 100 Hz (e.g. 1200 and 1300 Hz).
#
# 4. Create a square wave at 500 Hz by modifying ondasimple(t) so that
# it returns only 1 or -1. Generate a wav and compare it with a
# sinusoid of the same frequency. (A sketch is appended below.)
#
# 5. Repeat the previous point for 100 Hz and for 1000 Hz. Do the sine
# and square waves sound alike in any case? (Beyond the differences
# in volume.)
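# --- Added sketch (not part of the original exercise file): one possible answer to
# --- exercise 4, a 500 Hz square wave built from the sign of the sine wave.
onda_cuadrada = np.sign(np.sin(2 * np.pi * 500.0 * ts))
wav_cuadrada = np.array(onda_cuadrada * 10000.0, dtype=np.int16)
scipy.io.wavfile.write('cuadrada500.wav', 16000, wav_cuadrada)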
``` |